diff --git a/.editorconfig b/.editorconfig
index f511aad460790b8fdf756eab15a7f2b3def16531..6b736d884f229f6f4f479e9ead5bedebf412099a 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -1,12 +1,13 @@
 root = true
-[*]
+
+[*.rs]
 indent_style=tab
 indent_size=tab
 tab_width=4
+max_line_length=120
 end_of_line=lf
 charset=utf-8
 trim_trailing_whitespace=true
-max_line_length=120
 insert_final_newline=true
 
 [*.yml]
@@ -14,3 +15,12 @@ indent_style=space
 indent_size=2
 tab_width=8
 end_of_line=lf
+charset=utf-8
+trim_trailing_whitespace=true
+insert_final_newline=true
+
+[*.sh]
+indent_style=space
+indent_size=2
+tab_width=8
+end_of_line=lf
diff --git a/.github/workflows/burnin-label-notification.yml b/.github/workflows/burnin-label-notification.yml
new file mode 100644
index 0000000000000000000000000000000000000000..203685b706d59da2dbc6f5f6a7c26c01f9e6723c
--- /dev/null
+++ b/.github/workflows/burnin-label-notification.yml
@@ -0,0 +1,17 @@
+name: Notify devops when burn-in label applied
+on:
+  pull_request:
+    types: [labeled]
+
+jobs:
+  notify-devops:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Notify devops
+        if: github.event.label.name == 'A1-needsburnin'
+        uses: s3krit/matrix-message-action@v0.0.2
+        with:
+          room_id: ${{ secrets.POLKADOT_DEVOPS_MATRIX_ROOM_ID }}
+          access_token: ${{ secrets.POLKADOT_DEVOPS_MATRIX_ACCESS_TOKEN }}
+          message: "@room Burn-in request received for the following PR: ${{ github.event.pull_request.html_url }}"
+          server: "matrix.parity.io"
diff --git a/.github/workflows/publish-draft-release.yml b/.github/workflows/publish-draft-release.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b81c9f739e6d98ef3da13d0c6f545107aecf00a9
--- /dev/null
+++ b/.github/workflows/publish-draft-release.yml
@@ -0,0 +1,143 @@
+name: Publish draft release
+
+on:
+  push:
+    tags:
+      - v**.**.**
+
+jobs:
+  build-runtimes:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        runtime: ['polkadot', 'kusama']
+    container:
+      image: chevdor/srtool:nightly-2020-07-20
+      volumes:
+        - ${{ github.workspace }}:/build
+      env:
+        PACKAGE: ${{ matrix.runtime }}-runtime
+        RUSTC_VERSION: nightly-2020-07-20
+    steps:
+      - uses: actions/checkout@v2
+      - name: Cache target dir
+        uses: actions/cache@v2
+        with:
+          path: '${{ github.workspace }}/target'
+          key: srtool-target-${{ matrix.runtime }}-${{ github.sha }}
+          restore-keys: |
+            srtool-target-${{ matrix.runtime }}-
+            srtool-target-
+      - name: Build ${{ matrix.runtime }} runtime
+        id: build-runtime
+        shell: bash
+        run: |
+          cd /build
+          pwd
+          ls -la
+          build --json | tee srtool_output.json
+          cat srtool_output.json
+          while IFS= read -r line; do
+            echo "::set-output name=$line::$(jq -r ".$line" < srtool_output.json)"
+          done <<< "$(jq -r 'keys[]' < srtool_output.json)"
+      - name: Upload ${{ matrix.runtime }} srtool json
+        uses: actions/upload-artifact@v2
+        with:
+          name: ${{ matrix.runtime }}-srtool-json
+          path: srtool_output.json
+      - name: Upload ${{ matrix.runtime }} runtime
+        uses: actions/upload-artifact@v2
+        with:
+          name: ${{ matrix.runtime }}-runtime
+          path: "${{ steps.build-runtime.outputs.wasm }}"
+
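The `Build ${{ matrix.runtime }} runtime` step above forwards every top-level key of srtool's JSON report as a step output, so later steps can read values such as `steps.build-runtime.outputs.wasm`. A standalone sketch of that shell pattern, runnable locally — the sample JSON is illustrative only; srtool's actual key set may differ:

    #!/usr/bin/env bash
    set -euo pipefail

    # Illustrative stand-in for srtool's report; the real schema may differ.
    printf '%s' '{"wasm":"runtime.compact.wasm","sha256":"0xabc123"}' > srtool_output.json

    # For each top-level key, emit a `::set-output` workflow command so the
    # value becomes addressable as steps.<step-id>.outputs.<key>.
    while IFS= read -r key; do
      echo "::set-output name=${key}::$(jq -r ".${key}" < srtool_output.json)"
    done <<< "$(jq -r 'keys[]' < srtool_output.json)"
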
+  get-rust-versions:
+    runs-on: ubuntu-latest
+    container:
+      image: paritytech/ci-linux:production
+    outputs:
+      rustc-stable: ${{ steps.get-rust-versions.outputs.stable }}
+      rustc-nightly: ${{ steps.get-rust-versions.outputs.nightly }}
+    steps:
+      - id: get-rust-versions
+        run: |
+          echo "::set-output name=stable::$(rustc +stable --version)"
+          echo "::set-output name=nightly::$(rustc +nightly --version)"
+
+  publish-draft-release:
+    runs-on: ubuntu-latest
+    needs: ['get-rust-versions', 'build-runtimes']
+    outputs:
+      release_url: ${{ steps.create-release.outputs.html_url }}
+      asset_upload_url: ${{ steps.create-release.outputs.upload_url }}
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          path: polkadot
+      - name: Set up Ruby 2.7
+        uses: actions/setup-ruby@v1
+        with:
+          ruby-version: 2.7
+      - name: Download srtool json output
+        uses: actions/download-artifact@v2
+      - name: Generate release text
+        env:
+          RUSTC_STABLE: ${{ needs.get-rust-versions.outputs.rustc-stable }}
+          RUSTC_NIGHTLY: ${{ needs.get-rust-versions.outputs.rustc-nightly }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          gem install changelogerator git toml
+          ruby $GITHUB_WORKSPACE/polkadot/scripts/github/generate_release_text.rb | tee release_text.md
+      - name: Create draft release
+        id: create-release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ github.ref }}
+          release_name: Polkadot ${{ github.ref }}
+          body_path: ./release_text.md
+          draft: true
+
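On a tag push, `${{ github.ref }}` carries the fully qualified ref (e.g. `refs/tags/v0.8.22`), and `actions/create-release` accepts that form for `tag_name`. Should a later step ever need the bare tag, a minimal sketch of deriving it — the `TAG` variable is hypothetical, not part of this workflow:

    # GITHUB_REF is e.g. "refs/tags/v0.8.22" on a tag push.
    TAG="${GITHUB_REF#refs/tags/}"
    echo "bare tag: ${TAG}"
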
+  post_to_matrix:
+    runs-on: ubuntu-latest
+    needs: publish-draft-release
+    steps:
+      - name: Internal polkadot channel
+        uses: s3krit/matrix-message-action@v0.0.2
+        with:
+          room_id: ${{ secrets.INTERNAL_POLKADOT_MATRIX_ROOM_ID }}
+          access_token: ${{ secrets.MATRIX_ACCESS_TOKEN }}
+          message: "**New version of polkadot tagged**: ${{ github.ref }}
+            Gav: Draft release created: ${{ needs.publish-draft-release.outputs.release_url }}"
+          server: "matrix.parity.io"
+
+  publish-runtimes:
+    runs-on: ubuntu-latest
+    needs: ['publish-draft-release']
+    strategy:
+      matrix:
+        runtime: ['polkadot', 'kusama']
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/download-artifact@v2
+        with:
+          name: ${{ matrix.runtime }}-runtime
+      - name: Set up Ruby 2.7
+        uses: actions/setup-ruby@v1
+        with:
+          ruby-version: 2.7
+      - name: Get runtime version
+        id: get-runtime-ver
+        run: |
+          runtime_ver="$(ruby -e 'require "./scripts/github/lib.rb"; puts get_runtime("${{ matrix.runtime }}")')"
+          echo "::set-output name=runtime_ver::$runtime_ver"
+      - name: Upload ${{ matrix.runtime }} wasm
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.publish-draft-release.outputs.asset_upload_url }}
+          asset_path: ./${{ matrix.runtime }}_runtime.compact.wasm
+          asset_name: ${{ matrix.runtime }}_runtime-v${{ steps.get-runtime-ver.outputs.runtime_ver }}.compact.wasm
+          asset_content_type: application/wasm
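Both notification jobs in these workflows go through `s3krit/matrix-message-action`, which amounts to a single authenticated call to the Matrix client-server API. A rough curl equivalent, assuming the standard `r0` send endpoint and placeholder credentials (in the workflows these come from repository secrets):

    #!/usr/bin/env bash
    set -euo pipefail

    SERVER="matrix.parity.io"                 # homeserver named in the workflow
    ROOM_ID='!placeholder:matrix.parity.io'   # a secret in the real workflow
    ACCESS_TOKEN="placeholder-token"          # a secret in the real workflow
    MESSAGE="Draft release created"

    # One message is one PUT, keyed by a client-chosen transaction id.
    curl -sS -X PUT \
      -H "Authorization: Bearer ${ACCESS_TOKEN}" \
      -H "Content-Type: application/json" \
      -d "{\"msgtype\":\"m.text\",\"body\":\"${MESSAGE}\"}" \
      "https://${SERVER}/_matrix/client/r0/rooms/${ROOM_ID}/send/m.room.message/$(date +%s)"
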
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index f2de0fa0a86aeea9a271ecfd46a3acb31ac2523c..1f2ef6ded2a7ff86d38a907aaf45a25ce65e623b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -94,16 +94,12 @@ check-line-width:
   interruptible: true
   allow_failure: true
 
-publish-draft-release:
+test-deterministic-wasm:
   stage: test
-  only:
-    - tags
-    - /^v[0-9]+\.[0-9]+\.[0-9]+.*$/   # i.e. v1.0.1, v2.1.0rc1
+  <<: *docker-env
+  except:
   script:
-    - apt-get -y update; apt-get -y install jq
-    - ./scripts/gitlab/publish_draft_release.sh
-  interruptible: true
-  allow_failure: true
+    - ./scripts/gitlab/test_deterministic_wasm.sh
 
 test-linux-stable: &test
   stage: test
@@ -117,7 +113,7 @@ test-linux-stable: &test
     RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
     TARGET: native
   script:
-    - time cargo test --all --release --verbose --locked --features runtime-benchmarks
+    - ./scripts/gitlab/test_linux_stable.sh
     - sccache -s
 
 check-web-wasm: &test
@@ -128,14 +124,7 @@ check-web-wasm: &test
   script:
     # WASM support is in progress. As more and more crates support WASM, we
    # should add entries here. See https://github.com/paritytech/polkadot/issues/625
-    - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path runtime/polkadot/Cargo.toml
-    - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path runtime/kusama/Cargo.toml
-    - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path erasure-coding/Cargo.toml
-    - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path parachain/Cargo.toml
-    - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path primitives/Cargo.toml
-    - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path rpc/Cargo.toml
-    - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path statement-table/Cargo.toml
-    - time cargo build --locked --target=wasm32-unknown-unknown --manifest-path cli/Cargo.toml --no-default-features --features browser
+    - ./scripts/gitlab/check_web_wasm.sh
     - sccache -s
 
 check-runtime-benchmarks: &test
@@ -145,7 +134,7 @@ check-runtime-benchmarks: &test
   <<: *compiler_info
   script:
     # Check that the node will compile with `runtime-benchmarks` feature flag.
-    - time cargo check --features runtime-benchmarks
+    - ./scripts/gitlab/check_runtime_benchmarks.sh
     - sccache -s
 
 build-wasm-release:
@@ -195,7 +184,7 @@ generate-impl-guide:
     name: michaelfbryan/mdbook-docker-image:latest
     entrypoint: [""]
   script:
-    - mdbook build roadmap/implementors-guide
+    - mdbook build roadmap/implementers-guide
 
 .publish-build: &publish-build
   stage: publish
@@ -253,8 +242,8 @@ publish-s3-release:
     - echo "uploading objects to https://${BUCKET}/${PREFIX}/${VERSION}"
     - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/${VERSION}/
     - echo "update objects at https://${BUCKET}/${PREFIX}/${EXTRATAG}"
-    - for file in ./artifacts/*; do
-      name="$(basename ${file})";
+    - find ./artifacts -type f | while read file; do
+      name="${file#./artifacts/}";
       aws s3api copy-object
         --copy-source ${BUCKET}/${PREFIX}/${VERSION}/${name}
         --bucket ${BUCKET} --key ${PREFIX}/${EXTRATAG}/${name};
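The `publish-s3-release` change above swaps a flat glob plus `basename` for a recursive `find` plus a prefix strip, so files in nested artifact directories are both found and copied under their relative key instead of being flattened. A self-contained sketch of the difference, using a hypothetical layout:

    #!/usr/bin/env bash
    set -euo pipefail

    # Hypothetical artifact tree with one nested file.
    mkdir -p artifacts/subdir
    touch artifacts/polkadot artifacts/subdir/extra.txt

    # `for file in ./artifacts/*` never descends into subdir, and `basename`
    # would turn "subdir/extra.txt" into "extra.txt". The find/strip pair
    # walks recursively and keeps the relative path intact:
    find ./artifacts -type f | while read -r file; do
      name="${file#./artifacts/}"
      echo "would copy '${file}' as key '${name}'"   # stand-in for aws s3api copy-object
    done
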
"block-cipher-trait", + "opaque-debug 0.2.3", + "stream-cipher 0.3.2", +] + +[[package]] +name = "aesni" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d" +checksum = "d050d39b0b7688b3a3254394c3e30a9d66c41dcf9b05b0e2dbdc623f6505d264" +dependencies = [ + "block-cipher", + "opaque-debug 0.2.3", +] [[package]] name = "ahash" @@ -34,11 +122,17 @@ dependencies = [ "const-random", ] +[[package]] +name = "ahash" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" + [[package]] name = "aho-corasick" -version = "0.7.10" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" +checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" dependencies = [ "memchr", ] @@ -60,7 +154,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -69,7 +163,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -130,7 +224,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" dependencies = [ "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -153,47 +247,84 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +[[package]] +name = "async-channel" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee81ba99bee79f3c8ae114ae4baa7eaa326f63447cf2ec65e4393618b63f8770" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + +[[package]] +name = "async-executor" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90f47c78ea98277cb1f5e6f60ba4fc762f5eafe9f6511bc2f7dfd8b75c225650" +dependencies = [ + "async-io", + "futures-lite", + "multitask", + "parking", + "scoped-tls 1.0.0", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8126ef9fb99355c6fd27575d691be4887b884137a5b6f48c2d961f13590c51" +dependencies = [ + "cfg-if", + "concurrent-queue", + "futures-lite", + "libc", + "once_cell 1.4.0", + "parking", + "socket2", + "vec-arena", + "wepoll-sys-stjepang", + "winapi 0.3.9", +] + [[package]] name = "async-std" -version = "1.5.0" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "538ecb01eb64eecd772087e5b6f7540cbc917f047727339a472dafed2185b267" +checksum = "00d68a33ebc8b57800847d00787307f84a562224a14db069b0acefe4c2abbf5d" dependencies = [ "async-task", - "broadcaster", - "crossbeam-channel", - "crossbeam-deque", "crossbeam-utils", + "futures-channel", "futures-core", "futures-io", - "futures-timer 2.0.2", + "futures-timer 3.0.2", "kv-log-macro", - "log 0.4.8", + "log 0.4.11", "memchr", - "mio", - "mio-uds", "num_cpus", - "once_cell", + "once_cell 
1.4.0", "pin-project-lite", "pin-utils", "slab", + "smol 0.1.18", + "wasm-bindgen-futures", ] [[package]] name = "async-task" -version = "1.3.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ac2c016b079e771204030951c366db398864f5026f84a44dafb0ff20f02085d" -dependencies = [ - "libc", - "winapi 0.3.8", -] +checksum = "c17772156ef2829aadc587461c7753af20b7e8db1529bc66855add962a3b35d3" [[package]] name = "async-tls" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fd83426b89b034bf4e9ceb9c533c2f2386b813fd3dcae0a425ec6f1837d78a" +checksum = "df097e3f506bec0e1a24f06bb3c962c228f36671de841ff579cb99f371772634" dependencies = [ "futures 0.3.5", "rustls", @@ -201,6 +332,29 @@ dependencies = [ "webpki-roots 0.19.0", ] +[[package]] +name = "async-trait" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", +] + +[[package]] +name = "atomic" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" + +[[package]] +name = "atomic-waker" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" + [[package]] name = "atty" version = "0.2.14" @@ -209,7 +363,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -226,9 +380,9 @@ checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] name = "backtrace" -version = "0.3.49" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05100821de9e028f12ae3d189176b41ee198341eb8f369956407fea2f5cc666c" +checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" dependencies = [ "addr2line", "cfg-if", @@ -252,17 +406,17 @@ checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" [[package]] name = "base64" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e223af0dc48c96d4f8342ec01a4974f139df863896b316681efd36742f22cc67" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] name = "bincode" -version = "1.2.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf" +checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" dependencies = [ - "byteorder", + "byteorder 1.3.4", "serde", ] @@ -280,7 +434,7 @@ dependencies = [ "env_logger", "lazy_static", "lazycell", - "log 0.4.8", + "log 0.4.11", "peeking_take_while", "proc-macro2 1.0.18", "quote 1.0.7", @@ -290,6 +444,21 @@ dependencies = [ "which", ] +[[package]] +name = "bip39" +version = "0.6.0-beta.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7059804e226b3ac116519a252d7f5fb985a5ccc0e93255e036a5f7e7283323f4" +dependencies = [ + "failure", + "hashbrown 0.1.8", + "hmac", + "once_cell 0.1.8", + "pbkdf2", + "rand 0.6.5", + "sha2 0.8.2", +] + [[package]] name = "bitflags" version = "1.2.1" @@ -314,14 +483,15 @@ dependencies 
 
 [[package]]
 name = "blake2"
-version = "0.8.1"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94cb07b0da6a73955f8fb85d24c466778e70cda767a568229b104f0264089330"
+checksum = "84ce5b6108f8e154604bd4eb76a2f726066c3464d5a552a4229262a18c9bb471"
 dependencies = [
  "byte-tools",
- "crypto-mac",
- "digest",
- "opaque-debug",
+ "byteorder 1.3.4",
+ "crypto-mac 0.8.0",
+ "digest 0.9.0",
+ "opaque-debug 0.2.3",
 ]
 
@@ -364,8 +534,35 @@ checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b"
 dependencies = [
  "block-padding",
  "byte-tools",
- "byteorder",
- "generic-array",
+ "byteorder 1.3.4",
+ "generic-array 0.12.3",
+]
+
+[[package]]
+name = "block-buffer"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
+dependencies = [
+ "generic-array 0.14.2",
+]
+
+[[package]]
+name = "block-cipher"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa136449e765dc7faa244561ccae839c394048667929af599b5d931ebe7b7f10"
+dependencies = [
+ "generic-array 0.14.2",
+]
+
+[[package]]
+name = "block-cipher-trait"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c924d49bd09e7c06003acda26cd9742e796e34282ec6c1189404dee0c1f4774"
+dependencies = [
+ "generic-array 0.12.3",
 ]
 
@@ -378,17 +575,30 @@ dependencies = [
 ]
 
 [[package]]
-name = "broadcaster"
-version = "1.0.0"
+name = "blocking"
+version = "0.4.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9c972e21e0d055a36cf73e4daae870941fe7a8abcd5ac3396aab9e4c126bd87"
+checksum = "d2468ff7bf85066b4a3678fede6fe66db31846d753ff0adfbfab2c6a6e81612b"
 dependencies = [
- "futures-channel",
- "futures-core",
- "futures-sink",
- "futures-util",
- "parking_lot 0.10.2",
- "slab",
+ "async-channel",
+ "atomic-waker",
+ "futures-lite",
+ "once_cell 1.4.0",
+ "parking",
+ "waker-fn",
+]
+
+[[package]]
+name = "blocking"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "76e94bf99b692f54c9d05f97454d3faf11134523fe5b180564a3fb6ed63bcc0a"
+dependencies = [
+ "async-channel",
+ "atomic-waker",
+ "futures-lite",
+ "once_cell 1.4.0",
+ "waker-fn",
 ]
 
@@ -424,6 +634,12 @@ version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"
 
+[[package]]
+name = "byteorder"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855"
+
 [[package]]
 name = "byteorder"
 version = "1.3.4"
@@ -436,16 +652,19 @@ version = "0.4.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c"
 dependencies = [
- "byteorder",
+ "byteorder 1.3.4",
  "either",
  "iovec",
 ]
 
 [[package]]
 name = "bytes"
-version = "0.5.4"
+version = "0.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1"
+checksum = "118cf036fbb97d0816e3c34b2d7a1e8cfc60f68fcf63d550ddbe9bd5f59c213b"
+dependencies = [
+ "loom",
+]
 
 [[package]]
 name = "c_linked_list"
 version = "1.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b"
"4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b" +[[package]] +name = "cache-padded" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" + [[package]] name = "cc" -version = "1.0.54" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bbb73db36c1246e9034e307d0fba23f9a2e251faa47ade70c1bd252220c8311" +checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" dependencies = [ "jobserver", ] @@ -478,19 +703,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] -name = "chacha20-poly1305-aead" -version = "0.1.2" +name = "chacha20" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77d2058ba29594f69c75e8a9018e0485e3914ca5084e3613cd64529042f5423b" +checksum = "086c0f07ac275808b7bf9a39f2fd013aae1498be83632814c8c4e0bd53f2dc58" dependencies = [ - "constant_time_eq", + "stream-cipher 0.4.1", + "zeroize", +] + +[[package]] +name = "chacha20poly1305" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18b0c90556d8e3fec7cf18d84a2f53d27b21288f2fe481b830fadcf809e48205" +dependencies = [ + "aead", + "chacha20", + "poly1305", + "stream-cipher 0.4.1", + "zeroize", ] [[package]] name = "chrono" -version = "0.4.11" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2" +checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" dependencies = [ "js-sys", "num-integer", @@ -526,23 +765,32 @@ dependencies = [ ] [[package]] -name = "clear_on_drop" -version = "0.2.4" +name = "cloudabi" +version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9cc5db465b294c3fa986d5bbb0f3017cd850bff6dd6c52f9ccff8b4d21b7b08" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "cc", + "bitflags", ] [[package]] name = "cloudabi" -version = "0.0.3" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" dependencies = [ "bitflags", ] +[[package]] +name = "concurrent-queue" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83c06aff61f2d899eb87c379df3cbf7876f14471dcab474e0b6dc90ab96c080" +dependencies = [ + "cache-padded", +] + [[package]] name = "console_error_panic_hook" version = "0.1.6" @@ -559,7 +807,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7871d2947441b0fdd8e2bd1ce2a2f75304f896582c0d572162d48290683c48" dependencies = [ - "log 0.4.8", + "log 0.4.11", "web-sys", ] @@ -605,40 +853,46 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +[[package]] +name = "cpuid-bool" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d375c433320f6c5057ae04a04376eef4d04ce2801448cf8863a78da99107be4" + [[package]] name = "cranelift-bforest" -version = "0.63.0" +version = "0.66.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4425bb6c3f3d2f581c650f1a1fdd3196a975490149cf59bea9d34c3bea79eda" +checksum = "8dcc286b052ee24a1e5a222e7c1125e6010ad35b0f248709b9b3737a8fedcfdf" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.63.0" +version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d166b289fd30062ee6de86284750fc3fe5d037c6b864b3326ce153239b0626e1" +checksum = "9d9badfe36176cb653506091693bc2bb1970c9bddfcd6ec7fac404f7eaec6f38" dependencies = [ - "byteorder", + "byteorder 1.3.4", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", "cranelift-entity", - "gimli 0.20.0", - "log 0.4.8", + "gimli 0.21.0", + "log 0.4.11", "regalloc", "serde", - "smallvec 1.4.0", + "smallvec 1.4.1", "target-lexicon", "thiserror", ] [[package]] name = "cranelift-codegen-meta" -version = "0.63.0" +version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c9fb2306a36d41c5facd4bf3400bc6c157185c43a96eaaa503471c34c5144b" +checksum = "3c3f460031861e4f4ad510be62b2ae50bba6cc886b598a36f9c0a970feab9598" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -646,36 +900,36 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.63.0" +version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44e0cfe9b1f97d9f836bca551618106c7d53b93b579029ecd38e73daa7eb689e" +checksum = "76ad12409e922e7697cd0bdc7dc26992f64a77c31880dfe5e3c7722f4710206d" [[package]] name = "cranelift-entity" -version = "0.63.0" +version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "926a73c432e5ba9c891171ff50b75e7d992cd76cd271f0a0a0ba199138077472" +checksum = "d97cdc58972ea065d107872cfb9079f4c92ade78a8af85aaff519a65b5d13f71" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.63.0" +version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e45f82e3446dd1ebb8c2c2f6a6b0e6cd6cd52965c7e5f7b1b35e9a9ace31ccde" +checksum = "2ef419efb4f94ecc02e5d9fbcc910d2bb7f0040e2de570e63a454f883bc891d6" dependencies = [ "cranelift-codegen", - "log 0.4.8", - "smallvec 1.4.0", + "log 0.4.11", + "smallvec 1.4.1", "target-lexicon", ] [[package]] name = "cranelift-native" -version = "0.63.0" +version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488b5d481bb0996a143e55a9d1739ef425efa20d4a5e5e98c859a8573c9ead9a" +checksum = "6e69d44d59826eef6794066ac2c0f4ad3975f02d97030c60dbc04e3886adf36e" dependencies = [ "cranelift-codegen", "raw-cpuid", @@ -684,17 +938,17 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.63.0" +version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00aa8dde71fd9fdb1958e7b0ef8f524c1560e2c6165e4ea54bc302b40551c161" +checksum = "979df666b1304624abe99738e9e0e7c7479ee5523ba4b8b237df9ff49996acbb" dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "log 0.4.8", + "log 0.4.11", "serde", "thiserror", - "wasmparser 0.51.4", + "wasmparser 0.59.0", ] [[package]] @@ -706,16 +960,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam-channel" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061" -dependencies = [ - "crossbeam-utils", - "maybe-uninit", -] - 
 [[package]]
 name = "crossbeam-deque"
 version = "0.7.3"
@@ -739,7 +983,7 @@ dependencies = [
  "lazy_static",
  "maybe-uninit",
  "memoffset",
- "scopeguard",
+ "scopeguard 1.1.0",
 ]
 
 [[package]]
@@ -776,27 +1020,57 @@ version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5"
 dependencies = [
- "generic-array",
+ "generic-array 0.12.3",
  "subtle 1.0.0",
 ]
 
+[[package]]
+name = "crypto-mac"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab"
+dependencies = [
+ "generic-array 0.14.2",
+ "subtle 2.2.3",
+]
+
 [[package]]
 name = "ct-logs"
-version = "0.6.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d3686f5fa27dbc1d76c751300376e167c5a43387f44bb451fd1c24776e49113"
+checksum = "8c8e13110a84b6315df212c045be706af261fd364791cad863285439ebba672e"
 dependencies = [
  "sct",
 ]
 
+[[package]]
+name = "ctr"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "022cd691704491df67d25d006fe8eca083098253c4d43516c2206479c58c6736"
+dependencies = [
+ "block-cipher-trait",
+ "stream-cipher 0.3.2",
+]
+
+[[package]]
+name = "cuckoofilter"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8dd43f7cfaffe0a386636a10baea2ee05cc50df3b77bea4a456c9572a939bf1f"
+dependencies = [
+ "byteorder 0.5.3",
+ "rand 0.3.23",
+]
+
 [[package]]
 name = "curve25519-dalek"
 version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5"
 dependencies = [
- "byteorder",
- "digest",
+ "byteorder 1.3.4",
+ "digest 0.8.1",
  "rand_core 0.5.1",
  "subtle 2.2.3",
  "zeroize",
@@ -836,13 +1110,13 @@ dependencies = [
 
 [[package]]
 name = "derive_more"
-version = "0.99.8"
+version = "0.99.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc655351f820d774679da6cdc23355a93de496867d8203496675162e17b1d671"
+checksum = "298998b1cf6b5b2c8a7b023dfd45821825ce3ba8a8af55c921a0e734e4653f76"
 dependencies = [
  "proc-macro2 1.0.18",
  "quote 1.0.7",
- "syn 1.0.31",
+ "syn 1.0.33",
 ]
 
 [[package]]
@@ -857,7 +1131,16 @@ version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5"
 dependencies = [
- "generic-array",
+ "generic-array 0.12.3",
+]
+
+[[package]]
+name = "digest"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
+dependencies = [
+ "generic-array 0.14.2",
 ]
 
 [[package]]
@@ -878,7 +1161,7 @@ checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a"
 dependencies = [
  "libc",
  "redox_users",
- "winapi 0.3.8",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -896,7 +1179,7 @@ version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea"
 dependencies = [
- "byteorder",
+ "byteorder 1.3.4",
  "quick-error",
 ]
 
@@ -907,36 +1190,69 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
 
 [[package]]
-name = "ed25519-dalek"
-version = "1.0.0-pre.3"
+name = "dyn-clonable"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" dependencies = [ - "clear_on_drop", - "curve25519-dalek", - "rand 0.7.3", - "sha2", + "dyn-clonable-impl", + "dyn-clone", ] [[package]] -name = "either" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" - -[[package]] -name = "enum-primitive-derive" -version = "0.1.2" +name = "dyn-clonable-impl" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b90e520ec62c1864c8c78d637acbfe8baf5f63240f2fb8165b8325c07812dd" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "num-traits 0.1.43", - "quote 0.3.15", - "syn 0.11.11", + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", ] [[package]] -name = "enum_primitive" +name = "dyn-clone" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c53dc3a653e0f64081026e4bf048d48fec9fce90c66e8326ca7292df0ff2d82" + +[[package]] +name = "easy-parallel" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd4afd79212583ff429b913ad6605242ed7eec277e950b1438f300748f948f4" + +[[package]] +name = "ed25519" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf038a7b6fd7ef78ad3348b63f3a17550877b0e28f8d68bcc94894d1412158bc" +dependencies = [ + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.0-pre.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a8a37f4e8b35af971e6db5e3897e7a6344caa3f92f6544f88125a1f5f0035a" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand 0.7.3", + "serde", + "sha2 0.8.2", + "zeroize", +] + +[[package]] +name = "either" +version = "1.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" + +[[package]] +name = "enum_primitive" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4551092f4d519593039259a9ed8daedf0da12e5109c5280338073eaeb81180" @@ -961,7 +1277,7 @@ checksum = "946ee94e3dbf58fdd324f9ce245c7b238d46a66f00e86a020b71996349e46cce" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -972,7 +1288,7 @@ checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ "atty", "humantime", - "log 0.4.8", + "log 0.4.11", "regex", "termcolor", ] @@ -1000,7 +1316,7 @@ checksum = "b480f641ccf0faf324e20c1d3e53d81b7484c698b42ea677f6907ae4db195371" dependencies = [ "errno-dragonfly", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1020,11 +1336,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74cf96bec282dcdb07099f7e31d9fed323bca9435a09aba7b6d99b7617bca96d" dependencies = [ "lazy_static", - "log 0.4.8", + "log 0.4.11", "serde", "serde_json", ] +[[package]] +name = "event-listener" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "699d84875f1b72b4da017e6b0f77dfa88c0137f089958a88974d15938cbc2976" + [[package]] name = "exit-future" version = "0.2.0" @@ -1034,21 +1356,6 @@ dependencies = [ "futures 0.3.5", ] -[[package]] -name = 
"faerie" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfef65b0e94693295c5d2fe2506f0ee6f43465342d4b5331659936aee8b16084" -dependencies = [ - "goblin", - "indexmap", - "log 0.4.8", - "scroll", - "string-interner", - "target-lexicon", - "thiserror", -] - [[package]] name = "failure" version = "0.1.8" @@ -1067,7 +1374,7 @@ checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", "synstructure", ] @@ -1083,6 +1390,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +[[package]] +name = "fastrand" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36a9cb09840f81cd211e435d00a4e487edd263dc3c8ff815c32dd76ad668ebed" + [[package]] name = "fdlimit" version = "0.1.4" @@ -1100,7 +1413,7 @@ checksum = "7b6b21baebbed15551f2170010ca4101b9ed3fdc05822791c8bd4631840eab81" dependencies = [ "cfg-if", "js-sys", - "log 0.4.8", + "log 0.4.11", "serde", "serde_derive", "wasm-bindgen", @@ -1114,7 +1427,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b3937f028664bd0e13df401ba49a4567ccda587420365823242977f06609ed1" dependencies = [ "env_logger", - "log 0.4.8", + "log 0.4.11", ] [[package]] @@ -1126,7 +1439,7 @@ dependencies = [ "either", "futures 0.3.5", "futures-timer 2.0.2", - "log 0.4.8", + "log 0.4.11", "num-traits 0.2.12", "parity-scale-codec", "parking_lot 0.9.0", @@ -1138,7 +1451,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11498d382790b7a8f2fd211780bec78619bba81cdad3a283997c0c41f836759c" dependencies = [ - "byteorder", + "byteorder 1.3.4", "rand 0.7.3", "rustc-hex", "static_assertions", @@ -1152,9 +1465,9 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42" +checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" dependencies = [ "cfg-if", "crc32fast", @@ -1171,16 +1484,16 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", ] [[package]] name = "frame-benchmarking" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -1192,12 +1505,13 @@ dependencies = [ "sp-runtime", "sp-runtime-interface", "sp-std", + "sp-storage", ] [[package]] name = "frame-benchmarking-cli" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "parity-scale-codec", @@ -1214,8 +1528,8 @@ 
 
 [[package]]
 name = "frame-executive"
-version = "2.0.0-rc3"
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572"
+version = "2.0.0-rc6"
+source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7"
 dependencies = [
  "frame-support",
  "frame-system",
@@ -1229,8 +1543,8 @@ dependencies = [
 
 [[package]]
 name = "frame-metadata"
-version = "11.0.0-rc3"
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572"
+version = "11.0.0-rc6"
+source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7"
 dependencies = [
  "parity-scale-codec",
  "serde",
@@ -1240,19 +1554,19 @@ dependencies = [
 
 [[package]]
 name = "frame-support"
-version = "2.0.0-rc3"
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572"
+version = "2.0.0-rc6"
+source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7"
 dependencies = [
  "bitmask",
  "frame-metadata",
  "frame-support-procedural",
  "impl-trait-for-tuples",
- "log 0.4.8",
- "once_cell",
+ "log 0.4.11",
+ "once_cell 1.4.0",
  "parity-scale-codec",
  "paste",
  "serde",
- "smallvec 1.4.0",
+ "smallvec 1.4.1",
  "sp-arithmetic",
  "sp-core",
  "sp-inherents",
@@ -1265,41 +1579,41 @@ dependencies = [
 
 [[package]]
 name = "frame-support-procedural"
-version = "2.0.0-rc3"
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572"
+version = "2.0.0-rc6"
+source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7"
 dependencies = [
  "frame-support-procedural-tools",
  "proc-macro2 1.0.18",
  "quote 1.0.7",
- "syn 1.0.31",
+ "syn 1.0.33",
 ]
 
 [[package]]
 name = "frame-support-procedural-tools"
-version = "2.0.0-rc3"
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572"
+version = "2.0.0-rc6"
+source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7"
 dependencies = [
  "frame-support-procedural-tools-derive",
  "proc-macro-crate",
  "proc-macro2 1.0.18",
  "quote 1.0.7",
- "syn 1.0.31",
+ "syn 1.0.33",
 ]
 
 [[package]]
 name = "frame-support-procedural-tools-derive"
-version = "2.0.0-rc3"
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572"
+version = "2.0.0-rc6"
+source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7"
 dependencies = [
  "proc-macro2 1.0.18",
  "quote 1.0.7",
- "syn 1.0.31",
+ "syn 1.0.33",
 ]
 
 [[package]]
 name = "frame-system"
-version = "2.0.0-rc3"
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572"
+version = "2.0.0-rc6"
+source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7"
 dependencies = [
  "frame-support",
  "impl-trait-for-tuples",
@@ -1314,8 +1628,8 @@ dependencies = [
 
 [[package]]
 name = "frame-system-benchmarking"
-version = "2.0.0-rc3"
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572"
+version = "2.0.0-rc6"
+source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7"
 dependencies = [
  "frame-benchmarking",
  "frame-support",
@@ -1328,8 +1642,8 @@ dependencies = [
 
 [[package]]
 name = "frame-system-rpc-runtime-api"
-version = "2.0.0-rc3"
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572"
+version = "2.0.0-rc6"
+source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7"
"git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "sp-api", @@ -1344,7 +1658,7 @@ dependencies = [ "lazy_static", "libc", "libloading", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1446,7 +1760,7 @@ dependencies = [ "futures 0.1.29", "futures 0.3.5", "lazy_static", - "log 0.4.8", + "log 0.4.11", "parking_lot 0.9.0", "pin-project", "serde", @@ -1471,6 +1785,21 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +[[package]] +name = "futures-lite" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbe71459749b2e8e66fb95df721b22fa08661ad384a0c5b519e11d3893b4692a" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + [[package]] name = "futures-macro" version = "0.3.5" @@ -1480,7 +1809,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -1495,7 +1824,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" dependencies = [ - "once_cell", + "once_cell 1.4.0", ] [[package]] @@ -1549,11 +1878,11 @@ dependencies = [ [[package]] name = "futures_codec" -version = "0.3.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0a73299e4718f5452e45980fc1d6957a070abe308d3700b63b8673f47e1c2b3" +checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "futures 0.3.5", "memchr", "pin-project", @@ -1565,6 +1894,19 @@ version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" +[[package]] +name = "generator" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add72f17bb81521258fcc8a7a3245b1e184e916bfbe34f0ea89558f440df5c68" +dependencies = [ + "cc", + "libc", + "log 0.4.11", + "rustc_version", + "winapi 0.3.9", +] + [[package]] name = "generic-array" version = "0.12.3" @@ -1574,6 +1916,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "generic-array" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac746a5f3bbfdadd6106868134545e684693d54d9d44f6e9588a7d54af0bf980" +dependencies = [ + "typenum", + "version_check", +] + [[package]] name = "get_if_addrs" version = "0.5.3" @@ -1608,25 +1960,31 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "ghash" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6e27f0689a6e15944bdce7e45425efb87eaa8ab0c6e87f11d0987a9133e2531" +dependencies = [ + "polyval", +] + [[package]] name = "gimli" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dd6190aad0f05ddbbf3245c54ed14ca4aa6dd32f22312b70d8f168c3e3e633" +checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" dependencies = [ - "arrayvec 0.5.1", - "byteorder", "fallible-iterator", "indexmap", - "smallvec 1.4.0", "stable_deref_trait", ] [[package]] name = "gimli" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "bcc8e0c9bce37868955864dbecd2b1ab2bdf967e6f28066d65aaac620444b65c" +checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" [[package]] name = "glob" @@ -1643,7 +2001,7 @@ dependencies = [ "aho-corasick", "bstr", "fnv", - "log 0.4.8", + "log 0.4.11", "regex", ] @@ -1660,30 +2018,19 @@ dependencies = [ "web-sys", ] -[[package]] -name = "goblin" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3081214398d39e4bd7f2c1975f0488ed04614ffdd976c6fc7a0708278552c0da" -dependencies = [ - "log 0.4.8", - "plain", - "scroll", -] - [[package]] name = "h2" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" dependencies = [ - "byteorder", + "byteorder 1.3.4", "bytes 0.4.12", "fnv", "futures 0.1.29", "http 0.1.21", "indexmap", - "log 0.4.8", + "log 0.4.11", "slab", "string", "tokio-io", @@ -1695,14 +2042,14 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "fnv", "futures-core", "futures-sink", "futures-util", "http 0.2.1", "indexmap", - "log 0.4.8", + "log 0.4.11", "slab", "tokio 0.2.21", "tokio-util", @@ -1723,16 +2070,36 @@ dependencies = [ "crunchy", ] +[[package]] +name = "hashbrown" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da" +dependencies = [ + "byteorder 1.3.4", + "scopeguard 0.3.3", +] + [[package]] name = "hashbrown" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" dependencies = [ - "ahash", + "ahash 0.2.18", "autocfg 0.1.7", ] +[[package]] +name = "hashbrown" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab9b7860757ce258c89fd48d28b68c41713e597a7b09e793f6c6a6e2ea37c827" +dependencies = [ + "ahash 0.3.8", + "autocfg 1.0.0", +] + [[package]] name = "heck" version = "0.3.1" @@ -1744,9 +2111,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9586eedd4ce6b3c498bc3b4dd92fc9f11166aa908a914071953768066c67909" +checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" dependencies = [ "libc", ] @@ -1767,6 +2134,12 @@ dependencies = [ "proc-macro-hack", ] +[[package]] +name = "hex-literal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8" + [[package]] name = "hex-literal-impl" version = "0.2.2" @@ -1776,14 +2149,20 @@ dependencies = [ "proc-macro-hack", ] +[[package]] +name = "hex_fmt" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + [[package]] name = "hmac" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" dependencies = [ - "crypto-mac", - "digest", + "crypto-mac 0.7.0", + "digest 0.8.1", ] [[package]] @@ -1792,8 +2171,8 @@ version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" dependencies = [ - "digest", - "generic-array", + "digest 0.8.1", + "generic-array 0.12.3", "hmac", ] @@ -1814,7 +2193,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "fnv", "itoa", ] @@ -1837,7 +2216,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "http 0.2.1", ] @@ -1871,7 +2250,7 @@ dependencies = [ "httparse", "iovec", "itoa", - "log 0.4.8", + "log 0.4.11", "net2", "rustc_version", "time", @@ -1892,7 +2271,7 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "futures-channel", "futures-core", "futures-util", @@ -1901,7 +2280,7 @@ dependencies = [ "http-body 0.3.1", "httparse", "itoa", - "log 0.4.8", + "log 0.4.11", "pin-project", "socket2", "time", @@ -1912,15 +2291,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac965ea399ec3a25ac7d13b8affd4b8f39325cca00858ddf5eb29b79e6b14b08" +checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "ct-logs", "futures-util", "hyper 0.13.6", - "log 0.4.8", + "log 0.4.11", "rustls", "rustls-native-certs", "tokio 0.2.21", @@ -1959,15 +2338,6 @@ dependencies = [ "parity-scale-codec", ] -[[package]] -name = "impl-serde" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e3cae7e99c7ff5a995da2cf78dd0a5383740eda71d98cf7b1910c301ac69b8" -dependencies = [ - "serde", -] - [[package]] name = "impl-serde" version = "0.3.1" @@ -1985,7 +2355,7 @@ checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -1995,8 +2365,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" dependencies = [ "autocfg 1.0.0", + "serde", ] +[[package]] +name = "instant" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b141fdc7836c525d4d594027d318c84161ca17aaf8113ab1f81ab93ae897485" + [[package]] name = "integer-sqrt" version = "0.1.3" @@ -2090,9 +2467,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.40" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce10c23ad2ea25ceca0093bd3192229da4c5b3c0f2de499c1ecac0d98d452177" +checksum = "c4b9172132a62451e56142bff9afc91c8e4a4500aa5b847da36815b63bfda916" dependencies = [ "wasm-bindgen", ] @@ -2107,7 +2484,7 @@ dependencies = [ "futures 0.1.29", "jsonrpc-core", "jsonrpc-pubsub", - "log 0.4.8", + "log 0.4.11", "serde", "serde_json", "url 1.7.2", @@ -2120,7 +2497,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0747307121ffb9703afd93afbd0fb4f854c38fb873f2c8b90e0e902f27c7b62" dependencies = [ "futures 0.1.29", - "log 
0.4.8", + "log 0.4.11", "serde", "serde_derive", "serde_json", @@ -2144,7 +2521,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -2156,7 +2533,7 @@ dependencies = [ "hyper 0.12.35", "jsonrpc-core", "jsonrpc-server-utils", - "log 0.4.8", + "log 0.4.11", "net2", "parking_lot 0.10.2", "unicase", @@ -2170,7 +2547,7 @@ checksum = "dedccd693325d833963b549e959137f30a7a0ea650cde92feda81dc0c1393cb5" dependencies = [ "jsonrpc-core", "jsonrpc-server-utils", - "log 0.4.8", + "log 0.4.11", "parity-tokio-ipc", "parking_lot 0.10.2", "tokio-service", @@ -2183,7 +2560,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d44f5602a11d657946aac09357956d2841299ed422035edf140c552cb057986" dependencies = [ "jsonrpc-core", - "log 0.4.8", + "log 0.4.11", "parking_lot 0.10.2", "rand 0.7.3", "serde", @@ -2199,7 +2576,7 @@ dependencies = [ "globset", "jsonrpc-core", "lazy_static", - "log 0.4.8", + "log 0.4.11", "tokio 0.1.22", "tokio-codec", "unicase", @@ -2213,7 +2590,7 @@ checksum = "903d3109fe7c4acb932b567e1e607e0f524ed04741b09fb0e61841bc40a022fc" dependencies = [ "jsonrpc-core", "jsonrpc-server-utils", - "log 0.4.8", + "log 0.4.11", "parking_lot 0.10.2", "slab", "ws", @@ -2237,7 +2614,7 @@ dependencies = [ [[package]] name = "kusama-runtime" -version = "0.8.12" +version = "0.8.22" dependencies = [ "bitvec", "frame-benchmarking", @@ -2246,7 +2623,7 @@ dependencies = [ "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", - "hex-literal", + "hex-literal 0.2.1", "libsecp256k1", "log 0.3.9", "pallet-authority-discovery", @@ -2288,7 +2665,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "smallvec 1.4.0", + "smallvec 1.4.1", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -2306,34 +2683,34 @@ dependencies = [ "sp-trie", "sp-version", "static_assertions", - "substrate-wasm-builder-runner 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "substrate-wasm-builder-runner", "tiny-keccak 1.5.0", ] [[package]] name = "kv-log-macro" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff57d6d215f7ca7eb35a9a64d656ba4d9d2bef114d741dc08048e75e2f5d418" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ - "log 0.4.8", + "log 0.4.11", ] [[package]] name = "kvdb" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e763b2a9b500ba47948061d1e8bc3b5f03a8a1f067dbcf822a4d2c84d2b54a3a" +checksum = "0315ef2f688e33844400b31f11c263f2b3dc21d8b9355c6891c5f185fae43f9a" dependencies = [ "parity-util-mem", - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] name = "kvdb-memorydb" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73027d5e228de6f503b5b7335d530404fc26230a6ae3e09b33ec6e45408509a4" +checksum = "73de822b260a3bdfb889dbbb65bb2d473eee2253973d6fa4a5d149a2a4a7c66e" dependencies = [ "kvdb", "parity-util-mem", @@ -2342,33 +2719,33 @@ dependencies = [ [[package]] name = "kvdb-rocksdb" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84384eca250c7ff67877eda5336f28a86586aaee24acb945643590671f6bfce1" +checksum = "7c341ef15cfb1f923fa3b5138bfbd2d4813a2c1640b473727a53351c7f0b0fa2" dependencies = [ "fs-swap", "kvdb", - "log 0.4.8", + "log 0.4.11", "num_cpus", "owning_ref", 
"parity-util-mem", "parking_lot 0.10.2", "regex", "rocksdb", - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] name = "kvdb-web" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c7f36acb1841d4c701d30ae1f2cfd242e805991443f75f6935479ed3de64903" +checksum = "2701a1369d6ea4f1b9f606db46e5e2a4a8e47f22530a07823d653f85ab1f6c34" dependencies = [ "futures 0.3.5", "js-sys", "kvdb", "kvdb-memorydb", - "log 0.4.8", + "log 0.4.11", "parity-util-mem", "send_wrapper 0.3.0", "wasm-bindgen", @@ -2395,27 +2772,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.71" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49" - -[[package]] -name = "libflate" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784f4ec5908a9d7f4e53658906386667e8b02e9389a47cfebf45d324ba9e8d25" -dependencies = [ - "adler32", - "crc32fast", - "libflate_lz77", - "rle-decode-fast", -] - -[[package]] -name = "libflate_lz77" -version = "1.0.0" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3286f09f7d4926fc486334f28d8d2e6ebe4f7f9994494b6dab27ddfad2c9b11b" +checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701" [[package]] name = "libloading" @@ -2424,7 +2783,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" dependencies = [ "cc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2435,40 +2794,49 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.19.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057eba5432d3e740e313c6e13c9153d0cb76b4f71bfc2e5242ae5bdb7d41af67" +checksum = "76c101edbb9c06955fd4085b77d2abc31cf3650134d77068b35c44967756ada8" dependencies = [ - "bytes 0.5.4", + "atomic", + "bytes 0.5.5", "futures 0.3.5", "lazy_static", "libp2p-core", "libp2p-core-derive", + "libp2p-deflate", "libp2p-dns", + "libp2p-floodsub", + "libp2p-gossipsub", "libp2p-identify", "libp2p-kad", "libp2p-mdns", "libp2p-mplex", "libp2p-noise", "libp2p-ping", + "libp2p-plaintext", + "libp2p-pnet", + "libp2p-request-response", + "libp2p-secio", "libp2p-swarm", "libp2p-tcp", + "libp2p-uds", "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", "multihash", - "parity-multiaddr 0.9.0", + "parity-multiaddr", "parking_lot 0.10.2", "pin-project", - "smallvec 1.4.0", + "smallvec 1.4.1", "wasm-timer", ] [[package]] name = "libp2p-core" -version = "0.19.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5e30dcd8cb13a02ad534e214da234eca1595a76b5788b645dfa5c734d2124b" +checksum = "17cea54ea4a846a7c47e4347db0fc7a4129dcb0fb57f07f57e473820edbfcbde" dependencies = [ "asn1_der", "bs58", @@ -2479,10 +2847,10 @@ dependencies = [ "futures-timer 3.0.2", "lazy_static", "libsecp256k1", - "log 0.4.8", + "log 0.4.11", "multihash", "multistream-select", - "parity-multiaddr 0.9.0", + "parity-multiaddr", "parking_lot 0.10.2", "pin-project", "prost", @@ -2490,83 +2858,137 @@ dependencies = [ "rand 0.7.3", "ring", "rw-stream-sink", - "sha2", - "smallvec 1.4.0", + "sha2 0.8.2", + "smallvec 1.4.1", "thiserror", - "unsigned-varint", + "unsigned-varint 0.4.0", "void", 
"zeroize", ] [[package]] name = "libp2p-core-derive" -version = "0.19.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09548626b737ed64080fde595e06ce1117795b8b9fc4d2629fa36561c583171" +checksum = "f753d9324cd3ec14bf04b8a8cd0d269c87f294153d6bf2a84497a63a5ad22213" dependencies = [ "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", +] + +[[package]] +name = "libp2p-deflate" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6174d6addc9cc5fd84af7099480774035dd1a7cdf48dd31b23dea45cf57638" +dependencies = [ + "flate2", + "futures 0.3.5", + "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.19.0" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fce8769cfe677a567d2677dc02a9e5be27a24acf1ff78a59cef425caae009a6a" +dependencies = [ + "futures 0.3.5", + "libp2p-core", + "log 0.4.11", +] + +[[package]] +name = "libp2p-floodsub" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f2342965ac7ea4b85f4df5288089796421f9297ba4020dc9692f4ef728590dc" +dependencies = [ + "cuckoofilter", + "fnv", + "futures 0.3.5", + "libp2p-core", + "libp2p-swarm", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec 1.4.1", +] + +[[package]] +name = "libp2p-gossipsub" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc186d9a941fd0207cf8f08ef225a735e2d7296258f570155e525f6ee732f87" +checksum = "0828b4f0c76c2edc68da574e391ce981bac5316d65785cddfe8c273d4c9bd4bb" dependencies = [ + "base64 0.11.0", + "byteorder 1.3.4", + "bytes 0.5.5", + "fnv", "futures 0.3.5", + "futures_codec", + "hex_fmt", "libp2p-core", - "log 0.4.8", + "libp2p-swarm", + "log 0.4.11", + "lru_time_cache", + "prost", + "prost-build", + "rand 0.7.3", + "sha2 0.8.2", + "smallvec 1.4.1", + "unsigned-varint 0.4.0", + "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.19.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6438ed8ca240c7635c9caa3be6c5258bc0058553ae97ba81737f04e5d33804f5" +checksum = "41efcb5b521b65d2c45432a244ce6427cdd3649228cd192f397d1fa67682aef2" dependencies = [ "futures 0.3.5", "libp2p-core", "libp2p-swarm", - "log 0.4.8", + "log 0.4.11", "prost", "prost-build", - "smallvec 1.4.0", + "smallvec 1.4.1", "wasm-timer", ] [[package]] name = "libp2p-kad" -version = "0.19.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d6c1d5100973527ae70d82687465b17049c1b717a7964de38b8e65000878ff" +checksum = "ca9b4ccc868863317af3f65eb241811ceadd971d133183040140f5496037e0ae" dependencies = [ "arrayvec 0.5.1", - "bytes 0.5.4", + "bytes 0.5.5", "either", "fnv", "futures 0.3.5", "futures_codec", "libp2p-core", "libp2p-swarm", - "log 0.4.8", + "log 0.4.11", "multihash", "prost", "prost-build", "rand 0.7.3", - "sha2", - "smallvec 1.4.0", + "sha2 0.8.2", + "smallvec 1.4.1", "uint", - "unsigned-varint", + "unsigned-varint 0.4.0", "void", "wasm-timer", ] [[package]] name = "libp2p-mdns" -version = "0.19.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b00163d13f705aae67c427bea0575f8aaf63da6524f9bd4a5a093b8bda0b38" +checksum = "d4fe5614c2c5af74ef5870aad0fce73c9e4707716c4ee7cdf06cf9a0376d3815" dependencies = [ "async-std", "data-encoding", @@ -2576,45 +2998,46 @@ dependencies = [ "lazy_static", "libp2p-core", "libp2p-swarm", - "log 0.4.8", + "log 0.4.11", 
"net2", "rand 0.7.3", - "smallvec 1.4.0", + "smallvec 1.4.1", "void", "wasm-timer", ] [[package]] name = "libp2p-mplex" -version = "0.19.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ce63313ad4bce2d76e54c292a1293ea47a0ebbe16708f1513fa62184992f53" +checksum = "df9e79541e71590846f773efce1b6d0538804992ee54ff2f407e05d63a9ddc23" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "fnv", "futures 0.3.5", "futures_codec", "libp2p-core", - "log 0.4.8", + "log 0.4.11", "parking_lot 0.10.2", - "unsigned-varint", + "unsigned-varint 0.4.0", ] [[package]] name = "libp2p-noise" -version = "0.19.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84fd504e27b0eadd451e06b67694ef714bd8374044e7db339bb0cdb83755ddf4" +checksum = "0beba6459d06153f5f8e23da3df1d2183798b1f457c7c9468ff99760bcbcc60b" dependencies = [ + "bytes 0.5.5", "curve25519-dalek", "futures 0.3.5", "lazy_static", "libp2p-core", - "log 0.4.8", + "log 0.4.11", "prost", "prost-build", "rand 0.7.3", - "sha2", + "sha2 0.8.2", "snow", "static_assertions", "x25519-dalek", @@ -2623,80 +3046,170 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.19.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb3c4f9273313357d4977799aec69f581cfe9568854919c5b8066018ccf59f5" +checksum = "670261ef938567b614746b078e049b03b55617538a8d415071c518f97532d043" dependencies = [ "futures 0.3.5", "libp2p-core", "libp2p-swarm", - "log 0.4.8", + "log 0.4.11", "rand 0.7.3", "void", "wasm-timer", ] [[package]] -name = "libp2p-swarm" -version = "0.19.0" +name = "libp2p-plaintext" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a8101a0e0d5f04562137a476bf5f5423cd5bdab2f7e43a75909668e63cb102" +checksum = "b3a61dfd53d1264ddff1206e4827193efaa72bab27782dfcd63c0dec120a1875" dependencies = [ + "bytes 0.5.5", "futures 0.3.5", + "futures_codec", "libp2p-core", - "log 0.4.8", - "rand 0.7.3", - "smallvec 1.4.0", + "log 0.4.11", + "prost", + "prost-build", + "rw-stream-sink", + "unsigned-varint 0.4.0", "void", - "wasm-timer", ] [[package]] -name = "libp2p-tcp" +name = "libp2p-pnet" version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "309f95fce9bec755eff5406f8b822fd3969990830c2b54f752e1fc181d5ace3e" +checksum = "37d0db10e139d22d7af0b23ed7949449ec86262798aa0fd01595abdbcb02dc87" dependencies = [ - "async-std", "futures 0.3.5", - "futures-timer 3.0.2", - "get_if_addrs", - "ipnet", - "libp2p-core", - "log 0.4.8", - "socket2", + "log 0.4.11", + "pin-project", + "rand 0.7.3", + "salsa20", + "sha3", ] [[package]] -name = "libp2p-wasm-ext" -version = "0.19.0" +name = "libp2p-request-response" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f59fdbb5706f2723ca108c088b1c7a37f735a8c328021f0508007162627e9885" +checksum = "4af0de0e56a11d46c5191a61019733b5618dc955c0a36f82866bb6d5d81a7f8f" dependencies = [ + "async-trait", "futures 0.3.5", - "js-sys", "libp2p-core", - "parity-send-wrapper", - "wasm-bindgen", - "wasm-bindgen-futures", + "libp2p-swarm", + "log 0.4.11", + "lru 0.6.0", + "rand 0.7.3", + "smallvec 1.4.1", + "wasm-timer", ] [[package]] -name = "libp2p-websocket" -version = "0.19.0" +name = "libp2p-secio" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "085fbe4c05c4116c2164ab4d5a521eb6e00516c444f61b3ee9f68c7b1e53580b" +checksum = 
"a04b320cc0394554e8d0adca21f4efd9f8c2da4930211d92e411a19a4dfd769e" dependencies = [ - "async-tls", - "bytes 0.5.4", - "either", + "aes-ctr", + "ctr", "futures 0.3.5", + "hmac", + "js-sys", + "lazy_static", "libp2p-core", - "log 0.4.8", + "log 0.4.11", + "parity-send-wrapper", + "pin-project", + "prost", + "prost-build", "quicksink", - "rustls", + "rand 0.7.3", + "ring", "rw-stream-sink", - "soketto", + "sha2 0.8.2", + "static_assertions", + "twofish", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "libp2p-swarm" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57e4a7e64156e9d1a2daae36b5d791f057b9c53c9364a8e75f7f9848b54f9d68" +dependencies = [ + "futures 0.3.5", + "libp2p-core", + "log 0.4.11", + "rand 0.7.3", + "smallvec 1.4.1", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-tcp" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f65400ccfbbf9a356733bceca6c519c9db0deb5fbcc0b81f89837c4cd53997" +dependencies = [ + "async-std", + "futures 0.3.5", + "futures-timer 3.0.2", + "get_if_addrs", + "ipnet", + "libp2p-core", + "log 0.4.11", + "socket2", +] + +[[package]] +name = "libp2p-uds" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95bc8b0ca1dda4cccb1bb156d47a32e45cfa447ef18f737209f014a63f94a4a2" +dependencies = [ + "async-std", + "futures 0.3.5", + "libp2p-core", + "log 0.4.11", +] + +[[package]] +name = "libp2p-wasm-ext" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f2f7b06d80d036ac5763a811185b7fe6951ad71c00544b17cc378a9069bb7c2" +dependencies = [ + "futures 0.3.5", + "js-sys", + "libp2p-core", + "parity-send-wrapper", + "wasm-bindgen", + "wasm-bindgen-futures", +] + +[[package]] +name = "libp2p-websocket" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b350db65cf0a7c83a539a596ea261caae1552c0df2245df0f916ed2fd04572" +dependencies = [ + "async-tls", + "either", + "futures 0.3.5", + "libp2p-core", + "log 0.4.11", + "quicksink", + "rustls", + "rw-stream-sink", + "soketto", "url 2.1.1", "webpki", "webpki-roots 0.18.0", @@ -2704,9 +3217,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.19.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b305d3a8981e68f11c0e17f2d11d5c52fae95e0d7c283f9e462b5b2dab413b2" +checksum = "b3969ead4ce530efb6f304623924245caf410f3b0b0139bd7007f205933788aa" dependencies = [ "futures 0.3.5", "libp2p-core", @@ -2735,10 +3248,10 @@ checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" dependencies = [ "arrayref", "crunchy", - "digest", + "digest 0.8.1", "hmac-drbg", "rand 0.7.3", - "sha2", + "sha2 0.8.2", "subtle 2.2.3", "typenum", ] @@ -2781,13 +3294,31 @@ dependencies = [ "statrs", ] +[[package]] +name = "lock_api" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" +dependencies = [ + "scopeguard 0.3.3", +] + [[package]] name = "lock_api" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ - "scopeguard", + "scopeguard 1.1.0", +] + +[[package]] +name = "lock_api" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" +dependencies = [ + "scopeguard 1.1.0", ] [[package]] @@ -2796,27 +3327,53 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" dependencies = [ - "log 0.4.8", + "log 0.4.11", ] [[package]] name = "log" -version = "0.4.8" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ "cfg-if", ] +[[package]] +name = "loom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ecc775857611e1df29abba5c41355cdf540e7e9d4acfdf0f355eefee82330b7" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls 0.1.2", +] + [[package]] name = "lru" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237" dependencies = [ - "hashbrown", + "hashbrown 0.6.3", +] + +[[package]] +name = "lru" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "111b945ac72ec09eb7bc62a0fbdc3cc6e80555a7245f52a69d3921a75b53b153" +dependencies = [ + "hashbrown 0.8.0", ] +[[package]] +name = "lru_time_cache" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb241df5c4caeb888755363fc95f8a896618dc0d435e9e775f7930cb099beab" + [[package]] name = "mach" version = "0.3.2" @@ -2826,6 +3383,21 @@ dependencies = [ "libc", ] +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.8" @@ -2860,27 +3432,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "memoffset" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" +checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" dependencies = [ "autocfg 1.0.0", ] [[package]] name = "memory-db" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2999ff7a65d5a1d72172f6d51fa2ea03024b51aee709ba5ff81c3c629a2410" +checksum = "0777fbb396f666701d939e9b3876c18ada6b3581257d88631f2590bc366d8ebe" dependencies = [ - "ahash", "hash-db", - "hashbrown", + "hashbrown 0.8.0", "parity-util-mem", ] @@ -2905,7 +3476,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" dependencies = [ - "byteorder", + "byteorder 1.3.4", "keccak", "rand_core 0.5.1", "zeroize", @@ -2913,11 +3484,11 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.3.7" +version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" +checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" dependencies = [ - "adler32", + "adler", ] [[package]] @@ -2932,7 +3503,7 @@ dependencies = [ "iovec", "kernel32-sys", "libc", - "log 0.4.8", + "log 0.4.11", "miow 0.2.1", "net2", "slab", @@ -2946,21 +3517,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" dependencies = [ "lazycell", - "log 0.4.8", + "log 0.4.11", "mio", "slab", ] [[package]] name = "mio-named-pipes" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3" +checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ - "log 0.4.8", + "log 0.4.11", "mio", "miow 0.3.5", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2993,7 +3564,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" dependencies = [ "socket2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3010,11 +3581,11 @@ checksum = "f75db05d738947aa5389863aadafbcf2e509d7ba099dc2ddcdf4fc66bf7a9e03" dependencies = [ "blake2b_simd", "blake2s_simd", - "digest", + "digest 0.8.1", "sha-1", - "sha2", + "sha2 0.8.2", "sha3", - "unsigned-varint", + "unsigned-varint 0.3.3", ] [[package]] @@ -3025,16 +3596,27 @@ checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" [[package]] name = "multistream-select" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991c33683908c588b8f2cf66c221d8f390818c1bdcd13fce55208408e027a796" +checksum = "c9157e87afbc2ef0d84cc0345423d715f445edde00141c93721c162de35a05e5" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "futures 0.3.5", - "log 0.4.8", + "log 0.4.11", "pin-project", - "smallvec 1.4.0", - "unsigned-varint", + "smallvec 1.4.1", + "unsigned-varint 0.4.0", +] + +[[package]] +name = "multitask" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c09c35271e7dcdb5f709779111f2c8e8ab8e06c1b587c1c6a9e179d865aaa5b4" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand", ] [[package]] @@ -3045,7 +3627,7 @@ checksum = "aaa9fddbc34c8c35dd2108515587b8ce0cab396f17977b8c738568e4edb521a2" dependencies = [ "alga", "approx", - "generic-array", + "generic-array 0.12.3", "matrixmultiply", "num-complex", "num-rational", @@ -3071,21 +3653,7 @@ checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" dependencies = [ "cfg-if", "libc", - "winapi 0.3.8", -] - -[[package]] -name = "netstat2" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29449d242064c48d3057a194b049a2bdcccadda16faa18a91468677b44e8d422" -dependencies = [ - "bitflags", - "byteorder", - "enum-primitive-derive", - "libc", - "num-traits 0.2.12", - "thiserror", + "winapi 0.3.9", ] [[package]] @@ -3137,15 +3705,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "ntapi" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a31937dea023539c72ddae0e3571deadc1414b300483fa7aaec176168cfa9d2" -dependencies = [ - "winapi 0.3.8", -] - [[package]] name = "num-bigint" version 
= "0.2.6" @@ -3220,18 +3779,29 @@ dependencies = [ [[package]] name = "object" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5666bbb90bc4d1e5bdcb26c0afda1822d25928341e9384ab187a9b37ab69e36" -dependencies = [ - "target-lexicon", -] +checksum = "9cbca9424c482ee628fa549d9c812e2cd22f1180b9222c9200fdfa6eb31aecb2" [[package]] name = "object" version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" +dependencies = [ + "crc32fast", + "indexmap", + "wasmparser 0.57.0", +] + +[[package]] +name = "once_cell" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "532c29a261168a45ce28948f9537ddd7a5dd272cc513b3017b1e82a88f962c37" +dependencies = [ + "parking_lot 0.7.1", +] [[package]] name = "once_cell" @@ -3248,6 +3818,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + [[package]] name = "openssl-probe" version = "0.1.2" @@ -3265,8 +3841,8 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3281,8 +3857,8 @@ dependencies = [ [[package]] name = "pallet-authorship" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3296,11 +3872,13 @@ dependencies = [ [[package]] name = "pallet-babe" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", + "pallet-authorship", "pallet-session", "pallet-timestamp", "parity-scale-codec", @@ -3311,6 +3889,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", + "sp-session", "sp-staking", "sp-std", "sp-timestamp", @@ -3318,8 +3897,8 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3332,8 +3911,8 @@ dependencies = [ [[package]] name = "pallet-collective" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3348,8 +3927,8 @@ dependencies = [ [[package]] name = "pallet-democracy" -version = "2.0.0-rc3" 
-source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3363,8 +3942,8 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3378,8 +3957,8 @@ dependencies = [ [[package]] name = "pallet-finality-tracker" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3394,11 +3973,13 @@ dependencies = [ [[package]] name = "pallet-grandpa" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", + "pallet-authorship", "pallet-finality-tracker", "pallet-session", "parity-scale-codec", @@ -3414,8 +3995,8 @@ dependencies = [ [[package]] name = "pallet-identity" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "enumflags2", "frame-benchmarking", @@ -3430,8 +4011,8 @@ dependencies = [ [[package]] name = "pallet-im-online" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3450,8 +4031,8 @@ dependencies = [ [[package]] name = "pallet-indices" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3466,8 +4047,8 @@ dependencies = [ [[package]] name = "pallet-membership" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3480,8 +4061,8 @@ dependencies = [ [[package]] name = "pallet-multisig" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3495,8 +4076,8 @@ dependencies = [ [[package]] name = "pallet-nicks" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = 
"2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3509,8 +4090,8 @@ dependencies = [ [[package]] name = "pallet-offences" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3524,8 +4105,8 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3545,8 +4126,8 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3560,8 +4141,8 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3573,8 +4154,8 @@ dependencies = [ [[package]] name = "pallet-recovery" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "enumflags2", "frame-support", @@ -3588,8 +4169,8 @@ dependencies = [ [[package]] name = "pallet-scheduler" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3603,8 +4184,8 @@ dependencies = [ [[package]] name = "pallet-session" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3623,22 +4204,24 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "pallet-session", "pallet-staking", + "rand 0.7.3", "sp-runtime", + "sp-session", "sp-std", ] [[package]] name = "pallet-society" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ 
"frame-support", "frame-system", @@ -3651,8 +4234,8 @@ dependencies = [ [[package]] name = "pallet-staking" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3673,19 +4256,19 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "pallet-sudo" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", @@ -3698,8 +4281,8 @@ dependencies = [ [[package]] name = "pallet-timestamp" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3716,23 +4299,25 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "frame-system", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "serde", - "smallvec 1.4.0", + "smallvec 1.4.1", + "sp-core", + "sp-io", "sp-runtime", "sp-std", ] [[package]] name = "pallet-transaction-payment-rpc" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -3749,8 +4334,8 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-support", "parity-scale-codec", @@ -3762,8 +4347,8 @@ dependencies = [ [[package]] name = "pallet-treasury" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-benchmarking", "frame-support", @@ -3777,8 +4362,8 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ 
"frame-benchmarking", "frame-support", @@ -3793,8 +4378,8 @@ dependencies = [ [[package]] name = "pallet-vesting" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "enumflags2", "frame-benchmarking", @@ -3815,67 +4400,34 @@ dependencies = [ "blake2-rfc", "crc32fast", "libc", - "log 0.4.8", + "log 0.4.11", "memmap", "parking_lot 0.10.2", ] [[package]] name = "parity-multiaddr" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f77055f9e81921a8cc7bebeb6cded3d128931d51f1e3dd6251f0770a6d431477" -dependencies = [ - "arrayref", - "bs58", - "byteorder", - "data-encoding", - "parity-multihash", - "percent-encoding 2.1.0", - "serde", - "static_assertions", - "unsigned-varint", - "url 2.1.1", -] - -[[package]] -name = "parity-multiaddr" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ca96399f4a01aa89c59220c4f52ac371940eb4e53e3ce990da796f364bdf69" +checksum = "cc20af3143a62c16e7c9e92ea5c6ae49f7d271d97d4d8fe73afc28f0514a3d0f" dependencies = [ "arrayref", "bs58", - "byteorder", + "byteorder 1.3.4", "data-encoding", "multihash", "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.4.0", "url 2.1.1", ] -[[package]] -name = "parity-multihash" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1cd2ba02391b81367bec529fb209019d718684fdc8ad6a712c2b536e46f775" -dependencies = [ - "blake2", - "bytes 0.5.4", - "rand 0.7.3", - "sha-1", - "sha2", - "sha3", - "unsigned-varint", -] - [[package]] name = "parity-scale-codec" -version = "1.3.1" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a74f02beb35d47e0706155c9eac554b50c671e0d868fe8296bcdf44a9a4847bf" +checksum = "34d38aeaffc032ec69faa476b3caaca8d4dd7f3f798137ff30359e5c7869ceb6" dependencies = [ "arrayvec 0.5.1", "bitvec", @@ -3886,14 +4438,14 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0ec292e92e8ec7c58e576adacc1e3f399c597c8f263c42f18420abe58e7245" +checksum = "cd20ff7e0399b274a5f5bb37b712fccb5b3a64b9128200d1c3cc40fe709cb073" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -3911,30 +4463,31 @@ dependencies = [ "bytes 0.4.12", "futures 0.1.29", "libc", - "log 0.4.8", + "log 0.4.11", "mio-named-pipes", "miow 0.3.5", "rand 0.7.3", "tokio 0.1.22", "tokio-named-pipes", "tokio-uds", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "parity-util-mem" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6e2583649a3ca84894d1d71da249abcfda54d5aca24733d72ca10d0f02361c" +checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" dependencies = [ "cfg-if", + "hashbrown 0.8.0", "impl-trait-for-tuples", "jemallocator", "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types", - "smallvec 1.4.0", - "winapi 0.3.8", + "smallvec 1.4.1", + "winapi 0.3.9", ] [[package]] @@ -3944,7 +4497,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ "proc-macro2 1.0.18", - "syn 1.0.31", + "syn 1.0.33", "synstructure", ] @@ -3954,13 +4507,29 @@ version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" +[[package]] +name = "parking" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50d4a6da31f8144a32532fe38fe8fb439a6842e0ec633f0037f0144c14e7f907" + +[[package]] +name = "parking_lot" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" +dependencies = [ + "lock_api 0.1.5", + "parking_lot_core 0.4.0", +] + [[package]] name = "parking_lot" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" dependencies = [ - "lock_api", + "lock_api 0.3.4", "parking_lot_core 0.6.2", "rustc_version", ] @@ -3971,10 +4540,34 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" dependencies = [ - "lock_api", + "lock_api 0.3.4", "parking_lot_core 0.7.2", ] +[[package]] +name = "parking_lot" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +dependencies = [ + "instant", + "lock_api 0.4.1", + "parking_lot_core 0.8.0", +] + +[[package]] +name = "parking_lot_core" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" +dependencies = [ + "libc", + "rand 0.6.5", + "rustc_version", + "smallvec 0.6.13", + "winapi 0.3.9", +] + [[package]] name = "parking_lot_core" version = "0.6.2" @@ -3982,12 +4575,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" dependencies = [ "cfg-if", - "cloudabi", + "cloudabi 0.0.3", "libc", "redox_syscall", "rustc_version", "smallvec 0.6.13", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3997,18 +4590,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ "cfg-if", - "cloudabi", + "cloudabi 0.0.3", "libc", "redox_syscall", - "smallvec 1.4.0", - "winapi 0.3.8", + "smallvec 1.4.1", + "winapi 0.3.9", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" +dependencies = [ + "cfg-if", + "cloudabi 0.1.0", + "instant", + "libc", + "redox_syscall", + "smallvec 1.4.1", + "winapi 0.3.9", ] [[package]] name = "paste" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "026c63fe245362be0322bfec5a9656d458d13f9cfb1785d1b38458b9968e8080" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" dependencies = [ "paste-impl", "proc-macro-hack", @@ -4016,9 +4624,9 @@ dependencies = [ [[package]] name = "paste-impl" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7b9281a268ec213237dcd2aa3c3d0f46681b04ced37c1616fd36567a9e6954b0" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" dependencies = [ "proc-macro-hack", ] @@ -4029,8 +4637,9 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" dependencies = [ - "byteorder", - "crypto-mac", + "byteorder 1.3.4", + "crypto-mac 0.7.0", + "rayon", ] [[package]] @@ -4069,22 +4678,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +checksum = "ca4433fff2ae79342e497d9f8ee990d174071408f28f726d6d83af93e58e48aa" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +checksum = "2c0e815c3ee9a031fdf5af21c10aa17c573c9c6a566328d99e3936c34e36461f" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -4105,12 +4714,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" -[[package]] -name = "plain" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" - [[package]] name = "platforms" version = "0.2.1" @@ -4119,51 +4722,75 @@ checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e" [[package]] name = "polkadot" -version = "0.8.12" +version = "0.8.22" dependencies = [ "assert_cmd", "futures 0.3.5", "nix 0.17.0", "parity-util-mem", "polkadot-cli", - "polkadot-collator", "polkadot-service", "tempfile", ] [[package]] -name = "polkadot-availability-store" -version = "0.8.12" +name = "polkadot-availability-bitfield-distribution" +version = "0.1.0" dependencies = [ - "derive_more 0.99.8", - "exit-future", + "assert_matches", + "bitvec", + "env_logger", "futures 0.3.5", - "kvdb", - "kvdb-memorydb", - "kvdb-rocksdb", - "log 0.4.8", + "futures-timer 3.0.2", + "log 0.4.11", + "maplit", "parity-scale-codec", - "parking_lot 0.9.0", + "parking_lot 0.11.0", + "polkadot-network-bridge", + "polkadot-node-network-protocol", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-primitives", + "sc-network", + "smol 0.3.3", + "sp-core", + "streamunordered", +] + +[[package]] +name = "polkadot-availability-distribution" +version = "0.1.0" +dependencies = [ + "assert_matches", + "bitvec", + "derive_more 0.99.9", + "env_logger", + "futures 0.3.5", + "futures-timer 3.0.2", + "log 0.4.11", + "parity-scale-codec", + "parking_lot 0.11.0", "polkadot-erasure-coding", + "polkadot-network-bridge", + "polkadot-node-network-protocol", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", "polkadot-primitives", - "sc-client-api", "sc-keystore", - "sc-network", - "sp-api", - "sp-blockchain", - "sp-consensus", + "smallvec 1.4.1", "sp-core", - "sp-runtime", - "tokio 0.2.21", + "sp-keyring", + "streamunordered", ] [[package]] name = "polkadot-cli" -version = "0.8.12" +version = "0.8.22" dependencies = [ "frame-benchmarking-cli", "futures 0.3.5", - "log 0.4.8", + "log 
0.4.11", "polkadot-service", "polkadot-service-new", "sc-cli", @@ -4174,6 +4801,7 @@ dependencies = [ "sp-api", "sp-core", "sp-runtime", + "sp-trie", "structopt", "substrate-browser-utils", "substrate-build-script-utils", @@ -4183,36 +4811,18 @@ dependencies = [ ] [[package]] -name = "polkadot-collator" -version = "0.8.12" +name = "polkadot-core-primitives" +version = "0.7.30" dependencies = [ - "futures 0.3.5", - "futures-timer 2.0.2", - "log 0.4.8", "parity-scale-codec", - "polkadot-cli", - "polkadot-network", - "polkadot-primitives", - "polkadot-service", - "polkadot-service-new", - "polkadot-validation", - "sc-cli", - "sc-client-api", - "sc-executor", - "sc-network", - "sc-service", - "sp-api", - "sp-blockchain", - "sp-consensus", "sp-core", - "sp-keyring", "sp-runtime", - "tokio 0.2.21", + "sp-std", ] [[package]] name = "polkadot-erasure-coding" -version = "0.8.12" +version = "0.8.22" dependencies = [ "derive_more 0.15.0", "parity-scale-codec", @@ -4223,121 +4833,357 @@ dependencies = [ ] [[package]] -name = "polkadot-network" -version = "0.8.12" +name = "polkadot-network-bridge" +version = "0.1.0" dependencies = [ - "arrayvec 0.4.12", - "bytes 0.5.4", - "derive_more 0.14.1", - "exit-future", + "assert_matches", "futures 0.3.5", - "futures-timer 2.0.2", - "log 0.4.8", + "futures-timer 3.0.2", + "log 0.4.11", "parity-scale-codec", - "parking_lot 0.9.0", - "polkadot-availability-store", - "polkadot-erasure-coding", + "parking_lot 0.10.2", + "polkadot-node-network-protocol", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", "polkadot-primitives", - "polkadot-validation", "sc-network", - "sc-network-gossip", + "sp-core", + "sp-keyring", + "sp-runtime", + "streamunordered", +] + +[[package]] +name = "polkadot-node-collation-generation" +version = "0.1.0" +dependencies = [ + "derive_more 0.99.9", + "futures 0.3.5", + "log 0.4.11", + "polkadot-erasure-coding", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "sp-core", +] + +[[package]] +name = "polkadot-node-core-av-store" +version = "0.1.0" +dependencies = [ + "assert_matches", + "derive_more 0.99.9", + "futures 0.3.5", + "kvdb", + "kvdb-memorydb", + "kvdb-rocksdb", + "log 0.4.11", + "parity-scale-codec", + "polkadot-erasure-coding", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-overseer", + "polkadot-primitives", + "sp-core", +] + +[[package]] +name = "polkadot-node-core-backing" +version = "0.1.0" +dependencies = [ + "assert_matches", + "bitvec", + "derive_more 0.99.9", + "futures 0.3.5", + "log 0.4.11", + "polkadot-erasure-coding", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "polkadot-statement-table", + "sc-client-api", + "sc-keystore", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-keyring", +] + +[[package]] +name = "polkadot-node-core-bitfield-signing" +version = "0.1.0" +dependencies = [ + "bitvec", + "derive_more 0.99.9", + "futures 0.3.5", + "log 0.4.11", + "polkadot-node-subsystem", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "sc-keystore", + "wasm-timer", +] + +[[package]] +name = "polkadot-node-core-candidate-validation" +version = "0.1.0" +dependencies = [ + "assert_matches", + "derive_more 0.99.9", + "futures 0.3.5", + "log 0.4.11", + "parity-scale-codec", + "polkadot-node-primitives", + "polkadot-node-subsystem", + 
"polkadot-node-subsystem-test-helpers", + "polkadot-parachain", + "polkadot-primitives", + "sp-blockchain", + "sp-core", + "sp-keyring", +] + +[[package]] +name = "polkadot-node-core-chain-api" +version = "0.1.0" +dependencies = [ + "futures 0.3.5", + "maplit", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-primitives", + "sp-blockchain", + "sp-core", +] + +[[package]] +name = "polkadot-node-core-proposer" +version = "0.1.0" +dependencies = [ + "futures 0.3.5", + "futures-timer 3.0.2", + "log 0.4.11", + "parity-scale-codec", + "polkadot-node-subsystem", + "polkadot-overseer", + "polkadot-primitives", + "sc-basic-authorship", + "sc-block-builder", + "sc-client-api", + "sc-telemetry", "sp-api", "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-transaction-pool", + "tokio-executor 0.2.0-alpha.6", + "wasm-timer", +] + +[[package]] +name = "polkadot-node-core-provisioner" +version = "0.1.0" +dependencies = [ + "bitvec", + "derive_more 0.99.9", + "futures 0.3.5", + "lazy_static", + "log 0.4.11", + "polkadot-node-subsystem", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "sp-core", + "tokio 0.2.21", +] + +[[package]] +name = "polkadot-node-core-runtime-api" +version = "0.1.0" +dependencies = [ + "futures 0.3.5", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-primitives", + "sp-api", + "sp-blockchain", + "sp-core", +] + +[[package]] +name = "polkadot-node-network-protocol" +version = "0.1.0" +dependencies = [ + "parity-scale-codec", + "polkadot-node-primitives", + "polkadot-primitives", + "sc-network", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "polkadot-node-primitives" +version = "0.1.0" +dependencies = [ + "futures 0.3.5", + "parity-scale-codec", + "polkadot-primitives", + "polkadot-statement-table", "sp-core", - "sp-keyring", "sp-runtime", - "sp-state-machine", - "wasm-timer", ] [[package]] -name = "polkadot-network-test" -version = "0.8.12" +name = "polkadot-node-subsystem" +version = "0.1.0" dependencies = [ + "assert_matches", + "async-trait", + "derive_more 0.99.9", "futures 0.3.5", - "log 0.4.8", + "futures-timer 3.0.2", + "log 0.4.11", + "parity-scale-codec", "parking_lot 0.10.2", - "polkadot-test-runtime-client", - "rand 0.7.3", - "sc-block-builder", - "sc-client-api", - "sc-consensus", + "pin-project", + "polkadot-node-network-protocol", + "polkadot-node-primitives", + "polkadot-node-subsystem-test-helpers", + "polkadot-primitives", + "polkadot-statement-table", "sc-network", - "sc-network-test", - "sc-service", - "sp-blockchain", - "sp-consensus", + "smallvec 1.4.1", "sp-core", - "sp-runtime", + "substrate-prometheus-endpoint", ] [[package]] -name = "polkadot-node-messages" +name = "polkadot-node-subsystem-test-helpers" version = "0.1.0" dependencies = [ + "async-trait", + "derive_more 0.99.9", "futures 0.3.5", + "futures-timer 3.0.2", + "log 0.4.11", + "parity-scale-codec", + "parking_lot 0.10.2", + "pin-project", "polkadot-node-primitives", + "polkadot-node-subsystem", "polkadot-primitives", "polkadot-statement-table", "sc-network", + "smallvec 1.4.1", + "sp-core", ] [[package]] -name = "polkadot-node-primitives" +name = "polkadot-node-subsystem-util" version = "0.1.0" dependencies = [ + "assert_matches", + "async-trait", + "derive_more 0.99.9", + "env_logger", + "futures 0.3.5", + "futures-timer 3.0.2", + "log 0.4.11", "parity-scale-codec", + "parking_lot 0.10.2", + "pin-project", + "polkadot-node-primitives", + 
"polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", "polkadot-primitives", "polkadot-statement-table", - "sp-runtime", + "sc-keystore", + "sc-network", + "smallvec 1.4.1", + "sp-core", + "streamunordered", ] [[package]] name = "polkadot-overseer" version = "0.1.0" dependencies = [ + "async-trait", "femme", "futures 0.3.5", "futures-timer 3.0.2", "kv-log-macro", - "log 0.4.8", - "polkadot-node-messages", + "log 0.4.11", + "polkadot-node-network-protocol", + "polkadot-node-primitives", + "polkadot-node-subsystem", "polkadot-primitives", "sc-client-api", + "sp-core", "streamunordered", ] [[package]] name = "polkadot-parachain" -version = "0.8.12" +version = "0.8.22" dependencies = [ - "derive_more 0.99.8", - "log 0.4.8", + "derive_more 0.99.9", + "futures 0.3.5", + "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", + "polkadot-core-primitives", "sc-executor", "serde", "shared_memory", "sp-core", "sp-externalities", "sp-io", - "sp-runtime-interface", "sp-std", "sp-wasm-interface", ] +[[package]] +name = "polkadot-pov-distribution" +version = "0.1.0" +dependencies = [ + "assert_matches", + "futures 0.3.5", + "futures-timer 3.0.2", + "log 0.4.11", + "parity-scale-codec", + "parking_lot 0.10.2", + "polkadot-node-network-protocol", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-primitives", + "sp-core", + "sp-runtime", + "streamunordered", +] + [[package]] name = "polkadot-primitives" -version = "0.8.12" +version = "0.8.22" dependencies = [ "bitvec", "frame-system", "parity-scale-codec", + "polkadot-core-primitives", "polkadot-parachain", "pretty_assertions", "serde", "sp-api", "sp-application-crypto", + "sp-arithmetic", "sp-core", "sp-inherents", "sp-runtime", @@ -4350,9 +5196,10 @@ dependencies = [ [[package]] name = "polkadot-rpc" -version = "0.8.12" +version = "0.8.22" dependencies = [ "jsonrpc-core", + "jsonrpc-pubsub", "pallet-transaction-payment-rpc", "parity-scale-codec", "polkadot-primitives", @@ -4376,7 +5223,7 @@ dependencies = [ [[package]] name = "polkadot-runtime" -version = "0.8.12" +version = "0.8.22" dependencies = [ "bitvec", "frame-benchmarking", @@ -4385,7 +5232,7 @@ dependencies = [ "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", - "hex-literal", + "hex-literal 0.2.1", "libsecp256k1", "log 0.3.9", "pallet-authority-discovery", @@ -4412,7 +5259,6 @@ dependencies = [ "pallet-session-benchmarking", "pallet-staking", "pallet-staking-reward-curve", - "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", @@ -4426,13 +5272,14 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "smallvec 1.4.0", + "smallvec 1.4.1", "sp-api", "sp-authority-discovery", "sp-block-builder", "sp-consensus-babe", "sp-core", "sp-inherents", + "sp-io", "sp-keyring", "sp-offchain", "sp-runtime", @@ -4443,20 +5290,20 @@ dependencies = [ "sp-trie", "sp-version", "static_assertions", - "substrate-wasm-builder-runner 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "substrate-wasm-builder-runner", "tiny-keccak 1.5.0", - "trie-db 0.20.1", + "trie-db", ] [[package]] name = "polkadot-runtime-common" -version = "0.8.12" +version = "0.8.22" dependencies = [ "bitvec", "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", + "hex-literal 0.2.1", "libsecp256k1", "log 0.3.9", "pallet-authorship", @@ -4473,6 +5320,7 @@ dependencies = [ "pallet-vesting", "parity-scale-codec", "polkadot-primitives", + 
"polkadot-runtime-parachains", "rustc-hex", "serde", "serde_derive", @@ -4489,7 +5337,7 @@ dependencies = [ "sp-std", "sp-trie", "static_assertions", - "trie-db 0.21.0", + "trie-db", ] [[package]] @@ -4500,7 +5348,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", + "hex-literal 0.2.1", "libsecp256k1", "log 0.3.9", "pallet-authorship", @@ -4533,28 +5381,27 @@ dependencies = [ "sp-staking", "sp-std", "sp-trie", + "sp-version", ] [[package]] name = "polkadot-service" -version = "0.8.12" +version = "0.8.22" dependencies = [ "env_logger", "frame-benchmarking", "frame-system-rpc-runtime-api", "futures 0.3.5", - "hex-literal", + "hex-literal 0.2.1", "kusama-runtime", "lazy_static", - "log 0.4.8", + "log 0.4.11", "pallet-babe", "pallet-im-online", "pallet-staking", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "parking_lot 0.9.0", - "polkadot-availability-store", - "polkadot-network", "polkadot-primitives", "polkadot-rpc", "polkadot-runtime", @@ -4590,6 +5437,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-transaction-pool", + "sp-trie", "substrate-prometheus-endpoint", "westend-runtime", ] @@ -4602,24 +5450,24 @@ dependencies = [ "frame-benchmarking", "frame-system-rpc-runtime-api", "futures 0.3.5", - "hex-literal", + "hex-literal 0.2.1", "kusama-runtime", "lazy_static", - "log 0.4.8", + "log 0.4.11", "pallet-babe", "pallet-im-online", "pallet-staking", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "parking_lot 0.9.0", - "polkadot-network", + "polkadot-node-core-proposer", + "polkadot-node-subsystem", "polkadot-overseer", "polkadot-primitives", "polkadot-rpc", "polkadot-runtime", "polkadot-test-runtime-client", "sc-authority-discovery", - "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -4649,13 +5497,38 @@ dependencies = [ "sp-runtime", "sp-session", "sp-transaction-pool", + "sp-trie", "substrate-prometheus-endpoint", "westend-runtime", ] +[[package]] +name = "polkadot-statement-distribution" +version = "0.1.0" +dependencies = [ + "arrayvec 0.5.1", + "assert_matches", + "futures 0.3.5", + "futures-timer 3.0.2", + "indexmap", + "log 0.4.11", + "parity-scale-codec", + "parking_lot 0.10.2", + "polkadot-node-network-protocol", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-primitives", + "sp-core", + "sp-keyring", + "sp-runtime", + "sp-staking", + "streamunordered", +] + [[package]] name = "polkadot-statement-table" -version = "0.8.12" +version = "0.8.22" dependencies = [ "parity-scale-codec", "polkadot-primitives", @@ -4664,16 +5537,17 @@ dependencies = [ [[package]] name = "polkadot-test-runtime" -version = "0.8.12" +version = "0.8.22" dependencies = [ "bitvec", "frame-executive", "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "hex-literal", + "hex-literal 0.2.1", "libsecp256k1", "log 0.3.9", + "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-balances", @@ -4686,6 +5560,7 @@ dependencies = [ "pallet-session", "pallet-staking", "pallet-staking-reward-curve", + "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", @@ -4698,14 +5573,16 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "smallvec 1.4.0", + "smallvec 1.4.1", "sp-api", + "sp-authority-discovery", "sp-block-builder", "sp-consensus-babe", "sp-core", "sp-inherents", "sp-io", "sp-keyring", + "sp-offchain", "sp-runtime", "sp-session", 
"sp-staking", @@ -4713,7 +5590,7 @@ dependencies = [ "sp-transaction-pool", "sp-trie", "sp-version", - "substrate-wasm-builder-runner 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "substrate-wasm-builder-runner", "tiny-keccak 1.5.0", ] @@ -4727,6 +5604,7 @@ dependencies = [ "polkadot-primitives", "polkadot-runtime-common", "polkadot-test-runtime", + "polkadot-test-service", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -4739,29 +5617,67 @@ dependencies = [ "substrate-test-client", ] +[[package]] +name = "polkadot-test-service" +version = "0.8.2" +dependencies = [ + "frame-benchmarking", + "frame-system", + "futures 0.1.29", + "futures 0.3.5", + "hex", + "log 0.4.11", + "pallet-balances", + "pallet-staking", + "pallet-transaction-payment", + "polkadot-primitives", + "polkadot-rpc", + "polkadot-runtime-common", + "polkadot-service", + "polkadot-test-runtime", + "rand 0.7.3", + "sc-authority-discovery", + "sc-chain-spec", + "sc-client-api", + "sc-consensus", + "sc-consensus-babe", + "sc-executor", + "sc-finality-grandpa", + "sc-informant", + "sc-network", + "sc-service", + "sc-transaction-pool", + "serde_json", + "sp-arithmetic", + "sp-blockchain", + "sp-consensus", + "sp-consensus-babe", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-keyring", + "sp-runtime", + "sp-state-machine", + "substrate-test-client", + "substrate-test-utils", + "tempfile", + "tokio 0.2.21", +] + [[package]] name = "polkadot-validation" -version = "0.8.12" +version = "0.8.22" dependencies = [ - "bitvec", "derive_more 0.14.1", - "exit-future", "futures 0.3.5", - "futures-timer 2.0.2", - "log 0.4.8", - "pallet-babe", + "log 0.4.11", "parity-scale-codec", - "parking_lot 0.9.0", - "polkadot-availability-store", - "polkadot-erasure-coding", "polkadot-parachain", "polkadot-primitives", - "polkadot-statement-table", "sc-basic-authorship", "sc-block-builder", "sc-client-api", "sc-finality-grandpa", - "sc-keystore", "sp-api", "sp-blockchain", "sp-consensus", @@ -4774,7 +5690,25 @@ dependencies = [ "sp-transaction-pool", "sp-trie", "substrate-prometheus-endpoint", - "tokio 0.2.21", +] + +[[package]] +name = "poly1305" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b42192ab143ed7619bf888a7f9c6733a9a2153b218e2cd557cfdb52fbf9bb1" +dependencies = [ + "universal-hash", +] + +[[package]] +name = "polyval" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9a50142b55ab3ed0e9f68dfb3709f1d90d29da24e91033f28b96330643107dc" +dependencies = [ + "cfg-if", + "universal-hash", ] [[package]] @@ -4827,7 +5761,7 @@ checksum = "c55c21c64d0eaa4d7ed885d959ef2d62d9e488c27c0e02d9aa5ce6c877b7d5f8" dependencies = [ "fixed-hash", "impl-codec", - "impl-serde 0.3.1", + "impl-serde", "uint", ] @@ -4842,26 +5776,26 @@ dependencies = [ [[package]] name = "proc-macro-error" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98e9e4b82e0ef281812565ea4751049f1bdcdfccda7d3f459f2e138a40c08678" +checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", "version_check", ] [[package]] name = "proc-macro-error-attr" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5444ead4e9935abd7f27dc51f7e852a0569ac888096d5ec2499470794e2e53" +checksum = 
"3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", "syn-mid", "version_check", ] @@ -4893,34 +5827,18 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" dependencies = [ - "unicode-xid 0.2.0", -] - -[[package]] -name = "procfs" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c434e93ef69c216e68e4f417c927b4f31502c3560b72cfdb6827e2321c5c6b3e" -dependencies = [ - "bitflags", - "byteorder", - "chrono", - "hex", - "lazy_static", - "libc", - "libflate", + "unicode-xid 0.2.1", ] [[package]] name = "prometheus" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0575e258dab62268e7236d7307caa38848acbda7ec7ab87bd9093791e999d20" +checksum = "dd0ced56dee39a6e960c15c74dc48849d614586db2eaada6497477af7c7811cd" dependencies = [ "cfg-if", "fnv", "lazy_static", - "protobuf", "spin", "thiserror", ] @@ -4931,7 +5849,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "prost-derive", ] @@ -4941,10 +5859,10 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "heck", "itertools 0.8.2", - "log 0.4.8", + "log 0.4.11", "multimap", "petgraph", "prost", @@ -4963,7 +5881,7 @@ dependencies = [ "itertools 0.8.2", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -4972,15 +5890,20 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "prost", ] [[package]] -name = "protobuf" -version = "2.14.0" +name = "pwasm-utils" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e86d370532557ae7573551a1ec8235a0f8d6cb276c7c9e6aa490b511c447485" +checksum = "0f53bc2558e8376358ebdc28301546471d67336584f6438ed4b7c7457a055fd7" +dependencies = [ + "byteorder 1.3.4", + "log 0.4.11", + "parity-wasm", +] [[package]] name = "quick-error" @@ -4999,12 +5922,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "quote" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" - [[package]] name = "quote" version = "0.6.13" @@ -5049,7 +5966,7 @@ dependencies = [ "libc", "rand_core 0.3.1", "rdrand", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5058,11 +5975,11 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" dependencies = [ - "cloudabi", + "cloudabi 0.0.3", "fuchsia-cprng", "libc", "rand_core 0.3.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5081,7 +5998,7 @@ dependencies = [ "rand_os", "rand_pcg 0.1.2", "rand_xorshift", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5177,7 +6094,7 @@ checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" dependencies = [ "libc", "rand_core 0.4.2", - 
"winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5186,13 +6103,13 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" dependencies = [ - "cloudabi", + "cloudabi 0.0.3", "fuchsia-cprng", "libc", "rand_core 0.4.2", "rdrand", "wasm-bindgen", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5297,7 +6214,7 @@ version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a415a013dd7c5d4221382329a5a3482566da675737494935cbbbcdec04662f9d" dependencies = [ - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] @@ -5317,18 +6234,18 @@ checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "regalloc" -version = "0.0.21" +version = "0.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b27b256b41986ac5141b37b8bbba85d314fbf546c182eb255af6720e07e4f804" +checksum = "b9ba8aaf5fe7cf307c6dbdaeed85478961d29e25e3bee5169e11b92fa9f027a8" dependencies = [ - "log 0.4.8", + "log 0.4.11", "rustc-hash", - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] @@ -5343,6 +6260,16 @@ dependencies = [ "thread_local", ] +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder 1.3.4", + "regex-syntax", +] + [[package]] name = "regex-syntax" version = "0.6.18" @@ -5358,7 +6285,7 @@ dependencies = [ "bitflags", "libc", "mach", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5367,7 +6294,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5388,9 +6315,15 @@ checksum = "475e68978dc5b743f2f40d8e0a8fdc83f1c5e78cbf4b8fa5e74e73beebc340de" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] +[[package]] +name = "retain_mut" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e005d658ad26eacc2b6c506dfde519f4e277e328d0eb3379ca61647d70a8f531" + [[package]] name = "ring" version = "0.16.15" @@ -5399,19 +6332,13 @@ checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" dependencies = [ "cc", "libc", - "once_cell", + "once_cell 1.4.0", "spin", "untrusted", "web-sys", - "winapi 0.3.8", + "winapi 0.3.9", ] -[[package]] -name = "rle-decode-fast" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cabe4fa914dec5870285fa7f71f602645da47c486e68486d2b4ceb4a343e90ac" - [[package]] name = "rocksdb" version = "0.14.0" @@ -5422,6 +6349,53 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "rococo-v1-runtime" +version = "0.8.22" +dependencies = [ + "frame-executive", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-balances", + "pallet-grandpa", + "pallet-im-online", + "pallet-indices", + "pallet-offences", + "pallet-session", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + 
"polkadot-parachain", + "polkadot-primitives", + "polkadot-runtime-common", + "polkadot-runtime-parachains", + "serde", + "serde_derive", + "smallvec 1.4.1", + "sp-api", + "sp-authority-discovery", + "sp-block-builder", + "sp-consensus-babe", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-staking", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder-runner", +] + [[package]] name = "rpassword" version = "4.0.5" @@ -5429,7 +6403,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5479,12 +6453,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" +checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" dependencies = [ - "base64 0.11.0", - "log 0.4.8", + "base64 0.12.3", + "log 0.4.11", "ring", "sct", "webpki", @@ -5492,9 +6466,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75ffeb84a6bd9d014713119542ce415db3a3e4748f0bfce1e1416cd224a23a5" +checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8" dependencies = [ "openssl-probe", "rustls", @@ -5523,22 +6497,43 @@ checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" name = "safe-mix" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3d055a2582e6b00ed7a31c1524040aa391092bf636328350813f3a0605215c" +checksum = "6d3d055a2582e6b00ed7a31c1524040aa391092bf636328350813f3a0605215c" +dependencies = [ + "rustc_version", +] + +[[package]] +name = "salsa20" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2324b0e8c3bb9a586a571fdb3136f70e7e2c748de00a78043f86e0cff91f91fe" +dependencies = [ + "byteorder 1.3.4", + "salsa20-core", + "stream-cipher 0.3.2", +] + +[[package]] +name = "salsa20-core" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fe6cc1b9f5a5867853ade63099de70f042f7679e408d1ffe52821c9248e6e69" dependencies = [ - "rustc_version", + "stream-cipher 0.3.2", ] [[package]] name = "sc-authority-discovery" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "bytes 0.5.4", - "derive_more 0.99.8", + "bytes 0.5.5", + "derive_more 0.99.9", + "either", "futures 0.3.5", "futures-timer 3.0.2", "libp2p", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "prost", "prost-build", @@ -5557,12 +6552,12 @@ dependencies = [ [[package]] name = "sc-basic-authorship" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -5581,8 +6576,8 @@ dependencies = [ [[package]] name 
= "sc-block-builder" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -5591,16 +6586,18 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-inherents", "sp-runtime", "sp-state-machine", ] [[package]] name = "sc-chain-spec" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "impl-trait-for-tuples", + "parity-scale-codec", "sc-chain-spec-derive", "sc-network", "sc-telemetry", @@ -5613,40 +6610,47 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "sc-cli" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "ansi_term 0.12.1", "atty", + "bip39", "chrono", - "derive_more 0.99.8", + "derive_more 0.99.9", "env_logger", "fdlimit", "futures 0.3.5", + "hex", "lazy_static", - "log 0.4.8", + "libp2p", + "log 0.4.11", "names", "nix 0.17.0", + "parity-scale-codec", "parity-util-mem", + "rand 0.7.3", "regex", "rpassword", "sc-client-api", "sc-informant", + "sc-keystore", "sc-network", "sc-service", "sc-telemetry", "sc-tracing", + "serde", "serde_json", "sp-blockchain", "sp-core", @@ -5664,17 +6668,17 @@ dependencies = [ [[package]] name = "sc-client-api" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "fnv", "futures 0.3.5", "hash-db", - "hex-literal", + "hex-literal 0.3.1", "kvdb", "lazy_static", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", "sc-executor", @@ -5700,8 +6704,8 @@ dependencies = [ [[package]] name = "sc-client-db" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "blake2-rfc", "hash-db", @@ -5709,7 +6713,7 @@ dependencies = [ "kvdb-memorydb", "kvdb-rocksdb", "linked-hash-map", - "log 0.4.8", + "log 0.4.11", "parity-db", "parity-scale-codec", "parity-util-mem", @@ -5717,6 +6721,7 @@ dependencies = [ "sc-client-api", "sc-executor", "sc-state-db", + "sp-arithmetic", "sp-blockchain", "sp-consensus", "sp-core", @@ -5729,8 +6734,8 @@ dependencies = [ [[package]] name = "sc-consensus" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = 
"git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "sc-client-api", "sp-blockchain", @@ -5740,14 +6745,14 @@ dependencies = [ [[package]] name = "sc-consensus-babe" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "fork-tree", "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.8", + "log 0.4.11", "merlin", "num-bigint", "num-rational", @@ -5756,6 +6761,7 @@ dependencies = [ "parking_lot 0.10.2", "pdqselect", "rand 0.7.3", + "retain_mut", "sc-client-api", "sc-consensus-epochs", "sc-consensus-slots", @@ -5776,16 +6782,17 @@ dependencies = [ "sp-io", "sp-runtime", "sp-timestamp", + "sp-utils", "sp-version", "substrate-prometheus-endpoint", ] [[package]] name = "sc-consensus-babe-rpc" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", @@ -5806,8 +6813,8 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "fork-tree", "parity-scale-codec", @@ -5819,12 +6826,12 @@ dependencies = [ [[package]] name = "sc-consensus-slots" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", "sc-client-api", @@ -5833,6 +6840,7 @@ dependencies = [ "sp-application-crypto", "sp-blockchain", "sp-consensus", + "sp-consensus-slots", "sp-core", "sp-inherents", "sp-runtime", @@ -5841,10 +6849,10 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "log 0.4.8", + "log 0.4.11", "sc-client-api", "sp-authorship", "sp-consensus", @@ -5855,13 +6863,13 @@ dependencies = [ [[package]] name = "sc-executor" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "lazy_static", "libsecp256k1", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parity-wasm", "parking_lot 0.10.2", @@ -5883,11 +6891,11 @@ dependencies = [ [[package]] name = "sc-executor-common" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = 
"git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", - "log 0.4.8", + "derive_more 0.99.9", + "log 0.4.11", "parity-scale-codec", "parity-wasm", "sp-allocator", @@ -5900,10 +6908,10 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "sc-executor-common", "sp-allocator", @@ -5915,37 +6923,33 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "cranelift-codegen", - "cranelift-wasm", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parity-wasm", + "pwasm-utils", "sc-executor-common", - "scoped-tls", + "scoped-tls 1.0.0", "sp-allocator", "sp-core", "sp-runtime-interface", "sp-wasm-interface", - "substrate-wasmtime", - "substrate-wasmtime-runtime", - "wasmtime-environ", + "wasmtime", ] [[package]] name = "sc-finality-grandpa" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "assert_matches", - "derive_more 0.99.8", + "derive_more 0.99.9", "finality-grandpa", "fork-tree", "futures 0.3.5", "futures-timer 3.0.2", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", "pin-project", @@ -5974,31 +6978,35 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "finality-grandpa", "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", - "log 0.4.8", + "jsonrpc-pubsub", + "log 0.4.11", + "parity-scale-codec", "sc-finality-grandpa", + "sc-rpc", "serde", "serde_json", + "sp-core", + "sp-runtime", ] [[package]] name = "sc-informant" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "ansi_term 0.12.1", "futures 0.3.5", - "log 0.4.8", + "log 0.4.11", "parity-util-mem", - "parking_lot 0.10.2", "sc-client-api", "sc-network", "sp-blockchain", @@ -6010,10 +7018,10 @@ dependencies = [ [[package]] name = "sc-keystore" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "hex", "merlin", "parking_lot 0.10.2", @@ -6026,8 +7034,8 @@ dependencies = [ [[package]] name = "sc-light" -version = "2.0.0-rc3" -source = 
"git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "hash-db", "lazy_static", @@ -6045,13 +7053,14 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ + "async-std", "bitflags", "bs58", - "bytes 0.5.4", - "derive_more 0.99.8", + "bytes 0.5.5", + "derive_more 0.99.9", "either", "erased-serde", "fnv", @@ -6064,8 +7073,8 @@ dependencies = [ "libp2p", "linked-hash-map", "linked_hash_set", - "log 0.4.8", - "lru", + "log 0.4.11", + "lru 0.4.3", "nohash-hasher", "parity-scale-codec", "parking_lot 0.10.2", @@ -6089,7 +7098,7 @@ dependencies = [ "sp-utils", "substrate-prometheus-endpoint", "thiserror", - "unsigned-varint", + "unsigned-varint 0.4.0", "void", "wasm-timer", "zeroize", @@ -6097,58 +7106,31 @@ dependencies = [ [[package]] name = "sc-network-gossip" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "futures 0.3.5", "futures-timer 3.0.2", "libp2p", - "log 0.4.8", - "lru", + "log 0.4.11", + "lru 0.4.3", "sc-network", "sp-runtime", "wasm-timer", ] -[[package]] -name = "sc-network-test" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" -dependencies = [ - "env_logger", - "futures 0.3.5", - "futures-timer 3.0.2", - "libp2p", - "log 0.4.8", - "parking_lot 0.10.2", - "rand 0.7.3", - "sc-block-builder", - "sc-client-api", - "sc-consensus", - "sc-network", - "sc-service", - "sp-blockchain", - "sp-consensus", - "sp-consensus-babe", - "sp-core", - "sp-runtime", - "substrate-test-runtime", - "substrate-test-runtime-client", - "tempfile", -] - [[package]] name = "sc-offchain" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "fnv", "futures 0.3.5", "futures-timer 3.0.2", "hyper 0.13.6", "hyper-rustls", - "log 0.4.8", + "log 0.4.11", "num_cpus", "parity-scale-codec", "parking_lot 0.10.2", @@ -6166,12 +7148,12 @@ dependencies = [ [[package]] name = "sc-peerset" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "futures 0.3.5", "libp2p", - "log 0.4.8", + "log 0.4.11", "serde_json", "sp-utils", "wasm-timer", @@ -6179,23 +7161,23 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "log 0.4.8", + "log 0.4.11", "substrate-prometheus-endpoint", ] [[package]] name = "sc-rpc" -version = "2.0.0-rc3" -source = 
"git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "futures 0.3.5", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", "sc-block-builder", @@ -6220,16 +7202,16 @@ dependencies = [ [[package]] name = "sc-rpc-api" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", "serde", @@ -6244,15 +7226,15 @@ dependencies = [ [[package]] name = "sc-rpc-server" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "jsonrpc-core", "jsonrpc-http-server", "jsonrpc-ipc-server", "jsonrpc-pubsub", "jsonrpc-ws-server", - "log 0.4.8", + "log 0.4.11", "serde", "serde_json", "sp-runtime", @@ -6260,26 +7242,24 @@ dependencies = [ [[package]] name = "sc-service" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "directories", "exit-future", "futures 0.1.29", "futures 0.3.5", "futures-timer 3.0.2", "hash-db", + "jsonrpc-core", "jsonrpc-pubsub", "lazy_static", - "log 0.4.8", - "netstat2", - "parity-multiaddr 0.7.3", + "log 0.4.11", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.2", "pin-project", - "procfs", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", @@ -6306,6 +7286,7 @@ dependencies = [ "sp-consensus", "sp-core", "sp-externalities", + "sp-inherents", "sp-io", "sp-runtime", "sp-session", @@ -6315,7 +7296,6 @@ dependencies = [ "sp-utils", "sp-version", "substrate-prometheus-endpoint", - "sysinfo", "tempfile", "tracing", "wasm-timer", @@ -6323,10 +7303,10 @@ dependencies = [ [[package]] name = "sc-state-db" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", @@ -6337,14 +7317,13 @@ dependencies = [ [[package]] name = "sc-telemetry" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "bytes 0.5.4", "futures 0.3.5", "futures-timer 3.0.2", "libp2p", - "log 0.4.8", + "log 0.4.11", "parking_lot 0.10.2", "pin-project", "rand 0.7.3", @@ -6359,11 +7338,11 @@ dependencies = [ [[package]] name = "sc-tracing" -version = "2.0.0-rc3" -source = 
"git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "erased-serde", - "log 0.4.8", + "log 0.4.11", "parking_lot 0.10.2", "rustc-hash", "sc-telemetry", @@ -6371,20 +7350,22 @@ dependencies = [ "serde_json", "slog", "sp-tracing", - "tracing-core", + "tracing", + "tracing-subscriber", ] [[package]] name = "sc-transaction-graph" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "linked-hash-map", - "log 0.4.8", + "log 0.4.11", "parity-util-mem", "parking_lot 0.10.2", + "retain_mut", "serde", "sp-blockchain", "sp-core", @@ -6396,14 +7377,14 @@ dependencies = [ [[package]] name = "sc-transaction-pool" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "futures-diagnose", "intervalier", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.2", @@ -6427,7 +7408,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -6443,17 +7424,29 @@ dependencies = [ "merlin", "rand 0.7.3", "rand_core 0.5.1", - "sha2", + "sha2 0.8.2", "subtle 2.2.3", "zeroize", ] +[[package]] +name = "scoped-tls" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" + [[package]] name = "scoped-tls" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +[[package]] +name = "scopeguard" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" + [[package]] name = "scopeguard" version = "1.1.0" @@ -6477,7 +7470,7 @@ checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -6490,11 +7483,20 @@ dependencies = [ "untrusted", ] +[[package]] +name = "secrecy" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9182278ed645df3477a9c27bfee0621c621aa16f6972635f7f795dae3d81070f" +dependencies = [ + "zeroize", +] + [[package]] name = "security-framework" -version = "0.4.4" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" +checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" dependencies = [ "bitflags", "core-foundation", @@ -6505,9 +7507,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "0.4.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" +checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" dependencies = [ "core-foundation-sys", "libc", @@ -6548,29 +7550,29 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.112" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736aac72d1eafe8e5962d1d1c3d99b0df526015ba40915cb3c49d042e92ec243" +checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.112" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf0343ce212ac0d3d6afd9391ac8e9c9efe06b533c8d33f660f6390cc4093f57" +checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "serde_json" -version = "1.0.55" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2c5d7e739bc07a3e73381a39d61fdb5f671c60c1df26a130690665803d8226" +checksum = "3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3" dependencies = [ "itoa", "ryu", @@ -6583,28 +7585,35 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" dependencies = [ - "block-buffer", - "digest", + "block-buffer 0.7.3", + "digest 0.8.1", "fake-simd", - "opaque-debug", + "opaque-debug 0.2.3", ] -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - [[package]] name = "sha2" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" dependencies = [ - "block-buffer", - "digest", + "block-buffer 0.7.3", + "digest 0.8.1", "fake-simd", - "opaque-debug", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sha2" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpuid-bool", + "digest 0.9.0", + "opaque-debug 0.3.0", ] [[package]] @@ -6613,11 +7622,20 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" dependencies = [ - "block-buffer", + "block-buffer 0.7.3", "byte-tools", - "digest", + "digest 0.8.1", "keccak", - "opaque-debug", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sharded-slab" +version = "0.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e" +dependencies = [ + "lazy_static", ] [[package]] @@ -6629,14 +7647,14 @@ dependencies = [ "cfg-if", "enum_primitive", "libc", - "log 0.4.8", + "log 0.4.11", "memrange", "nix 0.10.0", "quick-error", "rand 0.4.6", "shared_memory_derive", "theban_interval_tree", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -6666,6 +7684,12 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "65211b7b6fc3f14ff9fc7a2011a434e3e6880585bd2e9e9396315ae24cbf7852" + [[package]] name = "slab" version = "0.4.2" @@ -6713,7 +7737,7 @@ checksum = "a945ec7f7ce853e89ffa36be1e27dce9a43e82ff9093bf3461c30d5da74ed11b" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -6727,24 +7751,61 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.4.0" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3757cb9d89161a2f24e1cf78efa0c1fcff485d18e3f55e0aa3480824ddaa0f3f" + +[[package]] +name = "smol" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4" +checksum = "620cbb3c6e34da57d3a248cda0cd01cd5848164dc062e764e65d06fe3ea7aed5" +dependencies = [ + "async-task", + "blocking 0.4.7", + "concurrent-queue", + "fastrand", + "futures-io", + "futures-util", + "libc", + "once_cell 1.4.0", + "scoped-tls 1.0.0", + "slab", + "socket2", + "wepoll-sys-stjepang", + "winapi 0.3.9", +] + +[[package]] +name = "smol" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67583f4ccc13bbb105a0752058d8ad66c47753d85445952809bcaca891954f83" +dependencies = [ + "async-channel", + "async-executor", + "async-io", + "blocking 0.5.0", + "cfg-if", + "easy-parallel", + "futures-lite", + "num_cpus", +] [[package]] name = "snow" -version = "0.6.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb767eee7d257ba202f0b9b08673bc13b22281632ef45267b19f13100accd2f" +checksum = "32bf8474159a95551661246cda4976e89356999e3cbfef36f493dacc3fae1e8e" dependencies = [ - "arrayref", - "blake2-rfc", - "chacha20-poly1305-aead", + "aes-gcm", + "blake2", + "chacha20poly1305", "rand 0.7.3", "rand_core 0.5.1", "ring", "rustc_version", - "sha2", + "sha2 0.9.1", "subtle 2.2.3", "x25519-dalek", ] @@ -6758,36 +7819,32 @@ dependencies = [ "cfg-if", "libc", "redox_syscall", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "soketto" -version = "0.3.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c9dab3f95c9ebdf3a88268c19af668f637a3c5039c2c56ff2d40b1b2d64a25b" +checksum = "85457366ae0c6ce56bf05a958aef14cd38513c236568618edbcd9a8c52cb80b0" dependencies = [ - "base64 0.11.0", - "bytes 0.5.4", + "base64 0.12.3", + "bytes 0.5.5", "flate2", "futures 0.3.5", - "http 0.2.1", "httparse", - "log 0.4.8", + "log 0.4.11", "rand 0.7.3", - "sha1", - "smallvec 1.4.0", - "static_assertions", - "thiserror", + "sha-1", ] [[package]] name = "sp-allocator" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", - "log 0.4.8", + "derive_more 0.99.9", + "log 0.4.11", "sp-core", "sp-std", "sp-wasm-interface", @@ -6795,8 +7852,8 @@ dependencies = [ [[package]] name = "sp-api" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "hash-db", "parity-scale-codec", @@ -6810,20 +7867,20 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" -version = "2.0.0-rc3" -source = 
"git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "blake2-rfc", "proc-macro-crate", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "sp-application-crypto" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "serde", @@ -6834,8 +7891,8 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "integer-sqrt", "num-traits 0.2.12", @@ -6847,8 +7904,8 @@ dependencies = [ [[package]] name = "sp-authority-discovery" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "sp-api", @@ -6859,8 +7916,8 @@ dependencies = [ [[package]] name = "sp-authorship" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -6870,8 +7927,8 @@ dependencies = [ [[package]] name = "sp-block-builder" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "sp-api", @@ -6882,24 +7939,25 @@ dependencies = [ [[package]] name = "sp-blockchain" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", - "log 0.4.8", - "lru", + "derive_more 0.99.9", + "log 0.4.11", + "lru 0.4.3", "parity-scale-codec", "parking_lot 0.10.2", "sp-block-builder", "sp-consensus", + "sp-database", "sp-runtime", "sp-state-machine", ] [[package]] name = "sp-chain-spec" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "serde", "serde_json", @@ -6907,22 +7965,24 @@ dependencies = [ [[package]] name = "sp-consensus" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", "futures-timer 3.0.2", "libp2p", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", "serde", + "sp-api", 
"sp-core", "sp-inherents", "sp-runtime", "sp-state-machine", "sp-std", + "sp-trie", "sp-utils", "sp-version", "substrate-prometheus-endpoint", @@ -6930,13 +7990,18 @@ dependencies = [ ] [[package]] -name = "sp-consensus-aura" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +name = "sp-consensus-babe" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ + "merlin", "parity-scale-codec", "sp-api", "sp-application-crypto", + "sp-consensus", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", "sp-inherents", "sp-runtime", "sp-std", @@ -6944,27 +8009,18 @@ dependencies = [ ] [[package]] -name = "sp-consensus-babe" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +name = "sp-consensus-slots" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "merlin", "parity-scale-codec", - "sp-api", - "sp-application-crypto", - "sp-consensus", - "sp-consensus-vrf", - "sp-core", - "sp-inherents", "sp-runtime", - "sp-std", - "sp-timestamp", ] [[package]] name = "sp-consensus-vrf" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -6975,22 +8031,23 @@ dependencies = [ [[package]] name = "sp-core" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "base58", "blake2-rfc", - "byteorder", - "derive_more 0.99.8", + "byteorder 1.3.4", + "derive_more 0.99.9", + "dyn-clonable", "ed25519-dalek", "futures 0.3.5", "hash-db", "hash256-std-hasher", "hex", - "impl-serde 0.3.1", + "impl-serde", "lazy_static", "libsecp256k1", - "log 0.4.8", + "log 0.4.11", "merlin", "num-traits 0.2.12", "parity-scale-codec", @@ -7000,8 +8057,9 @@ dependencies = [ "rand 0.7.3", "regex", "schnorrkel", + "secrecy", "serde", - "sha2", + "sha2 0.8.2", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", @@ -7017,8 +8075,8 @@ dependencies = [ [[package]] name = "sp-database" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "kvdb", "parking_lot 0.10.2", @@ -7026,18 +8084,18 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "sp-externalities" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "environmental", "parity-scale-codec", 
@@ -7047,11 +8105,11 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "finality-grandpa", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "serde", "sp-api", @@ -7063,8 +8121,8 @@ dependencies = [ [[package]] name = "sp-finality-tracker" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7073,10 +8131,10 @@ dependencies = [ [[package]] name = "sp-inherents" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "parity-scale-codec", "parking_lot 0.10.2", "sp-core", @@ -7085,13 +8143,13 @@ dependencies = [ [[package]] name = "sp-io" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "futures 0.3.5", "hash-db", "libsecp256k1", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parking_lot 0.10.2", "sp-core", @@ -7106,8 +8164,8 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "lazy_static", "sp-core", @@ -7117,8 +8175,8 @@ dependencies = [ [[package]] name = "sp-npos-elections" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "serde", @@ -7129,19 +8187,19 @@ dependencies = [ [[package]] name = "sp-npos-elections-compact" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "proc-macro-crate", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "sp-offchain" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "sp-api", "sp-core", @@ -7150,17 +8208,17 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "backtrace", - "log 0.4.8", + "log 0.4.11", ] 
[[package]] name = "sp-rpc" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "serde", "sp-core", @@ -7168,13 +8226,13 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "parity-util-mem", "paste", @@ -7190,14 +8248,15 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", "sp-std", + "sp-storage", "sp-tracing", "sp-wasm-interface", "static_assertions", @@ -7205,20 +8264,20 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "Inflector", "proc-macro-crate", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "sp-serializer" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "serde", "serde_json", @@ -7226,8 +8285,8 @@ dependencies = [ [[package]] name = "sp-session" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "sp-api", @@ -7239,8 +8298,8 @@ dependencies = [ [[package]] name = "sp-staking" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -7249,36 +8308,37 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "hash-db", "itertools 0.9.0", - "log 0.4.8", + "log 0.4.11", "num-traits 0.2.12", "parity-scale-codec", "parking_lot 0.10.2", "rand 0.7.3", - "smallvec 1.4.0", + "smallvec 1.4.1", "sp-core", "sp-externalities", "sp-panic-handler", "sp-trie", - "trie-db 0.21.0", + "trie-db", "trie-root", ] [[package]] name = "sp-std" -version = "2.0.0-rc3" -source = 
"git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" [[package]] name = "sp-storage" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "impl-serde 0.2.3", + "impl-serde", + "parity-scale-codec", "ref-cast", "serde", "sp-debug-derive", @@ -7287,8 +8347,8 @@ dependencies = [ [[package]] name = "sp-timestamp" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -7301,48 +8361,47 @@ dependencies = [ [[package]] name = "sp-tracing" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "log 0.4.8", + "log 0.4.11", "rental", "tracing", ] [[package]] name = "sp-transaction-pool" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "derive_more 0.99.8", + "derive_more 0.99.9", "futures 0.3.5", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "serde", "sp-api", "sp-blockchain", "sp-runtime", - "sp-utils", ] [[package]] name = "sp-trie" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "hash-db", "memory-db", "parity-scale-codec", "sp-core", "sp-std", - "trie-db 0.21.0", + "trie-db", "trie-root", ] [[package]] name = "sp-utils" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "futures 0.3.5", "futures-core", @@ -7353,10 +8412,10 @@ dependencies = [ [[package]] name = "sp-version" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "impl-serde 0.2.3", + "impl-serde", "parity-scale-codec", "serde", "sp-runtime", @@ -7365,8 +8424,8 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -7401,6 +8460,24 @@ dependencies = [ "rand 0.5.6", ] +[[package]] +name = "stream-cipher" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8131256a5896cabcf5eb04f4d6dacbe1aefda854b0d9896e09cb58829ec5638c" +dependencies = [ + "generic-array 0.12.3", +] + +[[package]] +name = "stream-cipher" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f8ed9974042b8c3672ff3030a69fcc03b74c47c3d1ecb7755e8a3626011e88" +dependencies = [ + "generic-array 0.14.2", +] + [[package]] name = "streamunordered" version = "0.5.1" @@ -7422,15 +8499,6 @@ dependencies = [ "bytes 0.4.12", ] -[[package]] -name = "string-interner" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd710eadff449a1531351b0e43eb81ea404336fa2f56c777427ab0e32a4cf183" -dependencies = [ - "serde", -] - [[package]] name = "strsim" version = "0.8.0" @@ -7458,7 +8526,7 @@ dependencies = [ "proc-macro-error", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -7479,7 +8547,7 @@ dependencies = [ "heck", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -7491,16 +8559,15 @@ dependencies = [ "hmac", "pbkdf2", "schnorrkel", - "sha2", + "sha2 0.8.2", ] [[package]] name = "substrate-browser-utils" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "chrono", - "clear_on_drop", "console_error_panic_hook", "console_log", "futures 0.1.29", @@ -7509,7 +8576,7 @@ dependencies = [ "js-sys", "kvdb-web", "libp2p-wasm-ext", - "log 0.4.8", + "log 0.4.11", "rand 0.6.5", "rand 0.7.3", "sc-chain-spec", @@ -7523,23 +8590,23 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "platforms", ] [[package]] name = "substrate-frame-rpc-system" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "frame-system-rpc-runtime-api", "futures 0.3.5", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", - "log 0.4.8", + "log 0.4.11", "parity-scale-codec", "sc-client-api", "sc-rpc-api", @@ -7554,25 +8621,27 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" -version = "0.8.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "0.8.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "async-std", - "derive_more 0.99.8", + "derive_more 0.99.9", "futures-util", "hyper 0.13.6", - "log 0.4.8", + "log 0.4.11", "prometheus", "tokio 0.2.21", ] [[package]] name = "substrate-test-client" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ + "futures 0.1.29", "futures 0.3.5", "hash-db", + "hex", "parity-scale-codec", "sc-client-api", "sc-client-db", @@ -7580,6 +8649,8 @@ dependencies = [ "sc-executor", "sc-light", "sc-service", + "serde", + 
"serde_json", "sp-blockchain", "sp-consensus", "sp-core", @@ -7589,165 +8660,30 @@ dependencies = [ ] [[package]] -name = "substrate-test-runtime" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" -dependencies = [ - "cfg-if", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "log 0.4.8", - "memory-db", - "pallet-babe", - "pallet-timestamp", - "parity-scale-codec", - "parity-util-mem", - "sc-service", - "serde", - "sp-api", - "sp-application-crypto", - "sp-block-builder", - "sp-consensus-aura", - "sp-consensus-babe", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-io", - "sp-keyring", - "sp-offchain", - "sp-runtime", - "sp-runtime-interface", - "sp-session", - "sp-std", - "sp-transaction-pool", - "sp-trie", - "sp-version", - "substrate-wasm-builder-runner 1.0.6 (git+https://github.com/paritytech/substrate)", - "trie-db 0.21.0", -] - -[[package]] -name = "substrate-test-runtime-client" -version = "2.0.0-rc3" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" +name = "substrate-test-utils" +version = "2.0.0-rc6" +source = "git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ "futures 0.3.5", - "parity-scale-codec", - "sc-block-builder", - "sc-client-api", - "sc-consensus", - "sc-light", - "sc-service", - "sp-api", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-runtime", - "substrate-test-client", - "substrate-test-runtime", -] - -[[package]] -name = "substrate-wasm-builder-runner" -version = "1.0.6" -source = "git+https://github.com/paritytech/substrate#7f5dd736f42a408b62885669f7d76ef5baa13572" - -[[package]] -name = "substrate-wasm-builder-runner" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2a965994514ab35d3893e9260245f2947fd1981cdd4fffd2c6e6d1a9ce02e6a" - -[[package]] -name = "substrate-wasmtime" -version = "0.16.0-threadsafe.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd62264edc1a5f3ef44d86fb0c11c9fb142894b9a2da034f34afae482080d7a" -dependencies = [ - "anyhow", - "backtrace", - "cfg-if", - "lazy_static", - "libc", - "region", - "rustc-demangle", - "substrate-wasmtime-jit", - "substrate-wasmtime-profiling", - "substrate-wasmtime-runtime", - "target-lexicon", - "wasmparser 0.52.2", - "wasmtime-environ", - "wat", - "winapi 0.3.8", -] - -[[package]] -name = "substrate-wasmtime-jit" -version = "0.16.0-threadsafe.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ce43c159d4f3ef6b19641e1ae045847fd202d8e2cc74df7ccb2b6475e069d4a" -dependencies = [ - "anyhow", - "cfg-if", - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "cranelift-native", - "cranelift-wasm", - "gimli 0.20.0", - "log 0.4.8", - "more-asserts", - "region", - "substrate-wasmtime-profiling", - "substrate-wasmtime-runtime", - "target-lexicon", - "thiserror", - "wasmparser 0.52.2", - "wasmtime-debug", - "wasmtime-environ", - "winapi 0.3.8", + "substrate-test-utils-derive", + "tokio 0.2.21", ] [[package]] -name = "substrate-wasmtime-profiling" -version = "0.16.0-threadsafe.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77f0ce539b5a09a54dc80a1cf0c7cd7e694df11029354fe50a2d5fe889bdb97" +name = "substrate-test-utils-derive" +version = "0.8.0-rc6" +source = 
"git+https://github.com/paritytech/substrate#243873a76fedc1f63004e8097b38f77470d2eff7" dependencies = [ - "anyhow", - "cfg-if", - "gimli 0.20.0", - "lazy_static", - "libc", - "object 0.18.0", - "scroll", - "serde", - "substrate-wasmtime-runtime", - "target-lexicon", - "wasmtime-environ", + "proc-macro-crate", + "quote 1.0.7", + "syn 1.0.33", ] [[package]] -name = "substrate-wasmtime-runtime" -version = "0.16.0-threadsafe.4" +name = "substrate-wasm-builder-runner" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46516af0a64a7d9b652c5aa7436b6ce13edfa54435a66ef177fc02d2283e2dc2" -dependencies = [ - "backtrace", - "cc", - "cfg-if", - "indexmap", - "lazy_static", - "libc", - "memoffset", - "more-asserts", - "region", - "thiserror", - "wasmtime-environ", - "winapi 0.3.8", -] +checksum = "d2a965994514ab35d3893e9260245f2947fd1981cdd4fffd2c6e6d1a9ce02e6a" [[package]] name = "subtle" @@ -7761,17 +8697,6 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1" -[[package]] -name = "syn" -version = "0.11.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" -dependencies = [ - "quote 0.3.15", - "synom", - "unicode-xid 0.0.4", -] - [[package]] name = "syn" version = "0.15.44" @@ -7785,13 +8710,13 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.31" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5304cfdf27365b7585c25d4af91b35016ed21ef88f17ced89c7093b43dba8b6" +checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "unicode-xid 0.2.0", + "unicode-xid 0.2.1", ] [[package]] @@ -7802,16 +8727,7 @@ checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", -] - -[[package]] -name = "synom" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" -dependencies = [ - "unicode-xid 0.0.4", + "syn 1.0.33", ] [[package]] @@ -7822,23 +8738,8 @@ checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", - "unicode-xid 0.2.0", -] - -[[package]] -name = "sysinfo" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac193374347e7c263c5f547524f36ff8ec6702d56c8799c8331d26dffe8c1e" -dependencies = [ - "cfg-if", - "doc-comment", - "libc", - "ntapi", - "once_cell", - "rayon", - "winapi 0.3.8", + "syn 1.0.33", + "unicode-xid 0.2.1", ] [[package]] @@ -7864,7 +8765,7 @@ dependencies = [ "rand 0.7.3", "redox_syscall", "remove_dir_all", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -7878,48 +8779,22 @@ dependencies = [ [[package]] name = "test-parachain-adder" -version = "0.8.12" -dependencies = [ - "dlmalloc", - "parity-scale-codec", - "polkadot-parachain", - "sp-io", - "substrate-wasm-builder-runner 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "test-parachain-adder-collator" -version = "0.1.0" -dependencies = [ - "futures 0.3.5", - "parity-scale-codec", - "parking_lot 0.10.2", - "polkadot-collator", - "polkadot-parachain", - 
"polkadot-primitives", - "sc-client-api", - "sp-core", - "test-parachain-adder", -] - -[[package]] -name = "test-parachain-code-upgrader" -version = "0.7.22" +version = "0.8.22" dependencies = [ "dlmalloc", "parity-scale-codec", "polkadot-parachain", "sp-io", - "substrate-wasm-builder-runner 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std", + "substrate-wasm-builder-runner", "tiny-keccak 1.5.0", ] [[package]] name = "test-parachain-halt" -version = "0.8.12" +version = "0.8.22" dependencies = [ - "substrate-wasm-builder-runner 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "substrate-wasm-builder-runner", ] [[package]] @@ -7928,8 +8803,8 @@ version = "0.7.22" dependencies = [ "parity-scale-codec", "polkadot-parachain", + "sp-core", "test-parachain-adder", - "test-parachain-code-upgrader", "test-parachain-halt", "tiny-keccak 1.5.0", ] @@ -7971,7 +8846,7 @@ checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] @@ -7999,7 +8874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -8010,11 +8885,11 @@ checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" dependencies = [ "failure", "hmac", - "once_cell", + "once_cell 1.4.0", "pbkdf2", "rand 0.7.3", "rustc-hash", - "sha2", + "sha2 0.8.2", "unicode-normalization", ] @@ -8072,7 +8947,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "fnv", "futures-core", "iovec", @@ -8085,7 +8960,8 @@ dependencies = [ "pin-project-lite", "signal-hook-registry", "slab", - "winapi 0.3.8", + "tokio-macros", + "winapi 0.3.9", ] [[package]] @@ -8160,7 +9036,18 @@ checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ "bytes 0.4.12", "futures 0.1.29", - "log 0.4.8", + "log 0.4.11", +] + +[[package]] +name = "tokio-macros" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +dependencies = [ + "proc-macro2 1.0.18", + "quote 1.0.7", + "syn 1.0.33", ] [[package]] @@ -8185,7 +9072,7 @@ dependencies = [ "crossbeam-utils", "futures 0.1.29", "lazy_static", - "log 0.4.8", + "log 0.4.11", "mio", "num_cpus", "parking_lot 0.9.0", @@ -8197,9 +9084,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15cb62a0d2770787abc96e99c1cd98fcf17f94959f3af63ca85bdfb203f051b4" +checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" dependencies = [ "futures-core", "rustls", @@ -8262,7 +9149,7 @@ dependencies = [ "crossbeam-utils", "futures 0.1.29", "lazy_static", - "log 0.4.8", + "log 0.4.11", "num_cpus", "slab", "tokio-executor 0.1.10", @@ -8288,7 +9175,7 @@ checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ "bytes 0.4.12", "futures 0.1.29", - "log 0.4.8", + "log 0.4.11", "mio", "tokio-codec", "tokio-io", @@ -8297,15 +9184,15 @@ dependencies = [ [[package]] name = "tokio-uds" -version = "0.2.6" +version = "0.2.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5076db410d6fdc6523df7595447629099a1fdc47b3d9f896220780fa48faf798" +checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" dependencies = [ "bytes 0.4.12", "futures 0.1.29", "iovec", "libc", - "log 0.4.8", + "log 0.4.11", "mio", "mio-uds", "tokio-codec", @@ -8319,10 +9206,10 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "futures-core", "futures-sink", - "log 0.4.8", + "log 0.4.11", "pin-project-lite", "tokio 0.2.21", ] @@ -8344,9 +9231,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41f40ed0e162c911ac6fcb53ecdc8134c46905fdbbae8c50add462a538b495f" +checksum = "f0aae59226cf195d8e74d4b34beae1859257efb4e5fed3f147d2dc2c7d372178" dependencies = [ "cfg-if", "tracing-attributes", @@ -8355,54 +9242,83 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99bbad0de3fd923c9c3232ead88510b783e5a4d16a6154adffa3d53308de984c" +checksum = "f0693bf8d6f2bf22c690fc61a9d21ac69efdbb894a17ed596b9af0f01e64b84b" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", ] [[package]] name = "tracing-core" -version = "0.1.10" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa83a9a47081cd522c09c81b31aec2c9273424976f922ad61c053b58350b715" +checksum = "d593f98af59ebc017c0648f0117525db358745a8894a8d684e185ba3f45954f9" dependencies = [ "lazy_static", ] [[package]] -name = "treeline" -version = "0.1.0" +name = "tracing-log" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" +checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" +dependencies = [ + "lazy_static", + "log 0.4.11", + "tracing-core", +] [[package]] -name = "trie-db" -version = "0.20.1" +name = "tracing-serde" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc309f34008563989045a4c4dbcc5770467f3a3785ee80a9b5cc0d83362475f" +checksum = "b6ccba2f8f16e0ed268fc765d9b7ff22e965e7185d32f8f1ec8294fe17d86e79" dependencies = [ - "hash-db", - "hashbrown", - "log 0.4.8", - "rustc-hex", - "smallvec 1.4.0", + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abd165311cc4d7a555ad11cc77a37756df836182db0d81aac908c8184c584f40" +dependencies = [ + "ansi_term 0.12.1", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec 1.4.1", + "thread_local", + "tracing-core", + "tracing-log", + "tracing-serde", ] +[[package]] +name = "treeline" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" + [[package]] name = "trie-db" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cb230c24c741993b04cfccbabb45acff6f6480c5f00d3ed8794ea43db3a9d727" +checksum = "39f1a9a9252d38c5337cf0c5392988821a5cf1b2103245016968f2ab41de9e38" dependencies = [ "hash-db", - "hashbrown", - "log 0.4.8", + "hashbrown 0.8.0", + "log 0.4.11", "rustc-hex", - "smallvec 1.4.0", + "smallvec 1.4.1", ] [[package]] @@ -8420,6 +9336,17 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +[[package]] +name = "twofish" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712d261e83e727c8e2dbb75dacac67c36e35db36a958ee504f2164fc052434e1" +dependencies = [ + "block-cipher-trait", + "byteorder 1.3.4", + "opaque-debug 0.2.3", +] + [[package]] name = "twox-hash" version = "1.5.0" @@ -8441,7 +9368,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "173cd16430c206dc1a430af8a89a0e9c076cf15cb42b4aedb10e8cc8fee73681" dependencies = [ - "byteorder", + "byteorder 1.3.4", "crunchy", "rustc-hex", "static_assertions", @@ -8482,35 +9409,45 @@ checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" [[package]] name = "unicode-width" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" [[package]] name = "unicode-xid" -version = "0.0.4" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" +checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "unicode-xid" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] -name = "unicode-xid" -version = "0.2.0" +name = "universal-hash" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +dependencies = [ + "generic-array 0.14.2", + "subtle 2.2.3", +] [[package]] name = "unsigned-varint" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f67332660eb59a6f1eb24ff1220c9e8d01738a8503c6002e30bcfe4bd9f2b4a9" + +[[package]] +name = "unsigned-varint" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" dependencies = [ - "bytes 0.5.4", + "bytes 0.5.5", "futures-io", "futures-util", "futures_codec", @@ -8550,6 +9487,12 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +[[package]] +name = "vec-arena" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17dfb54bf57c9043f4616cb03dab30eff012cc26631b797d8354b916708db919" + [[package]] name = "vec_map" version = "0.8.2" @@ -8577,6 +9520,12 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9571542c2ce85ce642e6b58b3364da2fb53526360dfb7c211add4f5c23105ff7" + [[package]] name = "want" version = "0.2.0" @@ -8584,7 +9533,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ "futures 0.1.29", - "log 0.4.8", + "log 0.4.11", "try-lock", ] @@ -8594,7 +9543,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "log 0.4.8", + "log 0.4.11", "try-lock", ] @@ -8606,9 +9555,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.63" +version = "0.2.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2dc4aa152834bc334f506c1a06b866416a8b6697d5c9f75b9a689c8486def0" +checksum = "6a634620115e4a229108b71bde263bb4220c483b3f07f5ba514ee8d15064c4c2" dependencies = [ "cfg-if", "serde", @@ -8618,24 +9567,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.63" +version = "0.2.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded84f06e0ed21499f6184df0e0cb3494727b0c5da89534e0fcc55c51d812101" +checksum = "3e53963b583d18a5aa3aaae4b4c1cb535218246131ba22a71f05b518098571df" dependencies = [ "bumpalo", "lazy_static", - "log 0.4.8", + "log 0.4.11", "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64487204d863f109eb77e8462189d111f27cb5712cc9fdb3461297a76963a2f6" +checksum = "dba48d66049d2a6cc8488702e7259ab7afc9043ad0dc5448444f46f2a453b362" dependencies = [ "cfg-if", "js-sys", @@ -8645,9 +9594,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.63" +version = "0.2.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "838e423688dac18d73e31edce74ddfac468e37b1506ad163ffaf0a46f703ffe3" +checksum = "3fcfd5ef6eec85623b4c6e844293d4516470d8f19cd72d0d12246017eb9060b8" dependencies = [ "quote 1.0.7", "wasm-bindgen-macro-support", @@ -8655,22 +9604,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.63" +version = "0.2.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92" +checksum = "9adff9ee0e94b926ca81b57f57f86d5545cdcb1d259e21ec9bdd95b901754c75" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.63" +version = "0.2.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9ba19973a58daf4db6f352eda73dc0e289493cd29fb2632eb172085b6521acd" +checksum = "7f7b90ea6c632dd06fd765d44542e234d5e63d9bb917ecd64d79778a13bd79ae" [[package]] name = "wasm-timer" @@ -8713,84 +9662,194 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.51.4" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeb1956b19469d1c5e63e459d29e7b5aa0f558d9f16fcef09736f8a265e6c10a" +checksum = "32fddd575d477c6e9702484139cf9f23dcd554b06d185ed0f56c857dd3a47aa6" [[package]] name = 
"wasmparser" -version = "0.52.2" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a950e6a618f62147fd514ff445b2a0b53120d382751960797f85f058c7eda9b9" + +[[package]] +name = "wasmtime" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733954023c0b39602439e60a65126fd31b003196d3a1e8e4531b055165a79b31" +checksum = "1cd3c4f449382779ef6e0a7c3ec6752ae614e20a42e4100000c3efdc973100e2" +dependencies = [ + "anyhow", + "backtrace", + "cfg-if", + "lazy_static", + "libc", + "log 0.4.11", + "region", + "rustc-demangle", + "smallvec 1.4.1", + "target-lexicon", + "wasmparser 0.59.0", + "wasmtime-environ", + "wasmtime-jit", + "wasmtime-profiling", + "wasmtime-runtime", + "wat", + "winapi 0.3.9", +] [[package]] name = "wasmtime-debug" -version = "0.16.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d39ba645aee700b29ff0093028b4123556dd142a74973f04ed6225eedb40e77d" +checksum = "6e634af9067a3af6cf2c7d33dc3b84767ddaf5d010ba68e80eecbcea73d4a349" dependencies = [ "anyhow", - "faerie", - "gimli 0.20.0", + "gimli 0.21.0", "more-asserts", + "object 0.20.0", "target-lexicon", "thiserror", - "wasmparser 0.51.4", + "wasmparser 0.59.0", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.16.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed54fd9d64dfeeee7c285fd126174a6b5e6d4efc7e5a1566fdb635e60ff6a74e" +checksum = "08f85619a94ee4034bd5bb87fc3dcf71fd2237b81c840809da1201061eec9ab3" dependencies = [ "anyhow", - "base64 0.12.2", + "base64 0.12.3", "bincode", + "cfg-if", "cranelift-codegen", "cranelift-entity", + "cranelift-frontend", "cranelift-wasm", "directories", "errno", "file-per-thread-logger", "indexmap", "libc", - "log 0.4.8", + "log 0.4.11", "more-asserts", "rayon", "serde", - "sha2", + "sha2 0.8.2", "thiserror", "toml", - "wasmparser 0.51.4", - "winapi 0.3.8", + "wasmparser 0.59.0", + "winapi 0.3.9", "zstd", ] +[[package]] +name = "wasmtime-jit" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e914c013c7a9f15f4e429d5431f2830fb8adb56e40567661b69c5ec1d645be23" +dependencies = [ + "anyhow", + "cfg-if", + "cranelift-codegen", + "cranelift-entity", + "cranelift-frontend", + "cranelift-native", + "cranelift-wasm", + "gimli 0.21.0", + "log 0.4.11", + "more-asserts", + "object 0.20.0", + "region", + "target-lexicon", + "thiserror", + "wasmparser 0.59.0", + "wasmtime-debug", + "wasmtime-environ", + "wasmtime-obj", + "wasmtime-profiling", + "wasmtime-runtime", + "winapi 0.3.9", +] + +[[package]] +name = "wasmtime-obj" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e81d8e02e9bc9fe2da9b6d48bbc217f96e089f7df613f11a28a3958abc44641e" +dependencies = [ + "anyhow", + "more-asserts", + "object 0.20.0", + "target-lexicon", + "wasmtime-debug", + "wasmtime-environ", +] + +[[package]] +name = "wasmtime-profiling" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8d4d1af8dd5f7096cfcc89dd668d358e52980c38cce199643372ffd6590e27" +dependencies = [ + "anyhow", + "cfg-if", + "gimli 0.21.0", + "lazy_static", + "libc", + "object 0.19.0", + "scroll", + "serde", + "target-lexicon", + "wasmtime-environ", + "wasmtime-runtime", +] + +[[package]] +name = "wasmtime-runtime" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3a25f140bbbaadb07c531cba99ce1a966dba216138dc1b2a0ddecec851a01a93" +dependencies = [ + "backtrace", + "cc", + "cfg-if", + "indexmap", + "lazy_static", + "libc", + "log 0.4.11", + "memoffset", + "more-asserts", + "region", + "thiserror", + "wasmtime-environ", + "winapi 0.3.9", +] + [[package]] name = "wast" -version = "18.0.0" +version = "21.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b1f23531740a81f9300bd2febd397a95c76bfa4aa4bfaf4ca8b1ee3438f337" +checksum = "0b1844f66a2bc8526d71690104c0e78a8e59ffa1597b7245769d174ebb91deb5" dependencies = [ "leb128", ] [[package]] name = "wat" -version = "1.0.19" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4006d418d59293172aebfeeadb7673459dc151874a79135946ea7996b6a98515" +checksum = "ce85d72b74242c340e9e3492cfb602652d7bb324c3172dd441b5577e39a2e18c" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.40" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b72fe77fd39e4bd3eaa4412fd299a0be6b3dfe9d2597e2f1c20beb968f41d17" +checksum = "863539788676619aac1a23e2df3655e96b32b0e05eb72ca34ba045ad573c625d" dependencies = [ "js-sys", "wasm-bindgen", @@ -8824,9 +9883,18 @@ dependencies = [ "webpki", ] +[[package]] +name = "wepoll-sys-stjepang" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fd319e971980166b53e17b1026812ad66c6b54063be879eb182342b55284694" +dependencies = [ + "cc", +] + [[package]] name = "westend-runtime" -version = "0.8.12" +version = "0.8.22" dependencies = [ "bitvec", "frame-benchmarking", @@ -8835,7 +9903,7 @@ dependencies = [ "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", - "hex-literal", + "hex-literal 0.2.1", "libsecp256k1", "log 0.3.9", "pallet-authority-discovery", @@ -8879,7 +9947,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "smallvec 1.4.0", + "smallvec 1.4.1", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -8897,7 +9965,7 @@ dependencies = [ "sp-trie", "sp-version", "static_assertions", - "substrate-wasm-builder-runner 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "substrate-wasm-builder-runner", "tiny-keccak 1.5.0", ] @@ -8918,9 +9986,9 @@ checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", @@ -8944,7 +10012,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -8959,10 +10027,10 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c51a2c47b5798ccc774ffb93ff536aec7c4275d722fd9c740c83cdd1af1f2d94" dependencies = [ - "byteorder", + "byteorder 1.3.4", "bytes 0.4.12", "httparse", - "log 0.4.8", + "log 0.4.11", "mio", "mio-extras", "rand 0.7.3", @@ -8994,12 +10062,12 @@ dependencies = [ [[package]] name = "yamux" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"84300bb493cc878f3638b981c62b4632ec1a5c52daaa3036651e8c106d3b55ea" +checksum = "cd37e58a1256a0b328ce9c67d8b62ecdd02f4803ba443df478835cb1a41a637c" dependencies = [ "futures 0.3.5", - "log 0.4.8", + "log 0.4.11", "nohash-hasher", "parking_lot 0.10.2", "rand 0.7.3", @@ -9023,7 +10091,7 @@ checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ "proc-macro2 1.0.18", "quote 1.0.7", - "syn 1.0.31", + "syn 1.0.33", "synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index f3204924187f93aa889cfbcf40467a1d512d32fb..3f2cc6c258932aabbbe2ba11cb6d008c229cf6b7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,14 +4,12 @@ path = "src/main.rs" [package] name = "polkadot" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" [dependencies] cli = { package = "polkadot-cli", path = "cli" } -# It looks like this is the only way to pass features to it -collator = { package = "polkadot-collator", path = "collator" } futures = "0.3.4" service = { package = "polkadot-service", path = "service" } parity-util-mem = { version = "*", default-features = false, features = ["jemalloc-global"] } @@ -23,17 +21,15 @@ tempfile = "3.1.0" [workspace] members = [ - "availability-store", "cli", - "collator", + "core-primitives", "erasure-coding", - "network", - "network/test", "primitives", "runtime/common", "runtime/parachains", "runtime/polkadot", "runtime/kusama", + "runtime/rococo-v1", "runtime/westend", "runtime/test-runtime", "runtime/test-runtime/client", @@ -42,21 +38,31 @@ members = [ "service", "validation", - "node/messages", + "node/collation-generation", + "node/core/av-store", + "node/core/backing", + "node/core/bitfield-signing", + "node/core/candidate-validation", + "node/core/chain-api", + "node/core/proposer", + "node/core/provisioner", + "node/core/runtime-api", + "node/network/bridge", + "node/network/pov-distribution", + "node/network/protocol", + "node/network/statement-distribution", + "node/network/bitfield-distribution", + "node/network/availability-distribution", "node/overseer", "node/primitives", "node/service", + "node/subsystem", + "node/subsystem-test-helpers", + "node/subsystem-util", + "node/test-service", "parachain/test-parachains", "parachain/test-parachains/adder", - "parachain/test-parachains/adder/collator", - "parachain/test-parachains/code-upgrader", -] -exclude = [ - "runtime/polkadot/wasm", - "runtime/kusama/wasm", - "runtime/westend/wasm", - "parachain/test-parachains/adder/wasm", ] [badges] @@ -70,5 +76,4 @@ panic = "unwind" runtime-benchmarks=["cli/runtime-benchmarks"] service-rewr= [ "cli/service-rewr", - "collator/service-rewr", ] diff --git a/Process.json b/Process.json new file mode 100644 index 0000000000000000000000000000000000000000..10dfb219bf0cab783e3e921cddc52f8f610a3543 --- /dev/null +++ b/Process.json @@ -0,0 +1,15 @@ +[{ + "project_name": "Batch: Availability and Validity", + "owner": "rphmeier", + "matrix_room_id": "!wQXGIDhhJQSVXKqPwi:matrix.parity.io" +}, +{ + "project_name": "Batch: Codebase Restructure", + "owner": "rphmeier", + "matrix_room_id": "!wQXGIDhhJQSVXKqPwi:matrix.parity.io" +}, +{ + "project_name": "Cumulus", + "owner": "bkchr", + "matrix_room_id": "!wQXGIDhhJQSVXKqPwi:matrix.parity.io" +}] diff --git a/README.md b/README.md index 496d5933d6ce6009c8169738ce66b6af6a17da68..5d55065cf290095f712791549b237da88985a68f 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ release and download the binary that is provided. 
If you want to install Polkadot in your PATH, you can do so with:

```bash
-cargo install --force --git https://github.com/paritytech/polkadot --tag polkadot
+cargo install --git https://github.com/paritytech/polkadot --tag polkadot
```

### Build from Source

@@ -45,7 +45,7 @@ rustup update
Once done, finish installing the support software:

```bash
-sudo apt install make clang pkg-config libssl-dev
+sudo apt install build-essential git clang libclang-dev pkg-config libssl-dev
```

Build the client by cloning this repository and running the following commands from the root
@@ -61,9 +61,9 @@ cargo build --release

This repo supports runtimes for Polkadot, Kusama, and Westend.

-### Connect to Polkadot Chain Candidate 1 (CC1)
+### Connect to Polkadot Mainnet

-Connect to the global Polkadot CC1 network by running:
+Connect to the global Polkadot Mainnet network by running:

```bash
./target/release/polkadot --chain=polkadot
@@ -71,7 +71,7 @@

You can see your node on [telemetry] (set a custom name with `--name "my custom name"`).

-[telemetry]: https://telemetry.polkadot.io/#list/Polkadot%20CC1
+[telemetry]: https://telemetry.polkadot.io/#list/Polkadot

### Connect to the "Kusama" Canary Network

diff --git a/availability-store/Cargo.toml b/availability-store/Cargo.toml
deleted file mode 100644
index 69defcfeb2657de9ea38fdcc73c06d7554f91bcc..0000000000000000000000000000000000000000
--- a/availability-store/Cargo.toml
+++ /dev/null
@@ -1,30 +0,0 @@
-[package]
-name = "polkadot-availability-store"
-description = "Persistent database for parachain data"
-version = "0.8.12"
-authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
-
-[dependencies]
-polkadot-primitives = { path = "../primitives" }
-polkadot-erasure-coding = { path = "../erasure-coding" }
-parking_lot = "0.9.0"
-derive_more = "0.99"
-log = "0.4.8"
-futures = "0.3.4"
-tokio = { version = "0.2.13", features = ["rt-core"] }
-exit-future = "0.2.0"
-codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] }
-sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
-consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" }
-client = { package = "sc-client-api", git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
-keystore = { package = "sc-keystore", git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-kvdb = "0.6.0"
-kvdb-memorydb = "0.6.0"
-
-[target.'cfg(not(target_os = "unknown"))'.dependencies]
-kvdb-rocksdb = "0.8.0"
diff --git a/availability-store/src/lib.rs b/availability-store/src/lib.rs
deleted file mode 100644
index 8e9feea687e185e1197469990c5091267d9ba0b2..0000000000000000000000000000000000000000
--- a/availability-store/src/lib.rs
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright 2018-2020 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
-//! Persistent database for parachain data: PoV block data, erasure-coding chunks and outgoing messages.
-//!
-//! This will be written into during the block validation pipeline, and queried
-//! by networking code in order to circulate required data and maintain availability
-//! of it.
-
-#![warn(missing_docs)]
-
-use futures::prelude::*;
-use futures::{channel::{mpsc, oneshot}, task::Spawn};
-use keystore::KeyStorePtr;
-use polkadot_primitives::{
-	Hash, Block,
-	parachain::{
-		PoVBlock, AbridgedCandidateReceipt, ErasureChunk,
-		ParachainHost, AvailableData, OmittedValidationData,
-	},
-};
-use sp_runtime::traits::HashFor;
-use sp_blockchain::{Result as ClientResult};
-use client::{
-	BlockchainEvents, BlockBackend,
-};
-use sp_api::{ApiExt, ProvideRuntimeApi};
-use codec::{Encode, Decode};
-
-use log::warn;
-
-use std::sync::Arc;
-use std::collections::HashSet;
-use std::path::PathBuf;
-use std::io;
-use std::pin::Pin;
-
-mod worker;
-mod store;
-
-pub use worker::AvailabilityBlockImport;
-pub use store::AwaitedFrontierEntry;
-
-use worker::{
-	Worker, WorkerHandle, IncludedParachainBlocks, WorkerMsg, MakeAvailable, Chunks
-};
-
-use store::{Store as InnerStore};
-
-const LOG_TARGET: &str = "availability";
-
-/// Configuration for the availability store.
-pub struct Config {
-	/// Cache size in bytes. If `None` default is used.
-	pub cache_size: Option<usize>,
-	/// Path to the database.
-	pub path: PathBuf,
-}
-
-/// An abstraction around networking for the availability-store.
-///
-/// Currently it is not possible to use the networking code in the availability store
-/// core directly due to a number of loop dependencies it requires:
-///
-/// `availability-store` -> `network` -> `availability-store`
-///
-/// `availability-store` -> `network` -> `validation` -> `availability-store`
-///
-/// So we provide this trait that gets implemented for a type in
-/// the [`network`] module or a mock in tests.
-///
-/// [`network`]: ../polkadot_network/index.html
-pub trait ErasureNetworking {
-	/// Errors that can occur when fetching erasure chunks.
-	type Error: std::fmt::Debug + 'static;
-
-	/// Fetch an erasure chunk from the networking service.
-	fn fetch_erasure_chunk(
-		&self,
-		candidate_hash: &Hash,
-		index: u32,
-	) -> Pin<Box<dyn Future<Output = Result<ErasureChunk, Self::Error>> + Send>>;
-
-	/// Distributes an erasure chunk to the correct validator node.
-	fn distribute_erasure_chunk(
-		&self,
-		candidate_hash: Hash,
-		chunk: ErasureChunk,
-	);
-}
-
-/// Data that, when combined with an `AbridgedCandidateReceipt`, is enough
-/// to fully re-execute a block.
-#[derive(Debug, Encode, Decode, PartialEq)]
-pub struct ExecutionData {
-	/// The `PoVBlock`.
-	pub pov_block: PoVBlock,
-	/// The data omitted from the `AbridgedCandidateReceipt`.
-	pub omitted_validation: OmittedValidationData,
-}
-
-/// Handle to the availability store.
-///
-/// This provides a proxying API that
-/// * in case of write operations provides async methods that send data to
-/// the background worker and resolve when that data is processed by the worker
-/// * in case of read operations queries the underlying storage synchronously.
-#[derive(Clone)]
-pub struct Store {
-	inner: InnerStore,
-	worker: Arc<WorkerHandle>,
-	to_worker: mpsc::UnboundedSender<WorkerMsg>,
-}
-
-impl Store {
-	/// Create a new `Store` with given config on disk.
-	///
-	/// Creating a store among other things starts a background worker thread that
-	/// handles most of the write operations to the storage.
-	#[cfg(not(target_os = "unknown"))]
-	pub fn new<EN>(config: Config, network: EN) -> io::Result<Self>
-		where EN: ErasureNetworking + Send + Sync + Clone + 'static
-	{
-		let inner = InnerStore::new(config)?;
-		let worker = Arc::new(Worker::start(inner.clone(), network));
-		let to_worker = worker.to_worker().clone();
-
-		Ok(Self {
-			inner,
-			worker,
-			to_worker,
-		})
-	}
-
-	/// Create a new in-memory `Store`. Useful for tests.
-	///
-	/// Creating a store among other things starts a background worker thread
-	/// that handles most of the write operations to the storage.
-	pub fn new_in_memory<EN>(network: EN) -> Self
-		where EN: ErasureNetworking + Send + Sync + Clone + 'static
-	{
-		let inner = InnerStore::new_in_memory();
-		let worker = Arc::new(Worker::start(inner.clone(), network));
-		let to_worker = worker.to_worker().clone();
-
-		Self {
-			inner,
-			worker,
-			to_worker,
-		}
-	}
-
-	/// Obtain a [`BlockImport`] implementation to import blocks into this store.
-	///
-	/// This block import will act upon all newly imported blocks sending information
-	/// about parachain heads included in them to this `Store`'s background worker.
-	/// The user may create multiple instances of [`BlockImport`]s with this call.
-	///
-	/// [`BlockImport`]: https://substrate.dev/rustdocs/v1.0/substrate_consensus_common/trait.BlockImport.html
-	pub fn block_import<I, P>(
-		&self,
-		wrapped_block_import: I,
-		client: Arc<P>,
-		spawner: impl Spawn,
-		keystore: KeyStorePtr,
-	) -> ClientResult<AvailabilityBlockImport<I, P>>
-		where
-			P: ProvideRuntimeApi<Block> + BlockchainEvents<Block> + BlockBackend<Block> + Send + Sync + 'static,
-			P::Api: ParachainHost<Block>,
-			P::Api: ApiExt<Block, Error = sp_blockchain::Error>,
-			// Rust bug: https://github.com/rust-lang/rust/issues/24159
-			sp_api::StateBackendFor<P, Block>: sp_api::StateBackend<HashFor<Block>>,
-	{
-		let to_worker = self.to_worker.clone();
-
-		let import = AvailabilityBlockImport::new(
-			client,
-			wrapped_block_import,
-			spawner,
-			keystore,
-			to_worker,
-		);
-
-		Ok(import)
-	}
-
-	/// Make some data available provisionally.
-	///
-	/// Validators with the responsibility of maintaining availability
-	/// for a block or collators collating a block will call this function
-	/// in order to persist that data to disk and so it can be queried and provided
-	/// to other nodes in the network.
-	///
-	/// Determination of invalidity is beyond the scope of this function.
-	///
-	/// This method will send the data to the background worker, allowing the caller to
-	/// asynchronously wait for the result.
-	pub async fn make_available(&self, candidate_hash: Hash, available_data: AvailableData)
-		-> io::Result<()>
-	{
-		let (s, r) = oneshot::channel();
-		let msg = WorkerMsg::MakeAvailable(MakeAvailable {
-			candidate_hash,
-			available_data,
-			result: s,
-		});
-
-		let _ = self.to_worker.unbounded_send(msg);
-
-		if let Ok(Ok(())) = r.await {
-			Ok(())
-		} else {
-			Err(io::Error::new(io::ErrorKind::Other, format!("adding erasure chunks failed")))
-		}
-
-	}
-
-	/// Get a set of all chunks we are waiting for.
-	pub fn awaited_chunks(&self) -> Option<HashSet<AwaitedFrontierEntry>> {
-		self.inner.awaited_chunks()
-	}
-
-	/// Adds an erasure chunk to storage.
-	///
-	/// The chunk should be checked for validity against the root of encoding
-	/// and its proof prior to calling this.
-	///
-	/// This method will send the chunk to the background worker, allowing the caller to
-	/// asynchronously wait for the result.
-	pub async fn add_erasure_chunk(
-		&self,
-		candidate: AbridgedCandidateReceipt,
-		n_validators: u32,
-		chunk: ErasureChunk,
-	) -> io::Result<()> {
-		self.add_erasure_chunks(candidate, n_validators, std::iter::once(chunk)).await
-	}
-
-	/// Adds a set of erasure chunks to storage.
-	///
-	/// The chunks should be checked for validity against the root of encoding
-	/// and its proof prior to calling this.
-	///
-	/// This method will send the chunks to the background worker, allowing the caller to
-	/// asynchronously wait for the result.
-	pub async fn add_erasure_chunks<I>(
-		&self,
-		candidate: AbridgedCandidateReceipt,
-		n_validators: u32,
-		chunks: I,
-	) -> io::Result<()>
-		where I: IntoIterator<Item = ErasureChunk>
-	{
-		let candidate_hash = candidate.hash();
-
-		self.add_candidate(candidate).await?;
-
-		let (s, r) = oneshot::channel();
-		let chunks = chunks.into_iter().collect();
-
-		let msg = WorkerMsg::Chunks(Chunks {
-			candidate_hash,
-			chunks,
-			n_validators,
-			result: s,
-		});
-
-		let _ = self.to_worker.unbounded_send(msg);
-
-		if let Ok(Ok(())) = r.await {
-			Ok(())
-		} else {
-			Err(io::Error::new(io::ErrorKind::Other, format!("adding erasure chunks failed")))
-		}
-	}
-
-	/// Queries an erasure chunk by the candidate hash and validator index.
-	pub fn get_erasure_chunk(
-		&self,
-		candidate_hash: &Hash,
-		validator_index: usize,
-	) -> Option<ErasureChunk> {
-		self.inner.get_erasure_chunk(candidate_hash, validator_index)
-	}
-
-	/// Note a validator's index and a number of validators at a relay parent in the
-	/// store.
-	///
-	/// This should be done before adding erasure chunks with this relay parent.
-	pub fn note_validator_index_and_n_validators(
-		&self,
-		relay_parent: &Hash,
-		validator_index: u32,
-		n_validators: u32,
-	) -> io::Result<()> {
-		self.inner.note_validator_index_and_n_validators(
-			relay_parent,
-			validator_index,
-			n_validators,
-		)
-	}
-
-	// Stores a candidate receipt.
-	async fn add_candidate(
-		&self,
-		candidate: AbridgedCandidateReceipt,
-	) -> io::Result<()> {
-		let (s, r) = oneshot::channel();
-
-		let msg = WorkerMsg::IncludedParachainBlocks(IncludedParachainBlocks {
-			blocks: vec![crate::worker::IncludedParachainBlock {
-				candidate,
-				available_data: None,
-			}],
-			result: s,
-		});
-
-		let _ = self.to_worker.unbounded_send(msg);
-
-		if let Ok(Ok(())) = r.await {
-			Ok(())
-		} else {
-			Err(io::Error::new(io::ErrorKind::Other, format!("adding erasure chunks failed")))
-		}
-	}
-
-	/// Queries a candidate receipt by its hash.
-	pub fn get_candidate(&self, candidate_hash: &Hash)
-		-> Option<AbridgedCandidateReceipt>
-	{
-		self.inner.get_candidate(candidate_hash)
-	}
-
-	/// Query execution data by pov-block hash.
-	pub fn execution_data(&self, candidate_hash: &Hash)
-		-> Option<ExecutionData>
-	{
-		self.inner.execution_data(candidate_hash)
-	}
-}
diff --git a/availability-store/src/store.rs b/availability-store/src/store.rs
deleted file mode 100644
index e3b1e35929795211130d09e84454c28a34faeea4..0000000000000000000000000000000000000000
--- a/availability-store/src/store.rs
+++ /dev/null
@@ -1,623 +0,0 @@
-// Copyright 2018-2020 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
-#[cfg(not(target_os = "unknown"))]
-use kvdb_rocksdb::{Database, DatabaseConfig};
-use kvdb::{KeyValueDB, DBTransaction};
-use codec::{Encode, Decode};
-use polkadot_erasure_coding::{self as erasure};
-use polkadot_primitives::{
-	Hash,
-	parachain::{
-		ErasureChunk, AvailableData, AbridgedCandidateReceipt,
-	},
-};
-use parking_lot::Mutex;
-
-use log::{trace, warn};
-use std::collections::HashSet;
-use std::sync::Arc;
-use std::iter::FromIterator;
-use std::io;
-
-use crate::{LOG_TARGET, Config, ExecutionData};
-
-mod columns {
-	pub const DATA: u32 = 0;
-	pub const META: u32 = 1;
-	pub const NUM_COLUMNS: u32 = 2;
-}
-
-#[derive(Clone)]
-pub struct Store {
-	inner: Arc<dyn KeyValueDB>,
-	candidate_descendents_lock: Arc<Mutex<()>>
-}
-
-// data keys
-fn execution_data_key(candidate_hash: &Hash) -> Vec<u8> {
-	(candidate_hash, 0i8).encode()
-}
-
-fn erasure_chunks_key(candidate_hash: &Hash) -> Vec<u8> {
-	(candidate_hash, 1i8).encode()
-}
-
-fn candidate_key(candidate_hash: &Hash) -> Vec<u8> {
-	(candidate_hash, 2i8).encode()
-}
-
-fn candidates_with_relay_parent_key(relay_block: &Hash) -> Vec<u8> {
-	(relay_block, 4i8).encode()
-}
-
-// meta keys
-const AWAITED_CHUNKS_KEY: [u8; 14] = *b"awaited_chunks";
-
-fn validator_index_and_n_validators_key(relay_parent: &Hash) -> Vec<u8> {
-	(relay_parent, 1i8).encode()
-}
-
-fn available_chunks_key(candidate_hash: &Hash) -> Vec<u8> {
-	(candidate_hash, 2i8).encode()
-}
-
-/// An entry in the awaited frontier of chunks we are interested in.
-#[derive(Encode, Decode, Debug, Hash, PartialEq, Eq, Clone)]
-pub struct AwaitedFrontierEntry {
-	/// The hash of the candidate for which we want to fetch a chunk.
-	/// There will be duplicate entries in the case of multiple candidates with
-	/// the same erasure-root, but this is unlikely.
-	pub candidate_hash: Hash,
-	/// Although the relay-parent is implicitly referenced by the candidate hash,
-	/// we include it here as well for convenience in pruning the set.
-	pub relay_parent: Hash,
-	/// The index of the validator we represent.
-	pub validator_index: u32,
-}
-
-impl Store {
-	/// Create a new `Store` with given config on disk.
-	#[cfg(not(target_os = "unknown"))]
-	pub(super) fn new(config: Config) -> io::Result<Store> {
-		let mut db_config = DatabaseConfig::with_columns(columns::NUM_COLUMNS);
-
-		if let Some(cache_size) = config.cache_size {
-			let mut memory_budget = std::collections::HashMap::new();
-			for i in 0..columns::NUM_COLUMNS {
-				memory_budget.insert(i, cache_size / columns::NUM_COLUMNS as usize);
-			}
-
-			db_config.memory_budget = memory_budget;
-		}
-
-		let path = config.path.to_str().ok_or_else(|| io::Error::new(
-			io::ErrorKind::Other,
-			format!("Bad database path: {:?}", config.path),
-		))?;
-
-		let db = Database::open(&db_config, &path)?;
-
-		Ok(Store {
-			inner: Arc::new(db),
-			candidate_descendents_lock: Arc::new(Mutex::new(())),
-		})
-	}
-
-	/// Create a new `Store` in-memory. Useful for tests.
-	pub(super) fn new_in_memory() -> Self {
-		Store {
-			inner: Arc::new(::kvdb_memorydb::create(columns::NUM_COLUMNS)),
-			candidate_descendents_lock: Arc::new(Mutex::new(())),
-		}
-	}
-
-	/// Make some data available provisionally.
-	pub(crate) fn make_available(&self, candidate_hash: Hash, available_data: AvailableData)
-		-> io::Result<()>
-	{
-		let mut tx = DBTransaction::new();
-
-		// at the moment, these structs are identical. later, we will also
-		// keep outgoing message queues available, and these are not needed
-		// for execution.
-		let AvailableData { pov_block, omitted_validation } = available_data;
-		let execution_data = ExecutionData {
-			pov_block,
-			omitted_validation,
-		};
-
-		tx.put_vec(
-			columns::DATA,
-			execution_data_key(&candidate_hash).as_slice(),
-			execution_data.encode(),
-		);
-
-		self.inner.write(tx)
-	}
-
-	/// Get a set of all chunks we are waiting for.
-	pub fn awaited_chunks(&self) -> Option<HashSet<AwaitedFrontierEntry>> {
-		self.query_inner(columns::META, &AWAITED_CHUNKS_KEY).map(|vec: Vec<AwaitedFrontierEntry>| {
-			HashSet::from_iter(vec.into_iter())
-		})
-	}
-
-	/// Adds a set of candidate hashes that were included in a relay block by the block's parent.
-	///
-	/// If we already possess the receipts for these candidates _and_ our position at the specified
-	/// relay chain, the awaited frontier of the erasure chunks will also be extended.
-	///
-	/// This method modifies the erasure chunks awaited frontier by adding this validator's
-	/// chunks from `candidates` to it. In order to do so the information about this validator's
-	/// position at parent `relay_parent` should be known to the store prior to calling this
-	/// method, in other words `note_validator_index_and_n_validators` should be called for
-	/// the given `relay_parent` before calling this function.
-	pub(crate) fn note_candidates_with_relay_parent(
-		&self,
-		relay_parent: &Hash,
-		candidates: &[Hash],
-	) -> io::Result<()> {
-		let mut tx = DBTransaction::new();
-		let dbkey = candidates_with_relay_parent_key(relay_parent);
-
-		// This call can race against another call to `note_candidates_with_relay_parent`
-		// with a different set of descendants.
-		let _lock = self.candidate_descendents_lock.lock();
-
-		if let Some((validator_index, _)) = self.get_validator_index_and_n_validators(relay_parent) {
-			let candidates = candidates.clone();
-			let awaited_frontier: Vec<AwaitedFrontierEntry> = self
-				.query_inner(columns::META, &AWAITED_CHUNKS_KEY)
-				.unwrap_or_else(|| Vec::new());
-
-			let mut awaited_frontier: HashSet<AwaitedFrontierEntry> =
-				HashSet::from_iter(awaited_frontier.into_iter());
-
-			awaited_frontier.extend(candidates.iter().cloned().map(|candidate_hash| {
-				AwaitedFrontierEntry {
-					relay_parent: relay_parent.clone(),
-					candidate_hash,
-					validator_index,
-				}
-			}));
-			let awaited_frontier = Vec::from_iter(awaited_frontier.into_iter());
-			tx.put_vec(columns::META, &AWAITED_CHUNKS_KEY, awaited_frontier.encode());
-		}
-
-		let mut descendent_candidates = self.get_candidates_with_relay_parent(relay_parent);
-		descendent_candidates.extend(candidates.iter().cloned());
-		tx.put_vec(columns::DATA, &dbkey, descendent_candidates.encode());
-
-		self.inner.write(tx)
-	}
-
-	/// Make a validator's index and a number of validators at a relay parent available.
-	pub(crate) fn note_validator_index_and_n_validators(
-		&self,
-		relay_parent: &Hash,
-		validator_index: u32,
-		n_validators: u32,
-	) -> io::Result<()> {
-		let mut tx = DBTransaction::new();
-		let dbkey = validator_index_and_n_validators_key(relay_parent);
-
-		tx.put_vec(columns::META, &dbkey, (validator_index, n_validators).encode());
-
-		self.inner.write(tx)
-	}
-
-	/// Query a validator's index and n_validators by relay parent.
-	pub(crate) fn get_validator_index_and_n_validators(&self, relay_parent: &Hash) -> Option<(u32, u32)> {
-		let dbkey = validator_index_and_n_validators_key(relay_parent);
-
-		self.query_inner(columns::META, &dbkey)
-	}
-
-	/// Add a set of chunks.
-	///
-	/// The same as `add_erasure_chunk` but adds a set of chunks in one atomic transaction.
-	pub fn add_erasure_chunks<I>(
-		&self,
-		n_validators: u32,
-		candidate_hash: &Hash,
-		chunks: I,
-	) -> io::Result<()>
-		where I: IntoIterator<Item = ErasureChunk>
-	{
-		if let Some(receipt) = self.get_candidate(candidate_hash) {
-			let mut tx = DBTransaction::new();
-			let dbkey = erasure_chunks_key(candidate_hash);
-
-			let mut v = self.query_inner(columns::DATA, &dbkey).unwrap_or(Vec::new());
-
-			let av_chunks_key = available_chunks_key(candidate_hash);
-			let mut have_chunks = self.query_inner(columns::META, &av_chunks_key).unwrap_or(Vec::new());
-
-			let awaited_frontier: Option<Vec<AwaitedFrontierEntry>> = self.query_inner(
-				columns::META,
-				&AWAITED_CHUNKS_KEY,
-			);
-
-			for chunk in chunks.into_iter() {
-				if !have_chunks.contains(&chunk.index) {
-					have_chunks.push(chunk.index);
-				}
-				v.push(chunk);
-			}
-
-			if let Some(mut awaited_frontier) = awaited_frontier {
-				awaited_frontier.retain(|entry| {
-					!(
-						entry.relay_parent == receipt.relay_parent &&
-						&entry.candidate_hash == candidate_hash &&
-						have_chunks.contains(&entry.validator_index)
-					)
-				});
-				tx.put_vec(columns::META, &AWAITED_CHUNKS_KEY, awaited_frontier.encode());
-			}
-
-			// If there is no block data in the store at this point,
-			// check that it can be reconstructed now and add it to the store if it can.
-			if self.execution_data(&candidate_hash).is_none() {
-				if let Ok(available_data) = erasure::reconstruct(
-					n_validators as usize,
-					v.iter().map(|chunk| (chunk.chunk.as_ref(), chunk.index as usize)),
-				)
-				{
-					self.make_available(*candidate_hash, available_data)?;
-				}
-			}
-
-			tx.put_vec(columns::DATA, &dbkey, v.encode());
-			tx.put_vec(columns::META, &av_chunks_key, have_chunks.encode());
-
-			self.inner.write(tx)
-		} else {
-			trace!(target: LOG_TARGET, "Candidate with hash {} not found", candidate_hash);
-			Ok(())
-		}
-	}
-
-	/// Queries an erasure chunk by the candidate hash and chunk index.
-	pub fn get_erasure_chunk(
-		&self,
-		candidate_hash: &Hash,
-		index: usize,
-	) -> Option<ErasureChunk> {
-		self.query_inner(columns::DATA, &erasure_chunks_key(candidate_hash))
-			.and_then(|chunks: Vec<ErasureChunk>| {
-				chunks.iter()
-					.find(|chunk: &&ErasureChunk| chunk.index == index as u32)
-					.map(|chunk| chunk.clone())
-			})
-	}
-
-	/// Stores a candidate receipt.
-	pub fn add_candidate(
-		&self,
-		receipt: &AbridgedCandidateReceipt,
-	) -> io::Result<()> {
-		let candidate_hash = receipt.hash();
-		let dbkey = candidate_key(&candidate_hash);
-		let mut tx = DBTransaction::new();
-
-		tx.put_vec(columns::DATA, &dbkey, receipt.encode());
-
-		self.inner.write(tx)
-	}
-
-	/// Queries a candidate receipt by its hash.
-	pub(crate) fn get_candidate(&self, candidate_hash: &Hash)
-		-> Option<AbridgedCandidateReceipt>
-	{
-		self.query_inner(columns::DATA, &candidate_key(candidate_hash))
-	}
-
-	/// Note that a set of candidates have been included in a finalized block with given hash and parent hash.
-	pub(crate) fn candidates_finalized(
-		&self,
-		relay_parent: Hash,
-		finalized_candidates: HashSet<Hash>,
-	) -> io::Result<()> {
-		let mut tx = DBTransaction::new();
-
-		let awaited_frontier: Option<Vec<AwaitedFrontierEntry>> = self
-			.query_inner(columns::META, &AWAITED_CHUNKS_KEY);
-
-		if let Some(mut awaited_frontier) = awaited_frontier {
-			awaited_frontier.retain(|entry| entry.relay_parent != relay_parent);
-			tx.put_vec(columns::META, &AWAITED_CHUNKS_KEY, awaited_frontier.encode());
-		}
-
-		let candidates = self.get_candidates_with_relay_parent(&relay_parent);
-
-		for candidate in candidates.into_iter().filter(|c| !finalized_candidates.contains(c)) {
-			// we only delete this data for candidates which were not finalized.
-			// we keep all data for the finalized chain forever at the moment.
-			tx.delete(columns::DATA, execution_data_key(&candidate).as_slice());
-			tx.delete(columns::DATA, &erasure_chunks_key(&candidate));
-			tx.delete(columns::DATA, &candidate_key(&candidate));
-
-			tx.delete(columns::META, &available_chunks_key(&candidate));
-		}
-
-		self.inner.write(tx)
-	}
-
-	/// Query execution data by candidate hash.
-	pub(crate) fn execution_data(&self, candidate_hash: &Hash) -> Option<ExecutionData> {
-		self.query_inner(columns::DATA, &execution_data_key(candidate_hash))
-	}
-
-	/// Get candidates which are pinned to the environment of the given relay parent.
-	/// Note that this is not necessarily the same as candidates that were included in a direct
-	/// descendant of the given relay-parent.
-	fn get_candidates_with_relay_parent(&self, relay_parent: &Hash) -> Vec<Hash> {
-		let key = candidates_with_relay_parent_key(relay_parent);
-		self.query_inner(columns::DATA, &key[..]).unwrap_or_default()
-	}
-
-	fn query_inner<T: Decode>(&self, column: u32, key: &[u8]) -> Option<T> {
-		match self.inner.get(column, key) {
-			Ok(Some(raw)) => {
-				let res = T::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed");
-				Some(res)
-			}
-			Ok(None) => None,
-			Err(e) => {
-				warn!(target: LOG_TARGET, "Error reading from the availability store: {:?}", e);
-				None
-			}
-		}
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use polkadot_erasure_coding::{self as erasure};
-	use polkadot_primitives::parachain::{
-		Id as ParaId, BlockData, AvailableData, PoVBlock, OmittedValidationData,
-	};
-
-	fn available_data(block_data: &[u8]) -> AvailableData {
-		AvailableData {
-			pov_block: PoVBlock {
-				block_data: BlockData(block_data.to_vec()),
-			},
-			omitted_validation: OmittedValidationData {
-				global_validation: Default::default(),
-				local_validation: Default::default(),
-			}
-		}
-	}
-
-	fn execution_data(available: &AvailableData) -> ExecutionData {
-		let AvailableData { pov_block, omitted_validation } = available.clone();
-		ExecutionData { pov_block, omitted_validation }
-	}
-
-	#[test]
-	fn finalization_removes_unneeded() {
-		let relay_parent = [1; 32].into();
-
-		let para_id_1 = 5.into();
-		let para_id_2 = 6.into();
-
-		let mut candidate_1 = AbridgedCandidateReceipt::default();
-		let mut candidate_2 = AbridgedCandidateReceipt::default();
-
-		candidate_1.parachain_index = para_id_1;
-		candidate_1.commitments.erasure_root = [6; 32].into();
-		candidate_1.relay_parent = relay_parent;
-
-		candidate_2.parachain_index = para_id_2;
-		candidate_2.commitments.erasure_root = [6; 32].into();
-		candidate_2.relay_parent = relay_parent;
-
-
-		let candidate_1_hash = candidate_1.hash();
-		let candidate_2_hash = candidate_2.hash();
-
-		let available_data_1 = available_data(&[1, 2, 3]);
-		let available_data_2 = available_data(&[4, 5, 6]);
-
-		let erasure_chunk_1 = ErasureChunk {
-			chunk: vec![10, 20, 30],
-			index: 1,
-			proof: vec![],
-		};
-
-		let erasure_chunk_2 = ErasureChunk {
-			chunk: vec![40, 50, 60],
-			index: 1,
-			proof: vec![],
-		};
-
-		let store = Store::new_in_memory();
-		store.make_available(candidate_1_hash, available_data_1.clone()).unwrap();
-
-		store.make_available(candidate_2_hash, available_data_2.clone()).unwrap();
-
-		store.add_candidate(&candidate_1).unwrap();
-		store.add_candidate(&candidate_2).unwrap();
-
-		store.note_candidates_with_relay_parent(&relay_parent, &[candidate_1_hash, candidate_2_hash]).unwrap();
-
-		assert!(store.add_erasure_chunks(3, &candidate_1_hash, vec![erasure_chunk_1.clone()]).is_ok());
-
assert!(store.add_erasure_chunks(3, &candidate_2_hash, vec![erasure_chunk_2.clone()]).is_ok()); - - assert_eq!(store.execution_data(&candidate_1_hash).unwrap(), execution_data(&available_data_1)); - assert_eq!(store.execution_data(&candidate_2_hash).unwrap(), execution_data(&available_data_2)); - - assert_eq!(store.get_erasure_chunk(&candidate_1_hash, 1).as_ref(), Some(&erasure_chunk_1)); - assert_eq!(store.get_erasure_chunk(&candidate_2_hash, 1), Some(erasure_chunk_2)); - - assert_eq!(store.get_candidate(&candidate_1_hash), Some(candidate_1.clone())); - assert_eq!(store.get_candidate(&candidate_2_hash), Some(candidate_2.clone())); - - store.candidates_finalized(relay_parent, [candidate_1_hash].iter().cloned().collect()).unwrap(); - - assert_eq!(store.get_erasure_chunk(&candidate_1_hash, 1).as_ref(), Some(&erasure_chunk_1)); - assert!(store.get_erasure_chunk(&candidate_2_hash, 1).is_none()); - - assert_eq!(store.get_candidate(&candidate_1_hash), Some(candidate_1)); - assert_eq!(store.get_candidate(&candidate_2_hash), None); - - assert_eq!(store.execution_data(&candidate_1_hash).unwrap(), execution_data(&available_data_1)); - assert!(store.execution_data(&candidate_2_hash).is_none()); - } - - #[test] - fn erasure_coding() { - let relay_parent: Hash = [1; 32].into(); - let para_id: ParaId = 5.into(); - let available_data = available_data(&[42; 8]); - let n_validators = 5; - - let erasure_chunks = erasure::obtain_chunks( - n_validators, - &available_data, - ).unwrap(); - - let branches = erasure::branches(erasure_chunks.as_ref()); - - let mut candidate = AbridgedCandidateReceipt::default(); - candidate.parachain_index = para_id; - candidate.commitments.erasure_root = [6; 32].into(); - candidate.relay_parent = relay_parent; - - let candidate_hash = candidate.hash(); - - let chunks: Vec<_> = erasure_chunks - .iter() - .zip(branches.map(|(proof, _)| proof)) - .enumerate() - .map(|(index, (chunk, proof))| ErasureChunk { - chunk: chunk.clone(), - proof, - index: index as u32, - }) - .collect(); - - let store = Store::new_in_memory(); - - store.add_candidate(&candidate).unwrap(); - store.add_erasure_chunks(n_validators as u32, &candidate_hash, vec![chunks[0].clone()]).unwrap(); - assert_eq!(store.get_erasure_chunk(&candidate_hash, 0), Some(chunks[0].clone())); - - assert!(store.execution_data(&candidate_hash).is_none()); - - store.add_erasure_chunks(n_validators as u32, &candidate_hash, chunks).unwrap(); - assert_eq!(store.execution_data(&candidate_hash), Some(execution_data(&available_data))); - } - - #[test] - fn add_validator_index_works() { - let relay_parent = [42; 32].into(); - let store = Store::new_in_memory(); - - store.note_validator_index_and_n_validators(&relay_parent, 42, 24).unwrap(); - assert_eq!(store.get_validator_index_and_n_validators(&relay_parent).unwrap(), (42, 24)); - } - - #[test] - fn add_candidates_in_relay_block_works() { - let relay_parent = [42; 32].into(); - let store = Store::new_in_memory(); - - let candidates = vec![[1; 32].into(), [2; 32].into(), [3; 32].into()]; - - store.note_candidates_with_relay_parent(&relay_parent, &candidates).unwrap(); - assert_eq!(store.get_candidates_with_relay_parent(&relay_parent), candidates); - } - - #[test] - fn awaited_chunks_works() { - use std::iter::FromIterator; - let validator_index = 3; - let n_validators = 10; - let relay_parent = [42; 32].into(); - let erasure_root_1 = [11; 32].into(); - let erasure_root_2 = [12; 32].into(); - let mut receipt_1 = AbridgedCandidateReceipt::default(); - let mut receipt_2 = 
AbridgedCandidateReceipt::default(); - - - receipt_1.parachain_index = 1.into(); - receipt_1.commitments.erasure_root = erasure_root_1; - receipt_1.relay_parent = relay_parent; - - receipt_2.parachain_index = 2.into(); - receipt_2.commitments.erasure_root = erasure_root_2; - receipt_2.relay_parent = relay_parent; - - let receipt_1_hash = receipt_1.hash(); - let receipt_2_hash = receipt_2.hash(); - - let chunk = ErasureChunk { - chunk: vec![1, 2, 3], - index: validator_index, - proof: Vec::new(), - }; - let candidates = vec![receipt_1_hash, receipt_2_hash]; - - let store = Store::new_in_memory(); - - store.note_validator_index_and_n_validators( - &relay_parent, - validator_index, - n_validators - ).unwrap(); - store.add_candidate(&receipt_1).unwrap(); - store.add_candidate(&receipt_2).unwrap(); - - // We are waiting for chunks from two candidates. - store.note_candidates_with_relay_parent(&relay_parent, &candidates).unwrap(); - - let awaited_frontier = store.awaited_chunks().unwrap(); - warn!(target: "availability", "awaited {:?}", awaited_frontier); - let expected: HashSet<_> = candidates - .clone() - .into_iter() - .map(|c| AwaitedFrontierEntry { - relay_parent, - candidate_hash: c, - validator_index, - }) - .collect(); - assert_eq!(awaited_frontier, expected); - - // We add chunk from one of the candidates. - store.add_erasure_chunks(n_validators, &receipt_1_hash, vec![chunk]).unwrap(); - - let awaited_frontier = store.awaited_chunks().unwrap(); - // Now we wait for the other chunk that we haven't received yet. - let expected: HashSet<_> = vec![AwaitedFrontierEntry { - relay_parent, - candidate_hash: receipt_2_hash, - validator_index, - }].into_iter().collect(); - - assert_eq!(awaited_frontier, expected); - - // Finalizing removes awaited candidates from frontier. - store.candidates_finalized(relay_parent, HashSet::from_iter(candidates.into_iter())).unwrap(); - - assert_eq!(store.awaited_chunks().unwrap().len(), 0); - } -} diff --git a/availability-store/src/worker.rs b/availability-store/src/worker.rs deleted file mode 100644 index 0ff59a9fca38377bd30c1fd405184f4796cce7f1..0000000000000000000000000000000000000000 --- a/availability-store/src/worker.rs +++ /dev/null @@ -1,942 +0,0 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . 
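// ---- Editor's aside, not part of the original patch ----
// For orientation before the worker code: the life-cycle that the store
// tests above walk through, written out as one sequence. A sketch only; it
// assumes the same types and store API shown in those tests and elides the
// test scaffolding.
fn store_life_cycle_sketch(
	relay_parent: Hash,
	receipt: AbridgedCandidateReceipt,
	chunks: Vec<ErasureChunk>,
	validator_index: u32,
	n_validators: u32,
	finalized: HashSet<Hash>,
) -> std::io::Result<()> {
	let store = Store::new_in_memory();
	// Our index must be known first, so that noting candidates can extend
	// the awaited-chunk frontier for this validator.
	store.note_validator_index_and_n_validators(&relay_parent, validator_index, n_validators)?;
	store.add_candidate(&receipt)?;
	store.note_candidates_with_relay_parent(&relay_parent, &[receipt.hash()])?;
	// Adding chunks shrinks the frontier; once enough chunks are present the
	// full available data is reconstructed and stored as a side effect.
	store.add_erasure_chunks(n_validators, &receipt.hash(), chunks)?;
	// Finalization prunes the data kept for candidates that were not finalized.
	store.candidates_finalized(relay_parent, finalized)
}
// ---- End editor's aside ----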
- -use std::collections::{HashMap, HashSet}; -use std::io; -use std::sync::Arc; -use std::thread; - -use log::{error, info, trace, warn}; -use sp_blockchain::{Result as ClientResult}; -use sp_runtime::traits::{Header as HeaderT, Block as BlockT, HashFor, BlakeTwo256}; -use sp_api::{ApiExt, ProvideRuntimeApi}; -use client::{ - BlockchainEvents, BlockBackend, - blockchain::ProvideCache, -}; -use consensus_common::{ - self, BlockImport, BlockCheckParams, BlockImportParams, Error as ConsensusError, - ImportResult, - import_queue::CacheKeyId, -}; -use polkadot_primitives::{Block, BlockId, Hash}; -use polkadot_primitives::parachain::{ - ParachainHost, ValidatorId, AbridgedCandidateReceipt, AvailableData, - ValidatorPair, ErasureChunk, -}; -use futures::{prelude::*, future::select, channel::{mpsc, oneshot}, task::{Spawn, SpawnExt}}; -use futures::future::AbortHandle; -use keystore::KeyStorePtr; - -use tokio::runtime::{Handle, Runtime as LocalRuntime}; - -use crate::{LOG_TARGET, ErasureNetworking}; -use crate::store::Store; - -/// Errors that may occur. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub(crate) enum Error { - #[from] - StoreError(io::Error), - #[display(fmt = "Validator's id and number of validators at block with parent {} not found", relay_parent)] - IdAndNValidatorsNotFound { relay_parent: Hash }, -} - -/// Used in testing to interact with the worker thread. -#[cfg(test)] -pub(crate) struct WithWorker(Box); - -#[cfg(test)] -impl std::fmt::Debug for WithWorker { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "") - } -} - -/// Messages sent to the `Worker`. -/// -/// Messages are sent in a number of different scenarios, -/// for instance, when: -/// * importing blocks in `BlockImport` implementation, -/// * recieving finality notifications, -/// * when the `Store` api is used by outside code. -#[derive(Debug)] -pub(crate) enum WorkerMsg { - IncludedParachainBlocks(IncludedParachainBlocks), - Chunks(Chunks), - CandidatesFinalized(CandidatesFinalized), - MakeAvailable(MakeAvailable), - #[cfg(test)] - WithWorker(WithWorker), -} - -/// A notification of a parachain block included in the relay chain. -#[derive(Debug)] -pub(crate) struct IncludedParachainBlock { - /// The abridged candidate receipt, extracted from a relay-chain block. - pub candidate: AbridgedCandidateReceipt, - /// The data to keep available from the candidate, if known. - pub available_data: Option, -} - -/// The receipts of the heads included into the block with a given parent. -#[derive(Debug)] -pub(crate) struct IncludedParachainBlocks { - /// The blocks themselves. - pub blocks: Vec, - /// A sender to signal the result asynchronously. - pub result: oneshot::Sender>, -} - -/// We have received chunks we requested. -#[derive(Debug)] -pub(crate) struct Chunks { - /// The hash of the parachain candidate these chunks belong to. - pub candidate_hash: Hash, - /// The chunks - pub chunks: Vec, - /// The number of validators present at the candidate's relay-parent. - pub n_validators: u32, - /// A sender to signal the result asynchronously. - pub result: oneshot::Sender>, -} - -/// These candidates have been finalized, so unneded availability may be now pruned -#[derive(Debug)] -pub(crate) struct CandidatesFinalized { - /// The relay parent of the block that was finalized. - relay_parent: Hash, - /// The hashes of candidates that were finalized in this block. - included_candidates: HashSet, -} - -/// The message that corresponds to `make_available` call of the crate API. 
-#[derive(Debug)] -pub(crate) struct MakeAvailable { - /// The hash of the candidate for which we are publishing data. - pub candidate_hash: Hash, - /// The data to make available. - pub available_data: AvailableData, - /// A sender to signal the result asynchronously. - pub result: oneshot::Sender>, -} - -/// Description of a chunk we are listening for. -#[derive(Hash, Debug, PartialEq, Eq)] -struct ListeningKey { - candidate_hash: Hash, - index: u32, -} - -/// An availability worker with it's inner state. -pub(super) struct Worker { - availability_store: Store, - listening_for: HashMap, - - sender: mpsc::UnboundedSender, -} - -/// The handle to the `Worker`. -pub(super) struct WorkerHandle { - thread: Option>>, - sender: mpsc::UnboundedSender, - exit_signal: Option, -} - -impl WorkerHandle { - pub(crate) fn to_worker(&self) -> &mpsc::UnboundedSender { - &self.sender - } -} - -impl Drop for WorkerHandle { - fn drop(&mut self) { - if let Some(signal) = self.exit_signal.take() { - let _ = signal.fire(); - } - - if let Some(thread) = self.thread.take() { - if let Err(_) = thread.join() { - error!(target: LOG_TARGET, "Errored stopping the thread"); - } - } - } -} - - -fn fetch_candidates
<P>
(client: &P, extrinsics: Vec<::Extrinsic>, parent: &BlockId) - -> ClientResult>> -where - P: ProvideRuntimeApi, - P::Api: ParachainHost, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - sp_api::StateBackendFor: sp_api::StateBackend>, -{ - let api = client.runtime_api(); - - let candidates = if api.has_api_with::, _>( - parent, - |version| version >= 2, - ).map_err(|e| ConsensusError::ChainLookup(e.to_string()))? { - api.get_heads(&parent, extrinsics) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? - } else { - None - }; - - Ok(candidates) -} - -/// Creates a task to prune entries in availability store upon block finalization. -async fn prune_unneeded_availability(client: Arc
<P>
, mut sender: S) -where - P: ProvideRuntimeApi + BlockchainEvents + BlockBackend + Send + Sync + 'static, - P::Api: ParachainHost + ApiExt, - S: Sink + Clone + Send + Sync + Unpin, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - sp_api::StateBackendFor: sp_api::StateBackend>, -{ - let mut finality_notification_stream = client.finality_notification_stream(); - - while let Some(notification) = finality_notification_stream.next().await { - let hash = notification.hash; - let parent_hash = notification.header.parent_hash; - let extrinsics = match client.block_body(&BlockId::hash(hash)) { - Ok(Some(extrinsics)) => extrinsics, - Ok(None) => { - error!( - target: LOG_TARGET, - "No block body found for imported block {:?}", - hash, - ); - continue; - } - Err(e) => { - error!( - target: LOG_TARGET, - "Failed to get block body for imported block {:?}: {:?}", - hash, - e, - ); - continue; - } - }; - - let included_candidates = match fetch_candidates( - &*client, - extrinsics, - &BlockId::hash(parent_hash), - ) { - Ok(Some(candidates)) => candidates - .into_iter() - .map(|c| c.hash()) - .collect(), - Ok(None) => { - warn!( - target: LOG_TARGET, - "Failed to extract candidates from block body of imported block {:?}", hash - ); - continue; - } - Err(e) => { - warn!( - target: LOG_TARGET, - "Failed to fetch block body for imported block {:?}: {:?}", hash, e - ); - continue; - } - }; - - let msg = WorkerMsg::CandidatesFinalized(CandidatesFinalized { - relay_parent: parent_hash, - included_candidates - }); - - if let Err(_) = sender.send(msg).await { - break; - } - } -} - -impl Worker { - - // Called on startup of the worker to initiate fetch from network for all awaited chunks. - fn initiate_all_fetches( - &mut self, - runtime_handle: &Handle, - erasure_network: &EN, - sender: &mut mpsc::UnboundedSender, - ) { - if let Some(awaited_chunks) = self.availability_store.awaited_chunks() { - for awaited_chunk in awaited_chunks { - if let Err(e) = self.initiate_fetch( - runtime_handle, - erasure_network, - sender, - awaited_chunk.relay_parent, - awaited_chunk.candidate_hash, - ) { - warn!(target: LOG_TARGET, "Failed to register network listener: {}", e); - } - } - } - } - - // initiates a fetch from network for the described chunk, with our local index. - fn initiate_fetch( - &mut self, - runtime_handle: &Handle, - erasure_network: &EN, - sender: &mut mpsc::UnboundedSender, - relay_parent: Hash, - candidate_hash: Hash, - ) -> Result<(), Error> { - let (local_id, n_validators) = self.availability_store - .get_validator_index_and_n_validators(&relay_parent) - .ok_or(Error::IdAndNValidatorsNotFound { relay_parent })?; - - // fast exit for if we already have the chunk. 
- if self.availability_store.get_erasure_chunk(&candidate_hash, local_id as _).is_some() { - return Ok(()) - } - - trace!( - target: LOG_TARGET, - "Initiating fetch for erasure-chunk at parent {} with candidate-hash {}", - relay_parent, - candidate_hash, - ); - - let fut = erasure_network.fetch_erasure_chunk(&candidate_hash, local_id); - let mut sender = sender.clone(); - let (fut, signal) = future::abortable(async move { - let chunk = match fut.await { - Ok(chunk) => chunk, - Err(e) => { - warn!(target: LOG_TARGET, "Unable to fetch erasure-chunk from network: {:?}", e); - return - } - }; - let (s, _) = oneshot::channel(); - let _ = sender.send(WorkerMsg::Chunks(Chunks { - candidate_hash, - chunks: vec![chunk], - n_validators, - result: s, - })).await; - }.map(drop).boxed()); - - - let key = ListeningKey { - candidate_hash, - index: local_id, - }; - - self.listening_for.insert(key, signal); - let _ = runtime_handle.spawn(fut); - - Ok(()) - } - - fn on_parachain_blocks_received( - &mut self, - runtime_handle: &Handle, - erasure_network: &EN, - sender: &mut mpsc::UnboundedSender, - blocks: Vec, - ) -> Result<(), Error> { - // First we have to add the receipts themselves. - for IncludedParachainBlock { candidate, available_data } - in blocks.into_iter() - { - let _ = self.availability_store.add_candidate(&candidate); - - if let Some(_available_data) = available_data { - // Should we be breaking block into chunks here and gossiping it and so on? - } - - // This leans on the codebase-wide assumption that the `relay_parent` - // of all candidates in a block matches the parent hash of that block. - // - // In the future this will not always be true. - let candidate_hash = candidate.hash(); - let _ = self.availability_store.note_candidates_with_relay_parent( - &candidate.relay_parent, - &[candidate_hash], - ); - - if let Err(e) = self.initiate_fetch( - runtime_handle, - erasure_network, - sender, - candidate.relay_parent, - candidate_hash, - ) { - warn!(target: LOG_TARGET, "Failed to register chunk listener: {}", e); - } - } - - Ok(()) - } - - // Handles chunks that were required. - fn on_chunks( - &mut self, - candidate_hash: Hash, - chunks: Vec, - n_validators: u32, - ) -> Result<(), Error> { - for c in &chunks { - let key = ListeningKey { - candidate_hash, - index: c.index, - }; - - // remove bookkeeping so network does not attempt to fetch - // any longer. - if let Some(exit_signal) = self.listening_for.remove(&key) { - exit_signal.abort(); - } - } - - self.availability_store.add_erasure_chunks( - n_validators, - &candidate_hash, - chunks, - )?; - - Ok(()) - } - - /// Starts a worker with a given availability store and a gossip messages provider. - pub fn start( - availability_store: Store, - erasure_network: EN, - ) -> WorkerHandle { - let (sender, mut receiver) = mpsc::unbounded(); - - let mut worker = Worker { - availability_store, - listening_for: HashMap::new(), - sender: sender.clone(), - }; - - let sender = sender.clone(); - let (signal, exit) = exit_future::signal(); - - let handle = thread::spawn(move || -> io::Result<()> { - let mut runtime = LocalRuntime::new()?; - let mut sender = worker.sender.clone(); - - let runtime_handle = runtime.handle().clone(); - - // On startup, initiates fetch from network for all - // entries in the awaited frontier. 
- worker.initiate_all_fetches(runtime.handle(), &erasure_network, &mut sender); - - let process_notification = async move { - while let Some(msg) = receiver.next().await { - trace!(target: LOG_TARGET, "Received message {:?}", msg); - - let res = match msg { - WorkerMsg::IncludedParachainBlocks(msg) => { - let IncludedParachainBlocks { - blocks, - result, - } = msg; - - let res = worker.on_parachain_blocks_received( - &runtime_handle, - &erasure_network, - &mut sender, - blocks, - ); - - let _ = result.send(res); - Ok(()) - } - WorkerMsg::Chunks(msg) => { - let Chunks { - candidate_hash, - chunks, - n_validators, - result, - } = msg; - - let res = worker.on_chunks( - candidate_hash, - chunks, - n_validators, - ); - - let _ = result.send(res); - Ok(()) - } - WorkerMsg::CandidatesFinalized(msg) => { - let CandidatesFinalized { relay_parent, included_candidates } = msg; - - worker.availability_store.candidates_finalized( - relay_parent, - included_candidates, - ) - } - WorkerMsg::MakeAvailable(msg) => { - let MakeAvailable { candidate_hash, available_data, result } = msg; - let res = worker.availability_store - .make_available(candidate_hash, available_data) - .map_err(|e| e.into()); - let _ = result.send(res); - Ok(()) - } - #[cfg(test)] - WorkerMsg::WithWorker(with_worker) => { - (with_worker.0)(&mut worker); - Ok(()) - } - }; - - if let Err(_) = res { - warn!(target: LOG_TARGET, "An error occured while processing a message"); - } - } - - }; - - runtime.spawn(select(process_notification.boxed(), exit.clone()).map(drop)); - runtime.block_on(exit); - - info!(target: LOG_TARGET, "Availability worker exiting"); - - Ok(()) - }); - - WorkerHandle { - thread: Some(handle), - sender, - exit_signal: Some(signal), - } - } -} - -/// Implementor of the [`BlockImport`] trait. -/// -/// Used to embed `availability-store` logic into the block imporing pipeline. -/// -/// [`BlockImport`]: https://substrate.dev/rustdocs/v1.0/substrate_consensus_common/trait.BlockImport.html -pub struct AvailabilityBlockImport { - inner: I, - client: Arc
<P>
, - keystore: KeyStorePtr, - to_worker: mpsc::UnboundedSender, - exit_signal: AbortHandle, -} - -impl Drop for AvailabilityBlockImport { - fn drop(&mut self) { - self.exit_signal.abort(); - } -} - -impl BlockImport for AvailabilityBlockImport where - I: BlockImport> + Send + Sync, - I::Error: Into, - P: ProvideRuntimeApi + ProvideCache, - P::Api: ParachainHost, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - sp_api::StateBackendFor: sp_api::StateBackend -{ - type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; - - fn import_block( - &mut self, - block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - trace!( - target: LOG_TARGET, - "Importing block #{}, ({})", - block.header.number(), - block.post_hash(), - ); - - if let Some(ref extrinsics) = block.body { - let parent_id = BlockId::hash(*block.header.parent_hash()); - // Extract our local position i from the validator set of the parent. - let validators = self.client.runtime_api().validators(&parent_id) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))?; - - let our_id = self.our_id(&validators); - - // Use a runtime API to extract all included erasure-roots from the imported block. - let candidates = fetch_candidates(&*self.client, extrinsics.clone(), &parent_id) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))?; - - match candidates { - Some(candidates) => { - match our_id { - Some(our_id) => { - trace!( - target: LOG_TARGET, - "Our validator id is {}, the candidates included are {:?}", - our_id, - candidates, - ); - - let (s, _) = oneshot::channel(); - - // Inform the worker about the included parachain blocks. - let blocks = candidates - .into_iter() - .map(|c| IncludedParachainBlock { - candidate: c, - available_data: None, - }) - .collect(); - - let msg = WorkerMsg::IncludedParachainBlocks(IncludedParachainBlocks { - blocks, - result: s, - }); - - let _ = self.to_worker.unbounded_send(msg); - } - None => (), - } - } - None => { - trace!( - target: LOG_TARGET, - "No parachain heads were included in block {}", block.header.hash() - ); - }, - } - } - - self.inner.import_block(block, new_cache).map_err(Into::into) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block).map_err(Into::into) - } -} - -impl AvailabilityBlockImport { - pub(crate) fn new( - client: Arc
<P>
, - block_import: I, - spawner: impl Spawn, - keystore: KeyStorePtr, - to_worker: mpsc::UnboundedSender, - ) -> Self - where - P: ProvideRuntimeApi + BlockBackend + BlockchainEvents + Send + Sync + 'static, - P::Api: ParachainHost, - P::Api: ApiExt, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - sp_api::StateBackendFor: sp_api::StateBackend>, - { - // This is not the right place to spawn the finality future, - // it would be more appropriate to spawn it in the `start` method of the `Worker`. - // However, this would make the type of the `Worker` and the `Store` itself - // dependent on the types of client and executor, which would prove - // not not so handy in the testing code. - let (prune_available, exit_signal) = future::abortable(prune_unneeded_availability( - client.clone(), - to_worker.clone(), - )); - - if let Err(_) = spawner.spawn(prune_available.map(drop)) { - error!(target: LOG_TARGET, "Failed to spawn availability pruning task"); - } - - AvailabilityBlockImport { - client, - inner: block_import, - to_worker, - keystore, - exit_signal, - } - } - - fn our_id(&self, validators: &[ValidatorId]) -> Option { - let keystore = self.keystore.read(); - validators - .iter() - .enumerate() - .find_map(|(i, v)| { - keystore.key_pair::(&v).map(|_| i as u32).ok() - }) - } -} - - -#[cfg(test)] -mod tests { - use super::*; - use futures::channel::oneshot; - use std::sync::Arc; - use std::pin::Pin; - use tokio::runtime::Runtime; - use parking_lot::Mutex; - use crate::store::AwaitedFrontierEntry; - - #[derive(Default, Clone)] - struct TestErasureNetwork { - chunk_receivers: Arc - >>>, - } - - impl TestErasureNetwork { - // adds a receiver. this returns a sender for the erasure-chunk - // along with an exit future that fires when the erasure chunk has - // been fully-processed - fn add_receiver(&self, candidate_hash: Hash, index: u32) - -> oneshot::Sender - { - let (sender, receiver) = oneshot::channel(); - self.chunk_receivers.lock().insert((candidate_hash, index), receiver); - sender - } - } - - impl ErasureNetworking for TestErasureNetwork { - type Error = String; - - fn fetch_erasure_chunk(&self, candidate_hash: &Hash, index: u32) - -> Pin> + Send>> - { - match self.chunk_receivers.lock().remove(&(*candidate_hash, index)) { - Some(receiver) => receiver.then(|x| match x { - Ok(x) => future::ready(Ok(x)).left_future(), - Err(_) => future::pending().right_future(), - }).boxed(), - None => future::pending().boxed(), - } - } - - fn distribute_erasure_chunk( - &self, - _candidate_hash: Hash, - _chunk: ErasureChunk - ) {} - } - - // This test tests that as soon as the worker receives info about new parachain blocks - // included it registers gossip listeners for it's own chunks. Upon receiving the awaited - // chunk messages the corresponding listeners are deregistered and these chunks are removed - // from the awaited chunks set. - #[test] - fn receiving_gossip_chunk_removes_from_frontier() { - let mut runtime = Runtime::new().unwrap(); - let relay_parent = [1; 32].into(); - let local_id = 2; - let n_validators = 4; - - let store = Store::new_in_memory(); - - let mut candidate = AbridgedCandidateReceipt::default(); - - candidate.relay_parent = relay_parent; - let candidate_hash = candidate.hash(); - - // Tell the store our validator's position and the number of validators at given point. 
- store.note_validator_index_and_n_validators(&relay_parent, local_id, n_validators).unwrap(); - - let network = TestErasureNetwork::default(); - let chunk_sender = network.add_receiver(candidate_hash, local_id); - - // At this point we shouldn't be waiting for any chunks. - assert!(store.awaited_chunks().is_none()); - - let (s, r) = oneshot::channel(); - - let msg = WorkerMsg::IncludedParachainBlocks(IncludedParachainBlocks { - blocks: vec![IncludedParachainBlock { - candidate, - available_data: None, - }], - result: s, - }); - - let handle = Worker::start(store.clone(), network); - - // Tell the worker that the new blocks have been included into the relay chain. - // This should trigger the registration of gossip message listeners for the - // chunk topics. - handle.sender.unbounded_send(msg).unwrap(); - - runtime.block_on(r).unwrap().unwrap(); - - // Make sure that at this point we are waiting for the appropriate chunk. - assert_eq!( - store.awaited_chunks().unwrap(), - vec![AwaitedFrontierEntry { - relay_parent, - candidate_hash, - validator_index: local_id, - }].into_iter().collect() - ); - - // Complete the chunk request. - chunk_sender.send(ErasureChunk { - chunk: vec![1, 2, 3], - index: local_id as u32, - proof: vec![], - }).unwrap(); - - // wait until worker thread has de-registered the listener for a - // particular chunk. - loop { - let (s, r) = oneshot::channel(); - handle.sender.unbounded_send(WorkerMsg::WithWorker(WithWorker(Box::new(move |worker| { - let key = ListeningKey { - candidate_hash, - index: local_id, - }; - - let is_waiting = worker.listening_for.contains_key(&key); - - s.send(!is_waiting).unwrap(); // tell the test thread `true` if we are not waiting. - })))).unwrap(); - - if runtime.block_on(r).unwrap() { - break - } - } - - // The awaited chunk has been received so at this point we no longer wait for any chunks. - assert_eq!(store.awaited_chunks().unwrap().len(), 0); - } - - #[test] - fn included_parachain_blocks_registers_listener() { - let mut runtime = Runtime::new().unwrap(); - let relay_parent = [1; 32].into(); - let erasure_root_1 = [2; 32].into(); - let erasure_root_2 = [3; 32].into(); - let pov_block_hash_1 = [4; 32].into(); - let pov_block_hash_2 = [5; 32].into(); - let local_id = 2; - let n_validators = 4; - - let mut candidate_1 = AbridgedCandidateReceipt::default(); - candidate_1.commitments.erasure_root = erasure_root_1; - candidate_1.pov_block_hash = pov_block_hash_1; - candidate_1.relay_parent = relay_parent; - let candidate_1_hash = candidate_1.hash(); - - let mut candidate_2 = AbridgedCandidateReceipt::default(); - candidate_2.commitments.erasure_root = erasure_root_2; - candidate_2.pov_block_hash = pov_block_hash_2; - candidate_2.relay_parent = relay_parent; - let candidate_2_hash = candidate_2.hash(); - - let store = Store::new_in_memory(); - - // Tell the store our validator's position and the number of validators at given point. - store.note_validator_index_and_n_validators(&relay_parent, local_id, n_validators).unwrap(); - - // Let the store know about the candidates - store.add_candidate(&candidate_1).unwrap(); - store.add_candidate(&candidate_2).unwrap(); - - // And let the store know about the chunk from the second candidate. 
- store.add_erasure_chunks( - n_validators, - &candidate_2_hash, - vec![ErasureChunk { - chunk: vec![1, 2, 3], - index: local_id, - proof: Vec::default(), - }], - ).unwrap(); - - let network = TestErasureNetwork::default(); - let _ = network.add_receiver(candidate_1_hash, local_id); - let _ = network.add_receiver(candidate_2_hash, local_id); - - let handle = Worker::start(store.clone(), network.clone()); - - { - let (s, r) = oneshot::channel(); - // Tell the worker to listen for chunks from candidate 2 (we alredy have a chunk from it). - let listen_msg_2 = WorkerMsg::IncludedParachainBlocks(IncludedParachainBlocks { - blocks: vec![IncludedParachainBlock { - candidate: candidate_2, - available_data: None, - }], - result: s, - }); - - handle.sender.unbounded_send(listen_msg_2).unwrap(); - - runtime.block_on(r).unwrap().unwrap(); - // The receiver for this chunk left intact => listener not registered. - assert!(network.chunk_receivers.lock().contains_key(&(candidate_2_hash, local_id))); - - // more directly: - let (s, r) = oneshot::channel(); - handle.sender.unbounded_send(WorkerMsg::WithWorker(WithWorker(Box::new(move |worker| { - let key = ListeningKey { - candidate_hash: candidate_2_hash, - index: local_id, - }; - let _ = s.send(worker.listening_for.contains_key(&key)); - })))).unwrap(); - - assert!(!runtime.block_on(r).unwrap()); - } - - { - let (s, r) = oneshot::channel(); - - // Tell the worker to listen for chunks from candidate 1. - // (we don't have a chunk from it yet). - let listen_msg_1 = WorkerMsg::IncludedParachainBlocks(IncludedParachainBlocks { - blocks: vec![IncludedParachainBlock { - candidate: candidate_1, - available_data: None, - }], - result: s, - }); - - handle.sender.unbounded_send(listen_msg_1).unwrap(); - runtime.block_on(r).unwrap().unwrap(); - - // The receiver taken => listener registered. 
- assert!(!network.chunk_receivers.lock().contains_key(&(candidate_1_hash, local_id))); - - - // more directly: - let (s, r) = oneshot::channel(); - handle.sender.unbounded_send(WorkerMsg::WithWorker(WithWorker(Box::new(move |worker| { - let key = ListeningKey { - candidate_hash: candidate_1_hash, - index: local_id, - }; - let _ = s.send(worker.listening_for.contains_key(&key)); - })))).unwrap(); - - assert!(runtime.block_on(r).unwrap()); - } - } -} diff --git a/cli/Cargo.toml b/cli/Cargo.toml index ff67b236c547a586fe52b7861d1e6dd5bb94b52b..99eb04e9dd6377a9286bf3cf918d796ad9ea36e1 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-cli" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] description = "Polkadot Relay-chain Client Node" edition = "2018" @@ -34,12 +34,15 @@ sc-service = { git = "https://github.com/paritytech/substrate", branch = "master wasm-bindgen = { version = "0.2.57", optional = true } wasm-bindgen-futures = { version = "0.4.7", optional = true } browser-utils = { package = "substrate-browser-utils", git = "https://github.com/paritytech/substrate", branch = "master", optional = true } +# this crate is used only to enable `trie-memory-tracker` feature +# see https://github.com/paritytech/substrate/pull/6745 +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [build-dependencies] substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] -default = [ "wasmtime", "db", "cli", "service-old" ] +default = [ "wasmtime", "db", "cli", "service-old", "trie-memory-tracker" ] wasmtime = [ "sc-cli/wasmtime" ] db = [ "service/db" ] cli = [ @@ -57,3 +60,4 @@ browser = [ ] runtime-benchmarks = [ "service/runtime-benchmarks" ] service-rewr = [ "service-new/full-node" ] +trie-memory-tracker = [ "sp-trie/memory-tracker" ] diff --git a/cli/src/browser.rs b/cli/src/browser.rs index 6f3a4000843ae1c2394681f32d8789469bf6cba2..d3523e92a60005ff305963cf7bff516e1a67341a 100644 --- a/cli/src/browser.rs +++ b/cli/src/browser.rs @@ -46,8 +46,7 @@ async fn start_inner(chain_spec: String, log_level: String) -> Result, - #[allow(missing_docs)] #[structopt(flatten)] pub run: RunCmd, } diff --git a/cli/src/command.rs b/cli/src/command.rs index 714e3c2ddacd14f0ab33f20cec42b52459cac8d8..964e13e6d3fd8a6e722cfc5ef342f635c30b5d18 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -19,8 +19,7 @@ use log::info; use service::{IdentifyVariant, self}; #[cfg(feature = "service-rewr")] use service_new::{IdentifyVariant, self as service}; -use sc_executor::NativeExecutionDispatch; -use sc_cli::{SubstrateCli, Result}; +use sc_cli::{SubstrateCli, Result, RuntimeVersion, Role}; use crate::cli::{Cli, Subcommand}; fn get_exec_name() -> Option { @@ -31,19 +30,19 @@ fn get_exec_name() -> Option { } impl SubstrateCli for Cli { - fn impl_name() -> &'static str { "Parity Polkadot" } + fn impl_name() -> String { "Parity Polkadot".into() } - fn impl_version() -> &'static str { env!("SUBSTRATE_CLI_IMPL_VERSION") } + fn impl_version() -> String { env!("SUBSTRATE_CLI_IMPL_VERSION").into() } - fn description() -> &'static str { env!("CARGO_PKG_DESCRIPTION") } + fn description() -> String { env!("CARGO_PKG_DESCRIPTION").into() } - fn author() -> &'static str { env!("CARGO_PKG_AUTHORS") } + fn author() -> String { env!("CARGO_PKG_AUTHORS").into() } - fn support_url() -> &'static str { "https://github.com/paritytech/polkadot/issues/new" } + fn support_url() 
-> String { "https://github.com/paritytech/polkadot/issues/new".into() } fn copyright_start_year() -> i32 { 2017 } - fn executable_name() -> &'static str { "polkadot" } + fn executable_name() -> String { "polkadot".into() } fn load_spec(&self, id: &str) -> std::result::Result, String> { let id = if id == "" { @@ -54,27 +53,47 @@ impl SubstrateCli for Cli { .unwrap_or("polkadot") } else { id }; Ok(match id { - "polkadot-dev" | "dev" => Box::new(service::chain_spec::polkadot_development_config()), - "polkadot-local" => Box::new(service::chain_spec::polkadot_local_testnet_config()), - "polkadot-staging" => Box::new(service::chain_spec::polkadot_staging_testnet_config()), - "kusama-dev" => Box::new(service::chain_spec::kusama_development_config()), - "kusama-local" => Box::new(service::chain_spec::kusama_local_testnet_config()), - "kusama-staging" => Box::new(service::chain_spec::kusama_staging_testnet_config()), + "polkadot-dev" | "dev" => Box::new(service::chain_spec::polkadot_development_config()?), + "polkadot-local" => Box::new(service::chain_spec::polkadot_local_testnet_config()?), + "polkadot-staging" => Box::new(service::chain_spec::polkadot_staging_testnet_config()?), + "kusama-dev" => Box::new(service::chain_spec::kusama_development_config()?), + "kusama-local" => Box::new(service::chain_spec::kusama_local_testnet_config()?), + "kusama-staging" => Box::new(service::chain_spec::kusama_staging_testnet_config()?), "polkadot" => Box::new(service::chain_spec::polkadot_config()?), "westend" => Box::new(service::chain_spec::westend_config()?), "kusama" => Box::new(service::chain_spec::kusama_config()?), - "westend-dev" => Box::new(service::chain_spec::westend_development_config()), - "westend-local" => Box::new(service::chain_spec::westend_local_testnet_config()), - "westend-staging" => Box::new(service::chain_spec::westend_staging_testnet_config()), - path if self.run.force_kusama => { - Box::new(service::KusamaChainSpec::from_json_file(std::path::PathBuf::from(path))?) + "westend-dev" => Box::new(service::chain_spec::westend_development_config()?), + "westend-local" => Box::new(service::chain_spec::westend_local_testnet_config()?), + "westend-staging" => Box::new(service::chain_spec::westend_staging_testnet_config()?), + path => { + let path = std::path::PathBuf::from(path); + + let starts_with = |prefix: &str| { + path.file_name().map(|f| f.to_str().map(|s| s.starts_with(&prefix))).flatten().unwrap_or(false) + }; + + // When `force_*` is given or the file name starts with the name of one of the known chains, + // we use the chain spec for the specific chain. + if self.run.force_kusama || starts_with("kusama") { + Box::new(service::KusamaChainSpec::from_json_file(path)?) + } else if self.run.force_westend || starts_with("westend") { + Box::new(service::WestendChainSpec::from_json_file(path)?) + } else { + Box::new(service::PolkadotChainSpec::from_json_file(path)?) + } }, - path if self.run.force_westend => { - Box::new(service::WestendChainSpec::from_json_file(std::path::PathBuf::from(path))?) - }, - path => Box::new(service::PolkadotChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) } + + fn native_runtime_version(spec: &Box) -> &'static RuntimeVersion { + if spec.is_kusama() { + &service::kusama_runtime::VERSION + } else if spec.is_westend() { + &service::westend_runtime::VERSION + } else { + &service::polkadot_runtime::VERSION + } + } } /// Parses polkadot specific CLI arguments and run the service. 
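// ---- Editor's note, not part of the original patch ----
// The `load_spec` fallback above picks a chain spec for a custom JSON file
// from its file-name prefix. A self-contained sketch of just that predicate
// (the example file name is hypothetical):
fn file_name_starts_with(path: &std::path::Path, prefix: &str) -> bool {
	path.file_name()
		.map(|f| f.to_str().map(|s| s.starts_with(prefix)))
		.flatten()
		.unwrap_or(false)
}
// A file called "kusama-custom.json" therefore loads as
// `service::KusamaChainSpec` even without `--force-kusama`; the `force_*`
// flags and the prefix match are alternatives, as the hunk above shows.
// ---- End editor's note ----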
@@ -97,8 +116,8 @@ pub fn run() -> Result<()> { match &cli.subcommand { None => { - let runtime = cli.create_runner(&cli.run.base)?; - let chain_spec = &runtime.config().chain_spec; + let runner = cli.create_runner(&cli.run.base)?; + let chain_spec = &runner.config().chain_spec; set_default_ss58_version(chain_spec); @@ -115,87 +134,47 @@ pub fn run() -> Result<()> { info!(" endorsed by the "); info!(" KUSAMA FOUNDATION "); info!("----------------------------"); - - runtime.run_node( - |config| { - service::kusama_new_light(config) - }, - |config| { - service::kusama_new_full( - config, - None, - None, - authority_discovery_enabled, - 6000, - grandpa_pause, - ).map(|(s, _, _)| s) - }, - service::KusamaExecutor::native_version().runtime_version - ) - } else if chain_spec.is_westend() { - runtime.run_node( - |config| { - service::westend_new_light(config) - }, - |config| { - service::westend_new_full( - config, - None, - None, - authority_discovery_enabled, - 6000, - grandpa_pause, - ).map(|(s, _, _)| s) - }, - service::WestendExecutor::native_version().runtime_version - ) - } else { - runtime.run_node( - |config| { - service::polkadot_new_light(config) - }, - |config| { - service::polkadot_new_full( - config, - None, - None, - authority_discovery_enabled, - 6000, - grandpa_pause, - ).map(|(s, _, _)| s) - }, - service::PolkadotExecutor::native_version().runtime_version - ) } + + runner.run_node_until_exit(|config| { + let role = config.role.clone(); + + match role { + Role::Light => service::build_light(config).map(|(task_manager, _)| task_manager), + _ => service::build_full( + config, + None, + authority_discovery_enabled, + grandpa_pause, + ).map(|r| r.0), + } + }) }, Some(Subcommand::Base(subcommand)) => { - let runtime = cli.create_runner(subcommand)?; - let chain_spec = &runtime.config().chain_spec; + let runner = cli.create_runner(subcommand)?; + let chain_spec = &runner.config().chain_spec; set_default_ss58_version(chain_spec); if chain_spec.is_kusama() { - runtime.run_subcommand(subcommand, |config| + runner.run_subcommand(subcommand, |config| service::new_chain_ops::< service::kusama_runtime::RuntimeApi, service::KusamaExecutor, - service::kusama_runtime::UncheckedExtrinsic, >(config) ) } else if chain_spec.is_westend() { - runtime.run_subcommand(subcommand, |config| + runner.run_subcommand(subcommand, |config| service::new_chain_ops::< service::westend_runtime::RuntimeApi, service::WestendExecutor, - service::westend_runtime::UncheckedExtrinsic, >(config) ) } else { - runtime.run_subcommand(subcommand, |config| + runner.run_subcommand(subcommand, |config| service::new_chain_ops::< service::polkadot_runtime::RuntimeApi, service::PolkadotExecutor, - service::polkadot_runtime::UncheckedExtrinsic, >(config) ) } @@ -212,21 +191,21 @@ pub fn run() -> Result<()> { } }, Some(Subcommand::Benchmark(cmd)) => { - let runtime = cli.create_runner(cmd)?; - let chain_spec = &runtime.config().chain_spec; + let runner = cli.create_runner(cmd)?; + let chain_spec = &runner.config().chain_spec; set_default_ss58_version(chain_spec); if chain_spec.is_kusama() { - runtime.sync_run(|config| { + runner.sync_run(|config| { cmd.run::(config) }) } else if chain_spec.is_westend() { - runtime.sync_run(|config| { + runner.sync_run(|config| { cmd.run::(config) }) } else { - runtime.sync_run(|config| { + runner.sync_run(|config| { cmd.run::(config) }) } diff --git a/cli/src/lib.rs b/cli/src/lib.rs index be2f3c6cd646444761e813d005ace4282ef52e19..385a24d364c8582065827054dcd6d52b88da26d8 100644 --- a/cli/src/lib.rs 
+++ b/cli/src/lib.rs @@ -28,14 +28,14 @@ mod command; #[cfg(not(feature = "service-rewr"))] pub use service::{ - AbstractService, ProvideRuntimeApi, CoreApi, ParachainHost, IdentifyVariant, + ProvideRuntimeApi, CoreApi, ParachainHost, IdentifyVariant, Block, self, RuntimeApiCollection, TFullClient }; #[cfg(feature = "service-rewr")] pub use service_new::{ self as service, - AbstractService, ProvideRuntimeApi, CoreApi, ParachainHost, IdentifyVariant, + ProvideRuntimeApi, CoreApi, ParachainHost, IdentifyVariant, Block, self, RuntimeApiCollection, TFullClient }; diff --git a/collator/Cargo.toml b/collator/Cargo.toml deleted file mode 100644 index 2c47f20dbb8f96a2fa1fb50f2b0331011633f225..0000000000000000000000000000000000000000 --- a/collator/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "polkadot-collator" -version = "0.8.12" -authors = ["Parity Technologies "] -description = "Collator node implementation" -edition = "2018" - -[dependencies] -futures = "0.3.4" -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" } -polkadot-primitives = { path = "../primitives" } -polkadot-cli = { path = "../cli" } -polkadot-network = { path = "../network" } -polkadot-validation = { path = "../validation" } -polkadot-service = { path = "../service", optional = true} -polkadot-service-new = { path = "../node/service", optional = true } -log = "0.4.8" -tokio = "0.2.13" -futures-timer = "2.0" -codec = { package = "parity-scale-codec", version = "1.3.0" } - -[dev-dependencies] -keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["service-old"] -service-old = [ "polkadot-service" ] -service-rewr = [ "polkadot-service-new" ] diff --git a/collator/README.adoc b/collator/README.adoc deleted file mode 100644 index d302cd2af0fe4941a6e24c136bb0df9b2be07f35..0000000000000000000000000000000000000000 --- a/collator/README.adoc +++ /dev/null @@ -1,5 +0,0 @@ - -= Polkadot Collator - -placeholder -//TODO Write content :) (https://github.com/paritytech/polkadot/issues/159) diff --git a/collator/src/lib.rs b/collator/src/lib.rs deleted file mode 100644 index 063bdb1735c8acfb5037192dd0b98453eea077e2..0000000000000000000000000000000000000000 --- a/collator/src/lib.rs +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Collation node logic. -//! -//! A collator node lives on a distinct parachain and submits a proposal for -//! a state transition, along with a proof for its validity -//! (what we might call a witness or block data). -//! -//! One of collators' other roles is to route messages between chains. -//! Each parachain produces a list of "egress" posts of messages for each other -//! parachain on each block, for a total of N^2 lists all together. -//! -//! We will refer to the egress list at relay chain block X of parachain A with -//! destination B as egress(X)[A -> B] -//! -//! On every block, each parachain will be intended to route messages from some -//! subset of all the other parachains. (NOTE: in practice this is not done until PoC-3) -//! -//! Since the egress information is unique to every block, when routing from a -//! parachain a collator must gather all egress posts from that parachain -//! up to the last point in history that messages were successfully routed -//! from that parachain, accounting for relay chain blocks where no candidate -//! from the collator's parachain was produced. -//! -//! In the case that all parachains route to each other and a candidate for the -//! collator's parachain was included in the last relay chain block, the collator -//! only has to gather egress posts from other parachains one block back in relay -//! chain history. -//! -//! This crate defines traits which provide context necessary for collation logic -//! to be performed, as the collation logic itself. - -use std::collections::HashSet; -use std::fmt; -use std::sync::Arc; -use std::time::Duration; -use std::pin::Pin; - -use futures::{future, Future, Stream, FutureExt, StreamExt, task::Spawn}; -use log::warn; -use sc_client_api::{StateBackend, BlockchainEvents}; -use sp_blockchain::HeaderBackend; -use sp_core::Pair; -use polkadot_primitives::{ - BlockId, Hash, Block, - parachain::{ - self, BlockData, DutyRoster, HeadData, Id as ParaId, - PoVBlock, ValidatorId, CollatorPair, LocalValidationData, GlobalValidationSchedule, - } -}; -use polkadot_cli::{ - ProvideRuntimeApi, AbstractService, ParachainHost, IdentifyVariant, - service::{self, Role} -}; -pub use polkadot_cli::service::Configuration; -pub use polkadot_cli::Cli; -pub use polkadot_validation::SignedStatement; -pub use polkadot_primitives::parachain::CollatorId; -pub use sc_network::PeerId; -pub use service::RuntimeApiCollection; -pub use sc_cli::SubstrateCli; -use sp_api::{ConstructRuntimeApi, ApiExt, HashFor}; -#[cfg(not(feature = "service-rewr"))] -use polkadot_service::{FullNodeHandles, PolkadotClient}; -#[cfg(feature = "service-rewr")] -use polkadot_service_new::{ - self as polkadot_service, - Error as ServiceError, FullNodeHandles, PolkadotClient, -}; - -const COLLATION_TIMEOUT: Duration = Duration::from_secs(30); - -/// An abstraction over the `Network` with useful functions for a `Collator`. -pub trait Network: Send + Sync { - /// Create a `Stream` of checked statements for the given `relay_parent`. - /// - /// The returned stream will not terminate, so it is required to make sure that the stream is - /// dropped when it is not required anymore. 
Otherwise, it will stick around in memory - /// infinitely. - fn checked_statements(&self, relay_parent: Hash) -> Pin + Send>>; -} - -impl Network for polkadot_network::protocol::Service { - fn checked_statements(&self, relay_parent: Hash) -> Pin + Send>> { - polkadot_network::protocol::Service::checked_statements(self, relay_parent).boxed() - } -} - -/// Collation errors. -#[derive(Debug)] -pub enum Error { - /// Error on the relay-chain side of things. - Polkadot(String), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Polkadot(ref err) => write!(f, "Polkadot node error: {}", err), - } - } -} - -/// Something that can build a `ParachainContext`. -pub trait BuildParachainContext { - /// The parachain context produced by the `build` function. - type ParachainContext: self::ParachainContext; - - /// Build the `ParachainContext`. - fn build( - self, - client: Arc, - spawner: SP, - network: impl Network + Clone + 'static, - ) -> Result - where - Client: ProvideRuntimeApi + HeaderBackend + BlockchainEvents + Send + Sync + 'static, - Client::Api: RuntimeApiCollection, - >::StateBackend: StateBackend>, - Extrinsic: codec::Codec + Send + Sync + 'static, - SP: Spawn + Clone + Send + Sync + 'static; -} - -/// Parachain context needed for collation. -/// -/// This can be implemented through an externally attached service or a stub. -/// This is expected to be a lightweight, shared type like an Arc. -pub trait ParachainContext: Clone { - type ProduceCandidate: Future>; - - /// Produce a candidate, given the relay parent hash, the latest ingress queue information - /// and the last parachain head. - fn produce_candidate( - &mut self, - relay_parent: Hash, - global_validation: GlobalValidationSchedule, - local_validation: LocalValidationData, - ) -> Self::ProduceCandidate; -} - -/// Produce a candidate for the parachain, with given contexts, parent head, and signing key. -pub async fn collate
<P>
( - relay_parent: Hash, - local_id: ParaId, - global_validation: GlobalValidationSchedule, - local_validation_data: LocalValidationData, - mut para_context: P, - key: Arc, -) -> Option - where - P: ParachainContext, - P::ProduceCandidate: Send, -{ - let (block_data, head_data) = para_context.produce_candidate( - relay_parent, - global_validation, - local_validation_data, - ).await?; - - let pov_block = PoVBlock { - block_data, - }; - - let pov_block_hash = pov_block.hash(); - let signature = key.sign(¶chain::collator_signature_payload( - &relay_parent, - &local_id, - &pov_block_hash, - )); - - let info = parachain::CollationInfo { - parachain_index: local_id, - relay_parent, - collator: key.public(), - signature, - head_data, - pov_block_hash, - }; - - let collation = parachain::Collation { - info, - pov: pov_block, - }; - - Some(collation) -} - -#[cfg(feature = "service-rewr")] -fn build_collator_service( - _spawner: SP, - _handles: FullNodeHandles, - _client: Arc, - _para_id: ParaId, - _key: Arc, - _build_parachain_context: P, -) -> Result, polkadot_service::Error> - where - C: PolkadotClient< - service::Block, - service::TFullBackend, - R - > + 'static, - R: ConstructRuntimeApi + Sync + Send, - >::RuntimeApi: - sp_api::ApiExt< - service::Block, - StateBackend = as service::Backend>::State, - > - + RuntimeApiCollection< - Extrinsic, - StateBackend = as service::Backend>::State, - > - + Sync + Send, - P: BuildParachainContext, - P::ParachainContext: Send + 'static, - ::ProduceCandidate: Send, - Extrinsic: service::Codec + Send + Sync + 'static, - SP: Spawn + Clone + Send + Sync + 'static, -{ - Err("Collator is not functional with the new service yet".into()) -} - - -#[cfg(not(feature = "service-rewr"))] -fn build_collator_service( - spawner: SP, - handles: FullNodeHandles, - client: Arc, - para_id: ParaId, - key: Arc, - build_parachain_context: P, -) -> Result + Send + 'static, polkadot_service::Error> - where - C: PolkadotClient< - service::Block, - service::TFullBackend, - R - > + 'static, - R: ConstructRuntimeApi + Sync + Send, - >::RuntimeApi: - sp_api::ApiExt< - service::Block, - StateBackend = as service::Backend>::State, - > - + RuntimeApiCollection< - Extrinsic, - StateBackend = as service::Backend>::State, - > - + Sync + Send, - P: BuildParachainContext, - P::ParachainContext: Send + 'static, - ::ProduceCandidate: Send, - Extrinsic: service::Codec + Send + Sync + 'static, - SP: Spawn + Clone + Send + Sync + 'static, -{ - let polkadot_network = handles.polkadot_network - .ok_or_else(|| "Collator cannot run when Polkadot-specific networking has not been started")?; - - // We don't require this here, but we need to make sure that the validation service is started. - // This service makes sure the collator is joining the correct gossip topics and receives the appropiate - // messages. - handles.validation_service_handle - .ok_or_else(|| "Collator cannot run when validation networking has not been started")?; - - let parachain_context = match build_parachain_context.build( - client.clone(), - spawner, - polkadot_network.clone(), - ) { - Ok(ctx) => ctx, - Err(()) => { - return Err("Could not build the parachain context!".into()) - } - }; - - let work = async move { - let mut notification_stream = client.import_notification_stream(); - - while let Some(notification) = notification_stream.next().await { - macro_rules! 
try_fr { - ($e:expr) => { - match $e { - Ok(x) => x, - Err(e) => return future::Either::Left(future::err(Error::Polkadot( - format!("{:?}", e) - ))), - } - } - } - - let relay_parent = notification.hash; - let id = BlockId::hash(relay_parent); - - let network = polkadot_network.clone(); - let client = client.clone(); - let key = key.clone(); - let parachain_context = parachain_context.clone(); - - let work = future::lazy(move |_| { - let api = client.runtime_api(); - let global_validation = try_fr!(api.global_validation_schedule(&id)); - let local_validation = match try_fr!(api.local_validation_data(&id, para_id)) { - Some(local_validation) => local_validation, - None => return future::Either::Left(future::ok(())), - }; - - let validators = try_fr!(api.validators(&id)); - - let targets = compute_targets( - para_id, - validators.as_slice(), - try_fr!(api.duty_roster(&id)), - ); - - let collation_work = collate( - relay_parent, - para_id, - global_validation, - local_validation, - parachain_context, - key, - ).map(move |collation| { - match collation { - Some(collation) => network.distribute_collation(targets, collation), - None => log::trace!("Skipping collation as `collate` returned `None`"), - } - - Ok(()) - }); - - future::Either::Right(collation_work) - }); - - let deadlined = future::select( - work.then(|f| f).boxed(), - futures_timer::Delay::new(COLLATION_TIMEOUT) - ); - - let silenced = deadlined - .map(|either| { - if let future::Either::Right(_) = either { - warn!("Collation failure: timeout"); - } - }); - - let future = silenced.map(drop); - - tokio::spawn(future); - } - }.boxed(); - - Ok(work) -} - -/// Async function that will run the collator node with the given `RelayChainContext` and `ParachainContext` -/// built by the given `BuildParachainContext` and arguments to the underlying polkadot node. -pub async fn start_collator
<P>
( - build_parachain_context: P, - para_id: ParaId, - key: Arc, - config: Configuration, -) -> Result<(), polkadot_service::Error> -where - P: 'static + BuildParachainContext, - P::ParachainContext: Send + 'static, - ::ProduceCandidate: Send, -{ - if matches!(config.role, Role::Light) { - return Err( - polkadot_service::Error::Other("light nodes are unsupported as collator".into()) - .into()); - } - - if config.chain_spec.is_kusama() { - let (service, client, handlers) = service::kusama_new_full( - config, - Some((key.public(), para_id)), - None, - false, - 6000, - None, - )?; - let spawn_handle = service.spawn_task_handle(); - build_collator_service( - spawn_handle, - handlers, - client, - para_id, - key, - build_parachain_context - )?.await; - } else if config.chain_spec.is_westend() { - let (service, client, handlers) = service::westend_new_full( - config, - Some((key.public(), para_id)), - None, - false, - 6000, - None, - )?; - let spawn_handle = service.spawn_task_handle(); - build_collator_service( - spawn_handle, - handlers, - client, - para_id, - key, - build_parachain_context - )?.await; - } else { - let (service, client, handles) = service::polkadot_new_full( - config, - Some((key.public(), para_id)), - None, - false, - 6000, - None, - )?; - let spawn_handle = service.spawn_task_handle(); - build_collator_service( - spawn_handle, - handles, - client, - para_id, - key, - build_parachain_context, - )?.await; - } - - Ok(()) -} - -#[cfg(not(feature = "service-rewr"))] -fn compute_targets(para_id: ParaId, session_keys: &[ValidatorId], roster: DutyRoster) -> HashSet { - use polkadot_primitives::parachain::Chain; - - roster.validator_duty.iter().enumerate() - .filter(|&(_, c)| c == &Chain::Parachain(para_id)) - .filter_map(|(i, _)| session_keys.get(i)) - .cloned() - .collect() -} - -#[cfg(test)] -mod tests { - use super::*; - - #[derive(Clone)] - struct DummyParachainContext; - - impl ParachainContext for DummyParachainContext { - type ProduceCandidate = future::Ready>; - - fn produce_candidate( - &mut self, - _relay_parent: Hash, - _global: GlobalValidationSchedule, - _local_validation: LocalValidationData, - ) -> Self::ProduceCandidate { - // send messages right back. - future::ready(Some(( - BlockData(vec![1, 2, 3, 4, 5,]), - HeadData(vec![9, 9, 9]), - ))) - } - } - - struct BuildDummyParachainContext; - - impl BuildParachainContext for BuildDummyParachainContext { - type ParachainContext = DummyParachainContext; - - fn build( - self, - _: Arc, - _: SP, - _: impl Network + Clone + 'static, - ) -> Result { - Ok(DummyParachainContext) - } - } - - // Make sure that the future returned by `start_collator` implements `Send`. 
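The test below relies on a zero-cost compile-time trick: a generic function whose only purpose is its `Send` bound. A minimal self-contained illustration of the idiom (names here are illustrative, not from the crate):

```rust
// Compile-time assertion that a value is `Send`: the empty generic
// function does all the work through its bound. If the argument is
// not `Send`, compilation fails; nothing happens at runtime.
fn check_send<T: Send>(_: T) {}

async fn make_number() -> u32 { 42 }

fn main() {
    // Futures from `async fn` are `Send` as long as everything held
    // across an `.await` point is `Send`, so this compiles.
    check_send(make_number());
}
```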
- #[test] - fn start_collator_is_send() { - fn check_send(_: T) {} - - let cli = Cli::from_iter(&["-dev"]); - let task_executor = |_, _| unimplemented!(); - let config = cli.create_configuration(&cli.run.base, task_executor.into()).unwrap(); - - check_send(start_collator( - BuildDummyParachainContext, - 0.into(), - Arc::new(CollatorPair::generate().0), - config, - )); - } -} diff --git a/core-primitives/Cargo.toml b/core-primitives/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..cd4dac0f187d1cf4ced8ba938b48521fc3bf2669 --- /dev/null +++ b/core-primitives/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "polkadot-core-primitives" +version = "0.7.30" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = [ "derive" ] } + +[features] +default = [ "std" ] +std = [ + "sp-core/std", + "sp-runtime/std", + "sp-std/std", + "codec/std", +] diff --git a/core-primitives/src/lib.rs b/core-primitives/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ffb346467d9e53c0c64ebca4438a9260492fdd8e --- /dev/null +++ b/core-primitives/src/lib.rs @@ -0,0 +1,101 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Core Polkadot types. +//! +//! These core Polkadot types are used by the relay chain and the Parachains. + +use sp_runtime::{generic, MultiSignature, traits::{Verify, BlakeTwo256, IdentifyAccount}}; + +/// The block number type used by Polkadot. +/// 32-bits will allow for 136 years of blocks assuming 1 block per second. +pub type BlockNumber = u32; + +/// An instant or duration in time. +pub type Moment = u64; + +/// Alias to type for a signature for a transaction on the relay chain. This allows one of several +/// kinds of underlying crypto to be used, so isn't a fixed size when encoded. +pub type Signature = MultiSignature; + +/// Alias to the public key used for this chain, actually a `MultiSigner`. Like the signature, this +/// also isn't a fixed size when encoded, as different cryptos have different size public keys. +pub type AccountPublic = ::Signer; + +/// Alias to the opaque account ID type for this chain, actually a `AccountId32`. This is always +/// 32 bytes. +pub type AccountId = ::AccountId; + +/// The type for looking up accounts. We don't expect more than 4 billion of them. +pub type AccountIndex = u32; + +/// Identifier for a chain. 32-bit should be plenty. 
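The manifest and the `#![cfg_attr(not(feature = "std"), no_std)]` opener above follow the standard Substrate dual-mode convention: dependencies are pulled in with `default-features = false`, and the crate's own `std` feature re-enables them. A minimal sketch of a crate built that way (the module contents are hypothetical, only the gating pattern is from the diff):

```rust
// Compile without the standard library unless the `std` feature is on.
#![cfg_attr(not(feature = "std"), no_std)]

// In no_std builds, heap collections come from `alloc` instead of `std`.
#[cfg(not(feature = "std"))]
extern crate alloc;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
#[cfg(feature = "std")]
use std::vec::Vec;

pub fn double_all(xs: &[u32]) -> Vec<u32> {
    xs.iter().map(|x| x * 2).collect()
}
```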
+pub type ChainId = u32; + +/// A hash of some data used by the relay chain. +pub type Hash = sp_core::H256; + +/// Index of a transaction in the relay chain. 32-bit should be plenty. +pub type Nonce = u32; + +/// The balance of an account. +/// 128-bits (or 38 significant decimal figures) will allow for 10m currency (10^7) at a resolution +/// to all for one second's worth of an annualised 50% reward be paid to a unit holder (10^11 unit +/// denomination), or 10^18 total atomic units, to grow at 50%/year for 51 years (10^9 multiplier) +/// for an eventual total of 10^27 units (27 significant decimal figures). +/// We round denomination to 10^12 (12 sdf), and leave the other redundancy at the upper end so +/// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow. +pub type Balance = u128; + +/// Header type. +pub type Header = generic::Header; +/// Block type. +pub type Block = generic::Block; +/// Block ID. +pub type BlockId = generic::BlockId; + +/// Opaque, encoded, unchecked extrinsic. +pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; + +/// The information that goes alongside a transfer_into_parachain operation. Entirely opaque, it +/// will generally be used for identifying the reason for the transfer. Typically it will hold the +/// destination account to which the transfer should be credited. If still more information is +/// needed, then this should be a hash with the pre-image presented via an off-chain mechanism on +/// the parachain. +pub type Remark = [u8; 32]; + +/// These are special "control" messages that can be passed from the Relaychain to a parachain. +/// They should be handled by all parachains. +#[derive(codec::Encode, codec::Decode, Clone, sp_runtime::RuntimeDebug, PartialEq)] +pub enum DownwardMessage { + /// Some funds were transferred into the parachain's account. The hash is the identifier that + /// was given with the transfer. + TransferInto(AccountId, Balance, Remark), + /// An opaque blob of data. The relay chain must somehow know how to form this so that the + /// destination parachain does something sensible. + /// + /// NOTE: Be very careful not to allow users to place arbitrary size information in here. + Opaque(sp_std::vec::Vec), + /// XCMP message for the Parachain. + XCMPMessage(sp_std::vec::Vec), +} + +/// V1 primitives. 
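The headroom argument in the `Balance` comment above can be checked directly: 10^27 eventual atomic units fits comfortably in a `u128` (max ≈ 3.4 × 10^38), leaving room to multiply by a full 32-bit factor without overflow. A quick sanity check, runnable as-is:

```rust
fn main() {
    // Projected eventual total from the comment: 10^27 atomic units.
    let total: u128 = 10u128.pow(27);

    // Multiplying by the largest 32-bit factor must not overflow u128.
    let worst_case = total.checked_mul(u32::max_value() as u128);
    assert!(worst_case.is_some(), "32-bit factor * balance overflowed u128");

    // Indeed 10^27 * 2^32 ~= 4.3 * 10^36 < 2^128 ~= 3.4 * 10^38.
    println!("headroom ok: {:?}", worst_case);
}
```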
+pub mod v1 { + pub use super::*; +} diff --git a/erasure-coding/Cargo.toml b/erasure-coding/Cargo.toml index 6368a1787b11d71ce77dfb34e2358c388c03402f..1965150ca67abef411509179466d50f7ffb964d1 100644 --- a/erasure-coding/Cargo.toml +++ b/erasure-coding/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "polkadot-erasure-coding" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" [dependencies] primitives = { package = "polkadot-primitives", path = "../primitives" } reed_solomon = { package = "reed-solomon-erasure", version = "4.0.2"} -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master" } derive_more = "0.15.0" diff --git a/erasure-coding/src/lib.rs b/erasure-coding/src/lib.rs index 98a2776d8848a543b31e170e9cb3f56ff4906929..708a167d627675abaf3ec13befed01400bce219c 100644 --- a/erasure-coding/src/lib.rs +++ b/erasure-coding/src/lib.rs @@ -26,8 +26,8 @@ use codec::{Encode, Decode}; use reed_solomon::galois_16::{self, ReedSolomon}; -use primitives::{Hash as H256, BlakeTwo256, HashT}; -use primitives::parachain::AvailableData; +use primitives::v0::{self, Hash as H256, BlakeTwo256, HashT}; +use primitives::v1; use sp_core::Blake2Hasher; use trie::{EMPTY_PREFIX, MemoryDB, Trie, TrieMut, trie_types::{TrieDBMut, TrieDB}}; @@ -124,14 +124,32 @@ fn code_params(n_validators: usize) -> Result { }) } +/// Obtain erasure-coded chunks for v0 `AvailableData`, one for each validator. +/// +/// Works only up to 65536 validators, and `n_validators` must be non-zero. +pub fn obtain_chunks_v0(n_validators: usize, data: &v0::AvailableData) + -> Result>, Error> +{ + obtain_chunks(n_validators, data) +} + +/// Obtain erasure-coded chunks for v1 `AvailableData`, one for each validator. +/// +/// Works only up to 65536 validators, and `n_validators` must be non-zero. +pub fn obtain_chunks_v1(n_validators: usize, data: &v1::AvailableData) + -> Result>, Error> +{ + obtain_chunks(n_validators, data) +} + /// Obtain erasure-coded chunks, one for each validator. /// /// Works only up to 65536 validators, and `n_validators` must be non-zero. -pub fn obtain_chunks(n_validators: usize, available_data: &AvailableData) +fn obtain_chunks(n_validators: usize, data: &T) -> Result>, Error> { let params = code_params(n_validators)?; - let encoded = available_data.encode(); + let encoded = data.encode(); if encoded.is_empty() { return Err(Error::BadPayload); @@ -145,15 +163,42 @@ pub fn obtain_chunks(n_validators: usize, available_data: &AvailableData) Ok(shards.into_iter().map(|w| w.into_inner()).collect()) } -/// Reconstruct the block data from a set of chunks. +/// Reconstruct the v0 available data from a set of chunks. +/// +/// Provide an iterator containing chunk data and the corresponding index. +/// The indices of the present chunks must be indicated. If too few chunks +/// are provided, recovery is not possible. +/// +/// Works only up to 65536 validators, and `n_validators` must be non-zero. +pub fn reconstruct_v0<'a, I: 'a>(n_validators: usize, chunks: I) + -> Result + where I: IntoIterator +{ + reconstruct(n_validators, chunks) +} + +/// Reconstruct the v1 available data from a set of chunks. 
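The v0/v1 split above is a simple API-versioning pattern: the chunking logic is written once, generically over anything encodable, and kept private; the public surface is a pair of thin, concretely-typed wrappers so callers never see (or accidentally mix) the generic entry point. A reduced sketch of the shape, with a stand-in `Encode` trait (the real code bounds on `parity-scale-codec` traits):

```rust
// Stand-in for codec::Encode, just enough for the sketch.
trait Encode { fn encode(&self) -> Vec<u8>; }

struct AvailableDataV0(Vec<u8>);
struct AvailableDataV1(Vec<u8>);
impl Encode for AvailableDataV0 { fn encode(&self) -> Vec<u8> { self.0.clone() } }
impl Encode for AvailableDataV1 { fn encode(&self) -> Vec<u8> { self.0.clone() } }

// One private generic implementation...
fn obtain_chunks<T: Encode>(n_validators: usize, data: &T) -> Vec<Vec<u8>> {
    let encoded = data.encode();
    // (the real code erasure-codes `encoded` into n_validators shards)
    vec![encoded; n_validators]
}

// ...exposed through per-version monomorphic wrappers.
pub fn obtain_chunks_v0(n: usize, d: &AvailableDataV0) -> Vec<Vec<u8>> { obtain_chunks(n, d) }
pub fn obtain_chunks_v1(n: usize, d: &AvailableDataV1) -> Vec<Vec<u8>> { obtain_chunks(n, d) }
```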
+/// +/// Provide an iterator containing chunk data and the corresponding index. +/// The indices of the present chunks must be indicated. If too few chunks +/// are provided, recovery is not possible. +/// +/// Works only up to 65536 validators, and `n_validators` must be non-zero. +pub fn reconstruct_v1<'a, I: 'a>(n_validators: usize, chunks: I) + -> Result + where I: IntoIterator +{ + reconstruct(n_validators, chunks) +} + +/// Reconstruct decodable data from a set of chunks. /// /// Provide an iterator containing chunk data and the corresponding index. /// The indices of the present chunks must be indicated. If too few chunks /// are provided, recovery is not possible. /// /// Works only up to 65536 validators, and `n_validators` must be non-zero. -pub fn reconstruct<'a, I: 'a>(n_validators: usize, chunks: I) - -> Result +fn reconstruct<'a, I: 'a, T: Decode>(n_validators: usize, chunks: I) -> Result where I: IntoIterator { let params = code_params(n_validators)?; @@ -343,7 +388,7 @@ impl<'a, I: Iterator> codec::Input for ShardInput<'a, I> { #[cfg(test)] mod tests { use super::*; - use primitives::parachain::{BlockData, PoVBlock}; + use primitives::v0::{AvailableData, BlockData, PoVBlock}; #[test] fn field_order_is_right_size() { @@ -420,7 +465,7 @@ mod tests { assert_eq!(chunks.len(), 10); // any 4 chunks should work. - let reconstructed = reconstruct( + let reconstructed: AvailableData = reconstruct( 10, [ (&*chunks[1], 1), diff --git a/network/Cargo.toml b/network/Cargo.toml deleted file mode 100644 index f3ca97dcdadceb5da30f881b714dcce675a256c5..0000000000000000000000000000000000000000 --- a/network/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "polkadot-network" -version = "0.8.12" -authors = ["Parity Technologies "] -description = "Polkadot-specific networking protocol" -edition = "2018" - -[dependencies] -arrayvec = "0.4.12" -bytes = "0.5" -parking_lot = "0.9.0" -derive_more = "0.14.1" -av_store = { package = "polkadot-availability-store", path = "../availability-store" } -polkadot-validation = { path = "../validation" } -polkadot-primitives = { path = "../primitives" } -polkadot-erasure-coding = { path = "../erasure-coding" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-network-gossip = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -futures = "0.3.4" -log = "0.4.8" -exit-future = "0.2.0" -futures-timer = "2.0" -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -wasm-timer = "0.2.4" - -[dev-dependencies] -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/network/README.adoc b/network/README.adoc deleted file mode 100644 index 25b3e003d5b0a4688ddb73a2f59324802c5cc5b7..0000000000000000000000000000000000000000 --- a/network/README.adoc +++ /dev/null @@ -1,5 +0,0 @@ - -= Polkadot Network - -placeholder -//TODO Write content :) (https://github.com/paritytech/polkadot/issues/159) diff --git a/network/src/legacy/collator_pool.rs b/network/src/legacy/collator_pool.rs deleted file 
mode 100644 index a0c0a0458e908eeb396883aa80304a29a47095ab..0000000000000000000000000000000000000000 --- a/network/src/legacy/collator_pool.rs +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Bridge between the network and consensus service for getting collations to it. - -use codec::{Encode, Decode}; -use polkadot_primitives::Hash; -use polkadot_primitives::parachain::{CollatorId, Id as ParaId, Collation}; -use sc_network::PeerId; -use futures::channel::oneshot; - -use std::collections::hash_map::{HashMap, Entry}; -use std::time::Duration; -use wasm_timer::Instant; - -const COLLATION_LIFETIME: Duration = Duration::from_secs(60 * 5); - -/// The role of the collator. Whether they're the primary or backup for this parachain. -#[derive(PartialEq, Debug, Clone, Copy, Encode, Decode)] -pub enum Role { - /// Primary collators should send collations whenever it's time. - Primary = 0, - /// Backup collators should not. - Backup = 1, -} - -/// A maintenance action for the collator set. -#[derive(PartialEq, Debug)] -#[allow(dead_code)] -pub enum Action { - /// Disconnect the given collator. - Disconnect(CollatorId), - /// Give the collator a new role. - NewRole(CollatorId, Role), -} - -struct CollationSlot { - live_at: Instant, - entries: SlotEntries, -} - -impl CollationSlot { - fn blank_now() -> Self { - CollationSlot { - live_at: Instant::now(), - entries: SlotEntries::Blank, - } - } - - fn stay_alive(&self, now: Instant) -> bool { - self.live_at + COLLATION_LIFETIME > now - } -} - -#[derive(Debug)] -enum SlotEntries { - Blank, - // not queried yet - Pending(Vec), - // waiting for next to arrive. - Awaiting(Vec>), -} - -impl SlotEntries { - fn received_collation(&mut self, collation: Collation) { - *self = match std::mem::replace(self, SlotEntries::Blank) { - SlotEntries::Blank => SlotEntries::Pending(vec![collation]), - SlotEntries::Pending(mut cs) => { - cs.push(collation); - SlotEntries::Pending(cs) - } - SlotEntries::Awaiting(senders) => { - for sender in senders { - let _ = sender.send(collation.clone()); - } - - SlotEntries::Blank - } - }; - } - - fn await_with(&mut self, sender: oneshot::Sender) { - *self = match ::std::mem::replace(self, SlotEntries::Blank) { - SlotEntries::Blank => SlotEntries::Awaiting(vec![sender]), - SlotEntries::Awaiting(mut senders) => { - senders.push(sender); - SlotEntries::Awaiting(senders) - } - SlotEntries::Pending(mut cs) => { - let next_collation = cs.pop().expect("empty variant is always `Blank`; qed"); - let _ = sender.send(next_collation); - - if cs.is_empty() { - SlotEntries::Blank - } else { - SlotEntries::Pending(cs) - } - } - }; - } -} - -struct ParachainCollators { - primary: CollatorId, - backup: Vec, -} - -/// Manages connected collators and role assignments from the perspective of a validator. 
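`SlotEntries` above is a small rendezvous state machine: collations that arrive before anyone asked are buffered (`Pending`), and requests that arrive before any collation are parked (`Awaiting`); whichever side shows up second drains the other. A minimal stand-alone version of the same three-state machine, using `std::sync::mpsc` senders in place of the oneshot channels:

```rust
use std::mem;
use std::sync::mpsc::{channel, Sender};

enum Slot<T> {
    Blank,
    Pending(Vec<T>),          // items waiting for a consumer
    Awaiting(Vec<Sender<T>>), // consumers waiting for an item
}

impl<T: Clone> Slot<T> {
    fn received(&mut self, item: T) {
        *self = match mem::replace(self, Slot::Blank) {
            Slot::Blank => Slot::Pending(vec![item]),
            Slot::Pending(mut xs) => { xs.push(item); Slot::Pending(xs) }
            Slot::Awaiting(waiters) => {
                // Deliver to everyone who was already waiting.
                for w in waiters { let _ = w.send(item.clone()); }
                Slot::Blank
            }
        };
    }

    fn await_with(&mut self, tx: Sender<T>) {
        *self = match mem::replace(self, Slot::Blank) {
            Slot::Blank => Slot::Awaiting(vec![tx]),
            Slot::Awaiting(mut ws) => { ws.push(tx); Slot::Awaiting(ws) }
            Slot::Pending(mut xs) => {
                let _ = tx.send(xs.pop().expect("non-empty by construction"));
                if xs.is_empty() { Slot::Blank } else { Slot::Pending(xs) }
            }
        };
    }
}

fn main() {
    let mut slot = Slot::Blank;
    let (tx, rx) = channel();
    slot.await_with(tx); // consumer arrives first...
    slot.received(7u32); // ...item arrives second and is delivered
    assert_eq!(rx.recv().unwrap(), 7);
}
```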
-#[derive(Default)] -pub struct CollatorPool { - collators: HashMap, - parachain_collators: HashMap, - collations: HashMap<(Hash, ParaId), CollationSlot>, -} - -impl CollatorPool { - /// Create a new `CollatorPool` object. - pub fn new() -> Self { - CollatorPool { - collators: HashMap::new(), - parachain_collators: HashMap::new(), - collations: HashMap::new(), - } - } - - /// Call when a new collator is authenticated. Returns the role. - pub fn on_new_collator(&mut self, collator_id: CollatorId, para_id: ParaId, peer_id: PeerId) -> Role { - self.collators.insert(collator_id.clone(), (para_id, peer_id)); - match self.parachain_collators.entry(para_id) { - Entry::Vacant(vacant) => { - vacant.insert(ParachainCollators { - primary: collator_id, - backup: Vec::new(), - }); - - Role::Primary - }, - Entry::Occupied(mut occupied) => { - occupied.get_mut().backup.push(collator_id); - - Role::Backup - } - } - } - - /// Called when a collator disconnects. If it was the primary, returns a new primary for that - /// parachain. - pub fn on_disconnect(&mut self, collator_id: CollatorId) -> Option { - self.collators.remove(&collator_id).and_then(|(para_id, _)| match self.parachain_collators.entry(para_id) { - Entry::Vacant(_) => None, - Entry::Occupied(mut occ) => { - if occ.get().primary == collator_id { - if occ.get().backup.is_empty() { - occ.remove(); - None - } else { - let mut collators = occ.get_mut(); - collators.primary = collators.backup.pop().expect("backup non-empty; qed"); - Some(collators.primary.clone()) - } - } else { - let pos = occ.get().backup.iter().position(|a| a == &collator_id) - .expect("registered collator always present in backup if not primary; qed"); - - occ.get_mut().backup.remove(pos); - None - } - } - }) - } - - /// Called when a collation is received. - /// The collator should be registered for the parachain of the collation as a precondition of this function. - /// The collation should have been checked for integrity of signature before passing to this function. - pub fn on_collation(&mut self, collator_id: CollatorId, relay_parent: Hash, collation: Collation) { - log::debug!( - target: "collator-pool", "On collation from collator {} for relay parent {}", - collator_id, - relay_parent, - ); - - if let Some((para_id, _)) = self.collators.get(&collator_id) { - debug_assert_eq!(para_id, &collation.info.parachain_index); - - // TODO: punish if not primary? (https://github.com/paritytech/polkadot/issues/213) - - self.collations.entry((relay_parent, para_id.clone())) - .or_insert_with(CollationSlot::blank_now) - .entries - .received_collation(collation); - } - } - - /// Wait for a collation from a parachain. - pub fn await_collation(&mut self, relay_parent: Hash, para_id: ParaId, sender: oneshot::Sender) { - self.collations.entry((relay_parent, para_id)) - .or_insert_with(CollationSlot::blank_now) - .entries - .await_with(sender); - } - - /// Call periodically to perform collator set maintenance. - /// Returns a set of actions to perform on the network level. - pub fn maintain_peers(&mut self) -> Vec { - // TODO: rearrange periodically to new primary, evaluate based on latency etc. - // https://github.com/paritytech/polkadot/issues/214 - Vec::new() - } - - /// called when a block with given hash has been imported. - pub fn collect_garbage(&mut self, chain_head: Option<&Hash>) { - let now = Instant::now(); - self.collations.retain(|&(ref h, _), slot| chain_head != Some(h) && slot.stay_alive(now)); - } - - /// Convert the given `CollatorId` to a `PeerId`. 
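`collect_garbage` above is a time-based eviction pass: every slot records the `Instant` it went live, and a periodic `retain` drops anything past `COLLATION_LIFETIME` or made redundant by the just-imported chain head. The same pattern in isolation, with `u64` standing in for the hash keys:

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

const LIFETIME: Duration = Duration::from_secs(5 * 60);

struct Slot { live_at: Instant }

impl Slot {
    fn stay_alive(&self, now: Instant) -> bool {
        self.live_at + LIFETIME > now
    }
}

fn collect_garbage(slots: &mut HashMap<u64, Slot>, imported: Option<u64>) {
    let now = Instant::now();
    // Keep a slot only if it is still within its lifetime and was not
    // satisfied by the block that was just imported.
    slots.retain(|key, slot| imported != Some(*key) && slot.stay_alive(now));
}

fn main() {
    let mut slots = HashMap::new();
    slots.insert(1u64, Slot { live_at: Instant::now() });
    collect_garbage(&mut slots, Some(1));
    assert!(slots.is_empty()); // evicted: head 1 was imported
}
```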
- pub fn collator_id_to_peer_id(&self, collator_id: &CollatorId) -> Option<&PeerId> { - self.collators.get(collator_id).map(|ids| &ids.1) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_core::crypto::UncheckedInto; - use polkadot_primitives::parachain::{CollationInfo, BlockData, PoVBlock}; - use futures::executor::block_on; - - fn make_pov(block_data: Vec) -> PoVBlock { - PoVBlock { - block_data: BlockData(block_data), - } - } - - #[test] - fn disconnect_primary_gives_new_primary() { - let mut pool = CollatorPool::new(); - let para_id: ParaId = 5.into(); - let bad_primary: CollatorId = [0; 32].unchecked_into(); - let good_backup: CollatorId = [1; 32].unchecked_into(); - - assert_eq!(pool.on_new_collator(bad_primary.clone(), para_id.clone(), PeerId::random()), Role::Primary); - assert_eq!(pool.on_new_collator(good_backup.clone(), para_id.clone(), PeerId::random()), Role::Backup); - assert_eq!(pool.on_disconnect(bad_primary), Some(good_backup.clone())); - assert_eq!(pool.on_disconnect(good_backup), None); - } - - #[test] - fn disconnect_backup_removes_from_pool() { - let mut pool = CollatorPool::new(); - let para_id: ParaId = 5.into(); - let primary = [0; 32].unchecked_into(); - let backup: CollatorId = [1; 32].unchecked_into(); - - assert_eq!(pool.on_new_collator(primary, para_id.clone(), PeerId::random()), Role::Primary); - assert_eq!(pool.on_new_collator(backup.clone(), para_id.clone(), PeerId::random()), Role::Backup); - assert_eq!(pool.on_disconnect(backup), None); - assert!(pool.parachain_collators.get(¶_id).unwrap().backup.is_empty()); - } - - #[test] - fn await_before_collation() { - let mut pool = CollatorPool::new(); - let para_id: ParaId = 5.into(); - let peer_id = PeerId::random(); - let primary: CollatorId = [0; 32].unchecked_into(); - let relay_parent = [1; 32].into(); - - assert_eq!(pool.on_new_collator(primary.clone(), para_id.clone(), peer_id.clone()), Role::Primary); - let (tx1, rx1) = oneshot::channel(); - let (tx2, rx2) = oneshot::channel(); - pool.await_collation(relay_parent, para_id, tx1); - pool.await_collation(relay_parent, para_id, tx2); - let mut collation_info = CollationInfo::default(); - collation_info.parachain_index = para_id; - collation_info.collator = primary.clone().into(); - pool.on_collation(primary.clone(), relay_parent, Collation { - info: collation_info, - pov: make_pov(vec![4, 5, 6]), - }); - - block_on(rx1).unwrap(); - block_on(rx2).unwrap(); - assert_eq!(pool.collators.get(&primary).map(|ids| &ids.1).unwrap(), &peer_id); - } - - #[test] - fn collate_before_await() { - let mut pool = CollatorPool::new(); - let para_id: ParaId = 5.into(); - let primary: CollatorId = [0; 32].unchecked_into(); - let relay_parent = [1; 32].into(); - - assert_eq!(pool.on_new_collator(primary.clone(), para_id.clone(), PeerId::random()), Role::Primary); - - let mut collation_info = CollationInfo::default(); - collation_info.parachain_index = para_id; - collation_info.collator = primary.clone(); - pool.on_collation(primary.clone(), relay_parent, Collation { - info: collation_info, - pov: make_pov(vec![4, 5, 6]), - }); - - let (tx, rx) = oneshot::channel(); - pool.await_collation(relay_parent, para_id, tx); - block_on(rx).unwrap(); - } - - #[test] - fn slot_stay_alive() { - let slot = CollationSlot::blank_now(); - let now = slot.live_at; - - assert!(slot.stay_alive(now)); - assert!(slot.stay_alive(now + Duration::from_secs(10))); - assert!(!slot.stay_alive(now + COLLATION_LIFETIME)); - assert!(!slot.stay_alive(now + COLLATION_LIFETIME + 
Duration::from_secs(10))); - } -} diff --git a/network/src/legacy/gossip/attestation.rs b/network/src/legacy/gossip/attestation.rs deleted file mode 100644 index a47f75288bf40220bcc639003d30c38ab1e492fb..0000000000000000000000000000000000000000 --- a/network/src/legacy/gossip/attestation.rs +++ /dev/null @@ -1,347 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Gossip messages and structures for dealing with attestations (statements of -//! validity of invalidity on parachain candidates). -//! -//! This follows the same principles as other gossip modules (see parent -//! documentation for more details) by being aware of our current chain -//! heads and accepting only information relative to them. Attestations are localized to -//! relay chain head, so this is easily doable. -//! -//! This module also provides a filter, so we can only broadcast messages to -//! peers that are relevant to chain heads they have advertised. -//! -//! Furthermore, since attestations are bottlenecked by the `Candidate` statement, -//! we only accept attestations which are themselves `Candidate` messages, or reference -//! a `Candidate` we are aware of. Otherwise, it is possible we could be forced to -//! consider an infinite amount of attestations produced by a misbehaving validator. - -use sc_network_gossip::{ValidationResult as GossipValidationResult}; -use sc_network::ReputationChange; -use polkadot_validation::GenericStatement; -use polkadot_primitives::Hash; - -use std::collections::HashMap; - -use log::warn; - -use super::{ - cost, benefit, attestation_topic, MAX_CHAIN_HEADS, LeavesVec, - ChainContext, Known, MessageValidationData, GossipStatement, -}; - -/// Meta-data that we keep about a candidate in the `Knowledge`. -#[derive(Debug, Clone)] -pub(super) struct CandidateMeta { - /// The hash of the pov-block data. - pub(super) pov_block_hash: Hash, -} - -// knowledge about attestations on a single parent-hash. -#[derive(Default)] -pub(super) struct Knowledge { - candidates: HashMap, -} - -impl Knowledge { - // whether the peer is aware of a candidate with given hash. - fn is_aware_of(&self, candidate_hash: &Hash) -> bool { - self.candidates.contains_key(candidate_hash) - } - - // Get candidate meta data for a candidate by hash. - fn candidate_meta(&self, candidate_hash: &Hash) -> Option<&CandidateMeta> { - self.candidates.get(candidate_hash) - } - - // note that the peer is aware of a candidate with given hash. this should - // be done after observing an incoming candidate message via gossip. - fn note_aware(&mut self, candidate_hash: Hash, candidate_meta: CandidateMeta) { - self.candidates.insert(candidate_hash, candidate_meta); - } -} - -#[derive(Default)] -pub(super) struct PeerData { - live: HashMap, -} - -impl PeerData { - /// Update leaves, returning a list of which leaves are new. 
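The function that follows implements a common "reconcile my map against a new set" idiom: `retain` drops entries for leaves that disappeared, then `entry(..).or_insert_with(..)` adds the genuinely new ones, collecting them so the caller knows which topics to subscribe to. Reduced to plain `std` types:

```rust
use std::collections::HashMap;

/// Reconcile `live` against the current `leaves`, returning which
/// leaves are new (mirrors the shape of `PeerData::update_leaves`).
fn update_leaves(live: &mut HashMap<u64, ()>, leaves: &[u64]) -> Vec<u64> {
    let mut new = Vec::new();
    // Drop knowledge about leaves that are no longer live.
    live.retain(|k, _| leaves.contains(k));
    // Insert fresh entries, remembering which ones were unseen.
    for &leaf in leaves {
        live.entry(leaf).or_insert_with(|| {
            new.push(leaf);
        });
    }
    new
}

fn main() {
    let mut live = HashMap::new();
    assert_eq!(update_leaves(&mut live, &[1, 2]), vec![1, 2]);
    assert_eq!(update_leaves(&mut live, &[2, 3]), vec![3]); // 1 pruned, 3 new
    assert!(!live.contains_key(&1));
}
```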
- pub(super) fn update_leaves(&mut self, leaves: &LeavesVec) -> LeavesVec { - let mut new = LeavesVec::new(); - self.live.retain(|k, _| leaves.contains(k)); - for &leaf in leaves { - self.live.entry(leaf).or_insert_with(|| { - new.push(leaf); - Default::default() - }); - } - - new - } - - #[cfg(test)] - pub(super) fn note_aware_under_leaf( - &mut self, - relay_chain_leaf: &Hash, - candidate_hash: Hash, - meta: CandidateMeta, - ) { - if let Some(knowledge) = self.live.get_mut(relay_chain_leaf) { - knowledge.note_aware(candidate_hash, meta); - } - } - - pub(super) fn knowledge_at_mut(&mut self, parent_hash: &Hash) -> Option<&mut Knowledge> { - self.live.get_mut(parent_hash) - } -} - -/// An impartial view of what topics and data are valid based on attestation session data. -pub(super) struct View { - leaf_work: Vec<(Hash, LeafView)>, // hashes of the best DAG-leaves paired with validation data. - topics: HashMap, // maps topic hashes to block hashes. -} - -impl Default for View { - fn default() -> Self { - View { - leaf_work: Vec::with_capacity(MAX_CHAIN_HEADS), - topics: Default::default(), - } - } -} - -impl View { - fn leaf_view(&self, relay_chain_leaf: &Hash) -> Option<&LeafView> { - self.leaf_work.iter() - .find_map(|&(ref h, ref leaf)| if h == relay_chain_leaf { Some(leaf) } else { None } ) - } - - fn leaf_view_mut(&mut self, relay_chain_leaf: &Hash) -> Option<&mut LeafView> { - self.leaf_work.iter_mut() - .find_map(|&mut (ref h, ref mut leaf)| if h == relay_chain_leaf { Some(leaf) } else { None } ) - } - - /// Get our leaves-set. Guaranteed to have length <= MAX_CHAIN_HEADS. - pub(super) fn neighbor_info<'a>(&'a self) -> impl Iterator + 'a + Clone { - self.leaf_work.iter().take(MAX_CHAIN_HEADS).map(|(p, _)| p.clone()) - } - - /// Note new leaf in our local view and validation data necessary to check signatures - /// of statements issued under this leaf. - /// - /// This will be pruned later on a call to `prune_old_leaves`, when this leaf - /// is not a leaf anymore. - pub(super) fn new_local_leaf( - &mut self, - validation_data: MessageValidationData, - ) { - let relay_chain_leaf = validation_data.signing_context.parent_hash.clone(); - self.leaf_work.push(( - validation_data.signing_context.parent_hash.clone(), - LeafView { - validation_data, - knowledge: Default::default(), - }, - )); - self.topics.insert(attestation_topic(relay_chain_leaf), relay_chain_leaf); - self.topics.insert(super::pov_block_topic(relay_chain_leaf), relay_chain_leaf); - } - - /// Prune old leaf-work that fails the leaf predicate. - pub(super) fn prune_old_leaves bool>(&mut self, is_leaf: F) { - let leaf_work = &mut self.leaf_work; - leaf_work.retain(|&(ref relay_chain_leaf, _)| is_leaf(relay_chain_leaf)); - self.topics.retain(|_, v| leaf_work.iter().find(|(p, _)| p == v).is_some()); - } - - /// Whether a message topic is considered live relative to our view. non-live - /// topics do not pertain to our perceived leaves, and are uninteresting to us. - pub(super) fn is_topic_live(&self, topic: &Hash) -> bool { - self.topics.contains_key(topic) - } - - /// The relay-chain block hash corresponding to a topic. 
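`new_local_leaf` and `prune_old_leaves` above keep two structures in lock-step: the list of live leaves and a topic-to-leaf index, with two topics registered per leaf; pruning retains a topic only while its leaf survives. A compact sketch of that invariant, with `u64` stand-ins for hashes and hypothetical topic derivation (the real code hashes `parent_hash ++ suffix` with BlakeTwo256):

```rust
use std::collections::HashMap;

struct View {
    leaves: Vec<u64>,
    topics: HashMap<u64, u64>, // topic -> leaf it belongs to
}

// Hypothetical stand-ins for attestation_topic / pov_block_topic.
fn attestation_topic(leaf: u64) -> u64 { leaf.wrapping_mul(2) }
fn pov_block_topic(leaf: u64) -> u64 { leaf.wrapping_mul(2) + 1 }

impl View {
    fn new_local_leaf(&mut self, leaf: u64) {
        self.leaves.push(leaf);
        self.topics.insert(attestation_topic(leaf), leaf);
        self.topics.insert(pov_block_topic(leaf), leaf);
    }

    fn prune_old_leaves(&mut self, is_leaf: impl Fn(&u64) -> bool) {
        let leaves = &mut self.leaves;
        leaves.retain(|l| is_leaf(l));
        // A topic stays live only while its leaf does.
        self.topics.retain(|_, leaf| leaves.contains(leaf));
    }
}

fn main() {
    let mut view = View { leaves: Vec::new(), topics: HashMap::new() };
    view.new_local_leaf(10);
    view.new_local_leaf(11);
    view.prune_old_leaves(|l| *l == 11);
    assert_eq!(view.topics.len(), 2); // only leaf 11's topics remain
}
```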
- pub(super) fn topic_block(&self, topic: &Hash) -> Option<&Hash> { - self.topics.get(topic) - } - - #[cfg(test)] - pub(super) fn note_aware_under_leaf( - &mut self, - relay_chain_leaf: &Hash, - candidate_hash: Hash, - meta: CandidateMeta, - ) { - if let Some(view) = self.leaf_view_mut(relay_chain_leaf) { - view.knowledge.note_aware(candidate_hash, meta); - } - } - - /// Validate the signature on an attestation statement of some kind. Should be done before - /// any repropagation of that statement. - pub(super) fn validate_statement_signature( - &mut self, - message: GossipStatement, - chain: &C, - ) - -> (GossipValidationResult, ReputationChange) - { - // message must reference one of our chain heads and - // if message is not a `Candidate` we should have the candidate available - // in `attestation_view`. - match self.leaf_view(&message.relay_chain_leaf) { - None => { - let cost = match chain.is_known(&message.relay_chain_leaf) { - Some(Known::Leaf) => { - warn!( - target: "network", - "Leaf block {} not considered live for attestation", - message.relay_chain_leaf, - ); - cost::NONE - } - Some(Known::Old) => cost::PAST_MESSAGE, - _ => cost::FUTURE_MESSAGE, - }; - - (GossipValidationResult::Discard, cost) - } - Some(view) => { - // first check that we are capable of receiving this message - // in a DoS-proof manner. - let benefit = match message.signed_statement.statement { - GenericStatement::Candidate(_) => benefit::NEW_CANDIDATE, - GenericStatement::Valid(ref h) | GenericStatement::Invalid(ref h) => { - if !view.knowledge.is_aware_of(h) { - let cost = cost::ATTESTATION_NO_CANDIDATE; - return (GossipValidationResult::Discard, cost); - } - - benefit::NEW_ATTESTATION - } - }; - - // validate signature. - let res = view.validation_data.check_statement( - &message.signed_statement, - ); - - match res { - Ok(()) => { - let topic = attestation_topic(message.relay_chain_leaf); - (GossipValidationResult::ProcessAndKeep(topic), benefit) - } - Err(()) => (GossipValidationResult::Discard, cost::BAD_SIGNATURE), - } - } - } - } - - /// Validate a pov-block message. - pub(super) fn validate_pov_block_message( - &mut self, - message: &super::GossipPoVBlock, - chain: &C, - ) - -> (GossipValidationResult, ReputationChange) - { - match self.leaf_view(&message.relay_chain_leaf) { - None => { - let cost = match chain.is_known(&message.relay_chain_leaf) { - Some(Known::Leaf) => { - warn!( - target: "network", - "Leaf block {} not considered live for attestation", - message.relay_chain_leaf, - ); - cost::NONE - } - Some(Known::Old) => cost::POV_BLOCK_UNWANTED, - _ => cost::FUTURE_MESSAGE, - }; - - (GossipValidationResult::Discard, cost) - } - Some(view) => { - // we only accept pov-blocks for candidates that we have - // and consider active. - match view.knowledge.candidate_meta(&message.candidate_hash) { - None => (GossipValidationResult::Discard, cost::POV_BLOCK_UNWANTED), - Some(meta) => { - // check that the pov-block hash is actually correct. - if meta.pov_block_hash == message.pov_block.hash() { - let topic = super::pov_block_topic(message.relay_chain_leaf); - (GossipValidationResult::ProcessAndKeep(topic), benefit::NEW_POV_BLOCK) - } else { - (GossipValidationResult::Discard, cost::POV_BLOCK_BAD_DATA) - } - } - } - } - } - } - - /// whether it's allowed to send a statement to a peer with given knowledge - /// about the relay parent the statement refers to. 
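Both validation paths above share the same shape: a message referencing an unknown relay parent is discarded, but the reputation charge depends on *why* it is unknown (stale head, genuinely old block, or presumed-future spam). That policy is just a pure function from block status to a signed reputation delta; a reduced sketch, with the deltas taken from the `cost` module below:

```rust
enum Known { Leaf, Old, Bad }

/// Reputation to apply when a message references a relay parent we do
/// not consider live (mirrors the match in `validate_statement_signature`).
fn unknown_parent_cost(status: Option<Known>) -> i32 {
    match status {
        // A real leaf we simply have not registered yet: no penalty.
        Some(Known::Leaf) => 0,
        // Pointing at an old block: mild penalty for stale gossip.
        Some(Known::Old) => -30,
        // Unknown or bad: treat as (potential) future-message spam.
        _ => -100,
    }
}

fn main() {
    assert_eq!(unknown_parent_cost(Some(Known::Leaf)), 0);
    assert_eq!(unknown_parent_cost(Some(Known::Old)), -30);
    assert_eq!(unknown_parent_cost(Some(Known::Bad)), -100);
    assert_eq!(unknown_parent_cost(None), -100);
}
```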
- pub(super) fn statement_allowed( - &mut self, - statement: &GossipStatement, - peer_knowledge: &mut Knowledge, - ) -> bool { - let signed = &statement.signed_statement; - let relay_chain_leaf = &statement.relay_chain_leaf; - - match signed.statement { - GenericStatement::Valid(ref h) | GenericStatement::Invalid(ref h) => { - // `valid` and `invalid` statements can only be propagated after - // a candidate message is known by that peer. - peer_knowledge.is_aware_of(h) - } - GenericStatement::Candidate(ref c) => { - // if we are sending a `Candidate` message we should make sure that - // attestation_view and their_view reflects that we know about the candidate. - let hash = c.hash(); - let meta = CandidateMeta { pov_block_hash: c.pov_block_hash }; - peer_knowledge.note_aware(hash, meta.clone()); - if let Some(attestation_view) = self.leaf_view_mut(&relay_chain_leaf) { - attestation_view.knowledge.note_aware(hash, meta); - } - - // at this point, the peer hasn't seen the message or the candidate - // and has knowledge of the relevant relay-chain parent. - true - } - } - } - - /// whether it's allowed to send a pov-block to a peer. - pub(super) fn pov_block_allowed( - &mut self, - statement: &super::GossipPoVBlock, - peer_knowledge: &mut Knowledge, - ) -> bool { - peer_knowledge.is_aware_of(&statement.candidate_hash) - } -} - -struct LeafView { - validation_data: MessageValidationData, - knowledge: Knowledge, -} diff --git a/network/src/legacy/gossip/mod.rs b/network/src/legacy/gossip/mod.rs deleted file mode 100644 index 7e97eb688b15cd8cac4d84c100628ef76e7a6a41..0000000000000000000000000000000000000000 --- a/network/src/legacy/gossip/mod.rs +++ /dev/null @@ -1,1254 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Gossip messages and the message validator. -//! -//! At the moment, this module houses 2 gossip protocols central to Polkadot. -//! -//! The first is the attestation-gossip system, which aims to circulate parachain -//! candidate attestations by validators at leaves of the block-DAG. -//! -//! The second is the inter-chain message queue routing gossip, which aims to -//! circulate message queues between parachains, which remain un-routed as of -//! recent leaves. -//! -//! These gossip systems do not have any form of sybil-resistance in terms -//! of the nodes which can participate. It could be imposed e.g. by limiting only to -//! validators, but this would prevent message queues from getting into the hands -//! of collators and of attestations from getting into the hands of fishermen. -//! As such, we take certain precautions which allow arbitrary full nodes to -//! join the gossip graph, as well as validators (who are likely to be well-connected -//! amongst themselves). -//! -//! The first is the notion of a neighbor packet. This is a packet sent between -//! 
neighbors of the gossip graph to inform each other of their current protocol -//! state. As of this writing, for both attestation and message-routing gossip, -//! the only necessary information here is a (length-limited) set of perceived -//! leaves of the block-DAG. -//! -//! These leaves can be used to derive what information a node is willing to accept -//! There is typically an unbounded amount of possible "future" information relative to -//! any protocol state. For example, attestations or unrouted message queues from millions -//! of blocks after a known protocol state. The neighbor packet is meant to avoid being -//! spammed by illegitimate future information, while informing neighbors of when -//! previously-future and now current gossip messages would be accepted. -//! -//! Peers who send information which was not allowed under a recent neighbor packet -//! will be noted as non-beneficial to Substrate's peer-set management utility. - -use sp_runtime::traits::{BlakeTwo256, Hash as HashT}; -use sp_blockchain::Error as ClientError; -use sc_network::{ObservedRole, PeerId, ReputationChange}; -use sc_network::NetworkService; -use sc_network_gossip::{ - ValidationResult as GossipValidationResult, - ValidatorContext, MessageIntent, -}; -use polkadot_validation::{SignedStatement}; -use polkadot_primitives::{Block, Hash}; -use polkadot_primitives::parachain::{ - ParachainHost, ValidatorId, ErasureChunk as PrimitiveChunk, SigningContext, PoVBlock, -}; -use polkadot_erasure_coding::{self as erasure}; -use codec::{Decode, Encode}; -use sp_api::ProvideRuntimeApi; - -use std::collections::HashMap; -use std::sync::Arc; - -use arrayvec::ArrayVec; -use futures::prelude::*; -use parking_lot::{Mutex, RwLock}; - -use crate::legacy::{GossipMessageStream, GossipService}; - -use attestation::{View as AttestationView, PeerData as AttestationPeerData}; - -mod attestation; - -/// The engine ID of the polkadot attestation system. -pub const POLKADOT_ENGINE_ID: sp_runtime::ConsensusEngineId = *b"dot1"; -pub const POLKADOT_PROTOCOL_NAME: &[u8] = b"/polkadot/legacy/1"; - -// arbitrary; in practice this should not be more than 2. -pub(crate) const MAX_CHAIN_HEADS: usize = 5; - -/// Type alias for a bounded vector of leaves. -pub type LeavesVec = ArrayVec<[Hash; MAX_CHAIN_HEADS]>; - -mod benefit { - use sc_network::ReputationChange as Rep; - /// When a peer sends us a previously-unknown candidate statement. - pub const NEW_CANDIDATE: Rep = Rep::new(100, "Polkadot: New candidate"); - /// When a peer sends us a previously-unknown attestation. - pub const NEW_ATTESTATION: Rep = Rep::new(50, "Polkadot: New attestation"); - /// When a peer sends us a previously-unknown pov-block - pub const NEW_POV_BLOCK: Rep = Rep::new(150, "Polkadot: New PoV block"); - /// When a peer sends us a previously-unknown erasure chunk. - pub const NEW_ERASURE_CHUNK: Rep = Rep::new(10, "Polkadot: New erasure chunk"); -} - -mod cost { - use sc_network::ReputationChange as Rep; - /// No cost. This will not be reported. - pub const NONE: Rep = Rep::new(0, ""); - /// A peer sent us an attestation and we don't know the candidate. - pub const ATTESTATION_NO_CANDIDATE: Rep = Rep::new(-100, "Polkadot: No candidate"); - /// A peer sent us a pov-block and we don't know the candidate or the leaf. - pub const POV_BLOCK_UNWANTED: Rep = Rep::new(-500, "Polkadot: No candidate"); - /// A peer sent us a pov-block message with wrong data. 
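The `MAX_CHAIN_HEADS` bound above is enforced, not assumed: a neighbor packet advertising more heads than the limit is discarded and the sender penalised, which is what keeps the "future information" surface bounded. A toy validator for that rule (the reputation value mirrors `cost::BAD_NEIGHBOR_PACKET` below):

```rust
const MAX_CHAIN_HEADS: usize = 5;

enum Validation { Discard(i32), Accept }

// Mirrors the shape of `validate_neighbor_packet`: a cheap length
// check gates everything else.
fn validate_neighbor_packet(chain_heads: &[u64]) -> Validation {
    if chain_heads.len() > MAX_CHAIN_HEADS {
        Validation::Discard(-300) // cost::BAD_NEIGHBOR_PACKET
    } else {
        Validation::Accept
    }
}

fn main() {
    assert!(matches!(validate_neighbor_packet(&[1, 2, 3]), Validation::Accept));
    assert!(matches!(validate_neighbor_packet(&[0; 6]), Validation::Discard(-300)));
}
```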
- pub const POV_BLOCK_BAD_DATA: Rep = Rep::new(-1000, "Polkadot: Bad PoV-block data"); - /// A peer sent us a statement we consider in the future. - pub const FUTURE_MESSAGE: Rep = Rep::new(-100, "Polkadot: Future message"); - /// A peer sent us a statement from the past. - pub const PAST_MESSAGE: Rep = Rep::new(-30, "Polkadot: Past message"); - /// A peer sent us a malformed message. - pub const MALFORMED_MESSAGE: Rep = Rep::new(-500, "Polkadot: Malformed message"); - /// A peer sent us a wrongly signed message. - pub const BAD_SIGNATURE: Rep = Rep::new(-500, "Polkadot: Bad signature"); - /// A peer sent us a bad neighbor packet. - pub const BAD_NEIGHBOR_PACKET: Rep = Rep::new(-300, "Polkadot: Bad neighbor"); - /// A peer sent us an erasure chunk referring to a candidate that we are not aware of. - pub const ORPHANED_ERASURE_CHUNK: Rep = Rep::new(-10, "An erasure chunk from unknown candidate"); - /// A peer sent us an erasure chunk that does not match candidate's erasure root. - pub const ERASURE_CHUNK_WRONG_ROOT: Rep = Rep::new(-100, "Chunk doesn't match encoding root"); -} - -/// A gossip message. -#[derive(Encode, Decode, Clone, PartialEq)] -pub enum GossipMessage { - /// A packet sent to a neighbor but not relayed. - #[codec(index = "1")] - Neighbor(VersionedNeighborPacket), - /// An attestation-statement about the candidate. - /// Non-candidate statements should only be sent to peers who are aware of the candidate. - #[codec(index = "2")] - Statement(GossipStatement), - // TODO: https://github.com/paritytech/polkadot/issues/253 - /// A packet containing one of the erasure-coding chunks of one candidate. - #[codec(index = "3")] - ErasureChunk(ErasureChunkMessage), - /// A PoV-block. - #[codec(index = "255")] - PoVBlock(GossipPoVBlock), -} - -impl From for GossipMessage { - fn from(packet: NeighborPacket) -> Self { - GossipMessage::Neighbor(VersionedNeighborPacket::V1(packet)) - } -} - -impl From for GossipMessage { - fn from(stmt: GossipStatement) -> Self { - GossipMessage::Statement(stmt) - } -} - -impl From for GossipMessage { - fn from(pov: GossipPoVBlock) -> Self { - GossipMessage::PoVBlock(pov) - } -} - -/// A gossip message containing a statement. -#[derive(Encode, Decode, Clone, PartialEq)] -pub struct GossipStatement { - /// The block hash of the relay chain being referred to. In context, this should - /// be a leaf. - pub relay_chain_leaf: Hash, - /// The signed statement being gossipped. - pub signed_statement: SignedStatement, -} - -impl GossipStatement { - /// Create a new instance. - pub fn new(relay_chain_leaf: Hash, signed_statement: SignedStatement) -> Self { - Self { - relay_chain_leaf, - signed_statement, - } - } -} - -/// A gossip message containing one erasure chunk of a candidate block. -/// For each chunk of block erasure encoding one of this messages is constructed. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct ErasureChunkMessage { - /// The chunk itself. - pub chunk: PrimitiveChunk, - /// The hash of the candidate receipt of the block this chunk belongs to. - pub candidate_hash: Hash, -} - -impl From for GossipMessage { - fn from(chk: ErasureChunkMessage) -> Self { - GossipMessage::ErasureChunk(chk) - } -} - -/// A pov-block being gossipped. Should only be sent to peers aware of the candidate -/// referenced. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct GossipPoVBlock { - /// The block hash of the relay chain being referred to. In context, this should - /// be a leaf. 
- pub relay_chain_leaf: Hash, - /// The hash of some candidate localized to the same relay-chain leaf, whose - /// pov-block is this block. - pub candidate_hash: Hash, - /// The pov-block itself. - pub pov_block: PoVBlock, -} - -/// A versioned neighbor message. -#[derive(Encode, Decode, Clone, PartialEq)] -pub enum VersionedNeighborPacket { - #[codec(index = "1")] - V1(NeighborPacket), -} - -/// Contains information on which chain heads the peer is -/// accepting messages for. -#[derive(Encode, Decode, Clone, PartialEq)] -pub struct NeighborPacket { - chain_heads: Vec, -} - -/// whether a block is known. -#[derive(Clone, Copy, PartialEq)] -pub enum Known { - /// The block is a known leaf. - Leaf, - /// The block is known to be old. - Old, - /// The block is known to be bad. - Bad, -} - -/// Context to the underlying polkadot chain. -pub trait ChainContext: Send + Sync { - /// Provide a closure which is invoked for every unrouted queue hash at a given leaf. - fn leaf_unrouted_roots( - &self, - leaf: &Hash, - with_queue_root: &mut dyn FnMut(&Hash), - ) -> Result<(), ClientError>; - - /// whether a block is known. If it's not, returns `None`. - fn is_known(&self, block_hash: &Hash) -> Option; -} - -impl ChainContext for (F, P) where - F: Fn(&Hash) -> Option + Send + Sync, - P: Send + Sync + std::ops::Deref, - P::Target: ProvideRuntimeApi, - >::Api: ParachainHost, -{ - fn is_known(&self, block_hash: &Hash) -> Option { - (self.0)(block_hash) - } - - fn leaf_unrouted_roots( - &self, - _leaf: &Hash, - _with_queue_root: &mut dyn FnMut(&Hash), - ) -> Result<(), ClientError> { - Ok(()) - } -} - - -/// Compute the gossip topic for attestations on the given parent hash. -pub(crate) fn attestation_topic(parent_hash: Hash) -> Hash { - let mut v = parent_hash.as_ref().to_vec(); - v.extend(b"attestations"); - - BlakeTwo256::hash(&v[..]) -} - -/// Compute the gossip topic for PoV blocks based on the given parent hash. -pub(crate) fn pov_block_topic(parent_hash: Hash) -> Hash { - let mut v = parent_hash.as_ref().to_vec(); - v.extend(b"pov-blocks"); - - BlakeTwo256::hash(&v[..]) -} - -/// Register a gossip validator on the network service. -// NOTE: since RegisteredMessageValidator is meant to be a type-safe proof -// that we've actually done the registration, this should be the only way -// to construct it outside of tests. -pub fn register_validator( - service: Arc>, - chain: C, - executor: &impl futures::task::Spawn, -) -> RegisteredMessageValidator -{ - let s = service.clone(); - let report_handle = Box::new(move |peer: &PeerId, cost_benefit: ReputationChange| { - if cost_benefit.value != 0 { - s.report_peer(peer.clone(), cost_benefit); - } - }); - let validator = Arc::new(MessageValidator { - report_handle, - inner: RwLock::new(Inner { - peers: HashMap::new(), - attestation_view: Default::default(), - availability_store: None, - chain, - }) - }); - - let gossip_side = validator.clone(); - let gossip_engine = Arc::new(Mutex::new(sc_network_gossip::GossipEngine::new( - service.clone(), - POLKADOT_ENGINE_ID, - POLKADOT_PROTOCOL_NAME, - gossip_side, - ))); - - // Spawn gossip engine. - // - // Ideally this would not be spawned as an orphaned task, but polled by - // `RegisteredMessageValidator` which in turn would be polled by a `ValidationNetwork`. 
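`attestation_topic` and `pov_block_topic` above use plain domain separation: the same parent hash yields distinct topics because a protocol-specific suffix is appended before hashing. A runnable sketch of the idea, with `DefaultHasher` standing in for BlakeTwo256 purely so the example is self-contained:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

// Stand-in for BlakeTwo256; the real code hashes with BlakeTwo256.
fn hash_bytes(bytes: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    h.write(bytes);
    h.finish()
}

// Domain separation: hash(parent_hash ++ suffix).
fn topic(parent_hash: &[u8; 32], suffix: &[u8]) -> u64 {
    let mut v = parent_hash.to_vec();
    v.extend_from_slice(suffix);
    hash_bytes(&v)
}

fn main() {
    let parent = [1u8; 32];
    let att = topic(&parent, b"attestations");
    let pov = topic(&parent, b"pov-blocks");
    assert_ne!(att, pov); // one parent, two disjoint gossip topics
}
```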
- { - let gossip_engine = gossip_engine.clone(); - let fut = futures::future::poll_fn(move |cx| { - gossip_engine.lock().poll_unpin(cx) - }); - let spawn_res = executor.spawn_obj(futures::task::FutureObj::from(Box::new(fut))); - - // Note: we consider the chances of an error to spawn a background task almost null. - if spawn_res.is_err() { - log::error!(target: "polkadot-gossip", "Failed to spawn background task"); - } - } - - RegisteredMessageValidator { - inner: validator as _, - service: Some(service), - gossip_engine: Some(gossip_engine), - } -} - -#[derive(PartialEq)] -enum NewLeafAction { - // (who, message) - TargetedMessage(PeerId, GossipMessage), -} - -/// Actions to take after noting a new block-DAG leaf. -/// -/// This should be consumed by passing a consensus-gossip handle to `perform`. -#[must_use = "New chain-head gossip actions must be performed"] -pub struct NewLeafActions { - actions: Vec, -} - -impl NewLeafActions { - #[cfg(test)] - pub fn new() -> Self { - NewLeafActions { actions: Vec::new() } - } - - /// Perform the queued actions, feeding into gossip. - pub fn perform( - self, - gossip: &dyn crate::legacy::GossipService, - ) { - for action in self.actions { - match action { - NewLeafAction::TargetedMessage(who, message) - => gossip.send_message(who, message), - } - } - } -} - -/// A registered message validator. -/// -/// Create this using `register_validator`. -#[derive(Clone)] -pub struct RegisteredMessageValidator { - inner: Arc>, - // Note: this is always `Some` in real code and `None` in tests. - service: Option>>, - // Note: this is always `Some` in real code and `None` in tests. - gossip_engine: Option>>>, -} - -impl RegisteredMessageValidator { - /// Register an availabilty store the gossip service can query. - pub(crate) fn register_availability_store(&self, availability_store: av_store::Store) { - self.inner.inner.write().availability_store = Some(availability_store); - } - - /// Note that we perceive a new leaf of the block-DAG. We will notify our neighbors that - /// we now accept parachain candidate attestations and incoming message queues - /// relevant to this leaf. - pub(crate) fn new_local_leaf( - &self, - validation: MessageValidationData, - ) -> NewLeafActions { - // add an entry in attestation_view - // prune any entries from attestation_view which are no longer leaves - let mut inner = self.inner.inner.write(); - inner.attestation_view.new_local_leaf(validation); - - let mut actions = Vec::new(); - - { - let &mut Inner { - ref chain, - ref mut attestation_view, - .. 
- } = &mut *inner; - - attestation_view.prune_old_leaves(|hash| match chain.is_known(hash) { - Some(Known::Leaf) => true, - _ => false, - }); - } - - - // send neighbor packets to peers - inner.multicast_neighbor_packet( - |who, message| actions.push(NewLeafAction::TargetedMessage(who.clone(), message)) - ); - - NewLeafActions { actions } - } - - pub(crate) fn gossip_messages_for(&self, topic: Hash) -> GossipMessageStream { - let topic_stream = if let Some(gossip_engine) = self.gossip_engine.as_ref() { - gossip_engine.lock().messages_for(topic) - } else { - log::error!("Called gossip_messages_for on a test engine"); - futures::channel::mpsc::channel(0).1 - }; - - GossipMessageStream::new(topic_stream.boxed()) - } - - pub(crate) fn gossip_message(&self, topic: Hash, message: GossipMessage) { - if let Some(gossip_engine) = self.gossip_engine.as_ref() { - gossip_engine.lock().gossip_message( - topic, - message.encode(), - false, - ); - } else { - log::error!("Called gossip_message on a test engine"); - } - } - - pub(crate) fn send_message(&self, who: PeerId, message: GossipMessage) { - if let Some(gossip_engine) = self.gossip_engine.as_ref() { - gossip_engine.lock().send_message(vec![who], message.encode()); - } else { - log::error!("Called send_message on a test engine"); - } - } -} - -impl GossipService for RegisteredMessageValidator { - fn gossip_messages_for(&self, topic: Hash) -> GossipMessageStream { - RegisteredMessageValidator::gossip_messages_for(self, topic) - } - - fn gossip_message(&self, topic: Hash, message: GossipMessage) { - RegisteredMessageValidator::gossip_message(self, topic, message) - } - - fn send_message(&self, who: PeerId, message: GossipMessage) { - RegisteredMessageValidator::send_message(self, who, message) - } -} - -/// The data needed for validating gossip messages. -#[derive(Default)] -pub(crate) struct MessageValidationData { - /// The authorities' parachain validation keys at a block. - pub(crate) authorities: Vec, - /// The signing context. - pub(crate) signing_context: SigningContext, -} - -impl MessageValidationData { - // check a statement's signature. 
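`check_statement` below does two things in order: resolve the claimed sender index into the authority set (rejecting out-of-range indices outright), and only then pay for signature verification. A stripped-down version with a pluggable verifier; the `verify` closure is a stand-in for the real sr25519 check:

```rust
struct Statement { sender: u64, payload: Vec<u8>, signature: Vec<u8> }

fn check_statement(
    authorities: &[[u8; 32]],
    statement: &Statement,
    verify: impl Fn(&[u8; 32], &[u8], &[u8]) -> bool,
) -> Result<(), ()> {
    // Cheap structural check first: the sender index must be in range.
    let sender = authorities.get(statement.sender as usize).ok_or(())?;
    // Only then the (expensive) signature verification.
    if verify(sender, &statement.payload, &statement.signature) {
        Ok(())
    } else {
        Err(())
    }
}

fn main() {
    let authorities = [[7u8; 32]];
    let stmt = Statement { sender: 0, payload: vec![1], signature: vec![2] };
    // Toy verifier that accepts everything, for demonstration only.
    assert!(check_statement(&authorities, &stmt, |_, _, _| true).is_ok());
    let bad = Statement { sender: 9, ..stmt };
    assert!(check_statement(&authorities, &bad, |_, _, _| true).is_err());
}
```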
- fn check_statement(&self, statement: &SignedStatement) -> Result<(), ()> { - let sender = match self.authorities.get(statement.sender as usize) { - Some(val) => val, - None => return Err(()), - }; - - let good = self.authorities.contains(&sender) && - ::polkadot_validation::check_statement( - &statement.statement, - &statement.signature, - sender.clone(), - &self.signing_context, - ); - - if good { - Ok(()) - } else { - Err(()) - } - } -} - -#[derive(Default)] -struct PeerData { - attestation: AttestationPeerData, -} - -struct Inner { - peers: HashMap, - attestation_view: AttestationView, - availability_store: Option, - chain: C, -} - -impl Inner { - fn validate_neighbor_packet(&mut self, sender: &PeerId, packet: NeighborPacket) - -> (GossipValidationResult, ReputationChange, Vec) - { - let chain_heads = packet.chain_heads; - if chain_heads.len() > MAX_CHAIN_HEADS { - (GossipValidationResult::Discard, cost::BAD_NEIGHBOR_PACKET, Vec::new()) - } else { - let chain_heads: LeavesVec = chain_heads.into_iter().collect(); - let new_topics = if let Some(ref mut peer) = self.peers.get_mut(sender) { - let new_leaves = peer.attestation.update_leaves(&chain_heads); - let new_attestation_topics = new_leaves.iter().cloned().map(attestation_topic); - let new_pov_block_topics = new_leaves.iter().cloned().map(pov_block_topic); - - new_attestation_topics.chain(new_pov_block_topics).collect() - } else { - Vec::new() - }; - - (GossipValidationResult::Discard, cost::NONE, new_topics) - } - } - - fn validate_erasure_chunk_packet(&mut self, msg: ErasureChunkMessage) - -> (GossipValidationResult, ReputationChange) - { - if let Some(store) = &self.availability_store { - if let Some(receipt) = store.get_candidate(&msg.candidate_hash) { - let chunk_hash = erasure::branch_hash( - &receipt.commitments.erasure_root, - &msg.chunk.proof, - msg.chunk.index as usize - ); - - if chunk_hash != Ok(BlakeTwo256::hash(&msg.chunk.chunk)) { - ( - GossipValidationResult::Discard, - cost::ERASURE_CHUNK_WRONG_ROOT - ) - } else { - if let Some(awaited_chunks) = store.awaited_chunks() { - let frontier_entry = av_store::AwaitedFrontierEntry { - candidate_hash: msg.candidate_hash, - relay_parent: receipt.relay_parent, - validator_index: msg.chunk.index, - }; - if awaited_chunks.contains(&frontier_entry) { - let topic = crate::erasure_coding_topic( - &msg.candidate_hash - ); - - return ( - GossipValidationResult::ProcessAndKeep(topic), - benefit::NEW_ERASURE_CHUNK, - ); - } - } - (GossipValidationResult::Discard, cost::NONE) - } - } else { - (GossipValidationResult::Discard, cost::ORPHANED_ERASURE_CHUNK) - } - } else { - (GossipValidationResult::Discard, cost::NONE) - } - } - - fn multicast_neighbor_packet( - &self, - mut send_neighbor_packet: F, - ) { - let neighbor_packet = GossipMessage::from(NeighborPacket { - chain_heads: self.attestation_view.neighbor_info().collect(), - }); - - for peer in self.peers.keys() { - send_neighbor_packet(peer, neighbor_packet.clone()) - } - } -} - -/// An unregistered message validator. Register this with `register_validator`. 
-/// An unregistered message validator. Register this with `register_validator`.
-pub struct MessageValidator<C: ?Sized> {
-	report_handle: Box<dyn Fn(&PeerId, ReputationChange) + Send + Sync>,
-	inner: RwLock<Inner<C>>,
-}
-
-impl<C: ChainContext + ?Sized> MessageValidator<C> {
-	#[cfg(test)]
-	fn new_test(
-		chain: C,
-		report_handle: Box<dyn Fn(&PeerId, ReputationChange) + Send + Sync>,
-	) -> Self where C: Sized {
-		MessageValidator {
-			report_handle,
-			inner: RwLock::new(Inner {
-				peers: HashMap::new(),
-				attestation_view: Default::default(),
-				availability_store: None,
-				chain,
-			}),
-		}
-	}
-
-	fn report(&self, who: &PeerId, cost_benefit: ReputationChange) {
-		(self.report_handle)(who, cost_benefit)
-	}
-}
-
-impl<C: ChainContext + ?Sized> sc_network_gossip::Validator<Block> for MessageValidator<C> {
-	fn new_peer(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId, _role: ObservedRole) {
-		let mut inner = self.inner.write();
-		inner.peers.insert(who.clone(), PeerData::default());
-	}
-
-	fn peer_disconnected(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId) {
-		let mut inner = self.inner.write();
-		inner.peers.remove(who);
-	}
-
-	fn validate(&self, context: &mut dyn ValidatorContext<Block>, sender: &PeerId, data: &[u8])
-		-> GossipValidationResult<Hash>
-	{
-		let mut decode_data = data;
-		let (res, cost_benefit) = match GossipMessage::decode(&mut decode_data) {
-			Err(_) => (GossipValidationResult::Discard, cost::MALFORMED_MESSAGE),
-			Ok(GossipMessage::Neighbor(VersionedNeighborPacket::V1(packet))) => {
-				let (res, cb, topics) = self.inner.write().validate_neighbor_packet(sender, packet);
-				for new_topic in topics {
-					context.send_topic(sender, new_topic, false);
-				}
-				(res, cb)
-			}
-			Ok(GossipMessage::Statement(statement)) => {
-				let (res, cb) = {
-					let mut inner = self.inner.write();
-					let inner = &mut *inner;
-					inner.attestation_view.validate_statement_signature(statement, &inner.chain)
-				};
-
-				if let GossipValidationResult::ProcessAndKeep(ref topic) = res {
-					context.broadcast_message(topic.clone(), data.to_vec(), false);
-				}
-				(res, cb)
-			}
-			Ok(GossipMessage::PoVBlock(pov_block)) => {
-				let (res, cb) = {
-					let mut inner = self.inner.write();
-					let inner = &mut *inner;
-					inner.attestation_view.validate_pov_block_message(&pov_block, &inner.chain)
-				};
-
-				if let GossipValidationResult::ProcessAndKeep(ref topic) = res {
-					context.broadcast_message(topic.clone(), data.to_vec(), false);
-				}
-
-				(res, cb)
-			}
-			Ok(GossipMessage::ErasureChunk(chunk)) => {
-				self.inner.write().validate_erasure_chunk_packet(chunk)
-			}
-		};
-
-		self.report(sender, cost_benefit);
-		res
-	}
-
-	fn message_expired<'a>(&'a self) -> Box<dyn FnMut(Hash, &[u8]) -> bool + 'a> {
-		let inner = self.inner.read();
-
-		Box::new(move |topic, _data| {
-			// check that messages from this topic are considered live by one of our protocols.
-			// everything else is expired
-			let live = inner.attestation_view.is_topic_live(&topic);
-
-			!live // = expired
-		})
-	}
-
-	fn message_allowed<'a>(&'a self) -> Box<dyn FnMut(&PeerId, MessageIntent, &Hash, &[u8]) -> bool + 'a> {
-		let mut inner = self.inner.write();
-		Box::new(move |who, intent, topic, data| {
-			let &mut Inner {
-				ref mut peers,
-				ref mut attestation_view,
-				..
-			} = &mut *inner;
-
-			match intent {
-				MessageIntent::PeriodicRebroadcast => return false,
-				_ => {},
-			}
-
-			let attestation_head = attestation_view.topic_block(topic).map(|x| x.clone());
-			let peer = peers.get_mut(who);
-
-			match GossipMessage::decode(&mut &data[..]) {
-				Ok(GossipMessage::Statement(ref statement)) => {
-					// to allow statements, we need peer knowledge.
- let peer_knowledge = peer.and_then(move |p| attestation_head.map(|r| (p, r))) - .and_then(|(p, r)| p.attestation.knowledge_at_mut(&r).map(|k| (k, r))); - - peer_knowledge.map_or(false, |(knowledge, attestation_head)| { - statement.relay_chain_leaf == attestation_head - && attestation_view.statement_allowed( - statement, - knowledge, - ) - }) - } - Ok(GossipMessage::PoVBlock(ref pov_block)) => { - // to allow pov-blocks, we need peer knowledge. - let peer_knowledge = peer.and_then(move |p| attestation_head.map(|r| (p, r))) - .and_then(|(p, r)| p.attestation.knowledge_at_mut(&r).map(|k| (k, r))); - - peer_knowledge.map_or(false, |(knowledge, attestation_head)| { - pov_block.relay_chain_leaf == attestation_head - && attestation_view.pov_block_allowed( - pov_block, - knowledge, - ) - }) - } - _ => false, - } - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sc_network_gossip::Validator as ValidatorT; - use std::sync::mpsc; - use parking_lot::Mutex; - use polkadot_primitives::parachain::{AbridgedCandidateReceipt, BlockData}; - use sp_core::sr25519::Signature as Sr25519Signature; - use polkadot_validation::GenericStatement; - - #[derive(PartialEq, Clone, Debug)] - enum ContextEvent { - BroadcastTopic(Hash, bool), - BroadcastMessage(Hash, Vec, bool), - SendMessage(PeerId, Vec), - SendTopic(PeerId, Hash, bool), - } - - #[derive(Default)] - struct MockValidatorContext { - events: Vec, - } - - impl MockValidatorContext { - fn clear(&mut self) { - self.events.clear() - } - } - - impl sc_network_gossip::ValidatorContext for MockValidatorContext { - fn broadcast_topic(&mut self, topic: Hash, force: bool) { - self.events.push(ContextEvent::BroadcastTopic(topic, force)); - } - fn broadcast_message(&mut self, topic: Hash, message: Vec, force: bool) { - self.events.push(ContextEvent::BroadcastMessage(topic, message, force)); - } - fn send_message(&mut self, who: &PeerId, message: Vec) { - self.events.push(ContextEvent::SendMessage(who.clone(), message)); - } - fn send_topic(&mut self, who: &PeerId, topic: Hash, force: bool) { - self.events.push(ContextEvent::SendTopic(who.clone(), topic, force)); - } - } - - #[derive(Default)] - struct TestChainContext { - known_map: HashMap, - ingress_roots: HashMap>, - } - - impl ChainContext for TestChainContext { - fn is_known(&self, block_hash: &Hash) -> Option { - self.known_map.get(block_hash).map(|x| x.clone()) - } - - fn leaf_unrouted_roots(&self, leaf: &Hash, with_queue_root: &mut dyn FnMut(&Hash)) - -> Result<(), sp_blockchain::Error> - { - for root in self.ingress_roots.get(leaf).into_iter().flat_map(|roots| roots) { - with_queue_root(root) - } - - Ok(()) - } - } - - #[test] - fn attestation_message_allowed() { - let (tx, _rx) = mpsc::channel(); - let tx = Mutex::new(tx); - let report_handle = Box::new(move |peer: &PeerId, cb: ReputationChange| tx.lock().send((peer.clone(), cb)).unwrap()); - let validator = MessageValidator::new_test( - TestChainContext::default(), - report_handle, - ); - - let peer_a = PeerId::random(); - - let mut validator_context = MockValidatorContext::default(); - validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full); - assert!(validator_context.events.is_empty()); - validator_context.clear(); - - let hash_a = [1u8; 32].into(); - let hash_b = [2u8; 32].into(); - let hash_c = [3u8; 32].into(); - - let message = GossipMessage::from(NeighborPacket { - chain_heads: vec![hash_a, hash_b], - }).encode(); - let res = validator.validate( - &mut validator_context, - &peer_a, - &message[..], - ); - - match res { - 
GossipValidationResult::Discard => {}, - _ => panic!("wrong result"), - } - assert_eq!( - validator_context.events, - vec![ - ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_a), false), - ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_b), false), - - ContextEvent::SendTopic(peer_a.clone(), pov_block_topic(hash_a), false), - ContextEvent::SendTopic(peer_a.clone(), pov_block_topic(hash_b), false), - ], - ); - - validator_context.clear(); - - let candidate_receipt = AbridgedCandidateReceipt::default(); - let statement = GossipMessage::Statement(GossipStatement { - relay_chain_leaf: hash_a, - signed_statement: SignedStatement { - statement: GenericStatement::Candidate(candidate_receipt), - signature: Sr25519Signature([255u8; 64]).into(), - sender: 1, - } - }); - let encoded = statement.encode(); - - let topic_a = attestation_topic(hash_a); - let topic_b = attestation_topic(hash_b); - let topic_c = attestation_topic(hash_c); - - // topic_a is in all 3 views -> succeed - let mut validation_data = MessageValidationData::default(); - validation_data.signing_context.parent_hash = hash_a; - validator.inner.write().attestation_view.new_local_leaf(validation_data); - // topic_b is in the neighbor's view but not ours -> fail - // topic_c is not in either -> fail - - { - let mut message_allowed = validator.message_allowed(); - let intent = MessageIntent::Broadcast; - assert!(message_allowed(&peer_a, intent, &topic_a, &encoded)); - assert!(!message_allowed(&peer_a, intent, &topic_b, &encoded)); - assert!(!message_allowed(&peer_a, intent, &topic_c, &encoded)); - } - } - - #[test] - fn too_many_chain_heads_is_report() { - let (tx, rx) = mpsc::channel(); - let tx = Mutex::new(tx); - let report_handle = Box::new(move |peer: &PeerId, cb: ReputationChange| tx.lock().send((peer.clone(), cb)).unwrap()); - let validator = MessageValidator::new_test( - TestChainContext::default(), - report_handle, - ); - - let peer_a = PeerId::random(); - - let mut validator_context = MockValidatorContext::default(); - validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full); - assert!(validator_context.events.is_empty()); - validator_context.clear(); - - let chain_heads = (0..MAX_CHAIN_HEADS+1).map(|i| [i as u8; 32].into()).collect(); - - let message = GossipMessage::from(NeighborPacket { - chain_heads, - }).encode(); - let res = validator.validate( - &mut validator_context, - &peer_a, - &message[..], - ); - - match res { - GossipValidationResult::Discard => {}, - _ => panic!("wrong result"), - } - assert_eq!( - validator_context.events, - Vec::new(), - ); - - drop(validator); - - assert_eq!(rx.iter().collect::>(), vec![(peer_a, cost::BAD_NEIGHBOR_PACKET)]); - } - - #[test] - fn statement_only_sent_when_candidate_known() { - let (tx, _rx) = mpsc::channel(); - let tx = Mutex::new(tx); - let report_handle = Box::new(move |peer: &PeerId, cb: ReputationChange| tx.lock().send((peer.clone(), cb)).unwrap()); - let validator = MessageValidator::new_test( - TestChainContext::default(), - report_handle, - ); - - let peer_a = PeerId::random(); - - let mut validator_context = MockValidatorContext::default(); - validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full); - assert!(validator_context.events.is_empty()); - validator_context.clear(); - - let hash_a = [1u8; 32].into(); - let hash_b = [2u8; 32].into(); - - let message = GossipMessage::from(NeighborPacket { - chain_heads: vec![hash_a, hash_b], - }).encode(); - - { - let res = validator.validate( - &mut validator_context, - 
&peer_a, - &message[..], - ); - - match res { - GossipValidationResult::Discard => {}, - _ => panic!("wrong result"), - } - assert_eq!( - validator_context.events, - vec![ - ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_a), false), - ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_b), false), - - ContextEvent::SendTopic(peer_a.clone(), pov_block_topic(hash_a), false), - ContextEvent::SendTopic(peer_a.clone(), pov_block_topic(hash_b), false), - ], - ); - - validator_context.clear(); - } - - let mut validation_data = MessageValidationData::default(); - validation_data.signing_context.parent_hash = hash_a; - validator.inner.write().attestation_view.new_local_leaf(validation_data); - } - - #[test] - fn pov_block_message_allowed() { - let (tx, _rx) = mpsc::channel(); - let tx = Mutex::new(tx); - let report_handle = Box::new(move |peer: &PeerId, cb: ReputationChange| tx.lock().send((peer.clone(), cb)).unwrap()); - let validator = MessageValidator::new_test( - TestChainContext::default(), - report_handle, - ); - - let peer_a = PeerId::random(); - - let mut validator_context = MockValidatorContext::default(); - validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full); - assert!(validator_context.events.is_empty()); - validator_context.clear(); - - let hash_a = [1u8; 32].into(); - let hash_b = [2u8; 32].into(); - - let message = GossipMessage::from(NeighborPacket { - chain_heads: vec![hash_a, hash_b], - }).encode(); - - { - let res = validator.validate( - &mut validator_context, - &peer_a, - &message[..], - ); - - match res { - GossipValidationResult::Discard => {}, - _ => panic!("wrong result"), - } - assert_eq!( - validator_context.events, - vec![ - ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_a), false), - ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_b), false), - - ContextEvent::SendTopic(peer_a.clone(), pov_block_topic(hash_a), false), - ContextEvent::SendTopic(peer_a.clone(), pov_block_topic(hash_b), false), - ], - ); - - validator_context.clear(); - } - - let topic_a = pov_block_topic(hash_a); - let c_hash = [99u8; 32].into(); - - let pov_block = PoVBlock { - block_data: BlockData(vec![1, 2, 3]), - }; - - let pov_block_hash = pov_block.hash(); - - let message = GossipMessage::PoVBlock(GossipPoVBlock { - relay_chain_leaf: hash_a, - candidate_hash: c_hash, - pov_block, - }); - let encoded = message.encode(); - let mut validation_data = MessageValidationData::default(); - validation_data.signing_context.parent_hash = hash_a; - validator.inner.write().attestation_view.new_local_leaf(validation_data); - - { - let mut message_allowed = validator.message_allowed(); - assert!(!message_allowed(&peer_a, MessageIntent::Broadcast, &topic_a, &encoded[..])); - } - - validator - .inner - .write() - .peers - .get_mut(&peer_a) - .unwrap() - .attestation - .note_aware_under_leaf( - &hash_a, - c_hash, - attestation::CandidateMeta { pov_block_hash }, - ); - - { - let mut message_allowed = validator.message_allowed(); - assert!(message_allowed(&peer_a, MessageIntent::Broadcast, &topic_a, &encoded[..])); - } - } - - #[test] - fn validate_pov_block_message() { - let (tx, _rx) = mpsc::channel(); - let tx = Mutex::new(tx); - let report_handle = Box::new(move |peer: &PeerId, cb: ReputationChange| tx.lock().send((peer.clone(), cb)).unwrap()); - let validator = MessageValidator::new_test( - TestChainContext::default(), - report_handle, - ); - - let peer_a = PeerId::random(); - - let mut validator_context = 
MockValidatorContext::default(); - validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full); - assert!(validator_context.events.is_empty()); - validator_context.clear(); - - let hash_a = [1u8; 32].into(); - let hash_b = [2u8; 32].into(); - - let message = GossipMessage::from(NeighborPacket { - chain_heads: vec![hash_a, hash_b], - }).encode(); - - { - let res = validator.validate( - &mut validator_context, - &peer_a, - &message[..], - ); - - match res { - GossipValidationResult::Discard => {}, - _ => panic!("wrong result"), - } - assert_eq!( - validator_context.events, - vec![ - ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_a), false), - ContextEvent::SendTopic(peer_a.clone(), attestation_topic(hash_b), false), - - ContextEvent::SendTopic(peer_a.clone(), pov_block_topic(hash_a), false), - ContextEvent::SendTopic(peer_a.clone(), pov_block_topic(hash_b), false), - ], - ); - - validator_context.clear(); - } - - let pov_topic = pov_block_topic(hash_a); - - let pov_block = PoVBlock { - block_data: BlockData(vec![1, 2, 3]), - }; - - let pov_block_hash = pov_block.hash(); - let c_hash = [99u8; 32].into(); - - let message = GossipMessage::PoVBlock(GossipPoVBlock { - relay_chain_leaf: hash_a, - candidate_hash: c_hash, - pov_block, - }); - - let bad_message = GossipMessage::PoVBlock(GossipPoVBlock { - relay_chain_leaf: hash_a, - candidate_hash: c_hash, - pov_block: PoVBlock { - block_data: BlockData(vec![4, 5, 6]), - }, - }); - - let encoded = message.encode(); - let bad_encoded = bad_message.encode(); - - let mut validation_data = MessageValidationData::default(); - validation_data.signing_context.parent_hash = hash_a; - validator.inner.write().attestation_view.new_local_leaf(validation_data); - - // before sending `Candidate` message, neither are allowed. - { - let res = validator.validate( - &mut validator_context, - &peer_a, - &encoded[..], - ); - - match res { - GossipValidationResult::Discard => {}, - _ => panic!("wrong result"), - } - assert_eq!( - validator_context.events, - Vec::new(), - ); - - validator_context.clear(); - } - - { - let res = validator.validate( - &mut validator_context, - &peer_a, - &bad_encoded[..], - ); - - match res { - GossipValidationResult::Discard => {}, - _ => panic!("wrong result"), - } - assert_eq!( - validator_context.events, - Vec::new(), - ); - - validator_context.clear(); - } - - validator.inner.write().attestation_view.note_aware_under_leaf( - &hash_a, - c_hash, - attestation::CandidateMeta { pov_block_hash }, - ); - - // now the good message passes and the others not. 
-		{
-			let res = validator.validate(
-				&mut validator_context,
-				&peer_a,
-				&encoded[..],
-			);
-
-			match res {
-				GossipValidationResult::ProcessAndKeep(topic) => assert_eq!(topic, pov_topic),
-				_ => panic!("wrong result"),
-			}
-			assert_eq!(
-				validator_context.events,
-				vec![
-					ContextEvent::BroadcastMessage(pov_topic, encoded.clone(), false),
-				],
-			);
-
-			validator_context.clear();
-		}
-
-		{
-			let res = validator.validate(
-				&mut validator_context,
-				&peer_a,
-				&bad_encoded[..],
-			);
-
-			match res {
-				GossipValidationResult::Discard => {},
-				_ => panic!("wrong result"),
-			}
-			assert_eq!(
-				validator_context.events,
-				Vec::new(),
-			);
-
-			validator_context.clear();
-		}
-	}
-}
diff --git a/network/src/legacy/local_collations.rs b/network/src/legacy/local_collations.rs
deleted file mode 100644
index f1a6615e88b82cb044de6823be6b44d03f2363c3..0000000000000000000000000000000000000000
--- a/network/src/legacy/local_collations.rs
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2018-2020 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
-
-//! Local collations to be circulated to validators.
-//!
-//! Collations are repropagated when a new validator connects, when a
-//! validator rotates its session key, or when they are generated.
-
-use polkadot_primitives::{Hash, parachain::{ValidatorId}};
-use crate::legacy::collator_pool::Role;
-use std::collections::{HashMap, HashSet};
-use std::time::Duration;
-use wasm_timer::Instant;
-
-const LIVE_FOR: Duration = Duration::from_secs(60 * 5);
-
-struct LocalCollation<C> {
-	targets: HashSet<ValidatorId>,
-	collation: C,
-	live_since: Instant,
-}
-
-/// Tracker for locally collated values and which validators to send them to.
-pub struct LocalCollations<C> {
-	primary_for: HashSet<ValidatorId>,
-	local_collations: HashMap<Hash, LocalCollation<C>>,
-}
-
-impl<C: Clone> Default for LocalCollations<C> {
-	fn default() -> Self {
-		Self::new()
-	}
-}
-
-impl<C: Clone> LocalCollations<C> {
-	/// Create a new `LocalCollations` tracker.
-	pub fn new() -> Self {
-		LocalCollations {
-			primary_for: HashSet::new(),
-			local_collations: HashMap::new(),
-		}
-	}
-
-	/// Validator gave us a new role. If the new role is "primary", this function might return
-	/// a set of collations to send to that validator.
-	pub fn note_validator_role(&mut self, key: ValidatorId, role: Role) -> Vec<(Hash, C)> {
-		match role {
-			Role::Backup => {
-				self.primary_for.remove(&key);
-				Vec::new()
-			}
-			Role::Primary => {
-				let new_primary = self.primary_for.insert(key.clone());
-				if new_primary {
-					self.collations_targeting(&key)
-				} else {
-					Vec::new()
-				}
-			}
-		}
-	}
-
-	/// Fresh session key from a validator. Returns a vector of collations to send
-	/// to the validator.
-	pub fn fresh_key(&mut self, old_key: &ValidatorId, new_key: &ValidatorId) -> Vec<(Hash, C)> {
-		if self.primary_for.remove(old_key) {
-			self.primary_for.insert(new_key.clone());
-
-			self.collations_targeting(new_key)
-		} else {
-			Vec::new()
-		}
-	}
-
-	/// Validator disconnected.
-	pub fn on_disconnect(&mut self, key: &ValidatorId) {
-		self.primary_for.remove(key);
-	}
-
-	/// Mark collations relevant to the given parent hash as obsolete.
-	pub fn collect_garbage(&mut self, relay_parent: Option<&Hash>) {
-		if let Some(relay_parent) = relay_parent {
-			self.local_collations.remove(relay_parent);
-		}
-
-		let now = Instant::now();
-		self.local_collations.retain(|_, v| v.live_since + LIVE_FOR > now);
-	}
-
-	/// Add a collation. Returns an iterator of session keys to send to and lazy copies of the collation.
-	pub fn add_collation<'a>(
-		&'a mut self,
-		relay_parent: Hash,
-		targets: HashSet<ValidatorId>,
-		collation: C
-	)
-		-> impl Iterator<Item = (ValidatorId, C)> + 'a
-	{
-		self.local_collations.insert(relay_parent, LocalCollation {
-			targets,
-			collation,
-			live_since: Instant::now(),
-		});
-
-		let local = self.local_collations.get(&relay_parent)
-			.expect("just inserted to this key; qed");
-
-		let borrowed_collation = &local.collation;
-		local.targets
-			.intersection(&self.primary_for)
-			.map(move |k| (k.clone(), borrowed_collation.clone()))
-	}
-
-	fn collations_targeting(&self, key: &ValidatorId) -> Vec<(Hash, C)> {
-		self.local_collations.iter()
-			.filter(|&(_, ref v)| v.targets.contains(key))
-			.map(|(h, v)| (*h, v.collation.clone()))
-			.collect()
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use sp_core::crypto::UncheckedInto;
-	use polkadot_primitives::parachain::ValidatorId;
-
-	#[test]
-	fn add_validator_with_ready_collation() {
-		let key: ValidatorId = [1; 32].unchecked_into();
-		let relay_parent = [2; 32].into();
-		let targets = {
-			let mut set = HashSet::new();
-			set.insert(key.clone());
-			set
-		};
-
-		let mut tracker = LocalCollations::new();
-		assert!(tracker.add_collation(relay_parent, targets, 5).next().is_none());
-		assert_eq!(tracker.note_validator_role(key, Role::Primary), vec![(relay_parent, 5)]);
-	}
-
-	#[test]
-	fn rename_with_ready() {
-		let orig_key: ValidatorId = [1; 32].unchecked_into();
-		let new_key: ValidatorId = [2; 32].unchecked_into();
-		let relay_parent = [255; 32].into();
-		let targets = {
-			let mut set = HashSet::new();
-			set.insert(new_key.clone());
-			set
-		};
-
-		let mut tracker: LocalCollations<u8> = LocalCollations::new();
-		assert!(tracker.add_collation(relay_parent, targets, 5).next().is_none());
-		assert!(tracker.note_validator_role(orig_key.clone(), Role::Primary).is_empty());
-		assert_eq!(tracker.fresh_key(&orig_key, &new_key), vec![(relay_parent, 5u8)]);
-	}
-
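	// An illustrative extra test (not from the original file) tying the API
	// together: a collation queued before its target is primary is released
	// by `note_validator_role`, then pruned with its relay parent.
	#[test]
	fn local_collation_lifecycle_sketch() {
		let key: ValidatorId = [9; 32].unchecked_into();
		let relay_parent: Hash = [8; 32].into();
		let targets = {
			let mut set = HashSet::new();
			set.insert(key.clone());
			set
		};

		let mut tracker: LocalCollations<u8> = LocalCollations::new();
		// Queued: the target validator is not known to be primary yet.
		assert!(tracker.add_collation(relay_parent, targets, 42).next().is_none());
		// Released once the validator reports itself primary.
		assert_eq!(
			tracker.note_validator_role(key.clone(), Role::Primary),
			vec![(relay_parent, 42)],
		);
		// Pruned together with its relay parent.
		tracker.collect_garbage(Some(&relay_parent));
		assert!(tracker.collations_targeting(&key).is_empty());
	}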
-	#[test]
-	fn collecting_garbage() {
-		let relay_parent_a = [255; 32].into();
-		let relay_parent_b = [222; 32].into();
-
-		let mut tracker: LocalCollations<u8> = LocalCollations::new();
-		assert!(tracker.add_collation(relay_parent_a, HashSet::new(), 5).next().is_none());
-		assert!(tracker.add_collation(relay_parent_b, HashSet::new(), 69).next().is_none());
-
-		let live_since = Instant::now() - LIVE_FOR - Duration::from_secs(10);
-		tracker.local_collations.get_mut(&relay_parent_b).unwrap().live_since = live_since;
-
-		tracker.collect_garbage(Some(&relay_parent_a));
-
-		// first one pruned because of relay parent, other because of time.
-		assert!(tracker.local_collations.is_empty());
-	}
-
-	#[test]
-	fn add_collation_with_connected_target() {
-		let key: ValidatorId = [1; 32].unchecked_into();
-		let relay_parent = [2; 32].into();
-		let targets = {
-			let mut set = HashSet::new();
-			set.insert(key.clone());
-			set
-		};
-
-		let mut tracker = LocalCollations::new();
-		assert!(tracker.note_validator_role(key.clone(), Role::Primary).is_empty());
-		assert_eq!(tracker.add_collation(relay_parent, targets, 5).next(), Some((key, 5)));
-
-	}
-}
diff --git a/network/src/legacy/mod.rs b/network/src/legacy/mod.rs
deleted file mode 100644
index 28ea77a6bdc10cac191bb184ae70f4f234071602..0000000000000000000000000000000000000000
--- a/network/src/legacy/mod.rs
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2017-2020 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
-
-//! Polkadot-specific network implementation.
-//!
-//! This manages routing for parachain statements, parachain block and outgoing message
-//! data fetching, communication between collators and validators, and more.
-
-pub mod collator_pool;
-pub mod local_collations;
-pub mod gossip;
-
-use codec::Decode;
-use futures::prelude::*;
-use polkadot_primitives::Hash;
-use sc_network::PeerId;
-use sc_network_gossip::TopicNotification;
-use log::debug;
-
-use std::pin::Pin;
-use std::task::{Context as PollContext, Poll};
-
-use self::gossip::GossipMessage;
-
-/// Basic gossip functionality that a network has to fulfill.
-pub trait GossipService {
-	/// Get a stream of gossip messages for a given hash.
-	fn gossip_messages_for(&self, topic: Hash) -> GossipMessageStream;
-
-	/// Gossip a message on given topic.
-	fn gossip_message(&self, topic: Hash, message: GossipMessage);
-
-	/// Send a message to a specific peer we're connected to.
-	fn send_message(&self, who: PeerId, message: GossipMessage);
-}
-
-/// A stream of gossip messages and an optional sender for a topic.
-pub struct GossipMessageStream {
-	topic_stream: Pin<Box<dyn Stream<Item = TopicNotification> + Send>>,
-}
-
-impl GossipMessageStream {
-	/// Create a new instance with the given topic stream.
-	pub fn new(topic_stream: Pin<Box<dyn Stream<Item = TopicNotification> + Send>>) -> Self {
-		Self {
-			topic_stream,
-		}
-	}
-}
-
-impl Stream for GossipMessageStream {
-	type Item = (GossipMessage, Option<PeerId>);
-
-	fn poll_next(self: Pin<&mut Self>, cx: &mut PollContext<'_>) -> Poll<Option<Self::Item>> {
-		let this = Pin::into_inner(self);
-
-		loop {
-			let msg = match Pin::new(&mut this.topic_stream).poll_next(cx) {
-				Poll::Ready(Some(msg)) => msg,
-				Poll::Ready(None) => return Poll::Ready(None),
-				Poll::Pending => return Poll::Pending,
-			};
-
-			debug!(target: "validation", "Processing statement for live validation leaf-work");
-			if let Ok(gmsg) = GossipMessage::decode(&mut &msg.message[..]) {
-				return Poll::Ready(Some((gmsg, msg.sender)))
-			}
-		}
-	}
-}
diff --git a/network/src/lib.rs b/network/src/lib.rs
deleted file mode 100644
index 5048f09adaf51d17dd108686f783cb7c7d41d037..0000000000000000000000000000000000000000
--- a/network/src/lib.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2020 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <https://www.gnu.org/licenses/>.
-
-//! High-level network protocols for Polkadot.
-//!
-//! This manages routing for parachain statements, parachain block and outgoing message
-//! data fetching, communication between collators and validators, and more.
-
-#![recursion_limit="256"]
-
-use polkadot_primitives::{Block, Hash, BlakeTwo256, HashT};
-
-pub mod legacy;
-pub mod protocol;
-
-/// Specialization of the network service for the polkadot block type.
-pub type PolkadotNetworkService = sc_network::NetworkService<Block, Hash>;
-
-mod cost {
-	use sc_network::ReputationChange as Rep;
-	pub(super) const UNEXPECTED_MESSAGE: Rep = Rep::new(-200, "Polkadot: Unexpected message");
-	pub(super) const INVALID_FORMAT: Rep = Rep::new(-200, "Polkadot: Bad message");
-
-	pub(super) const UNKNOWN_PEER: Rep = Rep::new(-50, "Polkadot: Unknown peer");
-	pub(super) const BAD_COLLATION: Rep = Rep::new(-1000, "Polkadot: Bad collation");
-}
-
-mod benefit {
-	use sc_network::ReputationChange as Rep;
-	pub(super) const VALID_FORMAT: Rep = Rep::new(20, "Polkadot: Valid message format");
-	pub(super) const GOOD_COLLATION: Rep = Rep::new(100, "Polkadot: Good collation");
-}
-
-/// Compute gossip topic for the erasure chunk messages given the hash of the
-/// candidate they correspond to.
-fn erasure_coding_topic(candidate_hash: &Hash) -> Hash {
-	let mut v = candidate_hash.as_ref().to_vec();
-	v.extend(b"erasure_chunks");
-
-	BlakeTwo256::hash(&v[..])
-}
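// An illustrative, self-contained sketch (not from the original crate) of the
// topic-derivation idea in `erasure_coding_topic` above: append a fixed domain
// tag to the candidate hash and hash the concatenation, so each candidate's
// chunk gossip gets its own stable topic. DefaultHasher stands in for
// `BlakeTwo256` and `u64` for the 256-bit topic hash.
fn _topic_derivation_sketch() {
	use std::collections::hash_map::DefaultHasher;
	use std::hash::{Hash as _, Hasher};

	fn topic(candidate_hash: &[u8; 32], domain: &[u8]) -> u64 {
		let mut preimage = candidate_hash.to_vec();
		preimage.extend_from_slice(domain); // domain-separation tag
		let mut hasher = DefaultHasher::new();
		preimage.hash(&mut hasher);
		hasher.finish()
	}

	let candidate = [7u8; 32];
	// Deterministic per candidate...
	assert_eq!(topic(&candidate, b"erasure_chunks"), topic(&candidate, b"erasure_chunks"));
	// ...and disjoint between message kinds for the same candidate.
	assert_ne!(topic(&candidate, b"erasure_chunks"), topic(&candidate, b"attestations"));
}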
diff --git a/network/src/protocol/mod.rs b/network/src/protocol/mod.rs
deleted file mode 100644
index 0ed2d9ac4395014667b91d201161127a9455df5d..0000000000000000000000000000000000000000
--- a/network/src/protocol/mod.rs
+++ /dev/null
@@ -1,1555 +0,0 @@
-// Copyright 2020 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-//! Polkadot-specific base networking protocol.
-//!
-//! This is implemented using the `sc-network` APIs for futures-based
-//! notifications protocols. In some cases, we emulate request/response on top
-//! of the notifications machinery, which is slightly less efficient but not
-//! meaningfully so.
-//!
-//! We handle events from `sc-network` in a thin wrapper that forwards to a
-//! background worker, which also handles commands from other parts of the node.
-
-use arrayvec::ArrayVec;
-use codec::{Decode, Encode};
-use futures::channel::{mpsc, oneshot};
-use futures::future::Either;
-use futures::prelude::*;
-use futures::task::{Spawn, SpawnExt, Context, Poll};
-use futures::stream::{FuturesUnordered, StreamFuture};
-use log::{debug, trace};
-
-use polkadot_primitives::{
-	Hash, Block,
-	parachain::{
-		PoVBlock, ValidatorId, ValidatorIndex, Collation, AbridgedCandidateReceipt,
-		ErasureChunk, ParachainHost, Id as ParaId, CollatorId,
-	},
-};
-use polkadot_validation::{
-	SharedTable, TableRouter, Network as ParachainNetwork, Validated, GenericStatement, Collators,
-	SignedStatement,
-};
-use sc_network::{ObservedRole, Event, PeerId};
-use sp_api::ProvideRuntimeApi;
-use sp_runtime::ConsensusEngineId;
-
-use std::collections::{hash_map::{Entry, HashMap}, HashSet};
-use std::pin::Pin;
-use std::sync::Arc;
-use std::time::Duration;
-
-use super::{cost, benefit, PolkadotNetworkService};
-use crate::legacy::collator_pool::Role as CollatorRole;
-use crate::legacy::gossip::{GossipMessage, ErasureChunkMessage, RegisteredMessageValidator};
-
-/// The current protocol version.
-pub const VERSION: u32 = 1;
-/// The minimum supported protocol version.
-pub const MIN_SUPPORTED_VERSION: u32 = 1;
-
-/// The engine ID of the polkadot network protocol.
-pub const POLKADOT_ENGINE_ID: ConsensusEngineId = *b"dot2";
-/// The protocol name.
-pub const POLKADOT_PROTOCOL_NAME: &[u8] = b"/polkadot/1";
-
-pub use crate::legacy::gossip::ChainContext;
-
-#[cfg(test)]
-mod tests;
-
-// Messages from the service API or network adapter.
-enum ServiceToWorkerMsg {
-	// basic peer messages.
-	PeerConnected(PeerId, ObservedRole),
-	PeerMessage(PeerId, Vec<bytes::Bytes>),
-	PeerDisconnected(PeerId),
-
-	// service messages.
-	BuildConsensusNetworking(mpsc::Receiver<ServiceToWorkerMsg>, Arc<SharedTable>, Vec<ValidatorId>),
-	SubmitValidatedCollation(
-		AbridgedCandidateReceipt,
-		PoVBlock,
-		(ValidatorIndex, Vec<ErasureChunk>),
-	),
-	FetchPoVBlock(
-		AbridgedCandidateReceipt,
-		oneshot::Sender<PoVBlock>,
-	),
-	FetchErasureChunk(
-		Hash, // candidate-hash.
-		u32, // validator index.
-		oneshot::Sender<ErasureChunk>,
-	),
-	DistributeErasureChunk(
-		Hash, // candidate-hash,
-		ErasureChunk,
-	),
-	AwaitCollation(
-		Hash, // relay-parent,
-		ParaId,
-		oneshot::Sender<Collation>,
-	),
-	NoteBadCollator(
-		CollatorId,
-	),
-	RegisterAvailabilityStore(
-		av_store::Store,
-	),
-	OurCollation(
-		HashSet<ValidatorId>,
-		Collation,
-	),
-	ListenCheckedStatements(
-		Hash, // relay-parent,
-		oneshot::Sender<Pin<Box<dyn Stream<Item = SignedStatement> + Send>>>,
-	),
-
-	/// Used in tests to ensure that all other messages sent from the same
-	/// thread have been flushed. Also executes arbitrary logic with the protocol
-	/// handler.
-	#[cfg(test)]
-	Synchronize(Box<dyn FnOnce(&mut ProtocolHandler) + Send>),
-}
-
-/// Messages from a background task to the main worker task.
-enum BackgroundToWorkerMsg {
-	// Spawn a given future.
-	Spawn(future::BoxFuture<'static, ()>),
-}
-
-/// Operations that a handle to an underlying network service should provide.
-pub trait NetworkServiceOps: Send + Sync {
-	/// Report the peer as having a particular positive or negative value.
-	fn report_peer(&self, peer: PeerId, value: sc_network::ReputationChange);
-
-	/// Write a notification to a given peer.
-	fn write_notification(
-		&self,
-		peer: PeerId,
-		engine_id: ConsensusEngineId,
-		notification: Vec<u8>,
-	);
-}
-
-impl NetworkServiceOps for PolkadotNetworkService {
-	fn report_peer(&self, peer: PeerId, value: sc_network::ReputationChange) {
-		PolkadotNetworkService::report_peer(self, peer, value);
-	}
-
-	fn write_notification(
-		&self,
-		peer: PeerId,
-		engine_id: ConsensusEngineId,
-		notification: Vec<u8>,
-	) {
-		PolkadotNetworkService::write_notification(self, peer, engine_id, notification);
-	}
-}
-
-/// Operations that a handle to a gossip network should provide.
-trait GossipOps: Clone + Send + crate::legacy::GossipService + 'static {
-	fn new_local_leaf(
-		&self,
-		validation_data: crate::legacy::gossip::MessageValidationData,
-	) -> crate::legacy::gossip::NewLeafActions;
-
-	/// Register an availability store in the gossip service to evaluate incoming
-	/// messages with.
-	fn register_availability_store(
-		&self,
-		store: av_store::Store,
-	);
-}
-
-impl GossipOps for RegisteredMessageValidator {
-	fn new_local_leaf(
-		&self,
-		validation_data: crate::legacy::gossip::MessageValidationData,
-	) -> crate::legacy::gossip::NewLeafActions {
-		RegisteredMessageValidator::new_local_leaf(
-			self,
-			validation_data,
-		)
-	}
-
-	fn register_availability_store(
-		&self,
-		store: av_store::Store,
-	) {
-		RegisteredMessageValidator::register_availability_store(self, store);
-	}
-}
-
-/// An async handle to the network service.
-pub struct Service<N = PolkadotNetworkService> {
-	sender: mpsc::Sender<ServiceToWorkerMsg>,
-	network_service: Arc<N>,
-}
-
-impl<N> Clone for Service<N> {
-	fn clone(&self) -> Self {
-		Self {
-			sender: self.sender.clone(),
-			network_service: self.network_service.clone(),
-		}
-	}
-}
-
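// An illustrative, self-contained sketch (not from the original file) of the
// pattern `Service` embodies above: a cheaply cloneable handle owning only an
// `mpsc::Sender`, with all mutable state confined to one worker task that
// drains the receiver. `Handle`, `Msg` and `worker` are made-up names.
fn _service_handle_pattern_sketch() {
	use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt};

	enum Msg { NotePeer(u64), Stop }

	#[derive(Clone)]
	struct Handle { sender: mpsc::Sender<Msg> }

	// The worker owns the state, so the handles never need locks.
	async fn worker(mut rx: mpsc::Receiver<Msg>) -> usize {
		let mut peers = Vec::new();
		while let Some(msg) = rx.next().await {
			match msg {
				Msg::NotePeer(id) => peers.push(id),
				Msg::Stop => break,
			}
		}
		peers.len()
	}

	let (tx, rx) = mpsc::channel(16);
	let mut handle = Handle { sender: tx };
	block_on(async move {
		handle.sender.send(Msg::NotePeer(1)).await.unwrap();
		handle.sender.send(Msg::NotePeer(2)).await.unwrap();
		handle.sender.send(Msg::Stop).await.unwrap();
		assert_eq!(worker(rx).await, 2);
	});
}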
-/// Registers the protocol.
-///
-/// You are very strongly encouraged to call this method very early on. Any connection open
-/// will retain the protocols that were registered then, and not any new one.
-pub fn start<C, Api, SP>(
-	service: Arc<PolkadotNetworkService>,
-	config: Config,
-	chain_context: C,
-	api: Arc<Api>,
-	executor: SP,
-) -> Result<Service<PolkadotNetworkService>, futures::task::SpawnError> where
-	C: ChainContext + 'static,
-	Api: ProvideRuntimeApi<Block> + Send + Sync + 'static,
-	Api::Api: ParachainHost<Block>,
-	SP: Spawn + Clone + Send + 'static,
-{
-	const SERVICE_TO_WORKER_BUF: usize = 256;
-
-	let mut event_stream = service.event_stream("polkadot-network");
-	service.register_notifications_protocol(POLKADOT_ENGINE_ID, POLKADOT_PROTOCOL_NAME);
-	let (mut worker_sender, worker_receiver) = mpsc::channel(SERVICE_TO_WORKER_BUF);
-
-	let gossip_validator = crate::legacy::gossip::register_validator(
-		service.clone(),
-		chain_context,
-		&executor,
-	);
-	executor.spawn(worker_loop(
-		config,
-		service.clone(),
-		gossip_validator,
-		api,
-		worker_receiver,
-		executor.clone(),
-	))?;
-
-	let polkadot_service = Service {
-		sender: worker_sender.clone(),
-		network_service: service.clone(),
-	};
-
-	executor.spawn(async move {
-		while let Some(event) = event_stream.next().await {
-			let res = match event {
-				Event::Dht(_) => continue,
-				Event::NotificationStreamOpened {
-					remote,
-					engine_id,
-					role,
-				} => {
-					if engine_id != POLKADOT_ENGINE_ID { continue }
-
-					worker_sender.send(ServiceToWorkerMsg::PeerConnected(remote, role)).await
-				},
-				Event::NotificationStreamClosed {
-					remote,
-					engine_id,
-				} => {
-					if engine_id != POLKADOT_ENGINE_ID { continue }
-
-					worker_sender.send(ServiceToWorkerMsg::PeerDisconnected(remote)).await
-				},
-				Event::NotificationsReceived {
-					remote,
-					messages,
-				} => {
-					let our_notifications = messages.into_iter()
-						.filter_map(|(engine, message)| if engine == POLKADOT_ENGINE_ID {
-							Some(message)
-						} else {
-							None
-						})
-						.collect();
-
-					worker_sender.send(
-						ServiceToWorkerMsg::PeerMessage(remote, our_notifications)
-					).await
-				}
-			};
-
-			if let Err(e) = res {
-				// full is impossible here, as we've `await`ed the value being sent.
-				if e.is_disconnected() {
-					break
-				}
-			}
-		}
-	})?;
-
-	Ok(polkadot_service)
-}
-
-/// The Polkadot protocol status message.
-#[derive(Debug, Encode, Decode, PartialEq)]
-pub struct Status {
-	version: u32, // protocol version.
-	collating_for: Option<(CollatorId, ParaId)>,
-}
-
-/// Polkadot-specific messages from peer to peer.
-#[derive(Debug, Encode, Decode, PartialEq)]
-pub enum Message {
-	/// Exchange status with a peer. This should be the first message sent.
-	#[codec(index = "0")]
-	Status(Status),
-	/// Inform a peer of their role as a collator. May only be sent after
-	/// validator ID.
-	#[codec(index = "1")]
-	CollatorRole(CollatorRole),
-	/// Send a collation.
-	#[codec(index = "2")]
-	Collation(Hash, Collation),
-	/// Inform a peer of a new validator public key.
-	#[codec(index = "3")]
-	ValidatorId(ValidatorId),
-}
-
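// An illustrative round-trip (not from the original file) of why `Message`
// above pins each variant with `#[codec(index = "...")]`: the index byte is
// the wire discriminant, so reordering variants must not change encodings.
// `Wire` and its variants are stand-ins for the real `Message`.
#[cfg(test)]
#[test]
fn _codec_index_round_trip_sketch() {
	use codec::{Decode, Encode};

	#[derive(Debug, PartialEq, Encode, Decode)]
	enum Wire {
		#[codec(index = "0")]
		Ping,
		#[codec(index = "1")]
		Version(u32),
	}

	let msg = Wire::Version(1);
	let bytes = msg.encode();
	// The first byte is exactly the declared variant index.
	assert_eq!(bytes[0], 1);
	assert_eq!(Wire::decode(&mut &bytes[..]).unwrap(), msg);
}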
-// ensures collator-protocol messages are sent in correct order.
-// session key must be sent before collator role.
-enum CollatorState {
-	Fresh,
-	RolePending(CollatorRole),
-	Primed(Option<CollatorRole>),
-}
-
-impl CollatorState {
-	fn send_key<F: FnMut(Message)>(&mut self, key: ValidatorId, mut f: F) {
-		f(Message::ValidatorId(key));
-		match self {
-			CollatorState::RolePending(role) => {
-				f(Message::CollatorRole(*role));
-				*self = CollatorState::Primed(Some(*role));
-			},
-			CollatorState::Fresh => {
-				*self = CollatorState::Primed(None);
-			},
-			CollatorState::Primed(_) => {},
-		}
-	}
-
-	fn set_role<F: FnMut(Message)>(&mut self, role: CollatorRole, mut f: F) {
-		if let CollatorState::Primed(ref mut r) = *self {
-			f(Message::CollatorRole(role));
-			*r = Some(role);
-		} else {
-			*self = CollatorState::RolePending(role);
-		}
-	}
-}
-
-enum ProtocolState {
-	Fresh,
-	Ready(Status, CollatorState),
-}
-
-struct PeerData {
-	claimed_validator: bool,
-	protocol_state: ProtocolState,
-	session_keys: RecentValidatorIds,
-}
-
-impl PeerData {
-	fn ready_and_collating_for(&self) -> Option<(CollatorId, ParaId)> {
-		match self.protocol_state {
-			ProtocolState::Ready(ref status, _) => status.collating_for.clone(),
-			_ => None,
-		}
-	}
-
-	fn collator_state_mut(&mut self) -> Option<&mut CollatorState> {
-		match self.protocol_state {
-			ProtocolState::Ready(_, ref mut c_state) => Some(c_state),
-			_ => None,
-		}
-	}
-
-	fn should_send_key(&self) -> bool {
-		self.claimed_validator || self.ready_and_collating_for().is_some()
-	}
-}
-
-struct ConsensusNetworkingInstance {
-	statement_table: Arc<SharedTable>,
-	relay_parent: Hash,
-	attestation_topic: Hash,
-	_drop_signal: exit_future::Signal,
-}
-
-/// A utility future that resolves when the receiving end of a channel has hung up.
-///
-/// This is an `.await`-friendly interface around `poll_canceled`.
-// TODO: remove in favor of https://github.com/rust-lang/futures-rs/pull/2092/
-// once published.
-#[must_use = "futures do nothing unless you `.await` or poll them"]
-#[derive(Debug)]
-pub struct AwaitCanceled<'a, T> {
-	inner: &'a mut oneshot::Sender<T>,
-}
-
-impl<T> Future for AwaitCanceled<'_, T> {
-	type Output = ();
-
-	fn poll(
-		mut self: Pin<&mut Self>,
-		cx: &mut futures::task::Context<'_>,
-	) -> futures::task::Poll<()> {
-		self.inner.poll_canceled(cx)
-	}
-}
-
-/// Protocol configuration.
-#[derive(Default)]
-pub struct Config {
-	/// Which collator-id to use when collating, and on which parachain.
-	/// `None` if not collating.
-	pub collating_for: Option<(CollatorId, ParaId)>,
-}
-
-// 3 is chosen because sessions change infrequently and usually
-// only the last 2 (current session and "last" session) are relevant.
-// the extra is an error boundary.
-const RECENT_SESSIONS: usize = 3;
-
-/// Result when inserting recent session key.
-#[derive(PartialEq, Eq)]
-pub(crate) enum InsertedRecentKey {
-	/// Key was already known.
-	AlreadyKnown,
-	/// Key was new and pushed out optional old item.
-	New(Option<ValidatorId>),
-}
-
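// An illustrative test (not from the original file) pinning down the ordering
// `CollatorState` enforces above: a role set before the session key is only
// buffered, and goes on the wire right after the key once `send_key` runs.
#[cfg(test)]
#[test]
fn _collator_state_ordering_sketch() {
	use sp_core::crypto::UncheckedInto;

	let mut state = CollatorState::Fresh;
	let mut sent = Vec::new();

	// Role arrives before the key: nothing may be sent yet.
	state.set_role(CollatorRole::Primary, |msg| sent.push(msg));
	assert!(sent.is_empty());

	// Once the key is sent, `ValidatorId` must precede `CollatorRole`.
	let key: ValidatorId = [0u8; 32].unchecked_into();
	state.send_key(key, |msg| sent.push(msg));
	assert!(matches!(sent[0], Message::ValidatorId(_)));
	assert!(matches!(sent[1], Message::CollatorRole(CollatorRole::Primary)));
}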
-/// Wrapper for managing recent session keys.
-#[derive(Default)]
-struct RecentValidatorIds {
-	inner: ArrayVec<[ValidatorId; RECENT_SESSIONS]>,
-}
-
-impl RecentValidatorIds {
-	/// Insert a new session key. This returns one to be pushed out if the
-	/// set is full.
-	fn insert(&mut self, key: ValidatorId) -> InsertedRecentKey {
-		if self.inner.contains(&key) { return InsertedRecentKey::AlreadyKnown }
-
-		let old = if self.inner.len() == RECENT_SESSIONS {
-			Some(self.inner.remove(0))
-		} else {
-			None
-		};
-
-		self.inner.push(key);
-		InsertedRecentKey::New(old)
-	}
-
-	/// As a slice. Most recent is last.
-	fn as_slice(&self) -> &[ValidatorId] {
-		&*self.inner
-	}
-
-	/// Returns the last inserted session key.
-	fn latest(&self) -> Option<&ValidatorId> {
-		self.inner.last()
-	}
-}
-
-struct ProtocolHandler {
-	service: Arc<dyn NetworkServiceOps>,
-	peers: HashMap<PeerId, PeerData>,
-	// reverse mapping from validator-ID to PeerID. Multiple peers can represent
-	// the same validator because of sentry nodes.
-	connected_validators: HashMap<ValidatorId, HashSet<PeerId>>,
-	consensus_instances: HashMap<Hash, ConsensusNetworkingInstance>,
-	collators: crate::legacy::collator_pool::CollatorPool,
-	local_collations: crate::legacy::local_collations::LocalCollations<Collation>,
-	config: Config,
-	local_keys: RecentValidatorIds,
-}
-
-impl ProtocolHandler {
-	fn new(
-		service: Arc<dyn NetworkServiceOps>,
-		config: Config,
-	) -> Self {
-		ProtocolHandler {
-			service,
-			peers: HashMap::new(),
-			connected_validators: HashMap::new(),
-			consensus_instances: HashMap::new(),
-			collators: Default::default(),
-			local_collations: Default::default(),
-			local_keys: Default::default(),
-			config,
-		}
-	}
-
-	fn on_connect(&mut self, peer: PeerId, role: ObservedRole) {
-		let claimed_validator = matches!(
-			role,
-			ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority | ObservedRole::Authority
-		);
-
-		self.peers.insert(peer.clone(), PeerData {
-			claimed_validator,
-			protocol_state: ProtocolState::Fresh,
-			session_keys: Default::default(),
-		});
-
-		let status = Message::Status(Status {
-			version: VERSION,
-			collating_for: self.config.collating_for.clone(),
-		}).encode();
-
-		self.service.write_notification(peer, POLKADOT_ENGINE_ID, status);
-	}
-
-	fn on_disconnect(&mut self, peer: PeerId) {
-		let mut new_primary = None;
-		if let Some(data) = self.peers.remove(&peer) {
-			// replace collator.
-			if let Some((collator_id, _)) = data.ready_and_collating_for() {
-				if self.collators.collator_id_to_peer_id(&collator_id) == Some(&peer) {
-					new_primary = self.collators.on_disconnect(collator_id);
-				}
-			}
-
-			// clean up stated validator IDs.
- for validator_id in data.session_keys.as_slice().iter().cloned() { - self.validator_representative_removed(validator_id, &peer); - } - } - - let service = &self.service; - let peers = &mut self.peers; - if let Some(new_primary) = new_primary { - let new_primary_peer_id = match self.collators.collator_id_to_peer_id(&new_primary) { - None => return, - Some(p) => p.clone(), - }; - if let Some(c_state) = peers.get_mut(&new_primary_peer_id) - .and_then(|p| p.collator_state_mut()) - { - c_state.set_role( - CollatorRole::Primary, - |msg| service.write_notification( - new_primary_peer_id.clone(), - POLKADOT_ENGINE_ID, - msg.encode(), - ), - ); - } - } - } - - fn on_raw_messages(&mut self, remote: PeerId, messages: Vec) { - for raw_message in messages { - match Message::decode(&mut raw_message.as_ref()) { - Ok(message) => { - self.service.report_peer(remote.clone(), benefit::VALID_FORMAT); - match message { - Message::Status(status) => { - self.on_status(remote.clone(), status); - } - Message::CollatorRole(role) => { - self.on_collator_role(remote.clone(), role) - } - Message::Collation(relay_parent, collation) => { - self.on_remote_collation(remote.clone(), relay_parent, collation); - } - Message::ValidatorId(session_key) => { - self.on_validator_id(remote.clone(), session_key) - } - } - }, - Err(_) => self.service.report_peer(remote.clone(), cost::INVALID_FORMAT), - } - } - } - - fn on_status(&mut self, remote: PeerId, status: Status) { - let peer = match self.peers.get_mut(&remote) { - None => { self.service.report_peer(remote, cost::UNKNOWN_PEER); return } - Some(p) => p, - }; - - match peer.protocol_state { - ProtocolState::Fresh => { - peer.protocol_state = ProtocolState::Ready(status, CollatorState::Fresh); - if let Some((collator_id, para_id)) = peer.ready_and_collating_for() { - let collator_attached = self.collators - .collator_id_to_peer_id(&collator_id) - .map_or(false, |id| id != &remote); - - // we only care about the first connection from this collator. 
- if !collator_attached { - let role = self.collators - .on_new_collator(collator_id, para_id, remote.clone()); - let service = &self.service; - let send_key = peer.should_send_key(); - - if let Some(c_state) = peer.collator_state_mut() { - if send_key { - if let Some(key) = self.local_keys.latest() { - c_state.send_key(key.clone(), |msg| service.write_notification( - remote.clone(), - POLKADOT_ENGINE_ID, - msg.encode(), - )); - } - } - - c_state.set_role(role, |msg| service.write_notification( - remote.clone(), - POLKADOT_ENGINE_ID, - msg.encode(), - )); - } - } - } - } - ProtocolState::Ready(_, _) => { - self.service.report_peer(remote, cost::UNEXPECTED_MESSAGE); - } - } - } - - fn on_remote_collation(&mut self, remote: PeerId, relay_parent: Hash, collation: Collation) { - let peer = match self.peers.get_mut(&remote) { - None => { self.service.report_peer(remote, cost::UNKNOWN_PEER); return } - Some(p) => p, - }; - - let (collator_id, para_id) = match peer.ready_and_collating_for() { - None => { - self.service.report_peer(remote, cost::UNEXPECTED_MESSAGE); - return - } - Some(x) => x, - }; - - let collation_para = collation.info.parachain_index; - let collated_acc = collation.info.collator.clone(); - - let structurally_valid = para_id == collation_para && collator_id == collated_acc; - if structurally_valid && collation.info.check_signature().is_ok() { - debug!(target: "p_net", "Received collation for parachain {:?} from peer {}", - para_id, remote); - - if self.collators.collator_id_to_peer_id(&collator_id) == Some(&remote) { - self.collators.on_collation(collator_id, relay_parent, collation); - self.service.report_peer(remote, benefit::GOOD_COLLATION); - } - } else { - self.service.report_peer(remote, cost::INVALID_FORMAT); - } - } - - fn on_collator_role(&mut self, remote: PeerId, role: CollatorRole) { - let collations_to_send; - - { - let peer = match self.peers.get_mut(&remote) { - None => { self.service.report_peer(remote, cost::UNKNOWN_PEER); return } - Some(p) => p, - }; - - match peer.protocol_state { - ProtocolState::Fresh => { - self.service.report_peer(remote, cost::UNEXPECTED_MESSAGE); - return; - } - ProtocolState::Ready(_, _) => { - let last_key = match peer.session_keys.as_slice().last() { - None => { - self.service.report_peer(remote, cost::UNEXPECTED_MESSAGE); - return; - } - Some(k) => k, - }; - - collations_to_send = self.local_collations - .note_validator_role(last_key.clone(), role); - } - } - } - - send_peer_collations(&*self.service, remote, collations_to_send); - } - - fn on_validator_id(&mut self, remote: PeerId, key: ValidatorId) { - let mut collations_to_send = Vec::new(); - let mut invalidated_key = None; - - { - let peer = match self.peers.get_mut(&remote) { - None => { self.service.report_peer(remote, cost::UNKNOWN_PEER); return } - Some(p) => p, - }; - - match peer.protocol_state { - ProtocolState::Fresh => { - self.service.report_peer(remote, cost::UNEXPECTED_MESSAGE); - return - } - ProtocolState::Ready(_, _) => { - if let InsertedRecentKey::New(Some(last)) = peer.session_keys.insert(key.clone()) { - collations_to_send = self.local_collations.fresh_key(&last, &key); - invalidated_key = Some(last); - } - } - } - } - - if let Some(invalidated) = invalidated_key { - self.validator_representative_removed(invalidated, &remote); - } - self.connected_validators.entry(key).or_insert_with(HashSet::new).insert(remote.clone()); - - send_peer_collations(&*self.service, remote, collations_to_send); - } - - // call when the given peer no longer represents the given 
validator key. - // - // this can occur when the peer advertises a new key, invalidating an old one, - // or when the peer disconnects. - fn validator_representative_removed(&mut self, validator_id: ValidatorId, peer_id: &PeerId) { - if let Entry::Occupied(mut entry) = self.connected_validators.entry(validator_id) { - entry.get_mut().remove(peer_id); - if entry.get().is_empty() { - let _ = entry.remove_entry(); - } - } - } - - fn await_collation( - &mut self, - relay_parent: Hash, - para_id: ParaId, - sender: oneshot::Sender, - ) { - self.collators.await_collation(relay_parent, para_id, sender); - } - - fn collect_garbage(&mut self) { - self.collators.collect_garbage(None); - self.local_collations.collect_garbage(None); - } - - fn note_bad_collator(&mut self, who: CollatorId) { - if let Some(peer) = self.collators.collator_id_to_peer_id(&who) { - self.service.report_peer(peer.clone(), cost::BAD_COLLATION); - } - } - - // distribute a new session key to any relevant peers. - fn distribute_new_session_key(&mut self, key: ValidatorId) { - let service = &self.service; - - for (peer_id, peer) in self.peers.iter_mut() { - if !peer.should_send_key() { continue } - - if let Some(c_state) = peer.collator_state_mut() { - c_state.send_key(key.clone(), |msg| service.write_notification( - peer_id.clone(), - POLKADOT_ENGINE_ID, - msg.encode(), - )); - } - } - } - - // distribute our (as a collator node) collation to peers. - fn distribute_our_collation(&mut self, targets: HashSet, collation: Collation) { - let relay_parent = collation.info.relay_parent; - let distribution = self.local_collations.add_collation(relay_parent, targets, collation); - - for (validator, collation) in distribution { - let validator_representatives = self.connected_validators.get(&validator) - .into_iter().flat_map(|reps| reps); - - for remote in validator_representatives { - send_peer_collations( - &*self.service, - remote.clone(), - std::iter::once((relay_parent, collation.clone())), - ); - } - } - } - - fn drop_consensus_networking(&mut self, relay_parent: &Hash) { - // this triggers an abort of the background task. - self.consensus_instances.remove(relay_parent); - } -} - -fn send_peer_collations( - service: &dyn NetworkServiceOps, - remote: PeerId, - collations: impl IntoIterator, -) { - for (relay_parent, collation) in collations { - service.write_notification( - remote.clone(), - POLKADOT_ENGINE_ID, - Message::Collation(relay_parent, collation).encode(), - ); - } -} - -/// Receives messages associated to a certain consensus networking instance. -struct ConsensusNetworkingReceiver { - receiver: mpsc::Receiver, - /// The relay parent of this consensus network. - relay_parent: Hash, -} - -impl Stream for ConsensusNetworkingReceiver { - type Item = ServiceToWorkerMsg; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.receiver).poll_next(cx) - } -} - -struct Worker { - protocol_handler: ProtocolHandler, - api: Arc, - executor: Sp, - gossip_handle: Gossip, - background_to_main_sender: mpsc::Sender, - background_receiver: mpsc::Receiver, - service_receiver: mpsc::Receiver, - consensus_networking_receivers: FuturesUnordered>, -} - -impl Worker where - Api: ProvideRuntimeApi + Send + Sync + 'static, - Api::Api: ParachainHost, - Sp: Spawn + Clone, - Gossip: GossipOps, -{ - // spawns a background task to spawn consensus networking. 
- fn build_consensus_networking( - &mut self, - receiver: mpsc::Receiver, - table: Arc, - authorities: Vec, - ) { - // glue: let gossip know about our new local leaf. - let (signal, exit) = exit_future::signal(); - - let key = table.session_key(); - if let Some(key) = key { - if let InsertedRecentKey::New(_) = self.protocol_handler.local_keys.insert(key.clone()) { - self.protocol_handler.distribute_new_session_key(key); - } - } - - let signing_context = table.signing_context().clone(); - let relay_parent = signing_context.parent_hash.clone(); - let new_leaf_actions = self.gossip_handle.new_local_leaf( - crate::legacy::gossip::MessageValidationData { authorities, signing_context }, - ); - - new_leaf_actions.perform(&self.gossip_handle); - - self.protocol_handler.consensus_instances.insert( - relay_parent.clone(), - ConsensusNetworkingInstance { - statement_table: table.clone(), - relay_parent: relay_parent.clone(), - attestation_topic: crate::legacy::gossip::attestation_topic(relay_parent.clone()), - _drop_signal: signal, - }, - ); - - let relay_parent = table.signing_context().parent_hash; - self.consensus_networking_receivers.push(ConsensusNetworkingReceiver { receiver, relay_parent }.into_future()); - - // glue the incoming messages, shared table, and validation - // work together. - let _ = self.executor.spawn(statement_import_loop( - relay_parent, - table, - self.api.clone(), - self.gossip_handle.clone(), - self.background_to_main_sender.clone(), - exit, - )); - } - - fn handle_service_message(&mut self, message: ServiceToWorkerMsg) { - match message { - ServiceToWorkerMsg::PeerConnected(remote, role) => { - self.protocol_handler.on_connect(remote, role); - } - ServiceToWorkerMsg::PeerDisconnected(remote) => { - self.protocol_handler.on_disconnect(remote); - } - ServiceToWorkerMsg::PeerMessage(remote, messages) => { - self.protocol_handler.on_raw_messages(remote, messages) - } - ServiceToWorkerMsg::BuildConsensusNetworking(receiver, table, authorities) => { - self.build_consensus_networking(receiver, table, authorities); - } - ServiceToWorkerMsg::SubmitValidatedCollation(receipt, pov_block, chunks) => { - let relay_parent = receipt.relay_parent; - let instance = match self.protocol_handler.consensus_instances.get(&relay_parent) { - None => return, - Some(instance) => instance, - }; - - distribute_validated_collation( - instance, - receipt, - pov_block, - chunks, - &self.gossip_handle, - ); - } - ServiceToWorkerMsg::FetchPoVBlock(candidate, mut sender) => { - // The gossip system checks that the correct pov-block data is present - // before placing in the pool, so we can safely check by candidate hash. - let get_msg = fetch_pov_from_gossip(&candidate, &self.gossip_handle); - - let _ = self.executor.spawn(async move { - let res = future::select(get_msg, AwaitCanceled { inner: &mut sender }).await; - if let Either::Left((pov_block, _)) = res { - let _ = sender.send(pov_block); - } - }); - } - ServiceToWorkerMsg::FetchErasureChunk(candidate_hash, validator_index, mut sender) => { - let topic = crate::erasure_coding_topic(&candidate_hash); - - // for every erasure-root, relay-parent pair, there should only be one - // valid chunk with the given index. - // - // so we only care about the first item of the filtered stream. 
- let get_msg = self.gossip_handle.gossip_messages_for(topic) - .filter_map(move |(msg, _)| { - future::ready(match msg { - GossipMessage::ErasureChunk(chunk) => - if chunk.chunk.index == validator_index { - Some(chunk.chunk) - } else { - None - }, - _ => None, - }) - }) - .into_future() - .map(|(item, _)| item.expect( - "gossip message streams do not conclude early; qed" - )); - - let _ = self.executor.spawn(async move { - let res = future::select(get_msg, AwaitCanceled { inner: &mut sender }).await; - if let Either::Left((chunk, _)) = res { - let _ = sender.send(chunk); - } - }); - } - ServiceToWorkerMsg::DistributeErasureChunk(candidate_hash, erasure_chunk) => { - let topic = crate::erasure_coding_topic(&candidate_hash); - self.gossip_handle.gossip_message( - topic, - GossipMessage::ErasureChunk(ErasureChunkMessage { - chunk: erasure_chunk, - candidate_hash, - }) - ); - } - ServiceToWorkerMsg::AwaitCollation(relay_parent, para_id, sender) => { - debug!( - target: "p_net", "Attempting to get collation for parachain {:?} on relay parent {:?}", - para_id, - relay_parent, - ); - self.protocol_handler.await_collation(relay_parent, para_id, sender) - } - ServiceToWorkerMsg::NoteBadCollator(collator) => { - self.protocol_handler.note_bad_collator(collator); - } - ServiceToWorkerMsg::RegisterAvailabilityStore(store) => { - self.gossip_handle.register_availability_store(store); - } - ServiceToWorkerMsg::OurCollation(targets, collation) => { - self.protocol_handler.distribute_our_collation(targets, collation); - } - ServiceToWorkerMsg::ListenCheckedStatements(relay_parent, sender) => { - let topic = crate::legacy::gossip::attestation_topic(relay_parent); - let checked_messages = self.gossip_handle.gossip_messages_for(topic) - .filter_map(|msg| match msg.0 { - GossipMessage::Statement(s) => future::ready(Some(s.signed_statement)), - _ => future::ready(None), - }) - .boxed(); - - let _ = sender.send(checked_messages); - } - #[cfg(test)] - ServiceToWorkerMsg::Synchronize(callback) => { - (callback)(&mut self.protocol_handler) - } - } - } - - fn handle_background_message(&mut self, message: BackgroundToWorkerMsg) { - match message { - BackgroundToWorkerMsg::Spawn(task) => { - let _ = self.executor.spawn(task); - } - } - } - - async fn main_loop(&mut self) { - const COLLECT_GARBAGE_INTERVAL: Duration = Duration::from_secs(29); - - let mut collect_garbage = stream::unfold((), move |_| { - futures_timer::Delay::new(COLLECT_GARBAGE_INTERVAL).map(|_| Some(((), ()))) - }).map(drop); - - loop { - futures::select! 
{ - _do_collect = collect_garbage.next() => { - self.protocol_handler.collect_garbage(); - } - service_msg = self.service_receiver.next() => match service_msg { - Some(msg) => self.handle_service_message(msg), - None => return, - }, - consensus_service_msg = self.consensus_networking_receivers.next() => match consensus_service_msg { - Some((Some(msg), receiver)) => { - self.handle_service_message(msg); - self.consensus_networking_receivers.push(receiver.into_future()); - }, - Some((None, receiver)) => { - self.protocol_handler.drop_consensus_networking(&receiver.relay_parent); - }, - None => {}, - }, - background_msg = self.background_receiver.next() => match background_msg { - Some(msg) => self.handle_background_message(msg), - None => return, - }, - } - } - } -} - -async fn worker_loop( - config: Config, - service: Arc, - gossip_handle: impl GossipOps, - api: Arc, - receiver: mpsc::Receiver, - executor: Sp, -) where - Api: ProvideRuntimeApi + Send + Sync + 'static, - Api::Api: ParachainHost, - Sp: Spawn + Clone, -{ - const BACKGROUND_TO_MAIN_BUF: usize = 16; - - let (background_tx, background_rx) = mpsc::channel(BACKGROUND_TO_MAIN_BUF); - let mut worker = Worker { - protocol_handler: ProtocolHandler::new(service, config), - api, - executor, - gossip_handle, - background_to_main_sender: background_tx, - background_receiver: background_rx, - service_receiver: receiver, - consensus_networking_receivers: Default::default(), - }; - - worker.main_loop().await -} - -// A unique trace for valid statements issued by a validator. -#[derive(Hash, PartialEq, Eq, Clone, Debug)] -pub(crate) enum StatementTrace { - Valid(ValidatorIndex, Hash), - Invalid(ValidatorIndex, Hash), -} - -/// Helper for deferring statements whose associated candidate is unknown. -struct DeferredStatements { - deferred: HashMap>, - known_traces: HashSet, -} - -impl DeferredStatements { - /// Create a new `DeferredStatements`. - fn new() -> Self { - DeferredStatements { - deferred: HashMap::new(), - known_traces: HashSet::new(), - } - } - - /// Push a new statement onto the deferred pile. `Candidate` statements - /// cannot be deferred and are ignored. - fn push(&mut self, statement: SignedStatement) { - let (hash, trace) = match statement.statement { - GenericStatement::Candidate(_) => return, - GenericStatement::Valid(hash) => (hash, StatementTrace::Valid(statement.sender.clone(), hash)), - GenericStatement::Invalid(hash) => (hash, StatementTrace::Invalid(statement.sender.clone(), hash)), - }; - - if self.known_traces.insert(trace) { - self.deferred.entry(hash).or_insert_with(Vec::new).push(statement); - } - } - - /// Take all deferred statements referencing the given candidate hash out. - fn take_deferred(&mut self, hash: &Hash) -> (Vec, Vec) { - match self.deferred.remove(hash) { - None => (Vec::new(), Vec::new()), - Some(deferred) => { - let mut traces = Vec::new(); - for statement in deferred.iter() { - let trace = match statement.statement { - GenericStatement::Candidate(_) => continue, - GenericStatement::Valid(hash) => StatementTrace::Valid(statement.sender.clone(), hash), - GenericStatement::Invalid(hash) => StatementTrace::Invalid(statement.sender.clone(), hash), - }; - - self.known_traces.remove(&trace); - traces.push(trace); - } - - (deferred, traces) - } - } - } -} - -// the internal loop of waiting for messages and spawning validation work -// as a result of those messages. this future exits when `exit` is ready. 
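In isolation, the deferral bookkeeping above reduces to a map keyed by candidate hash plus a trace set for de-duplication. A minimal, self-contained sketch of that pattern, using plain integers as stand-ins for the real `Hash`/`ValidatorIndex` types and ignoring the `Valid`/`Invalid` distinction; illustrative only, not code from this tree:

```rust
use std::collections::{HashMap, HashSet};

type CandidateHash = u64; // stand-in for the real `Hash`
type ValidatorIndex = u32;

// Mirrors `StatementTrace`: one (validator, candidate) pair per queued statement.
#[derive(Hash, PartialEq, Eq)]
struct Trace(ValidatorIndex, CandidateHash);

struct Statement {
	sender: ValidatorIndex,
	candidate: CandidateHash,
}

#[derive(Default)]
struct Deferred {
	by_candidate: HashMap<CandidateHash, Vec<Statement>>,
	known_traces: HashSet<Trace>,
}

impl Deferred {
	// Queue a statement whose candidate has not been imported yet;
	// a repeat statement from the same validator is dropped via the trace set.
	fn push(&mut self, s: Statement) {
		if self.known_traces.insert(Trace(s.sender, s.candidate)) {
			self.by_candidate.entry(s.candidate).or_default().push(s);
		}
	}

	// Once the candidate arrives, drain everything queued against it
	// and forget the corresponding traces.
	fn take_deferred(&mut self, candidate: CandidateHash) -> Vec<Statement> {
		let drained = self.by_candidate.remove(&candidate).unwrap_or_default();
		for s in &drained {
			self.known_traces.remove(&Trace(s.sender, s.candidate));
		}
		drained
	}
}

fn main() {
	let mut deferred = Deferred::default();
	deferred.push(Statement { sender: 0, candidate: 7 });
	deferred.push(Statement { sender: 0, candidate: 7 }); // duplicate, ignored
	deferred.push(Statement { sender: 1, candidate: 7 });
	assert_eq!(deferred.take_deferred(7).len(), 2);
}
```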
-async fn statement_import_loop( - relay_parent: Hash, - table: Arc, - api: Arc, - gossip_handle: impl GossipOps, - mut to_worker: mpsc::Sender, - mut exit: exit_future::Exit, -) where - Api: ProvideRuntimeApi + Send + Sync + 'static, - Api::Api: ParachainHost, -{ - let topic = crate::legacy::gossip::attestation_topic(relay_parent); - let mut checked_messages = gossip_handle.gossip_messages_for(topic) - .filter_map(|msg| match msg.0 { - GossipMessage::Statement(s) => future::ready(Some(s.signed_statement)), - _ => future::ready(None), - }); - - let mut deferred_statements = DeferredStatements::new(); - - loop { - let statement = match future::select(exit, checked_messages.next()).await { - Either::Left(_) | Either::Right((None, _)) => return, - Either::Right((Some(statement), e)) => { - exit = e; - statement - } - }; - - // defer any statements for which we haven't imported the candidate yet - let c_hash = { - let candidate_data = match statement.statement { - GenericStatement::Candidate(ref c) => Some(c.hash()), - GenericStatement::Valid(ref hash) - | GenericStatement::Invalid(ref hash) - => table.with_candidate(hash, |c| c.map(|_| *hash)), - }; - match candidate_data { - Some(x) => x, - None => { - deferred_statements.push(statement); - continue; - } - } - }; - - // import all statements pending on this candidate - let (mut statements, _traces) = if let GenericStatement::Candidate(_) = statement.statement { - deferred_statements.take_deferred(&c_hash) - } else { - (Vec::new(), Vec::new()) - }; - - // prepend the candidate statement. - debug!(target: "validation", "Importing statements about candidate {:?}", c_hash); - statements.insert(0, statement); - - let producers: Vec<_> = { - let gossip_handle = &gossip_handle; - let fetch_pov = |candidate: &AbridgedCandidateReceipt| fetch_pov_from_gossip( - candidate, - gossip_handle, - ).map(Result::<_, std::io::Error>::Ok); - - table.import_remote_statements( - &fetch_pov, - statements.iter().cloned(), - ) - }; - - // dispatch future work as necessary. - for (producer, statement) in producers.into_iter().zip(statements) { - if let Some(_sender) = table.index_to_id(statement.sender) { - if let Some(producer) = producer { - trace!(target: "validation", "driving statement work to completion"); - - let table = table.clone(); - let gossip_handle = gossip_handle.clone(); - - let work = producer.prime(api.clone()).validate().map(move |res| { - let validated = match res { - Err(e) => { - debug!(target: "p_net", "Failed to act on statement: {}", e); - return - } - Ok(v) => v, - }; - - // propagate the statement. - let statement = crate::legacy::gossip::GossipStatement::new( - relay_parent, - match table.import_validated(validated) { - Some(s) => s, - None => return, - } - ); - - gossip_handle.gossip_message(topic, statement.into()); - }); - - let work = future::select(work.boxed(), exit.clone()).map(drop); - if let Err(_) = to_worker.send( - BackgroundToWorkerMsg::Spawn(work.boxed()) - ).await { - // can fail only if remote has hung up - worker is dead, - // we should die too. this is defensive, since the exit future - // would fire shortly anyway. 
-						return
-					}
-				}
-			}
-		}
-	}
-}
-
-fn fetch_pov_from_gossip(
-	candidate: &AbridgedCandidateReceipt,
-	gossip_handle: &impl GossipOps,
-) -> impl Future<Output = PoVBlock> + Send {
-	let candidate_hash = candidate.hash();
-	let topic = crate::legacy::gossip::pov_block_topic(candidate.relay_parent);
-
-	// The gossip system checks that the correct pov-block data is present
-	// before placing in the pool, so we can safely check by candidate hash.
-	gossip_handle.gossip_messages_for(topic)
-		.filter_map(move |(msg, _)| {
-			future::ready(match msg {
-				GossipMessage::PoVBlock(pov_block_message) =>
-					if pov_block_message.candidate_hash == candidate_hash {
-						Some(pov_block_message.pov_block)
-					} else {
-						None
-					},
-				_ => None,
-			})
-		})
-		.into_future()
-		.map(|(item, _)| item.expect(
-			"gossip message streams do not conclude early; qed"
-		))
-}
-
-// distribute a "local collation": this is a collation received by a validator
-// from a collator. it needs to be distributed to other validators in the same
-// group.
-fn distribute_validated_collation(
-	instance: &ConsensusNetworkingInstance,
-	receipt: AbridgedCandidateReceipt,
-	pov_block: PoVBlock,
-	chunks: (ValidatorIndex, Vec<ErasureChunk>),
-	gossip_handle: &impl GossipOps,
-) {
-	// produce a signed statement.
-	let hash = receipt.hash();
-	let validated = Validated::collated_local(
-		receipt,
-		pov_block.clone(),
-	);
-
-	// gossip the signed statement.
-	{
-		let statement = crate::legacy::gossip::GossipStatement::new(
-			instance.relay_parent,
-			match instance.statement_table.import_validated(validated) {
-				None => return,
-				Some(s) => s,
-			}
-		);
-
-		gossip_handle.gossip_message(instance.attestation_topic, statement.into());
-	}
-
-	// gossip the PoV block.
-	{
-		let pov_block_message = crate::legacy::gossip::GossipPoVBlock {
-			relay_chain_leaf: instance.relay_parent,
-			candidate_hash: hash,
-			pov_block,
-		};
-
-		gossip_handle.gossip_message(
-			crate::legacy::gossip::pov_block_topic(instance.relay_parent),
-			pov_block_message.into(),
-		);
-	}
-
-	// gossip erasure chunks.
-	for chunk in chunks.1 {
-		let message = crate::legacy::gossip::ErasureChunkMessage {
-			chunk,
-			candidate_hash: hash,
-		};
-
-		gossip_handle.gossip_message(
-			crate::erasure_coding_topic(&hash),
-			message.into(),
-		);
-	}
-}
-
-/// Routing logic for a particular attestation session.
-#[derive(Clone)]
-pub struct Router {
-	inner: Arc<RouterInner>,
-}
-
-// note: do _not_ make this `Clone`: the drop implementation needs to _uniquely_
-// send the `DropConsensusNetworking` message.
-struct RouterInner {
-	relay_parent: Hash,
-	sender: mpsc::Sender<ServiceToWorkerMsg>,
-}
-
-impl Service {
-	/// Register an availability-store that the network can query.
-	pub fn register_availability_store(&self, store: av_store::Store) {
-		let _ = self.sender.clone()
-			.try_send(ServiceToWorkerMsg::RegisterAvailabilityStore(store));
-	}
-
-	/// Submit a collation that we (as a collator) have prepared to validators.
-	///
-	/// Provide a set of validator IDs we should distribute to.
-	pub fn distribute_collation(&self, targets: HashSet<ValidatorId>, collation: Collation) {
-		let _ = self.sender.clone()
-			.try_send(ServiceToWorkerMsg::OurCollation(targets, collation));
-	}
-
-	/// Returns a stream that listens for checked statements on a particular
-	/// relay chain parent hash.
-	///
-	/// Take care to drop the stream, as the sending side will not be cleaned
-	/// up until it is.
-	pub fn checked_statements(&self, relay_parent: Hash)
-		-> impl Stream<Item = SignedStatement> + Send {
-		let (tx, rx) = oneshot::channel();
-		let mut sender = self.sender.clone();
-
-		let receive_stream = async move {
-			sender.send(
-				ServiceToWorkerMsg::ListenCheckedStatements(relay_parent, tx)
-			).map_err(future::Either::Left).await?;
-
-			rx.map_err(future::Either::Right).await
-		};
-
-		receive_stream
-			.map(|res| match res {
-				Ok(s) => s.left_stream(),
-				Err(e) => {
-					log::warn!(
-						target: "p_net",
-						"Polkadot network worker appears to be down: {:?}",
-						e,
-					);
-					stream::pending().right_stream()
-				}
-			})
-			.flatten_stream()
-	}
-}
-
-impl ParachainNetwork for Service {
-	type Error = mpsc::SendError;
-	type TableRouter = Router;
-	type BuildTableRouter = Pin<Box<dyn Future<Output = Result<Router, Self::Error>> + Send>>;
-
-	fn build_table_router(
-		&self,
-		table: Arc<SharedTable>,
-		authorities: &[ValidatorId],
-	) -> Self::BuildTableRouter {
-		let authorities = authorities.to_vec();
-		let mut sender = self.sender.clone();
-		let relay_parent = table.signing_context().parent_hash.clone();
-
-		Box::pin(async move {
-			let (router_sender, receiver) = mpsc::channel(0);
-			sender.send(
-				ServiceToWorkerMsg::BuildConsensusNetworking(receiver, table, authorities)
-			).await?;
-
-			Ok(Router {
-				inner: Arc::new(RouterInner {
-					relay_parent,
-					sender: router_sender,
-				})
-			})
-		})
-	}
-}
-
-impl Collators for Service {
-	type Error = future::Either<mpsc::SendError, oneshot::Canceled>;
-	type Collation = Pin<Box<dyn Future<Output = Result<Collation, Self::Error>> + Send>>;
-
-	fn collate(&self, parachain: ParaId, relay_parent: Hash) -> Self::Collation {
-		let (tx, rx) = oneshot::channel();
-		let mut sender = self.sender.clone();
-
-		Box::pin(async move {
-			sender.send(
-				ServiceToWorkerMsg::AwaitCollation(relay_parent, parachain, tx)
-			).map_err(future::Either::Left).await?;
-
-			rx.map_err(future::Either::Right).await
-		})
-	}
-
-	fn note_bad_collator(&self, collator: CollatorId) {
-		let _ = self.sender.clone().try_send(ServiceToWorkerMsg::NoteBadCollator(collator));
-	}
-}
-
-impl av_store::ErasureNetworking for Service {
-	type Error = future::Either<mpsc::SendError, oneshot::Canceled>;
-
-	fn fetch_erasure_chunk(&self, candidate_hash: &Hash, index: u32)
-		-> Pin<Box<dyn Future<Output = Result<ErasureChunk, Self::Error>> + Send>>
-	{
-		let (tx, rx) = oneshot::channel();
-		let mut sender = self.sender.clone();
-
-		let candidate_hash = *candidate_hash;
-		Box::pin(async move {
-			sender.send(
-				ServiceToWorkerMsg::FetchErasureChunk(candidate_hash, index, tx)
-			).map_err(future::Either::Left).await?;
-
-			rx.map_err(future::Either::Right).await
-		})
-	}
-
-	fn distribute_erasure_chunk(
-		&self,
-		candidate_hash: Hash,
-		chunk: ErasureChunk,
-	) {
-		let _ = self.sender.clone().try_send(
-			ServiceToWorkerMsg::DistributeErasureChunk(candidate_hash, chunk)
-		);
-	}
-}
-
-/// Errors when interacting with the statement router.
-#[derive(Debug, derive_more::Display, derive_more::From)]
-pub enum RouterError {
-	#[display(fmt = "Encountered unexpected I/O error: {}", _0)]
-	Io(std::io::Error),
-	#[display(fmt = "Worker hung up while answering request.")]
-	Canceled(oneshot::Canceled),
-	#[display(fmt = "Could not reach worker with request: {}", _0)]
-	SendError(mpsc::SendError),
-	#[display(fmt = "Provided candidate receipt does not have expected relay parent {}", _0)]
-	IncorrectRelayParent(Hash),
-}
-
-impl TableRouter for Router {
-	type Error = RouterError;
-	type SendLocalCollation = Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>>;
-	type FetchValidationProof = Pin<Box<dyn Future<Output = Result<PoVBlock, Self::Error>> + Send>>;
-
-	fn local_collation(
-		&self,
-		receipt: AbridgedCandidateReceipt,
-		pov_block: PoVBlock,
-		chunks: (ValidatorIndex, &[ErasureChunk]),
-	) -> Self::SendLocalCollation {
-		if receipt.relay_parent != self.inner.relay_parent {
-			return Box::pin(
-				future::ready(Err(RouterError::IncorrectRelayParent(self.inner.relay_parent)))
-			);
-		}
-
-		let message = ServiceToWorkerMsg::SubmitValidatedCollation(
-			receipt,
-			pov_block,
-			(chunks.0, chunks.1.to_vec()),
-		);
-		let mut sender = self.inner.sender.clone();
-		Box::pin(async move {
-			sender.send(message).map_err(Into::into).await
-		})
-	}
-
-	fn fetch_pov_block(&self, candidate: &AbridgedCandidateReceipt) -> Self::FetchValidationProof {
-		if candidate.relay_parent != self.inner.relay_parent {
-			return Box::pin(
-				future::ready(Err(RouterError::IncorrectRelayParent(self.inner.relay_parent)))
-			);
-		}
-
-		let (tx, rx) = oneshot::channel();
-		let message = ServiceToWorkerMsg::FetchPoVBlock(
-			candidate.clone(),
-			tx,
-		);
-
-		let mut sender = self.inner.sender.clone();
-		Box::pin(async move {
-			sender.send(message).await?;
-			rx.map_err(Into::into).await
-		})
-	}
-}
diff --git a/network/src/protocol/tests.rs b/network/src/protocol/tests.rs
deleted file mode 100644
index a1cad24be34dce79bb54808f43a378a2a6d109ce..0000000000000000000000000000000000000000
--- a/network/src/protocol/tests.rs
+++ /dev/null
@@ -1,636 +0,0 @@
-// Copyright 2020 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-//! Tests for the protocol.
- -use super::*; -use crate::legacy::gossip::GossipPoVBlock; -use parking_lot::Mutex; - -use polkadot_primitives::Block; -use polkadot_primitives::parachain::{ - Id as ParaId, Chain, DutyRoster, ParachainHost, ValidatorId, - Retriable, CollatorId, AbridgedCandidateReceipt, - GlobalValidationSchedule, LocalValidationData, ErasureChunk, SigningContext, - PoVBlock, BlockData, ValidationCode, -}; -use polkadot_validation::{SharedTable, TableRouter}; - -use av_store::{Store as AvailabilityStore, ErasureNetworking}; -use sc_network_gossip::TopicNotification; -use sp_api::{ApiRef, ProvideRuntimeApi}; -use sp_runtime::traits::Block as BlockT; -use sp_core::crypto::Pair; -use sp_keyring::Sr25519Keyring; - -use futures::executor::LocalPool; -use futures::task::LocalSpawnExt; - -#[derive(Default)] -pub struct MockNetworkOps { - recorded: Mutex, -} - -#[derive(Default)] -struct Recorded { - peer_reputations: HashMap, - notifications: Vec<(PeerId, Message)>, -} - -// Test setup registers receivers of gossip messages as well as signals that -// fire when they are taken. -type GossipStreamEntry = (mpsc::UnboundedReceiver, oneshot::Sender<()>); - -#[derive(Default, Clone)] -struct MockGossip { - inner: Arc>>, - gossip_messages: Arc>>, -} - -impl MockGossip { - fn add_gossip_stream(&self, topic: Hash) - -> (mpsc::UnboundedSender, oneshot::Receiver<()>) - { - let (tx, rx) = mpsc::unbounded(); - let (o_tx, o_rx) = oneshot::channel(); - self.inner.lock().insert(topic, (rx, o_tx)); - (tx, o_rx) - } - - fn contains_listener(&self, topic: &Hash) -> bool { - self.inner.lock().contains_key(topic) - } -} - -impl NetworkServiceOps for MockNetworkOps { - fn report_peer(&self, peer: PeerId, value: sc_network::ReputationChange) { - let mut recorded = self.recorded.lock(); - let total_rep = recorded.peer_reputations.entry(peer).or_insert(0); - - *total_rep = total_rep.saturating_add(value.value); - } - - fn write_notification( - &self, - peer: PeerId, - engine_id: ConsensusEngineId, - notification: Vec, - ) { - assert_eq!(engine_id, POLKADOT_ENGINE_ID); - let message = Message::decode(&mut ¬ification[..]).expect("invalid notification"); - self.recorded.lock().notifications.push((peer, message)); - } -} - -impl crate::legacy::GossipService for MockGossip { - fn gossip_messages_for(&self, topic: Hash) -> crate::legacy::GossipMessageStream { - crate::legacy::GossipMessageStream::new(match self.inner.lock().remove(&topic) { - None => Box::pin(stream::empty()), - Some((rx, o_rx)) => { - let _ = o_rx.send(()); - Box::pin(rx) - } - }) - } - - fn gossip_message(&self, topic: Hash, message: GossipMessage) { - self.gossip_messages.lock().insert(topic, message); - } - - fn send_message(&self, _who: PeerId, _message: GossipMessage) { - - } -} - -impl GossipOps for MockGossip { - fn new_local_leaf(&self, _: crate::legacy::gossip::MessageValidationData) -> crate::legacy::gossip::NewLeafActions { - crate::legacy::gossip::NewLeafActions::new() - } - - fn register_availability_store(&self, _store: av_store::Store) {} -} - -#[derive(Default)] -struct ApiData { - validators: Vec, - duties: Vec, - active_parachains: Vec<(ParaId, Option<(CollatorId, Retriable)>)>, -} - -#[derive(Default, Clone)] -struct TestApi { - data: Arc>, -} - -#[derive(Default)] -struct RuntimeApi { - data: Arc>, -} - -impl ProvideRuntimeApi for TestApi { - type Api = RuntimeApi; - - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { - RuntimeApi { data: self.data.clone() }.into() - } -} - -sp_api::mock_impl_runtime_apis! 
{ - impl ParachainHost for RuntimeApi { - type Error = sp_blockchain::Error; - - fn validators(&self) -> Vec { - self.data.lock().validators.clone() - } - - fn duty_roster(&self) -> DutyRoster { - DutyRoster { - validator_duty: self.data.lock().duties.clone(), - } - } - - fn active_parachains(&self) -> Vec<(ParaId, Option<(CollatorId, Retriable)>)> { - self.data.lock().active_parachains.clone() - } - - fn parachain_code(_: ParaId) -> Option { - Some(ValidationCode(Vec::new())) - } - - fn global_validation_schedule() -> GlobalValidationSchedule { - Default::default() - } - - fn local_validation_data(_: ParaId) -> Option { - Some(Default::default()) - } - - fn get_heads(_: Vec<::Extrinsic>) -> Option> { - Some(Vec::new()) - } - - fn signing_context() -> SigningContext { - SigningContext { - session_index: Default::default(), - parent_hash: Default::default(), - } - } - } -} - -impl super::Service { - async fn connect_peer(&mut self, peer: PeerId, role: ObservedRole) { - self.sender.send(ServiceToWorkerMsg::PeerConnected(peer, role)).await.unwrap(); - } - - async fn peer_message(&mut self, peer: PeerId, message: Message) { - let bytes = message.encode().into(); - - self.sender.send(ServiceToWorkerMsg::PeerMessage(peer, vec![bytes])).await.unwrap(); - } - - async fn disconnect_peer(&mut self, peer: PeerId) { - self.sender.send(ServiceToWorkerMsg::PeerDisconnected(peer)).await.unwrap(); - } - - async fn synchronize( - &mut self, - callback: impl FnOnce(&mut ProtocolHandler) -> T + Send + 'static, - ) -> T { - let (tx, rx) = oneshot::channel(); - - let msg = ServiceToWorkerMsg::Synchronize(Box::new(move |proto| { - let res = callback(proto); - if let Err(_) = tx.send(res) { - log::warn!(target: "p_net", "Failed to send synchronization result"); - } - })); - - self.sender.send(msg).await.expect("Worker thread unexpectedly hung up"); - rx.await.expect("Worker thread failed to send back result") - } -} - -fn test_setup(config: Config) -> ( - Service, - MockGossip, - LocalPool, - impl Future + 'static, -) { - let pool = LocalPool::new(); - - let network_ops = Arc::new(MockNetworkOps::default()); - let mock_gossip = MockGossip::default(); - let (worker_tx, worker_rx) = mpsc::channel(0); - let api = Arc::new(TestApi::default()); - - let worker_task = worker_loop( - config, - network_ops.clone(), - mock_gossip.clone(), - api.clone(), - worker_rx, - pool.spawner(), - ); - - let service = Service { - sender: worker_tx, - network_service: network_ops, - }; - - (service, mock_gossip, pool, worker_task) -} - -#[test] -fn worker_task_shuts_down_when_sender_dropped() { - let (service, _gossip, mut pool, worker_task) = test_setup(Config { collating_for: None }); - - drop(service); - let _ = pool.run_until(worker_task); -} - -/// Given the async nature of `select!` that is being used in the main loop of the worker -/// and that consensus instances use their own channels, we don't know when the synchronize message -/// is handled. This helper functions checks multiple times that the given instance is dropped. Even -/// if the first round fails, the second one should be successful as the consensus instance drop -/// should be already handled this time. 
-fn wait_for_instance_drop(service: &mut Service, pool: &mut LocalPool, instance: Hash) { - let mut try_counter = 0; - let max_tries = 3; - - while try_counter < max_tries { - let dropped = pool.run_until(service.synchronize(move |proto| { - !proto.consensus_instances.contains_key(&instance) - })); - - if dropped { - return; - } - - try_counter += 1; - } - - panic!("Consensus instance `{}` wasn't dropped!", instance); -} - -#[test] -fn consensus_instances_cleaned_up() { - let (mut service, _gossip, mut pool, worker_task) = test_setup(Config { collating_for: None }); - let relay_parent = [0; 32].into(); - - let signing_context = SigningContext { - session_index: Default::default(), - parent_hash: relay_parent, - }; - let table = Arc::new(SharedTable::new( - Vec::new(), - HashMap::new(), - None, - signing_context, - AvailabilityStore::new_in_memory(service.clone()), - None, - None, - )); - - pool.spawner().spawn_local(worker_task).unwrap(); - - let router = pool.run_until( - service.build_table_router(table, &[]) - ).unwrap(); - - drop(router); - - wait_for_instance_drop(&mut service, &mut pool, relay_parent); -} - -#[test] -fn collation_is_received_with_dropped_router() { - let (mut service, gossip, mut pool, worker_task) = test_setup(Config { collating_for: None }); - let relay_parent = [0; 32].into(); - let topic = crate::legacy::gossip::attestation_topic(relay_parent); - - let signing_context = SigningContext { - session_index: Default::default(), - parent_hash: relay_parent, - }; - let table = Arc::new(SharedTable::new( - vec![Sr25519Keyring::Alice.public().into()], - HashMap::new(), - Some(Arc::new(Sr25519Keyring::Alice.pair().into())), - signing_context, - AvailabilityStore::new_in_memory(service.clone()), - None, - None, - )); - - pool.spawner().spawn_local(worker_task).unwrap(); - - let router = pool.run_until( - service.build_table_router(table, &[]) - ).unwrap(); - - let receipt = AbridgedCandidateReceipt { relay_parent, ..Default::default() }; - let local_collation_future = router.local_collation( - receipt, - PoVBlock { block_data: BlockData(Vec::new()) }, - (0, &[]), - ); - - // Drop the router and make sure that the consensus instance is still alive - drop(router); - - assert!(pool.run_until(service.synchronize(move |proto| { - proto.consensus_instances.contains_key(&relay_parent) - }))); - - // The gossip message should still be unknown - assert!(!gossip.gossip_messages.lock().contains_key(&topic)); - - pool.run_until(local_collation_future).unwrap(); - - // Make sure the instance is now dropped and the message was gossiped - wait_for_instance_drop(&mut service, &mut pool, relay_parent); - assert!(pool.run_until(service.synchronize(move |_| { - gossip.gossip_messages.lock().contains_key(&topic) - }))); -} - -#[test] -fn validator_peer_cleaned_up() { - let (mut service, _gossip, mut pool, worker_task) = test_setup(Config { collating_for: None }); - - let peer = PeerId::random(); - let validator_key = Sr25519Keyring::Alice.pair(); - let validator_id = ValidatorId::from(validator_key.public()); - - pool.spawner().spawn_local(worker_task).unwrap(); - pool.run_until(async move { - service.connect_peer(peer.clone(), ObservedRole::Authority).await; - service.peer_message(peer.clone(), Message::Status(Status { - version: VERSION, - collating_for: None, - })).await; - service.peer_message(peer.clone(), Message::ValidatorId(validator_id.clone())).await; - - let p = peer.clone(); - let v = validator_id.clone(); - let (peer_has_key, reverse_lookup) = service.synchronize(move |proto| { - 
let peer_has_key = proto.peers.get(&p).map_or( - false, - |p_data| p_data.session_keys.as_slice().contains(&v), - ); - - let reverse_lookup = proto.connected_validators.get(&v).map_or( - false, - |reps| reps.contains(&p), - ); - - (peer_has_key, reverse_lookup) - }).await; - - assert!(peer_has_key); - assert!(reverse_lookup); - - service.disconnect_peer(peer.clone()).await; - - let p = peer.clone(); - let v = validator_id.clone(); - let (peer_removed, rev_removed) = service.synchronize(move |proto| { - let peer_removed = !proto.peers.contains_key(&p); - let reverse_mapping_removed = !proto.connected_validators.contains_key(&v); - - (peer_removed, reverse_mapping_removed) - }).await; - - assert!(peer_removed); - assert!(rev_removed); - }); -} - -#[test] -fn validator_key_spillover_cleaned() { - let (mut service, _gossip, mut pool, worker_task) = test_setup(Config { collating_for: None }); - - let peer = PeerId::random(); - let make_validator_id = |ring: Sr25519Keyring| ValidatorId::from(ring.public()); - - // We will push 1 extra beyond what is normally kept. - assert_eq!(RECENT_SESSIONS, 3); - let key_a = make_validator_id(Sr25519Keyring::Alice); - let key_b = make_validator_id(Sr25519Keyring::Bob); - let key_c = make_validator_id(Sr25519Keyring::Charlie); - let key_d = make_validator_id(Sr25519Keyring::Dave); - - let keys = vec![key_a, key_b, key_c, key_d]; - - pool.spawner().spawn_local(worker_task).unwrap(); - pool.run_until(async move { - service.connect_peer(peer.clone(), ObservedRole::Authority).await; - service.peer_message(peer.clone(), Message::Status(Status { - version: VERSION, - collating_for: None, - })).await; - - for key in &keys { - service.peer_message(peer.clone(), Message::ValidatorId(key.clone())).await; - } - - let p = peer.clone(); - let active_keys = keys[1..].to_vec(); - let discarded_key = keys[0].clone(); - assert!(service.synchronize(move |proto| { - let active_correct = proto.peers.get(&p).map_or(false, |p_data| { - p_data.session_keys.as_slice() == &active_keys[..] - }); - - let active_lookup = active_keys.iter().all(|k| { - proto.connected_validators.get(&k).map_or(false, |m| m.contains(&p)) - }); - - let discarded = !proto.connected_validators.contains_key(&discarded_key); - - active_correct && active_lookup && discarded - }).await); - }); -} - -#[test] -fn erasure_fetch_drop_also_drops_gossip_sender() { - let (service, gossip, mut pool, worker_task) = test_setup(Config { collating_for: None }); - let candidate_hash = [1; 32].into(); - - let expected_index = 1; - - let spawner = pool.spawner(); - - spawner.spawn_local(worker_task).unwrap(); - let topic = crate::erasure_coding_topic(&candidate_hash); - let (mut gossip_tx, gossip_taken_rx) = gossip.add_gossip_stream(topic); - - let test_work = async move { - let chunk_listener = service.fetch_erasure_chunk( - &candidate_hash, - expected_index, - ); - - // spawn an abortable handle to the chunk listener future. - // we will wait until this future has proceeded enough to start grabbing - // messages from gossip, and then we will abort the future. - let (chunk_listener, abort_handle) = future::abortable(chunk_listener); - let handle = spawner.spawn_with_handle(chunk_listener).unwrap(); - gossip_taken_rx.await.unwrap(); - - // gossip listener was taken. and is active. - assert!(!gossip.contains_listener(&topic)); - assert!(!gossip_tx.is_closed()); - - abort_handle.abort(); - - // we must `await` this, otherwise context may never transfer over - // to the spawned `Abortable` future. 
- assert!(handle.await.is_err()); - loop { - // if dropping the sender leads to the gossip listener - // being cleaned up, we will eventually be unable to send a message - // on the sender. - if gossip_tx.is_closed() { break } - - let fake_chunk = GossipMessage::ErasureChunk( - crate::legacy::gossip::ErasureChunkMessage { - chunk: ErasureChunk { - chunk: vec![], - index: expected_index + 1, - proof: vec![], - }, - candidate_hash, - } - ).encode(); - - match gossip_tx.send(TopicNotification { message: fake_chunk, sender: None }).await { - Err(e) => { assert!(e.is_disconnected()); break }, - Ok(_) => continue, - } - } - }; - - pool.run_until(test_work); -} - -#[test] -fn fetches_pov_block_from_gossip() { - let (service, gossip, mut pool, worker_task) = test_setup(Config { collating_for: None }); - let relay_parent = [255; 32].into(); - - let pov_block = PoVBlock { - block_data: BlockData(vec![1, 2, 3]), - }; - - let mut candidate = AbridgedCandidateReceipt::default(); - candidate.relay_parent = relay_parent; - candidate.pov_block_hash = pov_block.hash(); - let candidate_hash = candidate.hash(); - - let signing_context = SigningContext { - session_index: Default::default(), - parent_hash: relay_parent, - }; - - let table = Arc::new(SharedTable::new( - Vec::new(), - HashMap::new(), - None, - signing_context, - AvailabilityStore::new_in_memory(service.clone()), - None, - None, - )); - - let spawner = pool.spawner(); - - spawner.spawn_local(worker_task).unwrap(); - let topic = crate::legacy::gossip::pov_block_topic(relay_parent); - let (mut gossip_tx, _gossip_taken_rx) = gossip.add_gossip_stream(topic); - - let test_work = async move { - let router = service.build_table_router(table, &[]).await.unwrap(); - let pov_block_listener = router.fetch_pov_block(&candidate); - - let message = GossipMessage::PoVBlock(GossipPoVBlock { - relay_chain_leaf: relay_parent, - candidate_hash, - pov_block, - }).encode(); - - gossip_tx.send(TopicNotification { message, sender: None }).await.unwrap(); - pov_block_listener.await - }; - - pool.run_until(test_work).unwrap(); -} - -#[test] -fn validator_sends_key_and_role_to_collator_on_status() { - let (service, _gossip, mut pool, worker_task) = test_setup(Config { collating_for: None }); - - let peer = PeerId::random(); - let peer_clone = peer.clone(); - let validator_key = Sr25519Keyring::Alice.pair(); - let validator_id = ValidatorId::from(validator_key.public()); - let validator_id_clone = validator_id.clone(); - let collator_id = CollatorId::from(Sr25519Keyring::Bob.public()); - let para_id = ParaId::from(100); - let mut service_clone = service.clone(); - - pool.spawner().spawn_local(worker_task).unwrap(); - pool.run_until(async move { - service_clone.synchronize(move |proto| { proto.local_keys.insert(validator_id_clone); }).await; - service_clone.connect_peer(peer_clone.clone(), ObservedRole::Authority).await; - service_clone.peer_message(peer_clone.clone(), Message::Status(Status { - version: VERSION, - collating_for: Some((collator_id, para_id)), - })).await; - }); - - let expected_msg = Message::ValidatorId(validator_id.clone()); - let validator_id_pos = service.network_service.recorded.lock().notifications.iter().position(|(p, notification)| { - peer == *p && *notification == expected_msg - }); - - let expected_msg = Message::CollatorRole(CollatorRole::Primary); - let collator_role_pos = service.network_service.recorded.lock().notifications.iter().position(|(p, notification)| { - peer == *p && *notification == expected_msg - }); - - assert!(validator_id_pos < 
collator_role_pos); -} - -#[test] -fn collator_state_send_key_updates_state_correctly() { - let mut state = CollatorState::Fresh; - state.send_key(Sr25519Keyring::Alice.public().into(), |_| {}); - assert!(matches!(state, CollatorState::Primed(None))); - - let mut state = CollatorState::RolePending(CollatorRole::Primary); - - let mut counter = 0; - state.send_key(Sr25519Keyring::Alice.public().into(), |msg| { - match (counter, msg) { - (0, Message::ValidatorId(_)) => { - counter += 1; - }, - (1, Message::CollatorRole(CollatorRole::Primary)) => {}, - err @ _ => panic!("Unexpected message: {:?}", err), - } - }); - assert!(matches!(state, CollatorState::Primed(Some(CollatorRole::Primary)))); -} diff --git a/network/test/Cargo.toml b/network/test/Cargo.toml deleted file mode 100644 index c8549924ce3a8caec6f3a42ce25494827e955990..0000000000000000000000000000000000000000 --- a/network/test/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "polkadot-network-test" -version = "0.8.12" -license = "GPL-3.0" -authors = ["Parity Technologies "] -edition = "2018" - -[dependencies] -log = "0.4.8" -parking_lot = "0.10.0" -futures = "0.3.1" -rand = "0.7.2" -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", features = ["test-helpers"], branch = "master" } -sc-network-test = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } -polkadot-test-runtime-client = { path = "../../runtime/test-runtime/client" } diff --git a/network/test/src/block_import.rs b/network/test/src/block_import.rs deleted file mode 100644 index ce4c9f8ba0b6725e3ca6561c5029945ee2040cab..0000000000000000000000000000000000000000 --- a/network/test/src/block_import.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Testing block import logic. 
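The two happy-path tests below hinge on one distinction: importing a block we have never seen reports `ImportedUnknown`, while re-importing a block we already hold reports `ImportedKnown`. A self-contained sketch of that contract with stand-in types (the real tests drive substrate's `import_single_block` against the polkadot test client); illustrative only:

```rust
use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum ImportResult {
	ImportedUnknown(u32),
	ImportedKnown(u32),
}

#[derive(Default)]
struct Chain {
	known: HashSet<u64>, // hashes of blocks already imported
}

impl Chain {
	// Import one block, reporting whether it was already known.
	fn import_single(&mut self, hash: u64, number: u32) -> ImportResult {
		if self.known.insert(hash) {
			ImportResult::ImportedUnknown(number)
		} else {
			ImportResult::ImportedKnown(number)
		}
	}
}

fn main() {
	let mut chain = Chain::default();
	assert_eq!(chain.import_single(0xab, 1), ImportResult::ImportedUnknown(1));
	assert_eq!(chain.import_single(0xab, 1), ImportResult::ImportedKnown(1));
}
```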
- -use sp_consensus::ImportedAux; -use sp_consensus::import_queue::{ - import_single_block, BasicQueue, BlockImportError, BlockImportResult, IncomingBlock, -}; -use polkadot_test_runtime_client::{self, prelude::*}; -use polkadot_test_runtime_client::runtime::{Block, Hash}; -use sp_runtime::generic::BlockId; -use super::*; - -fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { - let mut client = polkadot_test_runtime_client::new(); - let mut builder = client.new_block(Default::default()).unwrap(); - - let extrinsics = polkadot_test_runtime_client::needed_extrinsics(vec![]); - - for extrinsic in &extrinsics { - builder.push(extrinsic.clone()).unwrap(); - } - - let block = builder.build().unwrap().block; - client.import(BlockOrigin::File, block).unwrap(); - - let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); - let header = client.header(&BlockId::Number(1)).unwrap(); - let justification = client.justification(&BlockId::Number(1)).unwrap(); - let peer_id = PeerId::random(); - (client, hash, number, peer_id.clone(), IncomingBlock { - hash, - header, - body: Some(extrinsics), - justification, - origin: Some(peer_id.clone()), - allow_missing_state: false, - import_existing: false, - }) -} - -#[test] -fn import_single_good_block_works() { - let (_, _hash, number, peer_id, block) = prepare_good_block(); - - let mut expected_aux = ImportedAux::default(); - expected_aux.is_new_best = true; - - match import_single_block(&mut polkadot_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier(true)) { - Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) - if *num == number as u32 && *aux == expected_aux && *org == Some(peer_id) => {} - r @ _ => panic!("{:?}", r) - } -} - -#[test] -fn import_single_good_known_block_is_ignored() { - let (mut client, _hash, number, _, block) = prepare_good_block(); - match import_single_block(&mut client, BlockOrigin::File, block, &mut PassThroughVerifier(true)) { - Ok(BlockImportResult::ImportedKnown(ref n)) if *n == number as u32 => {} - _ => panic!() - } -} - -#[test] -fn import_single_good_block_without_header_fails() { - let (_, _, _, peer_id, mut block) = prepare_good_block(); - block.header = None; - match import_single_block(&mut polkadot_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier(true)) { - Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {} - _ => panic!() - } -} - -#[test] -fn async_import_queue_drops() { - let executor = sp_core::testing::SpawnBlockingExecutor::new(); - // Perform this test multiple times since it exhibits non-deterministic behavior. - for _ in 0..100 { - let verifier = PassThroughVerifier(true); - - let queue = BasicQueue::new( - verifier, - Box::new(polkadot_test_runtime_client::new()), - None, - None, - &executor, - None - ); - drop(queue); - } -} diff --git a/network/test/src/lib.rs b/network/test/src/lib.rs deleted file mode 100644 index c48bfabdfbaa74222b5b89452d3874c787d8145d..0000000000000000000000000000000000000000 --- a/network/test/src/lib.rs +++ /dev/null @@ -1,884 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -#![allow(missing_docs)] - -#[cfg(test)] -mod block_import; - -use std::{collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, task::{Poll, Context as FutureContext}}; - -use log::trace; -use sc_network::config::{build_multiaddr, FinalityProofProvider, Role}; -use sp_blockchain::{ - Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, Info as BlockchainInfo, - HeaderBackend, -}; -use sc_client_api::{ - BlockchainEvents, BlockImportNotification, - FinalityNotifications, ImportNotifications, - FinalityNotification, - client::BlockBackend, - backend::{TransactionFor, AuxStore, Backend, Finalizer}, -}; -use sc_consensus::LongestChain; -use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; -use sp_consensus::block_validation::DefaultBlockAnnounceValidator; -use sp_consensus::import_queue::{ - BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport, -}; -use sp_consensus::block_import::{BlockImport, ImportResult}; -use sp_consensus::Error as ConsensusError; -use sp_consensus::{BlockOrigin, BlockImportParams, BlockCheckParams, JustificationImport}; -use futures::prelude::*; -use sc_network::{NetworkWorker, NetworkService, config::ProtocolId}; -use sc_network::config::{ - NetworkConfiguration, TransportConfig, BoxFinalityProofRequestBuilder, TransactionImport, - TransactionImportFuture -}; -use parking_lot::Mutex; -use sp_core::H256; -use sc_network::{PeerId, config::{ProtocolConfig, TransactionPool}}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::Justification; -pub use sc_network_test::PassThroughVerifier; -use sc_service::client::Client; - -pub use polkadot_test_runtime_client::runtime::{Block, Extrinsic, Hash}; -pub use polkadot_test_runtime_client::{TestClient, TestClientBuilder, TestClientBuilderExt}; - -pub type PeersFullClient = Client< - polkadot_test_runtime_client::Backend, - polkadot_test_runtime_client::Executor, - Block, - polkadot_test_runtime_client::runtime::RuntimeApi ->; -pub type PeersLightClient = Client< - polkadot_test_runtime_client::LightBackend, - polkadot_test_runtime_client::LightExecutor, - Block, - polkadot_test_runtime_client::runtime::RuntimeApi ->; - -#[derive(Clone)] -pub enum PeersClient { - Full(Arc, Arc), - Light(Arc, Arc), -} - -impl PeersClient { - pub fn as_full(&self) -> Option> { - match *self { - PeersClient::Full(ref client, ref _backend) => Some(client.clone()), - _ => None, - } - } - - pub fn as_block_import(&self) -> BlockImportAdapter { - match *self { - PeersClient::Full(ref client, ref _backend) => - BlockImportAdapter::new_full(client.clone()), - PeersClient::Light(ref client, ref _backend) => - BlockImportAdapter::Light(Arc::new(Mutex::new(client.clone())), PhantomData), - } - } - - pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { - match *self { - PeersClient::Full(ref client, ref _backend) => client.get_aux(key), - PeersClient::Light(ref client, ref _backend) => client.get_aux(key), - } - } - - pub fn info(&self) -> BlockchainInfo { - match *self { - PeersClient::Full(ref client, ref _backend) => client.chain_info(), - PeersClient::Light(ref client, ref 
_backend) => client.chain_info(), - } - } - - pub fn header(&self, block: &BlockId) -> ClientResult::Header>> { - match *self { - PeersClient::Full(ref client, ref _backend) => client.header(block), - PeersClient::Light(ref client, ref _backend) => client.header(block), - } - } - - pub fn justification(&self, block: &BlockId) -> ClientResult> { - match *self { - PeersClient::Full(ref client, ref _backend) => client.justification(block), - PeersClient::Light(ref client, ref _backend) => client.justification(block), - } - } - - pub fn finality_notification_stream(&self) -> FinalityNotifications { - match *self { - PeersClient::Full(ref client, ref _backend) => client.finality_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.finality_notification_stream(), - } - } - - pub fn import_notification_stream(&self) -> ImportNotifications{ - match *self { - PeersClient::Full(ref client, ref _backend) => client.import_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.import_notification_stream(), - } - } - - pub fn finalize_block( - &self, - id: BlockId, - justification: Option, - notify: bool - ) -> ClientResult<()> { - match *self { - PeersClient::Full(ref client, ref _backend) => client.finalize_block(id, justification, notify), - PeersClient::Light(ref client, ref _backend) => client.finalize_block(id, justification, notify), - } - } -} - -pub struct Peer { - pub data: D, - client: PeersClient, - /// We keep a copy of the verifier so that we can invoke it for locally-generated blocks, - /// instead of going through the import queue. - verifier: VerifierAdapter, - /// We keep a copy of the block_import so that we can invoke it for locally-generated blocks, - /// instead of going through the import queue. - block_import: BlockImportAdapter<()>, - select_chain: Option>, - backend: Option>, - network: NetworkWorker::Hash>, - imported_blocks_stream: Pin> + Send>>, - finality_notification_stream: Pin> + Send>>, -} - -impl Peer { - /// Get this peer ID. - pub fn id(&self) -> &PeerId { - self.network.service().local_peer_id() - } - - /// Returns true if we're major syncing. - pub fn is_major_syncing(&self) -> bool { - self.network.service().is_major_syncing() - } - - // Returns a clone of the local SelectChain, only available on full nodes - pub fn select_chain(&self) -> Option> { - self.select_chain.clone() - } - - /// Returns the number of peers we're connected to. - pub fn num_peers(&self) -> usize { - self.network.num_connected_peers() - } - - /// Returns true if we have no peer. - pub fn is_offline(&self) -> bool { - self.num_peers() == 0 - } - - /// Request a justification for the given block. - pub fn request_justification(&self, hash: &::Hash, number: NumberFor) { - self.network.service().request_justification(hash, number); - } - - /// Announces an important block on the network. - pub fn announce_block(&self, hash: ::Hash, data: Vec) { - self.network.service().announce_block(hash, data); - } - - /// Request explicit fork sync. 
-	pub fn set_sync_fork_request(&self, peers: Vec<PeerId>, hash: <Block as BlockT>::Hash, number: NumberFor<Block>) {
-		self.network.service().set_sync_fork_request(peers, hash, number);
-	}
-
-	/// Add blocks to the peer -- edit the block before adding
-	pub fn generate_blocks<F>(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256
-		where F: FnMut(BlockBuilder<Block, PeersFullClient, polkadot_test_runtime_client::Backend>) -> Block
-	{
-		let best_hash = self.client.info().best_hash;
-		self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false)
-	}
-
-	/// Add blocks to the peer -- edit the block before adding. The chain will
-	/// start at the given block ID.
-	fn generate_blocks_at<F>(
-		&mut self,
-		at: BlockId<Block>,
-		count: usize,
-		origin: BlockOrigin,
-		mut edit_block: F,
-		headers_only: bool,
-	) -> H256 where F: FnMut(BlockBuilder<Block, PeersFullClient, polkadot_test_runtime_client::Backend>) -> Block {
-		let full_client = self.client.as_full()
-			.expect("blocks could only be generated by full clients");
-		let mut at = full_client.header(&at).unwrap().unwrap().hash();
-		for _ in 0..count {
-			let builder = full_client.new_block_at(
-				&BlockId::Hash(at),
-				Default::default(),
-				false,
-			).unwrap();
-			let block = edit_block(builder);
-			let hash = block.header.hash();
-			trace!(
-				target: "test_network",
-				"Generating {}, (#{}, parent={})",
-				hash,
-				block.header.number,
-				block.header.parent_hash,
-			);
-			let header = block.header.clone();
-			let (import_block, cache) = self.verifier.verify(
-				origin,
-				header.clone(),
-				None,
-				if headers_only { None } else { Some(block.extrinsics) },
-			).unwrap();
-			let cache = if let Some(cache) = cache {
-				cache.into_iter().collect()
-			} else {
-				Default::default()
-			};
-			self.block_import.import_block(import_block, cache).expect("block_import failed");
-			at = hash;
-		}
-
-		self.network.update_chain();
-		self.network.service().announce_block(at.clone(), Vec::new());
-		at
-	}
-
-	/// Push blocks to the peer (simplified: with or without a TX)
-	pub fn push_blocks(&mut self, count: usize, with_tx: bool) -> H256 {
-		let best_hash = self.client.info().best_hash;
-		self.push_blocks_at(BlockId::Hash(best_hash), count, with_tx)
-	}
-
-	/// Push header-only blocks to the peer (bodies are not imported).
-	pub fn push_headers(&mut self, count: usize) -> H256 {
-		let best_hash = self.client.info().best_hash;
-		self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true)
-	}
-
-	/// Push blocks to the peer (simplified: with or without a TX) starting from
-	/// given hash.
-	pub fn push_blocks_at(&mut self, at: BlockId<Block>, count: usize, with_tx: bool) -> H256 {
-		self.generate_tx_blocks_at(at, count, with_tx, false)
-	}
-
-	/// Push blocks/headers to the peer (simplified: with or without a TX) starting from
-	/// given hash.
-	fn generate_tx_blocks_at(&mut self, at: BlockId<Block>, count: usize, with_tx: bool, headers_only: bool) -> H256 {
-		if with_tx {
-			self.generate_blocks_at(
-				at,
-				count,
-				BlockOrigin::File,
-				|builder| builder.build().unwrap().block,
-				headers_only
-			)
-		} else {
-			self.generate_blocks_at(
-				at,
-				count,
-				BlockOrigin::File,
-				|builder| builder.build().unwrap().block,
-				headers_only,
-			)
-		}
-	}
-
-	/// Get a reference to the client.
-	pub fn client(&self) -> &PeersClient {
-		&self.client
-	}
-
-	/// Get a reference to the network service.
-	pub fn network_service(&self) -> &Arc<NetworkService<Block, <Block as BlockT>::Hash>> {
-		&self.network.service()
-	}
-
-	/// Test helper to compare the blockchain state of multiple (networked)
-	/// clients.
-	/// Potentially costly, as it creates in-memory copies of both blockchains in order
-	/// to compare them.
If you have easier/softer checks that are sufficient, e.g. - /// by using .info(), you should probably use it instead of this. - pub fn blockchain_canon_equals(&self, other: &Self) -> bool { - if let (Some(mine), Some(others)) = (self.backend.clone(), other.backend.clone()) { - mine.blockchain().info().best_hash == others.blockchain().info().best_hash - } else { - false - } - } - - /// Count the total number of imported blocks. - pub fn blocks_count(&self) -> u64 { - self.backend.as_ref().map( - |backend| backend.blockchain().info().best_number as u64 - ).unwrap_or(0) - } - - /// Return a collection of block hashes that failed verification - pub fn failed_verifications(&self) -> HashMap<::Hash, String> { - self.verifier.failed_verifications.lock().clone() - } -} - -pub struct EmptyTransactionPool; - -impl TransactionPool for EmptyTransactionPool { - fn transactions(&self) -> Vec<(Hash, Extrinsic)> { - Vec::new() - } - - fn hash_of(&self, _transaction: &Extrinsic) -> Hash { - Hash::default() - } - - fn import(&self, _transaction: Extrinsic) -> TransactionImportFuture { - Box::pin(futures::future::ready(TransactionImport::None)) - } - - fn on_broadcasted(&self, _: HashMap>) {} - - fn transaction(&self, _h: &Hash) -> Option { None } -} - -/// Implements `BlockImport` for any `Transaction`. Internally the transaction is -/// "converted", aka the field is set to `None`. -/// -/// This is required as the `TestNetFactory` trait does not distinguish between -/// full and light nodes. -pub enum BlockImportAdapter { - Full( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), - Light( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), -} - -impl BlockImportAdapter { - /// Create a new instance of `Self::Full`. - pub fn new_full( - full: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Full(Arc::new(Mutex::new(full)), PhantomData) - } - - /// Create a new instance of `Self::Light`. - pub fn new_light( - light: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Light(Arc::new(Mutex::new(light)), PhantomData) - } -} - -impl Clone for BlockImportAdapter { - fn clone(&self) -> Self { - match self { - Self::Full(full, _) => Self::Full(full.clone(), PhantomData), - Self::Light(light, _) => Self::Light(light.clone(), PhantomData), - } - } -} - -impl BlockImport for BlockImportAdapter { - type Error = ConsensusError; - type Transaction = Transaction; - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - match self { - Self::Full(full, _) => full.lock().check_block(block), - Self::Light(light, _) => light.lock().check_block(block), - } - } - - fn import_block( - &mut self, - block: BlockImportParams, - cache: HashMap>, - ) -> Result { - match self { - Self::Full(full, _) => full.lock().import_block(block.convert_transaction(), cache), - Self::Light(light, _) => light.lock().import_block(block.convert_transaction(), cache), - } - } -} - -/// Implements `Verifier` on an `Arc>`. Used internally. 
-#[derive(Clone)] -struct VerifierAdapter { - verifier: Arc>>>, - failed_verifications: Arc>>, -} - -impl Verifier for VerifierAdapter { - fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option> - ) -> Result<(BlockImportParams, Option)>>), String> { - let hash = header.hash(); - self.verifier.lock().verify(origin, header, justification, body).map_err(|e| { - self.failed_verifications.lock().insert(hash, e.clone()); - e - }) - } -} - -impl VerifierAdapter { - fn new(verifier: Arc>>>) -> VerifierAdapter { - VerifierAdapter { - verifier, - failed_verifications: Default::default(), - } - } -} - -pub trait TestNetFactory: Sized { - type Verifier: 'static + Verifier; - type PeerData: Default; - - /// These two need to be implemented! - fn from_config(config: &ProtocolConfig) -> Self; - fn make_verifier( - &self, - client: PeersClient, - config: &ProtocolConfig, - peer_data: &Self::PeerData, - ) -> Self::Verifier; - - /// Get reference to peer. - fn peer(&mut self, i: usize) -> &mut Peer; - fn peers(&self) -> &Vec>; - fn mut_peers>)>( - &mut self, - closure: F, - ); - - /// Get custom block import handle for fresh client, along with peer data. - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - Self::PeerData, - ) - { - (client.as_block_import(), None, None, None, Default::default()) - } - - /// Get finality proof provider (if supported). - fn make_finality_proof_provider( - &self, - _client: PeersClient, - ) -> Option>> { - None - } - - fn default_config() -> ProtocolConfig { - ProtocolConfig::default() - } - - /// Create new test network with this many peers. - fn new(n: usize) -> Self { - trace!(target: "test_network", "Creating test network"); - let mut net = Self::from_config(&Default::default()); - - for i in 0..n { - trace!(target: "test_network", "Adding peer {}", i); - net.add_full_peer(); - } - net - } - - fn add_full_peer(&mut self,) { - self.add_full_peer_with_states(None) - } - - /// Add a full peer. 
- fn add_full_peer_with_states(&mut self, keep_blocks: Option) { - let test_client_builder = match keep_blocks { - Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), - None => TestClientBuilder::with_default_backend(), - }; - let backend = test_client_builder.backend(); - let (c, longest_chain) = test_client_builder.build_with_longest_chain(); - let client = Arc::new(c); - - let ( - block_import, - justification_import, - finality_proof_import, - finality_proof_request_builder, - data, - ) = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); - - let verifier = self.make_verifier( - PeersClient::Full(client.clone(), backend.clone()), - &Default::default(), - &data, - ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); - - let import_queue = Box::new(BasicQueue::new( - verifier.clone(), - Box::new(block_import.clone()), - justification_import, - finality_proof_import, - &sp_core::testing::SpawnBlockingExecutor::new(), - None, - )); - - let listen_addr = build_multiaddr![Memory(rand::random::())]; - - let mut network_config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); - network_config.listen_addresses = vec![listen_addr.clone()]; - network_config.transport = TransportConfig::MemoryOnly; - let network = NetworkWorker::new(sc_network::config::Params { - role: Role::Full, - executor: None, - network_config, - chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Full(client.clone(), backend.clone()), - ), - finality_proof_request_builder, - on_demand: None, - transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), - import_queue, - block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), - metrics_registry: None, - }).unwrap(); - - self.mut_peers(|peers| { - for peer in peers.iter_mut() { - peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); - } - - let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); - let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); - - peers.push(Peer { - data, - client: PeersClient::Full(client, backend.clone()), - select_chain: Some(longest_chain), - backend: Some(backend), - imported_blocks_stream, - finality_notification_stream, - block_import, - verifier, - network, - }); - }); - } - - /// Add a light peer. 
- fn add_light_peer(&mut self) { - let (c, backend) = polkadot_test_runtime_client::new_light(); - let client = Arc::new(c); - let ( - block_import, - justification_import, - finality_proof_import, - finality_proof_request_builder, - data, - ) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); - - let verifier = self.make_verifier( - PeersClient::Light(client.clone(), backend.clone()), - &Default::default(), - &data, - ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); - - let import_queue = Box::new(BasicQueue::new( - verifier.clone(), - Box::new(block_import.clone()), - justification_import, - finality_proof_import, - &sp_core::testing::SpawnBlockingExecutor::new(), - None, - )); - - let listen_addr = build_multiaddr![Memory(rand::random::())]; - - let mut network_config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); - network_config.listen_addresses = vec![listen_addr.clone()]; - network_config.transport = TransportConfig::MemoryOnly; - let network = NetworkWorker::new(sc_network::config::Params { - role: Role::Full, - executor: None, - network_config, - chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Light(client.clone(), backend.clone()) - ), - finality_proof_request_builder, - on_demand: None, - transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), - import_queue, - block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), - metrics_registry: None, - }).unwrap(); - - self.mut_peers(|peers| { - for peer in peers.iter_mut() { - peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); - } - - let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); - let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); - - peers.push(Peer { - data, - verifier, - select_chain: None, - backend: None, - block_import, - client: PeersClient::Light(client, backend), - imported_blocks_stream, - finality_notification_stream, - network, - }); - }); - } - - /// Polls the testnet until all nodes are in sync. - /// - /// Must be executed in a task context. - fn poll_until_sync(&mut self, cx: &mut FutureContext) -> Poll<()> { - self.poll(cx); - - // Return `NotReady` if there's a mismatch in the highest block number. - let mut highest = None; - for peer in self.peers().iter() { - if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending - } - if peer.network.num_sync_requests() != 0 { - return Poll::Pending - } - match (highest, peer.client.info().best_hash) { - (None, b) => highest = Some(b), - (Some(ref a), ref b) if a == b => {}, - (Some(_), _) => return Poll::Pending - } - } - Poll::Ready(()) - } - - /// Polls the testnet until theres' no activiy of any kind. - /// - /// Must be executed in a task context. - fn poll_until_idle(&mut self, cx: &mut FutureContext) -> Poll<()> { - self.poll(cx); - - for peer in self.peers().iter() { - if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending - } - if peer.network.num_sync_requests() != 0 { - return Poll::Pending - } - } - Poll::Ready(()) - } - - /// Blocks the current thread until we are sync'ed. - /// - /// Calls `poll_until_sync` repeatedly. 
- fn block_until_sync(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx))); - } - - /// Blocks the current thread until there are no pending packets. - /// - /// Calls `poll_until_idle` repeatedly with the runtime passed as parameter. - fn block_until_idle(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_idle(cx))); - } - - /// Polls the testnet. Processes all the pending actions and returns `NotReady`. - fn poll(&mut self, cx: &mut FutureContext) { - self.mut_peers(|peers| { - for peer in peers { - trace!(target: "sync", "-- Polling {}", peer.id()); - if let Poll::Ready(res) = Pin::new(&mut peer.network).poll(cx) { - res.unwrap(); - } - trace!(target: "sync", "-- Polling complete {}", peer.id()); - - // We poll `imported_blocks_stream`. - while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { - peer.network.service().announce_block(notification.hash, Vec::new()); - } - - // We poll `finality_notification_stream`, but we only take the last event. - let mut last = None; - while let Poll::Ready(Some(item)) = peer.finality_notification_stream.as_mut().poll_next(cx) { - last = Some(item); - } - if let Some(notification) = last { - peer.network.on_block_finalized(notification.hash, notification.header); - } - } - }); - } -} - -pub struct TestNet { - peers: Vec>, -} - -impl TestNetFactory for TestNet { - type Verifier = PassThroughVerifier; - type PeerData = (); - - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - TestNet { - peers: Vec::new(), - } - } - - fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig, _peer_data: &()) - -> Self::Verifier - { - PassThroughVerifier(false) - } - - fn peer(&mut self, i: usize) -> &mut Peer<()> { - &mut self.peers[i] - } - - fn peers(&self) -> &Vec> { - &self.peers - } - - fn mut_peers>)>(&mut self, closure: F) { - closure(&mut self.peers); - } -} - -pub struct ForceFinalized(PeersClient); - -impl JustificationImport for ForceFinalized { - type Error = ConsensusError; - - fn import_justification( - &mut self, - hash: H256, - _number: NumberFor, - justification: Justification, - ) -> Result<(), Self::Error> { - self.0.finalize_block(BlockId::Hash(hash), Some(justification), true) - .map_err(|_| ConsensusError::InvalidJustification.into()) - } -} - -pub struct JustificationTestNet(TestNet); - -impl TestNetFactory for JustificationTestNet { - type Verifier = PassThroughVerifier; - type PeerData = (); - - fn from_config(config: &ProtocolConfig) -> Self { - JustificationTestNet(TestNet::from_config(config)) - } - - fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig, peer_data: &()) -> Self::Verifier { - self.0.make_verifier(client, config, peer_data) - } - - fn peer(&mut self, i: usize) -> &mut Peer { - self.0.peer(i) - } - - fn peers(&self) -> &Vec> { - self.0.peers() - } - - fn mut_peers>, - )>(&mut self, closure: F) { - self.0.mut_peers(closure) - } - - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - Self::PeerData, - ) - { - ( - client.as_block_import(), - Some(Box::new(ForceFinalized(client))), - None, - None, - Default::default(), - ) - } -} diff --git a/node/collation-generation/Cargo.toml b/node/collation-generation/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..f7d5e7f162efc0c8230cd8d6714035a97b2fc33c --- /dev/null 
+++ b/node/collation-generation/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "polkadot-node-collation-generation" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +derive_more = "0.99.9" +futures = "0.3.5" +log = "0.4.8" +polkadot-erasure-coding = { path = "../../erasure-coding" } +polkadot-node-primitives = { path = "../primitives" } +polkadot-node-subsystem = { path = "../subsystem" } +polkadot-node-subsystem-util = { path = "../subsystem-util" } +polkadot-primitives = { path = "../../primitives" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[dev-dependencies] +polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..c2f6f9bc2ceb74ce21de9648be0ebf2768f63992 --- /dev/null +++ b/node/collation-generation/src/lib.rs @@ -0,0 +1,672 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The collation generation subsystem is the interface between polkadot and the collators. + +#![deny(missing_docs)] + +use futures::{ + channel::{mpsc, oneshot}, + future::FutureExt, + join, + select, + sink::SinkExt, + stream::StreamExt, +}; +use polkadot_node_primitives::CollationGenerationConfig; +use polkadot_node_subsystem::{ + errors::RuntimeApiError, + messages::{AllMessages, CollationGenerationMessage, CollatorProtocolMessage}, + FromOverseer, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemError, SubsystemResult, + metrics::{self, prometheus}, +}; +use polkadot_node_subsystem_util::{ + self as util, request_availability_cores_ctx, request_full_validation_data_ctx, + request_validators_ctx, +}; +use polkadot_primitives::v1::{ + collator_signature_payload, AvailableData, CandidateCommitments, + CandidateDescriptor, CandidateReceipt, CoreState, Hash, OccupiedCoreAssumption, + PersistedValidationData, PoV, +}; +use sp_core::crypto::Pair; +use std::sync::Arc; + +/// Collation Generation Subsystem +pub struct CollationGenerationSubsystem { + config: Option>, + metrics: Metrics, +} + +impl CollationGenerationSubsystem { + /// Create a new instance of the `CollationGenerationSubsystem`. + pub fn new(metrics: Metrics) -> Self { + Self { + config: None, + metrics, + } + } + + /// Run this subsystem + /// + /// Conceptually, this is very simple: it just loops forever. + /// + /// - On incoming overseer messages, it starts or stops jobs as appropriate. + /// - On other incoming messages, if they can be converted into Job::ToJob and + /// include a hash, then they're forwarded to the appropriate individual job. + /// - On outgoing messages from the jobs, it forwards them to the overseer. + /// + /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur. 
+ /// Otherwise, most are logged and then discarded. + async fn run(mut self, mut ctx: Context) + where + Context: SubsystemContext, + { + // when we activate new leaves, we spawn a bunch of sub-tasks, each of which is + // expected to generate precisely one message. We don't want to block the main loop + // at any point waiting for them all, so instead, we create a channel on which they can + // send those messages. We can then just monitor the channel and forward messages on it + // to the overseer here, via the context. + let (sender, mut receiver) = mpsc::channel(0); + + loop { + select! { + incoming = ctx.recv().fuse() => { + if self.handle_incoming::(incoming, &mut ctx, &sender).await { + break; + } + }, + msg = receiver.next().fuse() => { + if let Some(msg) = msg { + if let Err(err) = ctx.send_message(msg).await { + log::warn!(target: "collation_generation", "failed to forward message to overseer: {:?}", err); + break; + } + } + }, + } + } + } + + // handle an incoming message. return true if we should break afterwards. + // note: this doesn't strictly need to be a separate function; it's more an administrative function + // so that we don't clutter the run loop. It could in principle be inlined directly into there. + // it should hopefully therefore be ok that it's an async function mutably borrowing self. + async fn handle_incoming( + &mut self, + incoming: SubsystemResult>, + ctx: &mut Context, + sender: &mpsc::Sender, + ) -> bool + where + Context: SubsystemContext, + { + use polkadot_node_subsystem::ActiveLeavesUpdate; + use polkadot_node_subsystem::FromOverseer::{Communication, Signal}; + use polkadot_node_subsystem::OverseerSignal::{ActiveLeaves, BlockFinalized, Conclude}; + + match incoming { + Ok(Signal(ActiveLeaves(ActiveLeavesUpdate { activated, .. 
}))) => { + // follow the procedure from the guide + if let Some(config) = &self.config { + let metrics = self.metrics.clone(); + if let Err(err) = + handle_new_activations(config.clone(), &activated, ctx, metrics, sender).await + { + log::warn!(target: "collation_generation", "failed to handle new activations: {:?}", err); + return true; + }; + } + false + } + Ok(Signal(Conclude)) => true, + Ok(Communication { + msg: CollationGenerationMessage::Initialize(config), + }) => { + if self.config.is_some() { + log::warn!(target: "collation_generation", "double initialization"); + true + } else { + self.config = Some(Arc::new(config)); + false + } + } + Ok(Signal(BlockFinalized(_))) => false, + Err(err) => { + log::error!(target: "collation_generation", "error receiving message from subsystem context: {:?}", err); + true + } + } + } +} + +impl Subsystem for CollationGenerationSubsystem +where + Context: SubsystemContext, +{ + type Metrics = Metrics; + + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = Box::pin(self.run(ctx)); + + SpawnedSubsystem { + name: "collation-generation-subsystem", + future, + } + } +} + +#[derive(Debug, derive_more::From)] +enum Error { + #[from] + Subsystem(SubsystemError), + #[from] + OneshotRecv(oneshot::Canceled), + #[from] + Runtime(RuntimeApiError), + #[from] + Util(util::Error), + #[from] + Erasure(polkadot_erasure_coding::Error), +} + +type Result = std::result::Result; + +async fn handle_new_activations( + config: Arc, + activated: &[Hash], + ctx: &mut Context, + metrics: Metrics, + sender: &mpsc::Sender, +) -> Result<()> { + // follow the procedure from the guide: + // https://w3f.github.io/parachain-implementers-guide/node/collators/collation-generation.html + + for relay_parent in activated.iter().copied() { + // double-future magic happens here: the first layer of requests takes a mutable borrow of the context, and + // returns a receiver. The second layer of requests actually polls those receivers to completion. + let (availability_cores, validators) = join!( + request_availability_cores_ctx(relay_parent, ctx).await?, + request_validators_ctx(relay_parent, ctx).await?, + ); + + let availability_cores = availability_cores??; + let n_validators = validators??.len(); + + for core in availability_cores { + let (scheduled_core, assumption) = match core { + CoreState::Scheduled(scheduled_core) => { + (scheduled_core, OccupiedCoreAssumption::Free) + } + CoreState::Occupied(_occupied_core) => { + // TODO: https://github.com/paritytech/polkadot/issues/1573 + continue; + } + _ => continue, + }; + + if scheduled_core.para_id != config.para_id { + continue; + } + + // we get validation data synchronously for each core instead of + // within the subtask loop, because we have only a single mutable handle to the + // context, so the work can't really be distributed + let validation_data = match request_full_validation_data_ctx( + relay_parent, + scheduled_core.para_id, + assumption, + ctx, + ) + .await? + .await?? 
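+			// (to unpack the chain above: the first `.await?` resolves the request
+			// future into a response receiver, the second `.await` resolves that
+			// receiver, and the two trailing `?`s unwrap the channel result and
+			// the runtime API result respectively)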
+ { + Some(v) => v, + None => continue, + }; + + let task_config = config.clone(); + let mut task_sender = sender.clone(); + let metrics = metrics.clone(); + ctx.spawn("collation generation collation builder", Box::pin(async move { + let persisted_validation_data_hash = validation_data.persisted.hash(); + + let collation = (task_config.collator)(&validation_data).await; + + let pov_hash = collation.proof_of_validity.hash(); + + let signature_payload = collator_signature_payload( + &relay_parent, + &scheduled_core.para_id, + &persisted_validation_data_hash, + &pov_hash, + ); + + let erasure_root = match erasure_root( + n_validators, + validation_data.persisted, + collation.proof_of_validity.clone(), + ) { + Ok(erasure_root) => erasure_root, + Err(err) => { + log::error!(target: "collation_generation", "failed to calculate erasure root for para_id {}: {:?}", scheduled_core.para_id, err); + return + } + }; + + let commitments = CandidateCommitments { + fees: collation.fees, + upward_messages: collation.upward_messages, + new_validation_code: collation.new_validation_code, + head_data: collation.head_data, + erasure_root, + }; + + let ccr = CandidateReceipt { + commitments_hash: commitments.hash(), + descriptor: CandidateDescriptor { + signature: task_config.key.sign(&signature_payload), + para_id: scheduled_core.para_id, + relay_parent, + collator: task_config.key.public(), + persisted_validation_data_hash, + pov_hash, + }, + }; + + metrics.on_collation_generated(); + + if let Err(err) = task_sender.send(AllMessages::CollatorProtocol( + CollatorProtocolMessage::DistributeCollation(ccr, collation.proof_of_validity) + )).await { + log::warn!(target: "collation_generation", "failed to send collation result for para_id {}: {:?}", scheduled_core.para_id, err); + } + })).await?; + } + } + + Ok(()) +} + +fn erasure_root( + n_validators: usize, + persisted_validation: PersistedValidationData, + pov: PoV, +) -> Result { + let available_data = AvailableData { + validation_data: persisted_validation, + pov, + }; + + let chunks = polkadot_erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; + Ok(polkadot_erasure_coding::branches(&chunks).root()) +} + +#[derive(Clone)] +struct MetricsInner { + collations_generated_total: prometheus::Counter, +} + +/// CollationGenerationSubsystem metrics. +#[derive(Default, Clone)] +pub struct Metrics(Option); + +impl Metrics { + fn on_collation_generated(&self) { + if let Some(metrics) = &self.0 { + metrics.collations_generated_total.inc(); + } + } +} + +impl metrics::Metrics for Metrics { + fn try_register(registry: &prometheus::Registry) -> std::result::Result { + let metrics = MetricsInner { + collations_generated_total: prometheus::register( + prometheus::Counter::new( + "parachain_collations_generated_total", + "Number of collations generated." 
+ )?, + registry, + )?, + }; + Ok(Metrics(Some(metrics))) + } +} + +#[cfg(test)] +mod tests { + mod handle_new_activations { + use super::super::*; + use futures::{ + lock::Mutex, + task::{Context as FuturesContext, Poll}, + Future, + }; + use polkadot_node_primitives::Collation; + use polkadot_node_subsystem::messages::{ + AllMessages, RuntimeApiMessage, RuntimeApiRequest, + }; + use polkadot_node_subsystem_test_helpers::{ + subsystem_test_harness, TestSubsystemContextHandle, + }; + use polkadot_primitives::v1::{ + BlockData, BlockNumber, CollatorPair, Id as ParaId, + PersistedValidationData, PoV, ScheduledCore, ValidationData, + }; + use std::pin::Pin; + + fn test_collation() -> Collation { + Collation { + fees: Default::default(), + upward_messages: Default::default(), + new_validation_code: Default::default(), + head_data: Default::default(), + proof_of_validity: PoV { + block_data: BlockData(Vec::new()), + }, + } + } + + // Box + Unpin + Send + struct TestCollator; + + impl Future for TestCollator { + type Output = Collation; + + fn poll(self: Pin<&mut Self>, _cx: &mut FuturesContext) -> Poll { + Poll::Ready(test_collation()) + } + } + + impl Unpin for TestCollator {} + + fn test_config>(para_id: Id) -> Arc { + Arc::new(CollationGenerationConfig { + key: CollatorPair::generate().0, + collator: Box::new(|_vd: &ValidationData| { + Box::new(TestCollator) + }), + para_id: para_id.into(), + }) + } + + fn scheduled_core_for>(para_id: Id) -> ScheduledCore { + ScheduledCore { + para_id: para_id.into(), + collator: None, + } + } + + #[test] + fn requests_availability_per_relay_parent() { + let activated_hashes: Vec = vec![ + [1; 32].into(), + [4; 32].into(), + [9; 32].into(), + [16; 32].into(), + ]; + + let requested_availability_cores = Arc::new(Mutex::new(Vec::new())); + + let overseer_requested_availability_cores = requested_availability_cores.clone(); + let overseer = |mut handle: TestSubsystemContextHandle| async move { + loop { + match handle.try_recv().await { + None => break, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::AvailabilityCores(tx)))) => { + overseer_requested_availability_cores.lock().await.push(hash); + tx.send(Ok(vec![])).unwrap(); + } + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(_hash, RuntimeApiRequest::Validators(tx)))) => { + tx.send(Ok(vec![Default::default(); 3])).unwrap(); + } + Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg), + } + } + }; + + let (tx, _rx) = mpsc::channel(0); + + let subsystem_activated_hashes = activated_hashes.clone(); + subsystem_test_harness(overseer, |mut ctx| async move { + handle_new_activations( + test_config(123u32), + &subsystem_activated_hashes, + &mut ctx, + Metrics(None), + &tx, + ) + .await + .unwrap(); + }); + + let mut requested_availability_cores = Arc::try_unwrap(requested_availability_cores) + .expect("overseer should have shut down by now") + .into_inner(); + requested_availability_cores.sort(); + + assert_eq!(requested_availability_cores, activated_hashes); + } + + #[test] + fn requests_validation_data_for_scheduled_matches() { + let activated_hashes: Vec = vec![ + Hash::repeat_byte(1), + Hash::repeat_byte(4), + Hash::repeat_byte(9), + Hash::repeat_byte(16), + ]; + + let requested_full_validation_data = Arc::new(Mutex::new(Vec::new())); + + let overseer_requested_full_validation_data = requested_full_validation_data.clone(); + let overseer = |mut handle: TestSubsystemContextHandle| async move { + loop { + match 
handle.try_recv().await { + None => break, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::AvailabilityCores(tx), + ))) => { + tx.send(Ok(vec![ + CoreState::Free, + // this is weird, see explanation below + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 4) as u32, + )), + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 5) as u32, + )), + ])) + .unwrap(); + } + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::FullValidationData( + _para_id, + _occupied_core_assumption, + tx, + ), + ))) => { + overseer_requested_full_validation_data + .lock() + .await + .push(hash); + tx.send(Ok(Default::default())).unwrap(); + } + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::Validators(tx), + ))) => { + tx.send(Ok(vec![Default::default(); 3])).unwrap(); + } + Some(msg) => { + panic!("didn't expect any other overseer requests; got {:?}", msg) + } + } + } + }; + + let (tx, _rx) = mpsc::channel(0); + + subsystem_test_harness(overseer, |mut ctx| async move { + handle_new_activations(test_config(16), &activated_hashes, &mut ctx, Metrics(None), &tx) + .await + .unwrap(); + }); + + let requested_full_validation_data = Arc::try_unwrap(requested_full_validation_data) + .expect("overseer should have shut down by now") + .into_inner(); + + // the only activated hash should be from the 4 hash: + // each activated hash generates two scheduled cores: one with its value * 4, one with its value * 5 + // given that the test configuration has a para_id of 16, there's only one way to get that value: with the 4 + // hash. + assert_eq!(requested_full_validation_data, vec![[4; 32].into()]); + } + + #[test] + fn sends_distribute_collation_message() { + let activated_hashes: Vec = vec![ + Hash::repeat_byte(1), + Hash::repeat_byte(4), + Hash::repeat_byte(9), + Hash::repeat_byte(16), + ]; + + let overseer = |mut handle: TestSubsystemContextHandle| async move { + loop { + match handle.try_recv().await { + None => break, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::AvailabilityCores(tx), + ))) => { + tx.send(Ok(vec![ + CoreState::Free, + // this is weird, see explanation below + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 4) as u32, + )), + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 5) as u32, + )), + ])) + .unwrap(); + } + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::FullValidationData( + _para_id, + _occupied_core_assumption, + tx, + ), + ))) => { + tx.send(Ok(Some(Default::default()))).unwrap(); + } + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::Validators(tx), + ))) => { + tx.send(Ok(vec![Default::default(); 3])).unwrap(); + } + Some(msg) => { + panic!("didn't expect any other overseer requests; got {:?}", msg) + } + } + } + }; + + let config = test_config(16); + let subsystem_config = config.clone(); + + let (tx, rx) = mpsc::channel(0); + + // empty vec doesn't allocate on the heap, so it's ok we throw it away + let sent_messages = Arc::new(Mutex::new(Vec::new())); + let subsystem_sent_messages = sent_messages.clone(); + subsystem_test_harness(overseer, |mut ctx| async move { + handle_new_activations(subsystem_config, &activated_hashes, &mut ctx, Metrics(None), &tx) + .await + .unwrap(); + + std::mem::drop(tx); + + // collect all sent messages + *subsystem_sent_messages.lock().await = 
rx.collect().await; + }); + + let sent_messages = Arc::try_unwrap(sent_messages) + .expect("subsystem should have shut down by now") + .into_inner(); + + // we expect a single message to be sent, containing a candidate receipt. + // we don't care too much about the commitments_hash right now, but let's ensure that we've calculated the + // correct descriptor + let expect_pov_hash = test_collation().proof_of_validity.hash(); + let expect_validation_data_hash + = PersistedValidationData::::default().hash(); + let expect_relay_parent = Hash::repeat_byte(4); + let expect_payload = collator_signature_payload( + &expect_relay_parent, + &config.para_id, + &expect_validation_data_hash, + &expect_pov_hash, + ); + let expect_descriptor = CandidateDescriptor { + signature: config.key.sign(&expect_payload), + para_id: config.para_id, + relay_parent: expect_relay_parent, + collator: config.key.public(), + persisted_validation_data_hash: expect_validation_data_hash, + pov_hash: expect_pov_hash, + }; + + assert_eq!(sent_messages.len(), 1); + match &sent_messages[0] { + AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation( + CandidateReceipt { descriptor, .. }, + _pov, + )) => { + // signature generation is non-deterministic, so we can't just assert that the + // expected descriptor is correct. What we can do is validate that the produced + // descriptor has a valid signature, then just copy in the generated signature + // and check the rest of the fields for equality. + assert!(CollatorPair::verify( + &descriptor.signature, + &collator_signature_payload( + &descriptor.relay_parent, + &descriptor.para_id, + &descriptor.persisted_validation_data_hash, + &descriptor.pov_hash, + ) + .as_ref(), + &descriptor.collator, + )); + let expect_descriptor = { + let mut expect_descriptor = expect_descriptor; + expect_descriptor.signature = descriptor.signature.clone(); + expect_descriptor + }; + assert_eq!(descriptor, &expect_descriptor); + } + _ => panic!("received wrong message type"), + } + } + } +} diff --git a/node/core/README.md b/node/core/README.md index a53faa966a73869690b3bff4df2d5f100560d456..1656bb569fe404634cacc09a449f7ed89a045d0e 100644 --- a/node/core/README.md +++ b/node/core/README.md @@ -1 +1 @@ -Stub - This folder will hold core subsystem implementations, each with their own crate. +This folder contains core subsystems, each with their own crate. 
diff --git a/node/core/av-store/Cargo.toml b/node/core/av-store/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..68137b6ae77dd45db34357f973fb6adb9b0077fe
--- /dev/null
+++ b/node/core/av-store/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "polkadot-node-core-av-store"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+
+[dependencies]
+futures = "0.3.5"
+polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+polkadot-overseer = { path = "../../overseer" }
+polkadot-primitives = { path = "../../../primitives" }
+erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
+kvdb = "0.7.0"
+kvdb-rocksdb = "0.9.0"
+codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] }
+log = "0.4.8"
+derive_more = "0.99.9"
+
+[dev-dependencies]
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+futures = { version = "0.3.5", features = ["thread-pool"] }
+polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
+kvdb-memorydb = "0.7.0"
+assert_matches = "1.3.0"
diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5837377dd5f975bb8e6e75fec1115fe78c1dbd68
--- /dev/null
+++ b/node/core/av-store/src/lib.rs
@@ -0,0 +1,568 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Implements an `AvailabilityStoreSubsystem`.
+
+#![recursion_limit="256"]
+#![warn(missing_docs)]
+
+use std::collections::HashMap;
+use std::io;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use codec::{Encode, Decode};
+use futures::{select, channel::oneshot, FutureExt};
+use kvdb_rocksdb::{Database, DatabaseConfig};
+use kvdb::{KeyValueDB, DBTransaction};
+
+use polkadot_primitives::v1::{
+	Hash, AvailableData, ErasureChunk, ValidatorIndex,
+};
+use polkadot_subsystem::{
+	FromOverseer, SubsystemError, Subsystem, SubsystemContext, SpawnedSubsystem,
+	metrics::{self, prometheus},
+};
+use polkadot_subsystem::messages::AvailabilityStoreMessage;
+use polkadot_subsystem::OverseerSignal::Conclude;
+
+const LOG_TARGET: &str = "availability";
+
+mod columns {
+	pub const DATA: u32 = 0;
+	pub const NUM_COLUMNS: u32 = 1;
+}
+
+#[derive(Debug, derive_more::From)]
+enum Error {
+	#[from]
+	Erasure(erasure::Error),
+	#[from]
+	Io(io::Error),
+	#[from]
+	Oneshot(oneshot::Canceled),
+	#[from]
+	Subsystem(SubsystemError),
+}
+
+/// An implementation of the Availability Store subsystem.
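+///
+/// All entries live in the single `columns::DATA` column and are keyed by
+/// SCALE-encoded tuples (see `available_data_key` and `erasure_chunk_key`
+/// below). A sketch of the layout; the two tuple shapes encode to different
+/// lengths, so their key spaces cannot collide:
+///
+/// ```text
+/// (candidate_hash, 0i8)              -> StoredAvailableData
+/// (candidate_hash, chunk_index, 0i8) -> ErasureChunk
+/// ```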
+pub struct AvailabilityStoreSubsystem { + inner: Arc, + metrics: Metrics, +} + +fn available_data_key(candidate_hash: &Hash) -> Vec { + (candidate_hash, 0i8).encode() +} + +fn erasure_chunk_key(candidate_hash: &Hash, index: u32) -> Vec { + (candidate_hash, index, 0i8).encode() +} + +#[derive(Encode, Decode)] +struct StoredAvailableData { + data: AvailableData, + n_validators: u32, +} + +/// Configuration for the availability store. +pub struct Config { + /// Total cache size in megabytes. If `None` the default (128 MiB per column) is used. + pub cache_size: Option, + /// Path to the database. + pub path: PathBuf, +} + +impl AvailabilityStoreSubsystem { + /// Create a new `AvailabilityStoreSubsystem` with a given config on disk. + pub fn new_on_disk(config: Config, metrics: Metrics) -> io::Result { + let mut db_config = DatabaseConfig::with_columns(columns::NUM_COLUMNS); + + if let Some(cache_size) = config.cache_size { + let mut memory_budget = HashMap::new(); + + for i in 0..columns::NUM_COLUMNS { + memory_budget.insert(i, cache_size / columns::NUM_COLUMNS as usize); + } + db_config.memory_budget = memory_budget; + } + + let path = config.path.to_str().ok_or_else(|| io::Error::new( + io::ErrorKind::Other, + format!("Bad database path: {:?}", config.path), + ))?; + + let db = Database::open(&db_config, &path)?; + + Ok(Self { + inner: Arc::new(db), + metrics, + }) + } + + #[cfg(test)] + fn new_in_memory(inner: Arc) -> Self { + Self { + inner, + metrics: Metrics(None), + } + } +} + +async fn run(subsystem: AvailabilityStoreSubsystem, mut ctx: Context) + -> Result<(), Error> +where + Context: SubsystemContext, +{ + let ctx = &mut ctx; + loop { + select! { + incoming = ctx.recv().fuse() => { + match incoming { + Ok(FromOverseer::Signal(Conclude)) => break, + Ok(FromOverseer::Signal(_)) => (), + Ok(FromOverseer::Communication { msg }) => { + process_message(&subsystem.inner, &subsystem.metrics, msg)?; + } + Err(_) => break, + } + } + complete => break, + } + } + + Ok(()) +} + +fn process_message(db: &Arc, metrics: &Metrics, msg: AvailabilityStoreMessage) -> Result<(), Error> { + use AvailabilityStoreMessage::*; + match msg { + QueryAvailableData(hash, tx) => { + tx.send(available_data(db, &hash).map(|d| d.data)).map_err(|_| oneshot::Canceled)?; + } + QueryDataAvailability(hash, tx) => { + tx.send(available_data(db, &hash).is_some()).map_err(|_| oneshot::Canceled)?; + } + QueryChunk(hash, id, tx) => { + tx.send(get_chunk(db, &hash, id, metrics)?).map_err(|_| oneshot::Canceled)?; + } + QueryChunkAvailability(hash, id, tx) => { + tx.send(get_chunk(db, &hash, id, metrics)?.is_some()).map_err(|_| oneshot::Canceled)?; + } + StoreChunk(hash, id, chunk, tx) => { + match store_chunk(db, &hash, id, chunk) { + Err(e) => { + tx.send(Err(())).map_err(|_| oneshot::Canceled)?; + return Err(e); + } + Ok(()) => { + tx.send(Ok(())).map_err(|_| oneshot::Canceled)?; + } + } + } + StoreAvailableData(hash, id, n_validators, av_data, tx) => { + match store_available_data(db, &hash, id, n_validators, av_data, metrics) { + Err(e) => { + tx.send(Err(())).map_err(|_| oneshot::Canceled)?; + return Err(e); + } + Ok(()) => { + tx.send(Ok(())).map_err(|_| oneshot::Canceled)?; + } + } + } + } + + Ok(()) +} + +fn available_data(db: &Arc, candidate_hash: &Hash) -> Option { + query_inner(db, columns::DATA, &available_data_key(candidate_hash)) +} + +fn store_available_data( + db: &Arc, + candidate_hash: &Hash, + id: Option, + n_validators: u32, + available_data: AvailableData, + metrics: &Metrics, +) -> Result<(), Error> { + let 
mut tx = DBTransaction::new(); + + if let Some(index) = id { + let chunks = get_chunks(&available_data, n_validators as usize, metrics)?; + store_chunk(db, candidate_hash, n_validators, chunks[index as usize].clone())?; + } + + let stored_data = StoredAvailableData { + data: available_data, + n_validators, + }; + + tx.put_vec( + columns::DATA, + available_data_key(&candidate_hash).as_slice(), + stored_data.encode(), + ); + + db.write(tx)?; + + Ok(()) +} + +fn store_chunk(db: &Arc, candidate_hash: &Hash, _n_validators: u32, chunk: ErasureChunk) + -> Result<(), Error> +{ + let mut tx = DBTransaction::new(); + + let dbkey = erasure_chunk_key(candidate_hash, chunk.index); + + tx.put_vec(columns::DATA, &dbkey, chunk.encode()); + db.write(tx)?; + + Ok(()) +} + +fn get_chunk(db: &Arc, candidate_hash: &Hash, index: u32, metrics: &Metrics) + -> Result, Error> +{ + if let Some(chunk) = query_inner( + db, + columns::DATA, + &erasure_chunk_key(candidate_hash, index)) { + return Ok(Some(chunk)); + } + + if let Some(data) = available_data(db, candidate_hash) { + let mut chunks = get_chunks(&data.data, data.n_validators as usize, metrics)?; + let desired_chunk = chunks.get(index as usize).cloned(); + for chunk in chunks.drain(..) { + store_chunk(db, candidate_hash, data.n_validators, chunk)?; + } + return Ok(desired_chunk); + } + + Ok(None) +} + +fn query_inner(db: &Arc, column: u32, key: &[u8]) -> Option { + match db.get(column, key) { + Ok(Some(raw)) => { + let res = D::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed"); + Some(res) + } + Ok(None) => None, + Err(e) => { + log::warn!(target: LOG_TARGET, "Error reading from the availability store: {:?}", e); + None + } + } +} + +impl Subsystem for AvailabilityStoreSubsystem + where + Context: SubsystemContext, +{ + type Metrics = Metrics; + + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = Box::pin(async move { + if let Err(e) = run(self, ctx).await { + log::error!(target: "availabilitystore", "Subsystem exited with an error {:?}", e); + } + }); + + SpawnedSubsystem { + name: "availability-store-subsystem", + future, + } + } +} + +fn get_chunks(data: &AvailableData, n_validators: usize, metrics: &Metrics) -> Result, Error> { + let chunks = erasure::obtain_chunks_v1(n_validators, data)?; + metrics.on_chunks_received(chunks.len()); + let branches = erasure::branches(chunks.as_ref()); + + Ok(chunks + .iter() + .zip(branches.map(|(proof, _)| proof)) + .enumerate() + .map(|(index, (chunk, proof))| ErasureChunk { + chunk: chunk.clone(), + proof, + index: index as u32, + }) + .collect() + ) +} + +#[derive(Clone)] +struct MetricsInner { + received_availability_chunks_total: prometheus::Counter, +} + +/// Availability metrics. 
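+///
+/// `Metrics` wraps an `Option`: the `Default` (unregistered) instance is a
+/// no-op, so call sites never need to check whether a Prometheus registry
+/// was actually supplied (see `on_chunks_received` below).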
+#[derive(Default, Clone)] +pub struct Metrics(Option); + +impl Metrics { + fn on_chunks_received(&self, count: usize) { + if let Some(metrics) = &self.0 { + use core::convert::TryFrom as _; + // assume usize fits into u64 + let by = u64::try_from(count).unwrap_or_default(); + metrics.received_availability_chunks_total.inc_by(by); + } + } +} + +impl metrics::Metrics for Metrics { + fn try_register(registry: &prometheus::Registry) -> Result { + let metrics = MetricsInner { + received_availability_chunks_total: prometheus::register( + prometheus::Counter::new( + "parachain_received_availability_chunks_total", + "Number of availability chunks received.", + )?, + registry, + )?, + }; + Ok(Metrics(Some(metrics))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures::{ + future, + channel::oneshot, + executor, + Future, + }; + use std::cell::RefCell; + use polkadot_primitives::v1::{ + AvailableData, BlockData, HeadData, PersistedValidationData, PoV, + }; + use polkadot_node_subsystem_test_helpers as test_helpers; + + struct TestHarness { + virtual_overseer: test_helpers::TestSubsystemContextHandle, + } + + thread_local! { + static TIME_NOW: RefCell> = RefCell::new(None); + } + + struct TestState { + persisted_validation_data: PersistedValidationData, + } + + impl Default for TestState { + fn default() -> Self { + + let persisted_validation_data = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + block_number: Default::default(), + hrmp_mqc_heads: Vec::new(), + }; + Self { + persisted_validation_data, + } + } + } + + fn test_harness>( + store: Arc, + test: impl FnOnce(TestHarness) -> T, + ) { + let pool = sp_core::testing::TaskExecutor::new(); + let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + + let subsystem = AvailabilityStoreSubsystem::new_in_memory(store); + let subsystem = run(subsystem, context); + + let test_fut = test(TestHarness { + virtual_overseer, + }); + + futures::pin_mut!(test_fut); + futures::pin_mut!(subsystem); + + executor::block_on(future::select(test_fut, subsystem)); + } + + #[test] + fn store_chunk_works() { + let store = Arc::new(kvdb_memorydb::create(columns::NUM_COLUMNS)); + test_harness(store.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + let relay_parent = Hash::from([1; 32]); + let validator_index = 5; + + let chunk = ErasureChunk { + chunk: vec![1, 2, 3], + index: validator_index, + proof: vec![vec![3, 4, 5]], + }; + + let (tx, rx) = oneshot::channel(); + + let chunk_msg = AvailabilityStoreMessage::StoreChunk( + relay_parent, + validator_index, + chunk.clone(), + tx, + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: chunk_msg }).await; + assert_eq!(rx.await.unwrap(), Ok(())); + + let (tx, rx) = oneshot::channel(); + let query_chunk = AvailabilityStoreMessage::QueryChunk( + relay_parent, + validator_index, + tx, + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: query_chunk }).await; + + assert_eq!(rx.await.unwrap().unwrap(), chunk); + }); + } + + #[test] + fn store_block_works() { + let store = Arc::new(kvdb_memorydb::create(columns::NUM_COLUMNS)); + let test_state = TestState::default(); + test_harness(store.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + let candidate_hash = Hash::from([1; 32]); + let validator_index = 5; + let n_validators = 10; + + let pov = PoV { + block_data: BlockData(vec![4, 5, 6]), + }; + + let available_data = AvailableData { + pov, + 
validation_data: test_state.persisted_validation_data, + }; + + + let (tx, rx) = oneshot::channel(); + let block_msg = AvailabilityStoreMessage::StoreAvailableData( + candidate_hash, + Some(validator_index), + n_validators, + available_data.clone(), + tx, + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: block_msg }).await; + assert_eq!(rx.await.unwrap(), Ok(())); + + let pov = query_available_data(&mut virtual_overseer, candidate_hash).await.unwrap(); + assert_eq!(pov, available_data); + + let chunk = query_chunk(&mut virtual_overseer, candidate_hash, validator_index).await.unwrap(); + + let chunks = erasure::obtain_chunks_v1(10, &available_data).unwrap(); + + let mut branches = erasure::branches(chunks.as_ref()); + + let branch = branches.nth(5).unwrap(); + let expected_chunk = ErasureChunk { + chunk: branch.1.to_vec(), + index: 5, + proof: branch.0, + }; + + assert_eq!(chunk, expected_chunk); + }); + } + + + #[test] + fn store_pov_and_query_chunk_works() { + let store = Arc::new(kvdb_memorydb::create(columns::NUM_COLUMNS)); + let test_state = TestState::default(); + + test_harness(store.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + let candidate_hash = Hash::from([1; 32]); + let n_validators = 10; + + let pov = PoV { + block_data: BlockData(vec![4, 5, 6]), + }; + + let available_data = AvailableData { + pov, + validation_data: test_state.persisted_validation_data, + }; + + let no_metrics = Metrics(None); + let chunks_expected = get_chunks(&available_data, n_validators as usize, &no_metrics).unwrap(); + + let (tx, rx) = oneshot::channel(); + let block_msg = AvailabilityStoreMessage::StoreAvailableData( + candidate_hash, + None, + n_validators, + available_data, + tx, + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: block_msg }).await; + + assert_eq!(rx.await.unwrap(), Ok(())); + + for validator_index in 0..n_validators { + let chunk = query_chunk(&mut virtual_overseer, candidate_hash, validator_index).await.unwrap(); + + assert_eq!(chunk, chunks_expected[validator_index as usize]); + } + }); + } + + async fn query_available_data( + virtual_overseer: &mut test_helpers::TestSubsystemContextHandle, + candidate_hash: Hash, + ) -> Option { + let (tx, rx) = oneshot::channel(); + + let query = AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx); + virtual_overseer.send(FromOverseer::Communication{ msg: query }).await; + + rx.await.unwrap() + } + + async fn query_chunk( + virtual_overseer: &mut test_helpers::TestSubsystemContextHandle, + candidate_hash: Hash, + index: u32, + ) -> Option { + let (tx, rx) = oneshot::channel(); + + let query = AvailabilityStoreMessage::QueryChunk(candidate_hash, index, tx); + virtual_overseer.send(FromOverseer::Communication{ msg: query }).await; + + rx.await.unwrap() + } +} diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9401290f2b5b59b045fd98fe21c5c0dd377ee42c --- /dev/null +++ b/node/core/backing/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "polkadot-node-core-backing" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.5" +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +keystore = { package = "sc-keystore", git = 
"https://github.com/paritytech/substrate", branch = "master" } +polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } +polkadot-node-subsystem-util = { path = "../../subsystem-util" } +erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } +statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } +derive_more = "0.99.9" +bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } +log = "0.4.8" + +[dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +futures = { version = "0.3.5", features = ["thread-pool"] } +assert_matches = "1.3.0" +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..7e6e64ee47d42ee3fd7a06c7e5a0e60798c366a6 --- /dev/null +++ b/node/core/backing/src/lib.rs @@ -0,0 +1,1892 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implements a `CandidateBackingSubsystem`. 
+
+use std::collections::{HashMap, HashSet};
+use std::convert::TryFrom;
+use std::pin::Pin;
+use std::sync::Arc;
+
+use bitvec::vec::BitVec;
+use futures::{
+	channel::{mpsc, oneshot},
+	Future, FutureExt, SinkExt, StreamExt,
+};
+
+use keystore::KeyStorePtr;
+use polkadot_primitives::v1::{
+	CommittedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorId,
+	ValidatorIndex, SigningContext, PoV,
+	CandidateDescriptor, AvailableData, ValidatorSignature, Hash, CandidateReceipt,
+	CandidateCommitments, CoreState, CoreIndex, CollatorId,
+};
+use polkadot_node_primitives::{
+	FromTableMisbehavior, Statement, SignedFullStatement, MisbehaviorReport,
+	ValidationOutputs, ValidationResult,
+};
+use polkadot_subsystem::{
+	messages::{
+		AllMessages, AvailabilityStoreMessage, CandidateBackingMessage, CandidateSelectionMessage,
+		CandidateValidationMessage, NewBackedCandidate, PoVDistributionMessage, ProvisionableData,
+		ProvisionerMessage, RuntimeApiMessage, StatementDistributionMessage, ValidationFailed,
+		RuntimeApiRequest,
+	},
+	metrics::{self, prometheus},
+};
+use polkadot_node_subsystem_util::{
+	self as util,
+	request_session_index_for_child,
+	request_validator_groups,
+	request_validators,
+	request_from_runtime,
+	Validator,
+	delegated_subsystem,
+};
+use statement_table::{
+	generic::AttestedCandidate as TableAttestedCandidate,
+	Context as TableContextTrait,
+	Table,
+	v1::{
+		Statement as TableStatement,
+		SignedStatement as TableSignedStatement, Summary as TableSummary,
+	},
+};
+
+#[derive(Debug, derive_more::From)]
+enum Error {
+	CandidateNotFound,
+	InvalidSignature,
+	StoreFailed,
+	#[from]
+	Erasure(erasure_coding::Error),
+	#[from]
+	ValidationFailed(ValidationFailed),
+	#[from]
+	Oneshot(oneshot::Canceled),
+	#[from]
+	Mpsc(mpsc::SendError),
+	#[from]
+	UtilError(util::Error),
+}
+
+/// Holds all data needed for candidate backing job operation.
+struct CandidateBackingJob {
+	/// The hash of the relay parent on top of which this job is doing its work.
+	parent: Hash,
+	/// Inbound message channel receiving part.
+	rx_to: mpsc::Receiver,
+	/// Outbound message channel sending part.
+	tx_from: mpsc::Sender,
+	/// The `ParaId` assigned to this validator.
+	assignment: ParaId,
+	/// The collator required to author the candidate, if any.
+	required_collator: Option,
+	/// We have issued `Valid` or `Invalid` statements about these candidates.
+	issued_statements: HashSet,
+	/// `Some(h)` if this job has already issued a `Seconded` statement for some candidate with hash `h`.
+	seconded: Option,
+	/// We have already reported misbehaviors for these validators.
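+	/// (tracked so each misbehaving validator is reported at most once per
+	/// job; see `issue_new_misbehaviors`).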
+ reported_misbehavior_for: HashSet, + table: Table, + table_context: TableContext, + metrics: Metrics, +} + +const fn group_quorum(n_validators: usize) -> usize { + (n_validators / 2) + 1 +} + +#[derive(Default)] +struct TableContext { + signing_context: SigningContext, + validator: Option, + groups: HashMap>, + validators: Vec, +} + +impl TableContextTrait for TableContext { + type AuthorityId = ValidatorIndex; + type Digest = Hash; + type GroupId = ParaId; + type Signature = ValidatorSignature; + type Candidate = CommittedCandidateReceipt; + + fn candidate_digest(candidate: &CommittedCandidateReceipt) -> Hash { + candidate.hash() + } + + fn candidate_group(candidate: &CommittedCandidateReceipt) -> ParaId { + candidate.descriptor().para_id + } + + fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool { + self.groups.get(group).map_or(false, |g| g.iter().position(|a| a == authority).is_some()) + } + + fn requisite_votes(&self, group: &ParaId) -> usize { + self.groups.get(group).map_or(usize::max_value(), |g| group_quorum(g.len())) + } +} + +/// A message type that is sent from `CandidateBackingSubsystem` to `CandidateBackingJob`. +pub enum ToJob { + /// A `CandidateBackingMessage`. + CandidateBacking(CandidateBackingMessage), + /// Stop working. + Stop, +} + +impl TryFrom for ToJob { + type Error = (); + + fn try_from(msg: AllMessages) -> Result { + match msg { + AllMessages::CandidateBacking(msg) => Ok(ToJob::CandidateBacking(msg)), + _ => Err(()), + } + } +} + +impl From for ToJob { + fn from(msg: CandidateBackingMessage) -> Self { + Self::CandidateBacking(msg) + } +} + +impl util::ToJobTrait for ToJob { + const STOP: Self = ToJob::Stop; + + fn relay_parent(&self) -> Option { + match self { + Self::CandidateBacking(cb) => cb.relay_parent(), + Self::Stop => None, + } + } +} + +/// A message type that is sent from `CandidateBackingJob` to `CandidateBackingSubsystem`. 
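+///
+/// Each variant wraps the corresponding `AllMessages` variant; the `From`
+/// and `TryFrom` impls below convert in the outbound (infallible) and
+/// inbound (fallible) directions respectively.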
+enum FromJob { + AvailabilityStore(AvailabilityStoreMessage), + RuntimeApiMessage(RuntimeApiMessage), + CandidateValidation(CandidateValidationMessage), + CandidateSelection(CandidateSelectionMessage), + Provisioner(ProvisionerMessage), + PoVDistribution(PoVDistributionMessage), + StatementDistribution(StatementDistributionMessage), +} + +impl From for AllMessages { + fn from(f: FromJob) -> Self { + match f { + FromJob::AvailabilityStore(msg) => AllMessages::AvailabilityStore(msg), + FromJob::RuntimeApiMessage(msg) => AllMessages::RuntimeApi(msg), + FromJob::CandidateValidation(msg) => AllMessages::CandidateValidation(msg), + FromJob::CandidateSelection(msg) => AllMessages::CandidateSelection(msg), + FromJob::StatementDistribution(msg) => AllMessages::StatementDistribution(msg), + FromJob::PoVDistribution(msg) => AllMessages::PoVDistribution(msg), + FromJob::Provisioner(msg) => AllMessages::Provisioner(msg), + } + } +} + +impl TryFrom for FromJob { + type Error = &'static str; + + fn try_from(f: AllMessages) -> Result { + match f { + AllMessages::AvailabilityStore(msg) => Ok(FromJob::AvailabilityStore(msg)), + AllMessages::RuntimeApi(msg) => Ok(FromJob::RuntimeApiMessage(msg)), + AllMessages::CandidateValidation(msg) => Ok(FromJob::CandidateValidation(msg)), + AllMessages::CandidateSelection(msg) => Ok(FromJob::CandidateSelection(msg)), + AllMessages::StatementDistribution(msg) => Ok(FromJob::StatementDistribution(msg)), + AllMessages::PoVDistribution(msg) => Ok(FromJob::PoVDistribution(msg)), + AllMessages::Provisioner(msg) => Ok(FromJob::Provisioner(msg)), + _ => Err("can't convert this AllMessages variant to FromJob"), + } + } +} + +// It looks like it's not possible to do an `impl From` given the current state of +// the code. So this does the necessary conversion. +fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { + let statement = match s.payload() { + Statement::Seconded(c) => TableStatement::Candidate(c.clone()), + Statement::Valid(h) => TableStatement::Valid(h.clone()), + Statement::Invalid(h) => TableStatement::Invalid(h.clone()), + }; + + TableSignedStatement { + statement, + signature: s.signature().clone(), + sender: s.validator_index(), + } +} + +impl CandidateBackingJob { + /// Run asynchronously. + async fn run_loop(mut self) -> Result<(), Error> { + while let Some(msg) = self.rx_to.next().await { + match msg { + ToJob::CandidateBacking(msg) => { + self.process_msg(msg).await?; + } + _ => break, + } + } + + Ok(()) + } + + async fn issue_candidate_invalid_message( + &mut self, + candidate: CandidateReceipt, + ) -> Result<(), Error> { + self.tx_from.send(FromJob::CandidateSelection( + CandidateSelectionMessage::Invalid(self.parent, candidate) + )).await?; + + Ok(()) + } + + /// Validate the candidate that is requested to be `Second`ed and distribute validation result. + /// + /// Returns `Ok(true)` if we issued a `Seconded` statement about this candidate. + async fn validate_and_second( + &mut self, + candidate: &CandidateReceipt, + pov: PoV, + ) -> Result { + // Check that candidate is collated by the right collator. 
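+		// If a specific collator is required and the descriptor names a different
+		// one, report the candidate invalid without doing any validation work.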
+ if self.required_collator.as_ref() + .map_or(false, |c| c != &candidate.descriptor().collator) + { + self.issue_candidate_invalid_message(candidate.clone()).await?; + return Ok(false); + } + + let valid = self.request_candidate_validation( + candidate.descriptor().clone(), + Arc::new(pov.clone()), + ).await?; + + let candidate_hash = candidate.hash(); + + let statement = match valid { + ValidationResult::Valid(outputs) => { + // make PoV available for later distribution. Send data to the availability + // store to keep. Sign and dispatch `valid` statement to network if we + // have not seconded the given candidate. + // + // If the commitments hash produced by validation is not the same as given by + // the collator, do not make available and report the collator. + let commitments_check = self.make_pov_available( + pov, + outputs, + |commitments| if commitments.hash() == candidate.commitments_hash { + Ok(CommittedCandidateReceipt { + descriptor: candidate.descriptor().clone(), + commitments, + }) + } else { + Err(()) + }, + ).await?; + + match commitments_check { + Ok(candidate) => { + self.issued_statements.insert(candidate_hash); + Some(Statement::Seconded(candidate)) + } + Err(()) => { + self.issue_candidate_invalid_message(candidate.clone()).await?; + None + } + } + } + ValidationResult::Invalid(_reason) => { + // no need to issue a statement about this if we aren't seconding it. + // + // there's an infinite amount of garbage out there. no need to acknowledge + // all of it. + self.issue_candidate_invalid_message(candidate.clone()).await?; + None + } + }; + + let issued_statement = statement.is_some(); + if let Some(signed_statement) = statement.and_then(|s| self.sign_statement(s)) { + self.import_statement(&signed_statement).await?; + self.distribute_signed_statement(signed_statement).await?; + } + + Ok(issued_statement) + } + + fn get_backed(&self) -> Vec { + let proposed = self.table.proposed_candidates(&self.table_context); + let mut res = Vec::with_capacity(proposed.len()); + + for p in proposed.into_iter() { + let TableAttestedCandidate { candidate, validity_votes, .. } = p; + + let (ids, validity_votes): (Vec<_>, Vec<_>) = validity_votes + .into_iter() + .map(|(id, vote)| (id, vote.into())) + .unzip(); + + let group = match self.table_context.groups.get(&self.assignment) { + Some(group) => group, + None => continue, + }; + + let mut validator_indices = BitVec::with_capacity(group.len()); + + validator_indices.resize(group.len(), false); + + for id in ids.iter() { + if let Some(position) = group.iter().position(|x| x == id) { + validator_indices.set(position, true); + } + } + + let backed = BackedCandidate { + candidate, + validity_votes, + validator_indices, + }; + + res.push(NewBackedCandidate(backed.clone())); + } + + res + } + + /// Check if there have happened any new misbehaviors and issue necessary messages. 
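+	/// Each new entry in the table's misbehavior set is converted into a
+	/// `MisbehaviorReport` and forwarded to the provisioner as provisionable
+	/// data.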
+ /// + /// TODO: Report multiple misbehaviors (https://github.com/paritytech/polkadot/issues/1387) + async fn issue_new_misbehaviors(&mut self) -> Result<(), Error> { + let mut reports = Vec::new(); + + for (k, v) in self.table.get_misbehavior().iter() { + if !self.reported_misbehavior_for.contains(k) { + self.reported_misbehavior_for.insert(*k); + + let f = FromTableMisbehavior { + id: *k, + report: v.clone(), + signing_context: self.table_context.signing_context.clone(), + key: self.table_context.validators[*k as usize].clone(), + }; + + if let Ok(report) = MisbehaviorReport::try_from(f) { + let message = ProvisionerMessage::ProvisionableData( + ProvisionableData::MisbehaviorReport(self.parent, report), + ); + + reports.push(message); + } + } + } + + for report in reports.drain(..) { + self.send_to_provisioner(report).await? + } + + Ok(()) + } + + /// Import a statement into the statement table and return the summary of the import. + async fn import_statement( + &mut self, + statement: &SignedFullStatement, + ) -> Result, Error> { + let stmt = primitive_statement_to_table(statement); + + let summary = self.table.import_statement(&self.table_context, stmt); + + self.issue_new_misbehaviors().await?; + + return Ok(summary); + } + + async fn process_msg(&mut self, msg: CandidateBackingMessage) -> Result<(), Error> { + match msg { + CandidateBackingMessage::Second(_, candidate, pov) => { + // Sanity check that candidate is from our assignment. + if candidate.descriptor().para_id != self.assignment { + return Ok(()); + } + + // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a + // Seconded statement only if we have not seconded any other candidate and + // have not signed a Valid statement for the requested candidate. + match self.seconded { + // This job has not seconded a candidate yet. + None => { + let candidate_hash = candidate.hash(); + + if !self.issued_statements.contains(&candidate_hash) { + if let Ok(true) = self.validate_and_second( + &candidate, + pov, + ).await { + self.metrics.on_candidate_seconded(); + self.seconded = Some(candidate_hash); + } + } + } + // This job has already seconded a candidate. + Some(_) => {} + } + } + CandidateBackingMessage::Statement(_, statement) => { + self.check_statement_signature(&statement)?; + match self.maybe_validate_and_import(statement).await { + Err(Error::ValidationFailed(_)) => return Ok(()), + Err(e) => return Err(e), + Ok(()) => (), + } + } + CandidateBackingMessage::GetBackedCandidates(_, tx) => { + let backed = self.get_backed(); + + tx.send(backed).map_err(|_| oneshot::Canceled)?; + } + } + + Ok(()) + } + + /// Kick off validation work and distribute the result as a signed statement. + async fn kick_off_validation_work( + &mut self, + summary: TableSummary, + ) -> Result<(), Error> { + let candidate_hash = summary.candidate.clone(); + + if self.issued_statements.contains(&candidate_hash) { + return Ok(()) + } + + // We clone the commitments here because there are borrowck + // errors relating to this being a struct and methods borrowing the entirety of self + // and not just those things that the function uses. + let candidate = self.table.get_candidate(&candidate_hash).ok_or(Error::CandidateNotFound)?; + let expected_commitments = candidate.commitments.clone(); + + let descriptor = candidate.descriptor().clone(); + + // Check that candidate is collated by the right collator. 
+		if self.required_collator.as_ref()
+			.map_or(false, |c| c != &descriptor.collator)
+		{
+			// If not, we've got the statement in the table but we will
+			// not issue validation work for it.
+			//
+			// Act as though we've issued a statement.
+			self.issued_statements.insert(candidate_hash);
+			return Ok(());
+		}
+
+		let pov = self.request_pov_from_distribution(descriptor.clone()).await?;
+		let v = self.request_candidate_validation(descriptor, pov.clone()).await?;
+
+		let statement = match v {
+			ValidationResult::Valid(outputs) => {
+				// If validation produces a new set of commitments, we vote the candidate as invalid.
+				let commitments_check = self.make_pov_available(
+					(&*pov).clone(),
+					outputs,
+					|commitments| if commitments == expected_commitments {
+						Ok(())
+					} else {
+						Err(())
+					}
+				).await?;
+
+				match commitments_check {
+					Ok(()) => Statement::Valid(candidate_hash),
+					Err(()) => Statement::Invalid(candidate_hash),
+				}
+			}
+			ValidationResult::Invalid(_reason) => {
+				Statement::Invalid(candidate_hash)
+			}
+		};
+
+		self.issued_statements.insert(candidate_hash);
+
+		if let Some(signed_statement) = self.sign_statement(statement) {
+			self.distribute_signed_statement(signed_statement).await?;
+		}
+
+		Ok(())
+	}
+
+	/// Import the statement and kick off validation work if it is a part of our assignment.
+	async fn maybe_validate_and_import(
+		&mut self,
+		statement: SignedFullStatement,
+	) -> Result<(), Error> {
+		if let Some(summary) = self.import_statement(&statement).await? {
+			if let Statement::Seconded(_) = statement.payload() {
+				if summary.group_id == self.assignment {
+					self.kick_off_validation_work(summary).await?;
+				}
+			}
+		}
+
+		Ok(())
+	}
+
+	fn sign_statement(&self, statement: Statement) -> Option<SignedFullStatement> {
+		let signed = self.table_context.validator.as_ref()?.sign(statement);
+		self.metrics.on_statement_signed();
+		Some(signed)
+	}
+
+	fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> {
+		let idx = statement.validator_index() as usize;
+
+		if self.table_context.validators.len() > idx {
+			statement.check_signature(
+				&self.table_context.signing_context,
+				&self.table_context.validators[idx],
+			).map_err(|_| Error::InvalidSignature)?;
+		} else {
+			return Err(Error::InvalidSignature);
+		}
+
+		Ok(())
+	}
+
+	async fn send_to_provisioner(&mut self, msg: ProvisionerMessage) -> Result<(), Error> {
+		self.tx_from.send(FromJob::Provisioner(msg)).await?;
+
+		Ok(())
+	}
+
+	async fn request_pov_from_distribution(
+		&mut self,
+		descriptor: CandidateDescriptor,
+	) -> Result<Arc<PoV>, Error> {
+		let (tx, rx) = oneshot::channel();
+
+		self.tx_from.send(FromJob::PoVDistribution(
+			PoVDistributionMessage::FetchPoV(self.parent, descriptor, tx)
+		)).await?;
+
+		Ok(rx.await?)
+	}
+
+	async fn request_candidate_validation(
+		&mut self,
+		candidate: CandidateDescriptor,
+		pov: Arc<PoV>,
+	) -> Result<ValidationResult, Error> {
+		let (tx, rx) = oneshot::channel();
+
+		self.tx_from.send(FromJob::CandidateValidation(
+				CandidateValidationMessage::ValidateFromChainState(
+					candidate,
+					pov,
+					tx,
+				)
+			)
+		).await?;
+
+		Ok(rx.await??)
+	}
+
+	async fn store_available_data(
+		&mut self,
+		id: Option<ValidatorIndex>,
+		n_validators: u32,
+		available_data: AvailableData,
+	) -> Result<(), Error> {
+		let (tx, rx) = oneshot::channel();
+		self.tx_from.send(FromJob::AvailabilityStore(
+				AvailabilityStoreMessage::StoreAvailableData(
+					self.parent,
+					id,
+					n_validators,
+					available_data,
+					tx,
+				)
+			)
+		).await?;
+
+		rx.await?.map_err(|_| Error::StoreFailed)?;
+
+		Ok(())
+	}
+
+	// Make a `PoV` available.
+	//
+	// This calls an inspection function before making the PoV available for any last checks
+	// that need to be done. If the inspection function returns an error, this function returns
+	// early without making the PoV available.
+	async fn make_pov_available<T>(
+		&mut self,
+		pov: PoV,
+		outputs: ValidationOutputs,
+		with_commitments: impl FnOnce(CandidateCommitments) -> Result<T, ()>,
+	) -> Result<Result<T, ()>, Error> {
+		let available_data = AvailableData {
+			pov,
+			validation_data: outputs.validation_data,
+		};
+
+		let chunks = erasure_coding::obtain_chunks_v1(
+			self.table_context.validators.len(),
+			&available_data,
+		)?;
+
+		let branches = erasure_coding::branches(chunks.as_ref());
+		let erasure_root = branches.root();
+
+		let commitments = CandidateCommitments {
+			fees: outputs.fees,
+			upward_messages: outputs.upward_messages,
+			erasure_root,
+			new_validation_code: outputs.new_validation_code,
+			head_data: outputs.head_data,
+		};
+
+		let res = match with_commitments(commitments) {
+			Ok(x) => x,
+			Err(e) => return Ok(Err(e)),
+		};
+
+		self.store_available_data(
+			self.table_context.validator.as_ref().map(|v| v.index()),
+			self.table_context.validators.len() as u32,
+			available_data,
+		).await?;
+
+		Ok(Ok(res))
+	}
+
+	async fn distribute_signed_statement(&mut self, s: SignedFullStatement) -> Result<(), Error> {
+		let smsg = StatementDistributionMessage::Share(self.parent, s);
+
+		self.tx_from.send(FromJob::StatementDistribution(smsg)).await?;
+
+		Ok(())
+	}
+}
+
+impl util::JobTrait for CandidateBackingJob {
+	type ToJob = ToJob;
+	type FromJob = FromJob;
+	type Error = Error;
+	type RunArgs = KeyStorePtr;
+	type Metrics = Metrics;
+
+	const NAME: &'static str = "CandidateBackingJob";
+
+	fn run(
+		parent: Hash,
+		keystore: KeyStorePtr,
+		metrics: Metrics,
+		rx_to: mpsc::Receiver<ToJob>,
+		mut tx_from: mpsc::Sender<FromJob>,
+	) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
+		async move {
+			macro_rules! try_runtime_api {
+				($x: expr) => {
+					match $x {
+						Ok(x) => x,
+						Err(e) => {
+							log::warn!(
+								target: "candidate_backing",
+								"Failed to fetch runtime API data for job: {:?}",
+								e,
+							);
+
+							// We can't do candidate validation work if we don't have the
+							// requisite runtime API data. But these errors should not take
+							// down the node.
+							return Ok(());
+						}
+					}
+				}
+			}
+
+			let (validators, groups, session_index, cores) = futures::try_join!(
+				request_validators(parent, &mut tx_from).await?,
+				request_validator_groups(parent, &mut tx_from).await?,
+				request_session_index_for_child(parent, &mut tx_from).await?,
+				request_from_runtime(
+					parent,
+					&mut tx_from,
+					|tx| RuntimeApiRequest::AvailabilityCores(tx),
+				).await?,
+			)?;
+
+			let validators = try_runtime_api!(validators);
+			let (validator_groups, group_rotation_info) = try_runtime_api!(groups);
+			let session_index = try_runtime_api!(session_index);
+			let cores = try_runtime_api!(cores);
+
+			let signing_context = SigningContext { parent_hash: parent, session_index };
+			let validator = match Validator::construct(
+				&validators,
+				signing_context,
+				keystore.clone(),
+			) {
+				Ok(v) => v,
+				Err(util::Error::NotAValidator) => { return Ok(()) },
+				Err(e) => {
+					log::warn!(
+						target: "candidate_backing",
+						"Cannot participate in candidate backing: {:?}",
+						e
+					);
+
+					return Ok(())
+				}
+			};
+
+			let mut groups = HashMap::new();
+
+			let n_cores = cores.len();
+
+			let mut assignment = None;
+
+			for (idx, core) in cores.into_iter().enumerate() {
+				// Ignore prospective assignments on occupied cores for the time being.
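+				// A worked example, using the fixture from the tests below: with three
+				// scheduled cores, validator groups `[[2, 0, 3], [1], [4]]` and a rotation
+				// that has not advanced yet, core 0 maps to group 0, so validator 0 ends up
+				// assigned to the para scheduled on core 0.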
+				if let CoreState::Scheduled(scheduled) = core {
+					let core_index = CoreIndex(idx as _);
+					let group_index = group_rotation_info.group_for_core(core_index, n_cores);
+					if let Some(g) = validator_groups.get(group_index.0 as usize) {
+						if g.contains(&validator.index()) {
+							assignment = Some((scheduled.para_id, scheduled.collator));
+						}
+						groups.insert(scheduled.para_id, g.clone());
+					}
+				}
+			}
+
+			let table_context = TableContext {
+				groups,
+				validators,
+				signing_context: validator.signing_context().clone(),
+				validator: Some(validator),
+			};
+
+			let (assignment, required_collator) = match assignment {
+				None => return Ok(()), // no need to work.
+				Some((a, r)) => (a, r),
+			};
+
+			let job = CandidateBackingJob {
+				parent,
+				rx_to,
+				tx_from,
+				assignment,
+				required_collator,
+				issued_statements: HashSet::new(),
+				seconded: None,
+				reported_misbehavior_for: HashSet::new(),
+				table: Table::default(),
+				table_context,
+				metrics,
+			};
+
+			job.run_loop().await
+		}
+		.boxed()
+	}
+}
+
+#[derive(Clone)]
+struct MetricsInner {
+	signed_statements_total: prometheus::Counter<prometheus::U64>,
+	candidates_seconded_total: prometheus::Counter<prometheus::U64>,
+}
+
+/// Candidate backing metrics.
+#[derive(Default, Clone)]
+pub struct Metrics(Option<MetricsInner>);
+
+impl Metrics {
+	fn on_statement_signed(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.signed_statements_total.inc();
+		}
+	}
+
+	fn on_candidate_seconded(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.candidates_seconded_total.inc();
+		}
+	}
+}
+
+impl metrics::Metrics for Metrics {
+	fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
+		let metrics = MetricsInner {
+			signed_statements_total: prometheus::register(
+				prometheus::Counter::new(
+					"parachain_signed_statements_total",
+					"Number of statements signed.",
+				)?,
+				registry,
+			)?,
+			candidates_seconded_total: prometheus::register(
+				prometheus::Counter::new(
+					"parachain_candidates_seconded_total",
+					"Number of candidates seconded.",
+				)?,
+				registry,
+			)?,
+		};
+		Ok(Metrics(Some(metrics)))
+	}
+}
+
+delegated_subsystem!(CandidateBackingJob(KeyStorePtr, Metrics) <- ToJob as CandidateBackingSubsystem);
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use assert_matches::assert_matches;
+	use futures::{executor, future, Future};
+	use polkadot_primitives::v1::{
+		ScheduledCore, BlockData, CandidateCommitments,
+		PersistedValidationData, ValidationData, TransientValidationData, HeadData,
+		ValidatorPair, ValidityAttestation, GroupRotationInfo,
+	};
+	use polkadot_subsystem::{
+		messages::RuntimeApiRequest,
+		ActiveLeavesUpdate, FromOverseer, OverseerSignal,
+	};
+	use polkadot_node_primitives::InvalidCandidate;
+	use sp_keyring::Sr25519Keyring;
+	use std::collections::HashMap;
+
+	fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> {
+		val_ids.iter().map(|v| v.public().into()).collect()
+	}
+
+	struct TestState {
+		chain_ids: Vec<ParaId>,
+		keystore: KeyStorePtr,
+		validators: Vec<Sr25519Keyring>,
+		validator_public: Vec<ValidatorId>,
+		validation_data: ValidationData,
+		validator_groups: (Vec<Vec<ValidatorIndex>>, GroupRotationInfo),
+		availability_cores: Vec<CoreState>,
+		head_data: HashMap<ParaId, HeadData>,
+		signing_context: SigningContext,
+		relay_parent: Hash,
+	}
+
+	impl Default for TestState {
+		fn default() -> Self {
+			let chain_a = ParaId::from(1);
+			let chain_b = ParaId::from(2);
+			let thread_a = ParaId::from(3);
+
+			let chain_ids = vec![chain_a, chain_b, thread_a];
+
+			let validators = vec![
+				Sr25519Keyring::Alice,
+				Sr25519Keyring::Bob,
+				Sr25519Keyring::Charlie,
+				Sr25519Keyring::Dave,
+				Sr25519Keyring::Ferdie,
+			];
+
+			let keystore = keystore::Store::new_in_memory();
+			// Make sure `Alice` key is in the keystore, so this mocked node will be a parachain validator.
+			keystore.write().insert_ephemeral_from_seed::<ValidatorPair>(&validators[0].to_seed())
+				.expect("Insert key into keystore");
+
+			let validator_public = validator_pubkeys(&validators);
+
+			let validator_groups = vec![vec![2, 0, 3], vec![1], vec![4]];
+			let group_rotation_info = GroupRotationInfo {
+				session_start_block: 0,
+				group_rotation_frequency: 100,
+				now: 1,
+			};
+
+			let thread_collator: CollatorId = Sr25519Keyring::Two.public().into();
+			let availability_cores = vec![
+				CoreState::Scheduled(ScheduledCore {
+					para_id: chain_a,
+					collator: None,
+				}),
+				CoreState::Scheduled(ScheduledCore {
+					para_id: chain_b,
+					collator: None,
+				}),
+				CoreState::Scheduled(ScheduledCore {
+					para_id: thread_a,
+					collator: Some(thread_collator.clone()),
+				}),
+			];
+
+			let mut head_data = HashMap::new();
+			head_data.insert(chain_a, HeadData(vec![4, 5, 6]));
+
+			let relay_parent = Hash::from([5; 32]);
+
+			let signing_context = SigningContext {
+				session_index: 1,
+				parent_hash: relay_parent,
+			};
+
+			let validation_data = ValidationData {
+				persisted: PersistedValidationData {
+					parent_head: HeadData(vec![7, 8, 9]),
+					block_number: Default::default(),
+					hrmp_mqc_heads: Vec::new(),
+				},
+				transient: TransientValidationData {
+					max_code_size: 1000,
+					max_head_data_size: 1000,
+					balance: Default::default(),
+					code_upgrade_allowed: None,
+				},
+			};
+
+			Self {
+				chain_ids,
+				keystore,
+				validators,
+				validator_public,
+				validator_groups: (validator_groups, group_rotation_info),
+				availability_cores,
+				head_data,
+				validation_data,
+				signing_context,
+				relay_parent,
+			}
+		}
+	}
+
+	struct TestHarness {
+		virtual_overseer: polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle<CandidateBackingMessage>,
+	}
+
+	fn test_harness<T: Future<Output = ()>>(keystore: KeyStorePtr, test: impl FnOnce(TestHarness) -> T) {
+		let pool = sp_core::testing::TaskExecutor::new();
+
+		let (context, virtual_overseer) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone());
+
+		let subsystem = CandidateBackingSubsystem::run(context, keystore, Metrics(None), pool.clone());
+
+		let test_fut = test(TestHarness {
+			virtual_overseer,
+		});
+
+		futures::pin_mut!(test_fut);
+		futures::pin_mut!(subsystem);
+
+		executor::block_on(future::select(test_fut, subsystem));
+	}
+
+	fn make_erasure_root(test: &TestState, pov: PoV) -> Hash {
+		let available_data = AvailableData {
+			validation_data: test.validation_data.persisted.clone(),
+			pov,
+		};
+
+		let chunks = erasure_coding::obtain_chunks_v1(test.validators.len(), &available_data).unwrap();
+		erasure_coding::branches(&chunks).root()
+	}
+
+	#[derive(Default)]
+	struct TestCandidateBuilder {
+		para_id: ParaId,
+		head_data: HeadData,
+		pov_hash: Hash,
+		relay_parent: Hash,
+		erasure_root: Hash,
+	}
+
+	impl TestCandidateBuilder {
+		fn build(self) -> CommittedCandidateReceipt {
+			CommittedCandidateReceipt {
+				descriptor: CandidateDescriptor {
+					para_id: self.para_id,
+					pov_hash: self.pov_hash,
+					relay_parent: self.relay_parent,
+					..Default::default()
+				},
+				commitments: CandidateCommitments {
+					head_data: self.head_data,
+					erasure_root: self.erasure_root,
+					..Default::default()
+				},
+			}
+		}
+	}
+
+	// Tests that the subsystem performs the actions that are required on startup.
+	async fn test_startup(
+		virtual_overseer: &mut polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle<CandidateBackingMessage>,
+		test_state: &TestState,
+	) {
+		// Start work on some new parent.
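+		// On a new active leaf, the job makes four runtime API requests: validators,
+		// validator groups, the session index for the child, and the availability cores.
+		// Each `assert_matches!` below mocks one response, in the order the requests arrive.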
+ virtual_overseer.send(FromOverseer::Signal( + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(test_state.relay_parent))) + ).await; + + // Check that subsystem job issues a request for a validator set. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + + // Check that subsystem job issues a request for the validator groups. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + } + ); + + // Check that subsystem job issues a request for the session index for child. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(test_state.signing_context.session_index)).unwrap(); + } + ); + + // Check that subsystem job issues a request for the availability cores. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); + } + + // Test that a `CandidateBackingMessage::Second` issues validation work + // and in case validation is successful issues a `StatementDistributionMessage`. + #[test] + fn backing_second_works() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + + test_startup(&mut virtual_overseer, &test_state).await; + + let pov = PoV { + block_data: BlockData(vec![42, 43, 44]), + }; + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone()), + ..Default::default() + }.build(); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate.to_plain(), + pov.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromChainState( + c, + pov, + tx, + ) + ) if pov == pov && &c == candidate.descriptor() => { + tx.send(Ok( + ValidationResult::Valid(ValidationOutputs { + validation_data: test_state.validation_data.persisted, + head_data: expected_head_data.clone(), + upward_messages: Vec::new(), + fees: Default::default(), + new_validation_code: None, + }), + )).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) + ) if parent_hash == test_state.relay_parent => { + tx.send(Ok(())).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + signed_statement, + ) 
+			) if parent_hash == test_state.relay_parent => {
+				signed_statement.check_signature(
+					&test_state.signing_context,
+					&test_state.validator_public[0],
+				).unwrap();
+			}
+		);
+
+		virtual_overseer.send(FromOverseer::Signal(
+			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::stop_work(test_state.relay_parent)))
+		).await;
+		});
+	}
+
+	// Test that the candidate reaches quorum successfully.
+	#[test]
+	fn backing_works() {
+		let test_state = TestState::default();
+		test_harness(test_state.keystore.clone(), |test_harness| async move {
+			let TestHarness { mut virtual_overseer } = test_harness;
+
+			test_startup(&mut virtual_overseer, &test_state).await;
+
+			let pov = PoV {
+				block_data: BlockData(vec![1, 2, 3]),
+			};
+
+			let pov_hash = pov.hash();
+
+			let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap();
+
+			let candidate_a = TestCandidateBuilder {
+				para_id: test_state.chain_ids[0],
+				relay_parent: test_state.relay_parent,
+				pov_hash,
+				head_data: expected_head_data.clone(),
+				erasure_root: make_erasure_root(&test_state, pov.clone()),
+				..Default::default()
+			}.build();
+
+			let candidate_a_hash = candidate_a.hash();
+
+			let signed_a = SignedFullStatement::sign(
+				Statement::Seconded(candidate_a.clone()),
+				&test_state.signing_context,
+				2,
+				&test_state.validators[2].pair().into(),
+			);
+
+			let signed_b = SignedFullStatement::sign(
+				Statement::Valid(candidate_a_hash),
+				&test_state.signing_context,
+				0,
+				&test_state.validators[0].pair().into(),
+			);
+
+			let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone());
+
+			virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await;
+
+			// Sending a `Statement::Seconded` for our assignment will start the
+			// validation process. The first thing requested is the PoV from the
+			// `PoVDistribution` subsystem.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::PoVDistribution(
+					PoVDistributionMessage::FetchPoV(relay_parent, _, tx)
+				) if relay_parent == test_state.relay_parent => {
+					tx.send(Arc::new(pov.clone())).unwrap();
+				}
+			);
+
+			// The next step is the actual request to the Validation subsystem
+			// to validate the `Seconded` candidate.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::CandidateValidation(
+					CandidateValidationMessage::ValidateFromChainState(
+						c,
+						pov,
+						tx,
+					)
+				) if pov == pov && &c == candidate_a.descriptor() => {
+					tx.send(Ok(
+						ValidationResult::Valid(ValidationOutputs {
+							validation_data: test_state.validation_data.persisted,
+							head_data: expected_head_data.clone(),
+							upward_messages: Vec::new(),
+							fees: Default::default(),
+							new_validation_code: None,
+						}),
+					)).unwrap();
+				}
+			);
+
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::AvailabilityStore(
+					AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx)
+				) if parent_hash == test_state.relay_parent => {
+					tx.send(Ok(())).unwrap();
+				}
+			);
+
+			let statement = CandidateBackingMessage::Statement(
+				test_state.relay_parent,
+				signed_b.clone(),
+			);
+
+			virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await;
+
+			let (tx, rx) = oneshot::channel();
+
+			// The backed candidates set should not be empty at this point.
+			virtual_overseer.send(FromOverseer::Communication{
+				msg: CandidateBackingMessage::GetBackedCandidates(
+					test_state.relay_parent,
+					tx,
+				)
+			}).await;
+
+			let backed = rx.await.unwrap();
+
+			// `validity_votes` may be in any order so we can't do this in a single assert.
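+			// A `Seconded` statement from another validator counts as an implicit validity
+			// vote, while a `Valid` statement counts as an explicit one, hence the two
+			// variants checked below.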
+ assert_eq!(backed[0].0.candidate, candidate_a); + assert_eq!(backed[0].0.validity_votes.len(), 2); + assert!(backed[0].0.validity_votes.contains( + &ValidityAttestation::Explicit(signed_b.signature().clone()) + )); + assert!(backed[0].0.validity_votes.contains( + &ValidityAttestation::Implicit(signed_a.signature().clone()) + )); + assert_eq!(backed[0].0.validator_indices, bitvec::bitvec![Lsb0, u8; 1, 1, 0]); + + virtual_overseer.send(FromOverseer::Signal( + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::stop_work(test_state.relay_parent))) + ).await; + }); + } + + // Issuing conflicting statements on the same candidate should + // be a misbehavior. + #[test] + fn backing_misbehavior_works() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + + test_startup(&mut virtual_overseer, &test_state).await; + + let pov = PoV { + block_data: BlockData(vec![1, 2, 3]), + }; + + let pov_hash = pov.hash(); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let candidate_a = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + erasure_root: make_erasure_root(&test_state, pov.clone()), + head_data: expected_head_data.clone(), + ..Default::default() + }.build(); + + let candidate_a_hash = candidate_a.hash(); + + let signed_a = SignedFullStatement::sign( + Statement::Seconded(candidate_a.clone()), + &test_state.signing_context, + 2, + &test_state.validators[2].pair().into(), + ); + + let signed_b = SignedFullStatement::sign( + Statement::Valid(candidate_a_hash), + &test_state.signing_context, + 0, + &test_state.validators[0].pair().into(), + ); + + let signed_c = SignedFullStatement::sign( + Statement::Invalid(candidate_a_hash), + &test_state.signing_context, + 0, + &test_state.validators[0].pair().into(), + ); + + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); + + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::PoVDistribution( + PoVDistributionMessage::FetchPoV(relay_parent, _, tx) + ) if relay_parent == test_state.relay_parent => { + tx.send(Arc::new(pov.clone())).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromChainState( + c, + pov, + tx, + ) + ) if pov == pov && &c == candidate_a.descriptor() => { + tx.send(Ok( + ValidationResult::Valid(ValidationOutputs { + validation_data: test_state.validation_data.persisted, + head_data: expected_head_data.clone(), + upward_messages: Vec::new(), + fees: Default::default(), + new_validation_code: None, + }), + )).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) + ) if parent_hash == test_state.relay_parent => { + tx.send(Ok(())).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + relay_parent, + signed_statement, + ) + ) if relay_parent == test_state.relay_parent => { + signed_statement.check_signature( + &test_state.signing_context, + &test_state.validator_public[0], + ).unwrap(); + + assert_eq!(*signed_statement.payload(), Statement::Valid(candidate_a_hash)); + } + 
); + + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); + + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); + + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + ProvisionableData::MisbehaviorReport( + relay_parent, + MisbehaviorReport::SelfContradiction(_, s1, s2), + ) + ) + ) if relay_parent == test_state.relay_parent => { + s1.check_signature( + &test_state.signing_context, + &test_state.validator_public[s1.validator_index() as usize], + ).unwrap(); + + s2.check_signature( + &test_state.signing_context, + &test_state.validator_public[s2.validator_index() as usize], + ).unwrap(); + } + ); + }); + } + + // Test that if we are asked to second an invalid candidate we + // can still second a valid one afterwards. + #[test] + fn backing_dont_second_invalid() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + + test_startup(&mut virtual_overseer, &test_state).await; + + let pov_block_a = PoV { + block_data: BlockData(vec![42, 43, 44]), + }; + + let pov_block_b = PoV { + block_data: BlockData(vec![45, 46, 47]), + }; + + let pov_hash_a = pov_block_a.hash(); + let pov_hash_b = pov_block_b.hash(); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let candidate_a = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_hash_a, + erasure_root: make_erasure_root(&test_state, pov_block_a.clone()), + ..Default::default() + }.build(); + + let candidate_b = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_hash_b, + erasure_root: make_erasure_root(&test_state, pov_block_b.clone()), + head_data: expected_head_data.clone(), + ..Default::default() + }.build(); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate_a.to_plain(), + pov_block_a.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromChainState( + c, + pov, + tx, + ) + ) if pov == pov && &c == candidate_a.descriptor() => { + tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateSelection( + CandidateSelectionMessage::Invalid(parent_hash, c) + ) if parent_hash == test_state.relay_parent && c == candidate_a.to_plain() + ); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate_b.to_plain(), + pov_block_b.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: second }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromChainState( + c, + pov, + tx, + ) + ) if pov == pov && &c == candidate_b.descriptor() => { + tx.send(Ok( + ValidationResult::Valid(ValidationOutputs { + validation_data: test_state.validation_data.persisted, + head_data: expected_head_data.clone(), + upward_messages: Vec::new(), 
+							fees: Default::default(),
+							new_validation_code: None,
+						}),
+					)).unwrap();
+				}
+			);
+
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::AvailabilityStore(
+					AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx)
+				) if parent_hash == test_state.relay_parent => {
+					tx.send(Ok(())).unwrap();
+				}
+			);
+
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::StatementDistribution(
+					StatementDistributionMessage::Share(
+						parent_hash,
+						signed_statement,
+					)
+				) if parent_hash == test_state.relay_parent => {
+					signed_statement.check_signature(
+						&test_state.signing_context,
+						&test_state.validator_public[0],
+					).unwrap();
+
+					assert_eq!(*signed_statement.payload(), Statement::Seconded(candidate_b));
+				}
+			);
+
+			virtual_overseer.send(FromOverseer::Signal(
+				OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::stop_work(test_state.relay_parent)))
+			).await;
+		});
+	}
+
+	// Test that if we have already issued a statement (in this case `Invalid`) about a
+	// candidate, we will not issue a `Seconded` statement on it.
+	#[test]
+	fn backing_multiple_statements_work() {
+		let test_state = TestState::default();
+		test_harness(test_state.keystore.clone(), |test_harness| async move {
+			let TestHarness { mut virtual_overseer } = test_harness;
+
+			test_startup(&mut virtual_overseer, &test_state).await;
+
+			let pov = PoV {
+				block_data: BlockData(vec![42, 43, 44]),
+			};
+
+			let pov_hash = pov.hash();
+
+			let candidate = TestCandidateBuilder {
+				para_id: test_state.chain_ids[0],
+				relay_parent: test_state.relay_parent,
+				pov_hash,
+				erasure_root: make_erasure_root(&test_state, pov.clone()),
+				..Default::default()
+			}.build();
+
+			let candidate_hash = candidate.hash();
+
+			let signed_a = SignedFullStatement::sign(
+				Statement::Seconded(candidate.clone()),
+				&test_state.signing_context,
+				2,
+				&test_state.validators[2].pair().into(),
+			);
+
+			// Send in a `Statement` with a candidate.
+			let statement = CandidateBackingMessage::Statement(
+				test_state.relay_parent,
+				signed_a.clone(),
+			);
+
+			virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await;
+
+			// Subsystem requests PoV and requests validation.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::PoVDistribution(
+					PoVDistributionMessage::FetchPoV(relay_parent, _, tx)
+				) => {
+					assert_eq!(relay_parent, test_state.relay_parent);
+					tx.send(Arc::new(pov.clone())).unwrap();
+				}
+			);
+
+			// Tell subsystem that this candidate is invalid.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::CandidateValidation(
+					CandidateValidationMessage::ValidateFromChainState(
+						c,
+						pov,
+						tx,
+					)
+				) if pov == pov && &c == candidate.descriptor() => {
+					tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap();
+				}
+			);
+
+			// The `Invalid` statement is shared.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::StatementDistribution(
+					StatementDistributionMessage::Share(
+						relay_parent,
+						signed_statement,
+					)
+				) => {
+					assert_eq!(relay_parent, test_state.relay_parent);
+					signed_statement.check_signature(
+						&test_state.signing_context,
+						&test_state.validator_public[0],
+					).unwrap();
+					assert_eq!(*signed_statement.payload(), Statement::Invalid(candidate_hash));
+				}
+			);
+
+			// Ask the subsystem to `Second` a candidate that already has a statement issued
+			// about it. This should emit no actions from the subsystem.
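+			// (The candidate's hash is already in `issued_statements`, so
+			// `validate_and_second` is never reached for it.)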
+			let second = CandidateBackingMessage::Second(
+				test_state.relay_parent,
+				candidate.to_plain(),
+				pov.clone(),
+			);
+
+			virtual_overseer.send(FromOverseer::Communication{ msg: second }).await;
+
+			let pov_to_second = PoV {
+				block_data: BlockData(vec![3, 2, 1]),
+			};
+
+			let pov_hash = pov_to_second.hash();
+
+			let candidate_to_second = TestCandidateBuilder {
+				para_id: test_state.chain_ids[0],
+				relay_parent: test_state.relay_parent,
+				pov_hash,
+				erasure_root: make_erasure_root(&test_state, pov_to_second.clone()),
+				..Default::default()
+			}.build();
+
+			let second = CandidateBackingMessage::Second(
+				test_state.relay_parent,
+				candidate_to_second.to_plain(),
+				pov_to_second.clone(),
+			);
+
+			// In order to trigger _some_ actions from the subsystem, ask it to second
+			// another candidate. The only reason to do so is to make sure that no actions
+			// were triggered on the previous step.
+			virtual_overseer.send(FromOverseer::Communication{ msg: second }).await;
+
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::CandidateValidation(
+					CandidateValidationMessage::ValidateFromChainState(
+						_,
+						pov,
+						_,
+					)
+				) => {
+					assert_eq!(&*pov, &pov_to_second);
+				}
+			);
+		});
+	}
+
+	// Test that if the validation of the candidate has failed, this does not stop
+	// the work of this subsystem and so it is not fatal to the node.
+	#[test]
+	fn backing_works_after_failed_validation() {
+		let test_state = TestState::default();
+		test_harness(test_state.keystore.clone(), |test_harness| async move {
+			let TestHarness { mut virtual_overseer } = test_harness;
+
+			test_startup(&mut virtual_overseer, &test_state).await;
+
+			let pov = PoV {
+				block_data: BlockData(vec![42, 43, 44]),
+			};
+
+			let pov_hash = pov.hash();
+
+			let candidate = TestCandidateBuilder {
+				para_id: test_state.chain_ids[0],
+				relay_parent: test_state.relay_parent,
+				pov_hash,
+				erasure_root: make_erasure_root(&test_state, pov.clone()),
+				..Default::default()
+			}.build();
+
+			let signed_a = SignedFullStatement::sign(
+				Statement::Seconded(candidate.clone()),
+				&test_state.signing_context,
+				2,
+				&test_state.validators[2].pair().into(),
+			);
+
+			// Send in a `Statement` with a candidate.
+			let statement = CandidateBackingMessage::Statement(
+				test_state.relay_parent,
+				signed_a.clone(),
+			);
+
+			virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await;
+
+			// Subsystem requests PoV and requests validation.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::PoVDistribution(
+					PoVDistributionMessage::FetchPoV(relay_parent, _, tx)
+				) => {
+					assert_eq!(relay_parent, test_state.relay_parent);
+					tx.send(Arc::new(pov.clone())).unwrap();
+				}
+			);
+
+			// Tell the subsystem that validation of this candidate failed internally.
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::CandidateValidation(
+					CandidateValidationMessage::ValidateFromChainState(
+						c,
+						pov,
+						tx,
+					)
+				) if pov == pov && &c == candidate.descriptor() => {
+					tx.send(Err(ValidationFailed("Internal test error".into()))).unwrap();
+				}
+			);
+
+			// Try to get a set of backable candidates to trigger _some_ action in the subsystem
+			// and check that it is still alive.
+			let (tx, rx) = oneshot::channel();
+			let msg = CandidateBackingMessage::GetBackedCandidates(
+				test_state.relay_parent,
+				tx,
+			);
+
+			virtual_overseer.send(FromOverseer::Communication{ msg }).await;
+			assert_eq!(rx.await.unwrap().len(), 0);
+		});
+	}
+
+	// Test that a `CandidateBackingMessage::Second` for a candidate from the wrong collator
+	// does not second it and instead reports it back as invalid to candidate selection.
+	#[test]
+	fn backing_doesnt_second_wrong_collator() {
+		let mut test_state = TestState::default();
+		test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore {
+			para_id: ParaId::from(1),
+			collator: Some(Sr25519Keyring::Bob.public().into()),
+		});
+
+		test_harness(test_state.keystore.clone(), |test_harness| async move {
+			let TestHarness { mut virtual_overseer } = test_harness;
+
+			test_startup(&mut virtual_overseer, &test_state).await;
+
+			let pov = PoV {
+				block_data: BlockData(vec![42, 43, 44]),
+			};
+
+			let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap();
+
+			let pov_hash = pov.hash();
+			let candidate = TestCandidateBuilder {
+				para_id: test_state.chain_ids[0],
+				relay_parent: test_state.relay_parent,
+				pov_hash,
+				head_data: expected_head_data.clone(),
+				erasure_root: make_erasure_root(&test_state, pov.clone()),
+				..Default::default()
+			}.build();
+
+			let second = CandidateBackingMessage::Second(
+				test_state.relay_parent,
+				candidate.to_plain(),
+				pov.clone(),
+			);
+
+			virtual_overseer.send(FromOverseer::Communication{ msg: second }).await;
+
+			assert_matches!(
+				virtual_overseer.recv().await,
+				AllMessages::CandidateSelection(
+					CandidateSelectionMessage::Invalid(parent, c)
+				) if parent == test_state.relay_parent && c == candidate.to_plain() => {
+				}
+			);
+
+			virtual_overseer.send(FromOverseer::Signal(
+				OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::stop_work(test_state.relay_parent)))
+			).await;
+		});
+	}
+
+	#[test]
+	fn validation_work_ignores_wrong_collator() {
+		let mut test_state = TestState::default();
+		test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore {
+			para_id: ParaId::from(1),
+			collator: Some(Sr25519Keyring::Bob.public().into()),
+		});
+
+		test_harness(test_state.keystore.clone(), |test_harness| async move {
+			let TestHarness { mut virtual_overseer } = test_harness;
+
+			test_startup(&mut virtual_overseer, &test_state).await;
+
+			let pov = PoV {
+				block_data: BlockData(vec![1, 2, 3]),
+			};
+
+			let pov_hash = pov.hash();
+
+			let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap();
+
+			let candidate_a = TestCandidateBuilder {
+				para_id: test_state.chain_ids[0],
+				relay_parent: test_state.relay_parent,
+				pov_hash,
+				head_data: expected_head_data.clone(),
+				erasure_root: make_erasure_root(&test_state, pov.clone()),
+				..Default::default()
+			}.build();
+
+			let seconding = SignedFullStatement::sign(
+				Statement::Seconded(candidate_a.clone()),
+				&test_state.signing_context,
+				2,
+				&test_state.validators[2].pair().into(),
+			);
+
+			let statement = CandidateBackingMessage::Statement(
+				test_state.relay_parent,
+				seconding.clone(),
+			);
+
+			virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await;
+
+			// The statement will be ignored because it has the wrong collator.
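+			// No PoV fetch or validation request should follow: `kick_off_validation_work`
+			// just records the candidate in `issued_statements` when the collator does not
+			// match the required one.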
+			virtual_overseer.send(FromOverseer::Signal(
+				OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::stop_work(test_state.relay_parent)))
+			).await;
+		});
+	}
+}
diff --git a/node/core/bitfield-signing/Cargo.toml b/node/core/bitfield-signing/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..11bfef713c7b60ba059f3baf501a4d21854464f5
--- /dev/null
+++ b/node/core/bitfield-signing/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "polkadot-node-core-bitfield-signing"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+
+[dependencies]
+bitvec = "0.17.4"
+derive_more = "0.99.9"
+futures = "0.3.5"
+log = "0.4.8"
+polkadot-primitives = { path = "../../../primitives" }
+polkadot-node-subsystem = { path = "../../subsystem" }
+polkadot-node-subsystem-util = { path = "../../subsystem-util" }
+keystore = { package = "sc-keystore", git = "https://github.com/paritytech/substrate", branch = "master" }
+wasm-timer = "0.2.4"
diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..66badf3d18c5f1049c6c08ce6ac8a2c029fe61c6
--- /dev/null
+++ b/node/core/bitfield-signing/src/lib.rs
@@ -0,0 +1,355 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! The bitfield signing subsystem produces `SignedAvailabilityBitfield`s once per block.
+
+use bitvec::bitvec;
+use futures::{
+	channel::{mpsc, oneshot},
+	prelude::*,
+	stream, Future,
+};
+use keystore::KeyStorePtr;
+use polkadot_node_subsystem::{
+	messages::{
+		self, AllMessages, AvailabilityStoreMessage, BitfieldDistributionMessage,
+		BitfieldSigningMessage, CandidateBackingMessage, RuntimeApiMessage,
+	},
+	errors::RuntimeApiError,
+	metrics::{self, prometheus},
+};
+use polkadot_node_subsystem_util::{
+	self as util, JobManager, JobTrait, ToJobTrait, Validator
+};
+use polkadot_primitives::v1::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex};
+use std::{convert::TryFrom, pin::Pin, time::Duration};
+use wasm_timer::{Delay, Instant};
+
+/// Delay between starting a bitfield signing job and its attempting to create a bitfield.
+const JOB_DELAY: Duration = Duration::from_millis(1500);
+
+/// Each `BitfieldSigningJob` prepares a signed bitfield for a single relay parent.
+pub struct BitfieldSigningJob;
+
+/// Messages which a `BitfieldSigningJob` is prepared to receive.
+pub enum ToJob {
+	BitfieldSigning(BitfieldSigningMessage),
+	Stop,
+}
+
+impl ToJobTrait for ToJob {
+	const STOP: Self = ToJob::Stop;
+
+	fn relay_parent(&self) -> Option<Hash> {
+		match self {
+			Self::BitfieldSigning(bsm) => bsm.relay_parent(),
+			Self::Stop => None,
+		}
+	}
+}
+
+impl TryFrom<AllMessages> for ToJob {
+	type Error = ();
+
+	fn try_from(msg: AllMessages) -> Result<Self, Self::Error> {
+		match msg {
+			AllMessages::BitfieldSigning(bsm) => Ok(ToJob::BitfieldSigning(bsm)),
+			_ => Err(()),
+		}
+	}
+}
+
+impl From<BitfieldSigningMessage> for ToJob {
+	fn from(bsm: BitfieldSigningMessage) -> ToJob {
+		ToJob::BitfieldSigning(bsm)
+	}
+}
+
+/// Messages which may be sent from a `BitfieldSigningJob`.
+pub enum FromJob {
+	AvailabilityStore(AvailabilityStoreMessage),
+	BitfieldDistribution(BitfieldDistributionMessage),
+	CandidateBacking(CandidateBackingMessage),
+	RuntimeApi(RuntimeApiMessage),
+}
+
+impl From<FromJob> for AllMessages {
+	fn from(from_job: FromJob) -> AllMessages {
+		match from_job {
+			FromJob::AvailabilityStore(asm) => AllMessages::AvailabilityStore(asm),
+			FromJob::BitfieldDistribution(bdm) => AllMessages::BitfieldDistribution(bdm),
+			FromJob::CandidateBacking(cbm) => AllMessages::CandidateBacking(cbm),
+			FromJob::RuntimeApi(ram) => AllMessages::RuntimeApi(ram),
+		}
+	}
+}
+
+impl TryFrom<AllMessages> for FromJob {
+	type Error = ();
+
+	fn try_from(msg: AllMessages) -> Result<Self, Self::Error> {
+		match msg {
+			AllMessages::AvailabilityStore(asm) => Ok(Self::AvailabilityStore(asm)),
+			AllMessages::BitfieldDistribution(bdm) => Ok(Self::BitfieldDistribution(bdm)),
+			AllMessages::CandidateBacking(cbm) => Ok(Self::CandidateBacking(cbm)),
+			AllMessages::RuntimeApi(ram) => Ok(Self::RuntimeApi(ram)),
+			_ => Err(()),
+		}
+	}
+}
+
+/// Errors we may encounter in the course of executing the `BitfieldSigningSubsystem`.
+#[derive(Debug, derive_more::From)]
+pub enum Error {
+	/// error propagated from the utility subsystem
+	#[from]
+	Util(util::Error),
+	/// io error
+	#[from]
+	Io(std::io::Error),
+	/// a oneshot channel was canceled
+	#[from]
+	Oneshot(oneshot::Canceled),
+	/// an mpsc channel failed to send
+	#[from]
+	MpscSend(mpsc::SendError),
+	/// several errors collected into one
+	#[from]
+	Multiple(Vec<Error>),
+	/// the runtime API failed to return what we wanted
+	#[from]
+	Runtime(RuntimeApiError),
+}
+
+// if there is a candidate pending availability, query the Availability Store
+// for whether we have the availability chunk for our validator index.
+async fn get_core_availability(
+	relay_parent: Hash,
+	core: CoreState,
+	validator_idx: ValidatorIndex,
+	sender: &mpsc::Sender<FromJob>,
+) -> Result<bool, Error> {
+	use messages::{
+		AvailabilityStoreMessage::QueryChunkAvailability,
+		RuntimeApiRequest::CandidatePendingAvailability,
+	};
+	use FromJob::{AvailabilityStore, RuntimeApi};
+	use RuntimeApiMessage::Request;
+
+	// we have to (cheaply) clone this sender so we can mutate it to actually send anything
+	let mut sender = sender.clone();
+
+	if let CoreState::Occupied(core) = core {
+		let (tx, rx) = oneshot::channel();
+		sender
+			.send(RuntimeApi(Request(
+				relay_parent,
+				CandidatePendingAvailability(core.para_id, tx),
+			)))
+			.await?;
+
+		let committed_candidate_receipt = match rx.await? {
+			Ok(Some(ccr)) => ccr,
+			Ok(None) => return Ok(false),
+			Err(e) => {
+				// Don't take down the node on runtime API errors.
+				log::warn!(target: "bitfield_signing", "Encountered a runtime API error: {:?}", e);
+				return Ok(false);
+			}
+		};
+		let (tx, rx) = oneshot::channel();
+		sender
+			.send(AvailabilityStore(QueryChunkAvailability(
+				committed_candidate_receipt.descriptor.pov_hash,
+				validator_idx,
+				tx,
+			)))
+			.await?;
+		return rx.await.map_err(Into::into);
+	}
+	Ok(false)
+}
+
+// delegates to the v1 runtime API
+async fn get_availability_cores(
+	relay_parent: Hash,
+	sender: &mut mpsc::Sender<FromJob>,
+) -> Result<Vec<CoreState>, Error> {
+	use FromJob::RuntimeApi;
+	use messages::{
+		RuntimeApiMessage::Request,
+		RuntimeApiRequest::AvailabilityCores,
+	};
+
+	let (tx, rx) = oneshot::channel();
+	sender.send(RuntimeApi(Request(relay_parent, AvailabilityCores(tx)))).await?;
+	match rx.await {
+		Ok(Ok(out)) => Ok(out),
+		Ok(Err(runtime_err)) => Err(runtime_err.into()),
+		Err(err) => Err(err.into())
+	}
+}
+
+// - get the list of core states from the runtime
+// - for each core, concurrently determine chunk availability (see `get_core_availability`)
+// - return the bitfield if there were no errors at any point in this process
+//   (otherwise, it's prone to false negatives)
+async fn construct_availability_bitfield(
+	relay_parent: Hash,
+	validator_idx: ValidatorIndex,
+	sender: &mut mpsc::Sender<FromJob>,
+) -> Result<AvailabilityBitfield, Error> {
+	use futures::lock::Mutex;
+
+	// get the set of availability cores from the runtime
+	let availability_cores = get_availability_cores(relay_parent, sender).await?;
+
+	// we now need sender to be immutable so we can copy the reference to multiple concurrent closures
+	let sender = &*sender;
+
+	// prepare outputs
+	let out = Mutex::new(bitvec!(bitvec::order::Lsb0, u8; 0; availability_cores.len()));
+	// in principle, we know that we never want concurrent access to the _same_ bit within the vec;
+	// we could `let out_ref = out.as_mut_ptr();` here instead, and manually assign bits, avoiding
+	// any need to ever wait to lock this mutex.
+	// in practice, it's safer to just use the mutex, and speed optimizations should wait until
+	// benchmarking proves that they are necessary.
+	let out_ref = &out;
+	let errs = Mutex::new(Vec::new());
+	let errs_ref = &errs;
+
+	// Handle each (idx, core) pair concurrently
+	//
+	// In principle, this work is all concurrent, not parallel. In practice, we can't guarantee it, which is why
+	// we need the mutexes and explicit references above.
+	stream::iter(availability_cores.into_iter().enumerate())
+		.for_each_concurrent(None, |(idx, core)| async move {
+			let availability = match get_core_availability(relay_parent, core, validator_idx, sender).await {
+				Ok(availability) => availability,
+				Err(err) => {
+					errs_ref.lock().await.push(err);
+					return;
+				}
+			};
+			out_ref.lock().await.set(idx, availability);
+		})
+		.await;
+
+	let errs = errs.into_inner();
+	if errs.is_empty() {
+		Ok(out.into_inner().into())
+	} else {
+		Err(errs.into())
+	}
+}
+
+#[derive(Clone)]
+struct MetricsInner {
+	bitfields_signed_total: prometheus::Counter<prometheus::U64>,
+}
+
+/// Bitfield signing metrics.
+#[derive(Default, Clone)]
+pub struct Metrics(Option<MetricsInner>);
+
+impl Metrics {
+	fn on_bitfield_signed(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.bitfields_signed_total.inc();
+		}
+	}
+}
+
+impl metrics::Metrics for Metrics {
+	fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
+		let metrics = MetricsInner {
+			bitfields_signed_total: prometheus::register(
+				prometheus::Counter::new(
+					"parachain_bitfields_signed_total",
+					"Number of bitfields signed.",
+				)?,
+				registry,
+			)?,
+		};
+		Ok(Metrics(Some(metrics)))
+	}
+}
+
+impl JobTrait for BitfieldSigningJob {
+	type ToJob = ToJob;
+	type FromJob = FromJob;
+	type Error = Error;
+	type RunArgs = KeyStorePtr;
+	type Metrics = Metrics;
+
+	const NAME: &'static str = "BitfieldSigningJob";
+
+	/// Run a job for the parent block indicated
+	fn run(
+		relay_parent: Hash,
+		keystore: Self::RunArgs,
+		metrics: Self::Metrics,
+		_receiver: mpsc::Receiver<ToJob>,
+		mut sender: mpsc::Sender<FromJob>,
+	) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
+		async move {
+			// determine the point in time until which we are going to wait
+			let wait_until = Instant::now() + JOB_DELAY;
+
+			// now do all the work we can before we need to wait for the availability store.
+			// if we're not a validator, we can just succeed effortlessly.
+			let validator = match Validator::new(relay_parent, keystore, sender.clone()).await {
+				Ok(validator) => validator,
+				Err(util::Error::NotAValidator) => return Ok(()),
+				Err(err) => return Err(Error::Util(err)),
+			};
+
+			// wait a bit before doing anything else
+			Delay::new_at(wait_until).await?;
+
+			let bitfield =
+				match construct_availability_bitfield(relay_parent, validator.index(), &mut sender).await
+			{
+				Err(Error::Runtime(runtime_err)) => {
+					// Don't take down the node on runtime API errors.
+					log::warn!(target: "bitfield_signing", "Encountered a runtime API error: {:?}", runtime_err);
+					return Ok(());
+				}
+				Err(err) => return Err(err),
+				Ok(bitfield) => bitfield,
+			};
+
+			let signed_bitfield = validator.sign(bitfield);
+			metrics.on_bitfield_signed();
+
+			// make an anonymous scope to contain some use statements to simplify creating the outbound message
+			{
+				use BitfieldDistributionMessage::DistributeBitfield;
+				use FromJob::BitfieldDistribution;
+
+				sender
+					.send(BitfieldDistribution(DistributeBitfield(
+						relay_parent,
+						signed_bitfield,
+					)))
+					.await
+					.map_err(Into::into)
+			}
+		}
+		.boxed()
+	}
+}
+
+/// BitfieldSigningSubsystem manages a number of bitfield signing jobs.
+pub type BitfieldSigningSubsystem<Spawner, Context> =
+	JobManager<Spawner, Context, BitfieldSigningJob>;
diff --git a/node/core/candidate-validation/Cargo.toml b/node/core/candidate-validation/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..8f1b7a0fa0fd2ea7a8e4ca7869c7fcc832296de6
--- /dev/null
+++ b/node/core/candidate-validation/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "polkadot-node-core-candidate-validation"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+
+[dependencies]
+futures = "0.3.5"
+sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-core = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master" }
+parity-scale-codec = { version = "1.3.0", default-features = false, features = ["bit-vec", "derive"] }
+
+polkadot-primitives = { path = "../../../primitives" }
+polkadot-parachain = { path = "../../../parachain" }
+polkadot-node-primitives = { path = "../../primitives" }
+polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+derive_more = "0.99.9"
+log = "0.4.8"
+
+[dev-dependencies]
+sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
+futures = { version = "0.3.5", features = ["thread-pool"] }
+assert_matches = "1.3.0"
+polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..600dde2c4e72a14bf06dd7243e28f79e88d34f8d
--- /dev/null
+++ b/node/core/candidate-validation/src/lib.rs
@@ -0,0 +1,986 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! The Candidate Validation subsystem.
+//!
+//! This handles incoming requests from other subsystems to validate candidates
+//! according to a validation function. This delegates validation to an underlying
+//! pool of processes used for execution of the Wasm.
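+//!
+//! Two request flavours arrive via `CandidateValidationMessage` (handled in `run` below):
+//! `ValidateFromChainState`, which derives the validation data from the relay-parent's
+//! state, and `ValidateFromExhaustive`, which is given all parameters explicitly.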
+
+use polkadot_subsystem::{
+	Subsystem, SubsystemContext, SpawnedSubsystem, SubsystemResult,
+	FromOverseer, OverseerSignal,
+	messages::{
+		AllMessages, CandidateValidationMessage, RuntimeApiMessage,
+		ValidationFailed, RuntimeApiRequest,
+	},
+	metrics::{self, prometheus},
+};
+use polkadot_subsystem::errors::RuntimeApiError;
+use polkadot_node_primitives::{ValidationResult, ValidationOutputs, InvalidCandidate};
+use polkadot_primitives::v1::{
+	ValidationCode, PoV, CandidateDescriptor, ValidationData, PersistedValidationData,
+	TransientValidationData, OccupiedCoreAssumption, Hash,
+};
+use polkadot_parachain::wasm_executor::{self, ValidationPool, ExecutionMode, ValidationError,
+	InvalidCandidate as WasmInvalidCandidate};
+use polkadot_parachain::primitives::{ValidationResult as WasmValidationResult, ValidationParams};
+
+use parity_scale_codec::Encode;
+use sp_core::traits::SpawnNamed;
+
+use futures::channel::oneshot;
+use futures::prelude::*;
+
+use std::sync::Arc;
+
+const LOG_TARGET: &'static str = "candidate_validation";
+
+/// The candidate validation subsystem.
+pub struct CandidateValidationSubsystem<S> {
+	spawn: S,
+	metrics: Metrics,
+}
+
+#[derive(Clone)]
+struct MetricsInner {
+	validation_requests: prometheus::CounterVec<prometheus::U64>,
+}
+
+/// Candidate validation metrics.
+#[derive(Default, Clone)]
+pub struct Metrics(Option<MetricsInner>);
+
+impl Metrics {
+	fn on_validation_event(&self, event: &Result<ValidationResult, ValidationFailed>) {
+		if let Some(metrics) = &self.0 {
+			match event {
+				Ok(ValidationResult::Valid(_)) => {
+					metrics.validation_requests.with_label_values(&["valid"]).inc();
+				},
+				Ok(ValidationResult::Invalid(_)) => {
+					metrics.validation_requests.with_label_values(&["invalid"]).inc();
+				},
+				Err(_) => {
+					metrics.validation_requests.with_label_values(&["failed"]).inc();
+				},
+			}
+		}
+	}
+}
+
+impl metrics::Metrics for Metrics {
+	fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
+		let metrics = MetricsInner {
+			validation_requests: prometheus::register(
+				prometheus::CounterVec::new(
+					prometheus::Opts::new(
+						"parachain_validation_requests_total",
+						"Number of validation requests served.",
+					),
+					&["validity"],
+				)?,
+				registry,
+			)?,
+		};
+		Ok(Metrics(Some(metrics)))
+	}
+}
+
+impl<S> CandidateValidationSubsystem<S> {
+	/// Create a new `CandidateValidationSubsystem` with the given task spawner.
+	pub fn new(spawn: S, metrics: Metrics) -> Self {
+		CandidateValidationSubsystem { spawn, metrics }
+	}
+}
+
+impl<S, C> Subsystem<C> for CandidateValidationSubsystem<S> where
+	C: SubsystemContext<Message = CandidateValidationMessage>,
+	S: SpawnNamed + Clone + 'static,
+{
+	type Metrics = Metrics;
+
+	fn start(self, ctx: C) -> SpawnedSubsystem {
+		SpawnedSubsystem {
+			name: "candidate-validation-subsystem",
+			future: run(ctx, self.spawn, self.metrics).map(|_| ()).boxed(),
+		}
+	}
+}
+
+async fn run(
+	mut ctx: impl SubsystemContext<Message = CandidateValidationMessage>,
+	spawn: impl SpawnNamed + Clone + 'static,
+	metrics: Metrics,
+)
+	-> SubsystemResult<()>
+{
+	let pool = ValidationPool::new();
+
+	loop {
+		match ctx.recv().await? {
+			FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => {}
+			FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {}
+			FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()),
+			FromOverseer::Communication { msg } => match msg {
+				CandidateValidationMessage::ValidateFromChainState(
+					descriptor,
+					pov,
+					response_sender,
+				) => {
+					let res = spawn_validate_from_chain_state(
+						&mut ctx,
+						Some(pool.clone()),
+						descriptor,
+						pov,
+						spawn.clone(),
+					).await;
+
+					match res {
+						Ok(x) => {
+							metrics.on_validation_event(&x);
+							let _ = response_sender.send(x);
+						}
+						Err(e) => return Err(e),
+					}
+				}
+				CandidateValidationMessage::ValidateFromExhaustive(
+					persisted_validation_data,
+					transient_validation_data,
+					validation_code,
+					descriptor,
+					pov,
+					response_sender,
+				) => {
+					let res = spawn_validate_exhaustive(
+						&mut ctx,
+						Some(pool.clone()),
+						persisted_validation_data,
+						transient_validation_data,
+						validation_code,
+						descriptor,
+						pov,
+						spawn.clone(),
+					).await;
+
+					match res {
+						Ok(x) => {
+							metrics.on_validation_event(&x);
+							if let Err(_e) = response_sender.send(x) {
+								log::warn!(
+									target: LOG_TARGET,
+									"Requester of candidate validation dropped",
+								)
+							}
+						},
+						Err(e) => return Err(e),
+					}
+				}
+			}
+		}
+	}
+}
+
+async fn runtime_api_request<T>(
+	ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
+	relay_parent: Hash,
+	request: RuntimeApiRequest,
+	receiver: oneshot::Receiver<Result<T, RuntimeApiError>>,
+) -> SubsystemResult<Result<T, RuntimeApiError>> {
+	ctx.send_message(
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+			relay_parent,
+			request,
+		))
+	).await?;
+
+	receiver.await.map_err(Into::into)
+}
+
+#[derive(Debug)]
+enum AssumptionCheckOutcome {
+	Matches(ValidationData, ValidationCode),
+	DoesNotMatch,
+	BadRequest,
+}
+
+async fn check_assumption_validation_data(
+	ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
+	descriptor: &CandidateDescriptor,
+	assumption: OccupiedCoreAssumption,
+) -> SubsystemResult<AssumptionCheckOutcome> {
+	let validation_data = {
+		let (tx, rx) = oneshot::channel();
+		let d = runtime_api_request(
+			ctx,
+			descriptor.relay_parent,
+			RuntimeApiRequest::FullValidationData(
+				descriptor.para_id,
+				assumption,
+				tx,
+			),
+			rx,
+		).await?;
+
+		match d {
+			Ok(None) | Err(_) => {
+				return Ok(AssumptionCheckOutcome::BadRequest);
+			}
+			Ok(Some(d)) => d,
+		}
+	};
+
+	let persisted_validation_data_hash = validation_data.persisted.hash();
+
+	SubsystemResult::Ok(if descriptor.persisted_validation_data_hash == persisted_validation_data_hash {
+		let (code_tx, code_rx) = oneshot::channel();
+		let validation_code = runtime_api_request(
+			ctx,
+			descriptor.relay_parent,
+			RuntimeApiRequest::ValidationCode(
+				descriptor.para_id,
+				OccupiedCoreAssumption::Included,
+				code_tx,
+			),
+			code_rx,
+		).await?;
+
+		match validation_code {
+			Ok(None) | Err(_) => AssumptionCheckOutcome::BadRequest,
+			Ok(Some(v)) => AssumptionCheckOutcome::Matches(validation_data, v),
+		}
+	} else {
+		AssumptionCheckOutcome::DoesNotMatch
+	})
+}
+
+async fn spawn_validate_from_chain_state(
+	ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
+	validation_pool: Option<ValidationPool>,
+	descriptor: CandidateDescriptor,
+	pov: Arc<PoV>,
+	spawn: impl SpawnNamed + 'static,
+) -> SubsystemResult<Result<ValidationResult, ValidationFailed>> {
+	// The candidate descriptor has a `persisted_validation_data_hash` which corresponds to
+	// one of up to two possible values that we can derive from the state of the
+	// relay-parent. We can fetch these values by getting the persisted validation data
+	// based on the different `OccupiedCoreAssumption`s.
+	match check_assumption_validation_data(
+		ctx,
+		&descriptor,
+		OccupiedCoreAssumption::Included,
+	).await? {
{ + AssumptionCheckOutcome::Matches(validation_data, validation_code) => { + return spawn_validate_exhaustive( + ctx, + validation_pool, + validation_data.persisted, + Some(validation_data.transient), + validation_code, + descriptor, + pov, + spawn, + ).await; + } + AssumptionCheckOutcome::DoesNotMatch => {}, + AssumptionCheckOutcome::BadRequest => return Ok(Err(ValidationFailed("Bad request".into()))), + } + + match check_assumption_validation_data( + ctx, + &descriptor, + OccupiedCoreAssumption::TimedOut, + ).await? { + AssumptionCheckOutcome::Matches(validation_data, validation_code) => { + return spawn_validate_exhaustive( + ctx, + validation_pool, + validation_data.persisted, + Some(validation_data.transient), + validation_code, + descriptor, + pov, + spawn, + ).await; + } + AssumptionCheckOutcome::DoesNotMatch => {}, + AssumptionCheckOutcome::BadRequest => return Ok(Err(ValidationFailed("Bad request".into()))), + } + + // If neither the assumption of the occupied core having the para included or the assumption + // of the occupied core timing out are valid, then the persisted_validation_data_hash in the descriptor + // is not based on the relay parent and is thus invalid. + Ok(Ok(ValidationResult::Invalid(InvalidCandidate::BadParent))) +} + +async fn spawn_validate_exhaustive( + ctx: &mut impl SubsystemContext, + validation_pool: Option, + persisted_validation_data: PersistedValidationData, + transient_validation_data: Option, + validation_code: ValidationCode, + descriptor: CandidateDescriptor, + pov: Arc, + spawn: impl SpawnNamed + 'static, +) -> SubsystemResult> { + let (tx, rx) = oneshot::channel(); + let fut = async move { + let res = validate_candidate_exhaustive::( + validation_pool, + persisted_validation_data, + transient_validation_data, + validation_code, + descriptor, + pov, + spawn, + ); + + let _ = tx.send(res); + }; + + ctx.spawn_blocking("blocking-candidate-validation-task", fut.boxed()).await?; + rx.await.map_err(Into::into) +} + +/// Does basic checks of a candidate. Provide the encoded PoV-block. Returns `Ok` if basic checks +/// are passed, `Err` otherwise. +fn perform_basic_checks( + candidate: &CandidateDescriptor, + max_block_data_size: Option, + pov: &PoV, +) -> Result<(), InvalidCandidate> { + let encoded_pov = pov.encode(); + let hash = pov.hash(); + + if let Some(max_size) = max_block_data_size { + if encoded_pov.len() as u64 > max_size { + return Err(InvalidCandidate::ParamsTooLarge(encoded_pov.len() as u64)); + } + } + + if hash != candidate.pov_hash { + return Err(InvalidCandidate::HashMismatch); + } + + if let Err(()) = candidate.check_collator_signature() { + return Err(InvalidCandidate::BadSignature); + } + + Ok(()) +} + +/// Check the result of Wasm execution against the constraints given by the relay-chain. +/// +/// Returns `Ok(())` if checks pass, error otherwise. 
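+///
+/// As a worked example (hypothetical numbers, not taken from any runtime configuration):
+/// with `max_head_data_size = 8`, a result whose `head_data` is 9 bytes long fails with
+/// `InvalidCandidate::HeadDataTooLarge(9)`; and while `code_upgrade_allowed` is `None`,
+/// any `new_validation_code` at all is rejected with `InvalidCandidate::CodeUpgradeNotAllowed`.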
+fn check_wasm_result_against_constraints( + transient_params: &TransientValidationData, + result: &WasmValidationResult, +) -> Result<(), InvalidCandidate> { + if result.head_data.0.len() > transient_params.max_head_data_size as _ { + return Err(InvalidCandidate::HeadDataTooLarge(result.head_data.0.len() as u64)) + } + + if let Some(ref code) = result.new_validation_code { + if transient_params.code_upgrade_allowed.is_none() { + return Err(InvalidCandidate::CodeUpgradeNotAllowed) + } + + if code.0.len() > transient_params.max_code_size as _ { + return Err(InvalidCandidate::NewCodeTooLarge(code.0.len() as u64)) + } + } + + Ok(()) +} + +trait ValidationBackend { + type Arg; + + fn validate( + arg: Self::Arg, + validation_code: &ValidationCode, + params: ValidationParams, + spawn: S, + ) -> Result; +} + +struct RealValidationBackend; + +impl ValidationBackend for RealValidationBackend { + type Arg = Option; + + fn validate( + pool: Option, + validation_code: &ValidationCode, + params: ValidationParams, + spawn: S, + ) -> Result { + let execution_mode = pool.as_ref() + .map(ExecutionMode::Remote) + .unwrap_or(ExecutionMode::Local); + + wasm_executor::validate_candidate( + &validation_code.0, + params, + execution_mode, + spawn, + ) + } +} + +/// Validates the candidate from exhaustive parameters. +/// +/// Sends the result of validation on the channel once complete. +fn validate_candidate_exhaustive( + backend_arg: B::Arg, + persisted_validation_data: PersistedValidationData, + transient_validation_data: Option, + validation_code: ValidationCode, + descriptor: CandidateDescriptor, + pov: Arc, + spawn: S, +) -> Result { + if let Err(e) = perform_basic_checks(&descriptor, None, &*pov) { + return Ok(ValidationResult::Invalid(e)) + } + + let params = ValidationParams { + parent_head: persisted_validation_data.parent_head.clone(), + block_data: pov.block_data.clone(), + relay_chain_height: persisted_validation_data.block_number, + hrmp_mqc_heads: persisted_validation_data.hrmp_mqc_heads.clone(), + }; + + match B::validate(backend_arg, &validation_code, params, spawn) { + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Timeout)) => + Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::ParamsTooLarge(l))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ParamsTooLarge(l as u64))), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::CodeTooLarge(l))) => + Ok(ValidationResult::Invalid(InvalidCandidate::CodeTooLarge(l as u64))), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::BadReturn)) => + Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn)), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::WasmExecutor(e))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e.to_string()))), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::ExternalWasmExecutor(e))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e.to_string()))), + Err(ValidationError::Internal(e)) => Err(ValidationFailed(e.to_string())), + Ok(res) => { + let post_check_result = if let Some(transient) = transient_validation_data { + check_wasm_result_against_constraints( + &transient, + &res, + ) + } else { + Ok(()) + }; + + Ok(match post_check_result { + Ok(()) => ValidationResult::Valid(ValidationOutputs { + head_data: res.head_data, + validation_data: persisted_validation_data, + upward_messages: res.upward_messages, + fees: 0, + new_validation_code: res.new_validation_code, 
+ }), + Err(e) => ValidationResult::Invalid(e), + }) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_node_subsystem_test_helpers as test_helpers; + use polkadot_primitives::v1::{HeadData, BlockData}; + use sp_core::testing::TaskExecutor; + use futures::executor; + use assert_matches::assert_matches; + use sp_keyring::Sr25519Keyring; + + struct MockValidationBackend; + + struct MockValidationArg { + result: Result, + } + + impl ValidationBackend for MockValidationBackend { + type Arg = MockValidationArg; + + fn validate( + arg: Self::Arg, + _validation_code: &ValidationCode, + _params: ValidationParams, + _spawn: S, + ) -> Result { + arg.result + } + } + + fn collator_sign(descriptor: &mut CandidateDescriptor, collator: Sr25519Keyring) { + descriptor.collator = collator.public().into(); + let payload = polkadot_primitives::v1::collator_signature_payload( + &descriptor.relay_parent, + &descriptor.para_id, + &descriptor.persisted_validation_data_hash, + &descriptor.pov_hash, + ); + + descriptor.signature = collator.sign(&payload[..]).into(); + assert!(descriptor.check_collator_signature().is_ok()); + } + + #[test] + fn correctly_checks_included_assumption() { + let validation_data: ValidationData = Default::default(); + let validation_code: ValidationCode = vec![1, 2, 3].into(); + + let persisted_validation_data_hash = validation_data.persisted.hash(); + let relay_parent = [2; 32].into(); + let para_id = 5.into(); + + let mut candidate = CandidateDescriptor::default(); + candidate.relay_parent = relay_parent; + candidate.persisted_validation_data_hash = persisted_validation_data_hash; + candidate.para_id = para_id; + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + + let (check_fut, check_result) = check_assumption_validation_data( + &mut ctx, + &candidate, + OccupiedCoreAssumption::Included, + ).remote_handle(); + + let test_fut = async move { + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::FullValidationData(p, OccupiedCoreAssumption::Included, tx) + )) => { + assert_eq!(rp, relay_parent); + assert_eq!(p, para_id); + + let _ = tx.send(Ok(Some(validation_data.clone()))); + } + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::ValidationCode(p, OccupiedCoreAssumption::Included, tx) + )) => { + assert_eq!(rp, relay_parent); + assert_eq!(p, para_id); + + let _ = tx.send(Ok(Some(validation_code.clone()))); + } + ); + + assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::Matches(o, v) => { + assert_eq!(o, validation_data); + assert_eq!(v, validation_code); + }); + }; + + let test_fut = future::join(test_fut, check_fut); + executor::block_on(test_fut); + } + + #[test] + fn correctly_checks_timed_out_assumption() { + let validation_data: ValidationData = Default::default(); + let validation_code: ValidationCode = vec![1, 2, 3].into(); + + let persisted_validation_data_hash = validation_data.persisted.hash(); + let relay_parent = [2; 32].into(); + let para_id = 5.into(); + + let mut candidate = CandidateDescriptor::default(); + candidate.relay_parent = relay_parent; + candidate.persisted_validation_data_hash = persisted_validation_data_hash; + candidate.para_id = para_id; + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + + let (check_fut, check_result) = 
check_assumption_validation_data( + &mut ctx, + &candidate, + OccupiedCoreAssumption::TimedOut, + ).remote_handle(); + + let test_fut = async move { + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::FullValidationData(p, OccupiedCoreAssumption::TimedOut, tx) + )) => { + assert_eq!(rp, relay_parent); + assert_eq!(p, para_id); + + let _ = tx.send(Ok(Some(validation_data.clone()))); + } + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::ValidationCode(p, OccupiedCoreAssumption::Included, tx) + )) => { + assert_eq!(rp, relay_parent); + assert_eq!(p, para_id); + + let _ = tx.send(Ok(Some(validation_code.clone()))); + } + ); + + assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::Matches(o, v) => { + assert_eq!(o, validation_data); + assert_eq!(v, validation_code); + }); + }; + + let test_fut = future::join(test_fut, check_fut); + executor::block_on(test_fut); + } + + #[test] + fn check_is_bad_request_if_no_validation_data() { + let validation_data: ValidationData = Default::default(); + let persisted_validation_data_hash = validation_data.persisted.hash(); + let relay_parent = [2; 32].into(); + let para_id = 5.into(); + + let mut candidate = CandidateDescriptor::default(); + candidate.relay_parent = relay_parent; + candidate.persisted_validation_data_hash = persisted_validation_data_hash; + candidate.para_id = para_id; + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + + let (check_fut, check_result) = check_assumption_validation_data( + &mut ctx, + &candidate, + OccupiedCoreAssumption::Included, + ).remote_handle(); + + let test_fut = async move { + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::FullValidationData(p, OccupiedCoreAssumption::Included, tx) + )) => { + assert_eq!(rp, relay_parent); + assert_eq!(p, para_id); + + let _ = tx.send(Ok(None)); + } + ); + + assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::BadRequest); + }; + + let test_fut = future::join(test_fut, check_fut); + executor::block_on(test_fut); + } + + #[test] + fn check_is_bad_request_if_no_validation_code() { + let validation_data: ValidationData = Default::default(); + let persisted_validation_data_hash = validation_data.persisted.hash(); + let relay_parent = [2; 32].into(); + let para_id = 5.into(); + + let mut candidate = CandidateDescriptor::default(); + candidate.relay_parent = relay_parent; + candidate.persisted_validation_data_hash = persisted_validation_data_hash; + candidate.para_id = para_id; + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + + let (check_fut, check_result) = check_assumption_validation_data( + &mut ctx, + &candidate, + OccupiedCoreAssumption::TimedOut, + ).remote_handle(); + + let test_fut = async move { + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::FullValidationData(p, OccupiedCoreAssumption::TimedOut, tx) + )) => { + assert_eq!(rp, relay_parent); + assert_eq!(p, para_id); + + let _ = tx.send(Ok(Some(validation_data.clone()))); + } + ); + + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::ValidationCode(p, 
OccupiedCoreAssumption::Included, tx) + )) => { + assert_eq!(rp, relay_parent); + assert_eq!(p, para_id); + + let _ = tx.send(Ok(None)); + } + ); + + assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::BadRequest); + }; + + let test_fut = future::join(test_fut, check_fut); + executor::block_on(test_fut); + } + + #[test] + fn check_does_not_match() { + let validation_data: ValidationData = Default::default(); + let relay_parent = [2; 32].into(); + let para_id = 5.into(); + + let mut candidate = CandidateDescriptor::default(); + candidate.relay_parent = relay_parent; + candidate.persisted_validation_data_hash = [3; 32].into(); + candidate.para_id = para_id; + + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = test_helpers::make_subsystem_context(pool.clone()); + + let (check_fut, check_result) = check_assumption_validation_data( + &mut ctx, + &candidate, + OccupiedCoreAssumption::Included, + ).remote_handle(); + + let test_fut = async move { + assert_matches!( + ctx_handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + rp, + RuntimeApiRequest::FullValidationData(p, OccupiedCoreAssumption::Included, tx) + )) => { + assert_eq!(rp, relay_parent); + assert_eq!(p, para_id); + + let _ = tx.send(Ok(Some(validation_data.clone()))); + } + ); + + assert_matches!(check_result.await.unwrap(), AssumptionCheckOutcome::DoesNotMatch); + }; + + let test_fut = future::join(test_fut, check_fut); + executor::block_on(test_fut); + } + + #[test] + fn candidate_validation_ok_is_ok() { + let mut validation_data: ValidationData = Default::default(); + validation_data.transient.max_head_data_size = 1024; + validation_data.transient.max_code_size = 1024; + validation_data.transient.code_upgrade_allowed = Some(20); + + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + + let mut descriptor = CandidateDescriptor::default(); + descriptor.pov_hash = pov.hash(); + collator_sign(&mut descriptor, Sr25519Keyring::Alice); + + assert!(perform_basic_checks(&descriptor, Some(1024), &pov).is_ok()); + + let validation_result = WasmValidationResult { + head_data: HeadData(vec![1, 1, 1]), + new_validation_code: Some(vec![2, 2, 2].into()), + upward_messages: Vec::new(), + processed_downward_messages: 0, + }; + + assert!(check_wasm_result_against_constraints( + &validation_data.transient, + &validation_result, + ).is_ok()); + + let v = validate_candidate_exhaustive::( + MockValidationArg { result: Ok(validation_result) }, + validation_data.persisted.clone(), + Some(validation_data.transient), + vec![1, 2, 3].into(), + descriptor, + Arc::new(pov), + TaskExecutor::new(), + ).unwrap(); + + assert_matches!(v, ValidationResult::Valid(outputs) => { + assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); + assert_eq!(outputs.validation_data, validation_data.persisted); + assert_eq!(outputs.upward_messages, Vec::new()); + assert_eq!(outputs.fees, 0); + assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + }); + } + + #[test] + fn candidate_validation_bad_return_is_invalid() { + let mut validation_data: ValidationData = Default::default(); + + validation_data.transient.max_head_data_size = 1024; + validation_data.transient.max_code_size = 1024; + validation_data.transient.code_upgrade_allowed = Some(20); + + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + + let mut descriptor = CandidateDescriptor::default(); + descriptor.pov_hash = pov.hash(); + collator_sign(&mut descriptor, Sr25519Keyring::Alice); + + assert!(perform_basic_checks(&descriptor, Some(1024), 
&pov).is_ok());
+
+		let validation_result = WasmValidationResult {
+			head_data: HeadData(vec![1, 1, 1]),
+			new_validation_code: Some(vec![2, 2, 2].into()),
+			upward_messages: Vec::new(),
+			processed_downward_messages: 0,
+		};
+
+		assert!(check_wasm_result_against_constraints(
+			&validation_data.transient,
+			&validation_result,
+		).is_ok());
+
+		let v = validate_candidate_exhaustive::<MockValidationBackend, _>(
+			MockValidationArg {
+				result: Err(ValidationError::InvalidCandidate(
+					WasmInvalidCandidate::BadReturn
+				))
+			},
+			validation_data.persisted,
+			Some(validation_data.transient),
+			vec![1, 2, 3].into(),
+			descriptor,
+			Arc::new(pov),
+			TaskExecutor::new(),
+		).unwrap();
+
+		assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::BadReturn));
+	}
+
+	#[test]
+	fn candidate_validation_timeout_is_invalid() {
+		let mut validation_data: ValidationData = Default::default();
+
+		validation_data.transient.max_head_data_size = 1024;
+		validation_data.transient.max_code_size = 1024;
+		validation_data.transient.code_upgrade_allowed = Some(20);
+
+		let pov = PoV { block_data: BlockData(vec![1; 32]) };
+
+		let mut descriptor = CandidateDescriptor::default();
+		descriptor.pov_hash = pov.hash();
+		collator_sign(&mut descriptor, Sr25519Keyring::Alice);
+
+		assert!(perform_basic_checks(&descriptor, Some(1024), &pov).is_ok());
+
+		let validation_result = WasmValidationResult {
+			head_data: HeadData(vec![1, 1, 1]),
+			new_validation_code: Some(vec![2, 2, 2].into()),
+			upward_messages: Vec::new(),
+			processed_downward_messages: 0,
+		};
+
+		assert!(check_wasm_result_against_constraints(
+			&validation_data.transient,
+			&validation_result,
+		).is_ok());
+
+		let v = validate_candidate_exhaustive::<MockValidationBackend, _>(
+			MockValidationArg {
+				result: Err(ValidationError::InvalidCandidate(
+					WasmInvalidCandidate::Timeout
+				))
+			},
+			validation_data.persisted,
+			Some(validation_data.transient),
+			vec![1, 2, 3].into(),
+			descriptor,
+			Arc::new(pov),
+			TaskExecutor::new(),
+		);
+
+		// a timeout is reported as an *invalid candidate*, not as an internal error
+		assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)));
+	}
+
+	#[test]
+	fn candidate_validation_ok_does_not_validate_outputs_if_no_transient() {
+		let mut validation_data: ValidationData = Default::default();
+		validation_data.transient.max_head_data_size = 1;
+		validation_data.transient.max_code_size = 1;
+
+		let pov = PoV { block_data: BlockData(vec![1; 32]) };
+
+		let mut descriptor = CandidateDescriptor::default();
+		descriptor.pov_hash = pov.hash();
+		collator_sign(&mut descriptor, Sr25519Keyring::Alice);
+
+		assert!(perform_basic_checks(&descriptor, Some(1024), &pov).is_ok());
+
+		let validation_result = WasmValidationResult {
+			head_data: HeadData(vec![1, 1, 1]),
+			new_validation_code: Some(vec![2, 2, 2].into()),
+			upward_messages: Vec::new(),
+			processed_downward_messages: 0,
+		};
+
+		assert!(check_wasm_result_against_constraints(
+			&validation_data.transient,
+			&validation_result,
+		).is_err());
+
+		let v = validate_candidate_exhaustive::<MockValidationBackend, _>(
+			MockValidationArg { result: Ok(validation_result) },
+			validation_data.persisted.clone(),
+			None,
+			vec![1, 2, 3].into(),
+			descriptor,
+			Arc::new(pov),
+			TaskExecutor::new(),
+		).unwrap();
+
+		assert_matches!(v, ValidationResult::Valid(outputs) => {
+			assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1]));
+			assert_eq!(outputs.validation_data, validation_data.persisted);
+			assert_eq!(outputs.upward_messages, Vec::new());
+			assert_eq!(outputs.fees, 0);
+			assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into()));
+		});
+	}
+}
diff --git a/node/core/chain-api/Cargo.toml
b/node/core/chain-api/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..d626a0d9329d0949f0d5ba76947bfe9eb3ce075b --- /dev/null +++ b/node/core/chain-api/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "polkadot-node-core-chain-api" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = { version = "0.3.5" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +polkadot-primitives = { path = "../../../primitives" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } + +[dev-dependencies] +futures = { version = "0.3.5", features = ["thread-pool"] } +maplit = "1.0.2" +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/core/chain-api/src/lib.rs b/node/core/chain-api/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d3be1c4bf484fb293de2c9e55bf60193314334a4 --- /dev/null +++ b/node/core/chain-api/src/lib.rs @@ -0,0 +1,389 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implements the Chain API Subsystem +//! +//! Provides access to the chain data. Every request may return an error. +//! At the moment, the implementation requires `Client` to implement `HeaderBackend`, +//! we may add more bounds in the future if we will need e.g. block bodies. +//! +//! Supported requests: +//! * Block hash to number +//! * Finalized block number to hash +//! * Last finalized block number +//! * Ancestors + +use polkadot_subsystem::{ + FromOverseer, OverseerSignal, + SpawnedSubsystem, Subsystem, SubsystemResult, SubsystemContext, + messages::ChainApiMessage, + metrics::{self, prometheus}, +}; +use polkadot_primitives::v1::{Block, BlockId}; +use sp_blockchain::HeaderBackend; + +use futures::prelude::*; + +/// The Chain API Subsystem implementation. +pub struct ChainApiSubsystem { + client: Client, + metrics: Metrics, +} + +impl ChainApiSubsystem { + /// Create a new Chain API subsystem with the given client. + pub fn new(client: Client, metrics: Metrics) -> Self { + ChainApiSubsystem { + client, + metrics, + } + } +} + +impl Subsystem for ChainApiSubsystem where + Client: HeaderBackend + 'static, + Context: SubsystemContext +{ + type Metrics = Metrics; + + fn start(self, ctx: Context) -> SpawnedSubsystem { + SpawnedSubsystem { + future: run(ctx, self).map(|_| ()).boxed(), + name: "chain-api-subsystem", + } + } +} + +async fn run( + mut ctx: impl SubsystemContext, + subsystem: ChainApiSubsystem, +) -> SubsystemResult<()> +where + Client: HeaderBackend, +{ + loop { + match ctx.recv().await? 
{ + FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => {}, + FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {}, + FromOverseer::Communication { msg } => match msg { + ChainApiMessage::BlockNumber(hash, response_channel) => { + let result = subsystem.client.number(hash).map_err(|e| e.to_string().into()); + subsystem.metrics.on_request(result.is_ok()); + let _ = response_channel.send(result); + }, + ChainApiMessage::FinalizedBlockHash(number, response_channel) => { + // Note: we don't verify it's finalized + let result = subsystem.client.hash(number).map_err(|e| e.to_string().into()); + subsystem.metrics.on_request(result.is_ok()); + let _ = response_channel.send(result); + }, + ChainApiMessage::FinalizedBlockNumber(response_channel) => { + let result = subsystem.client.info().finalized_number; + // always succeeds + subsystem.metrics.on_request(true); + let _ = response_channel.send(Ok(result)); + }, + ChainApiMessage::Ancestors { hash, k, response_channel } => { + let mut hash = hash; + + let next_parent = core::iter::from_fn(|| { + let maybe_header = subsystem.client.header(BlockId::Hash(hash)); + match maybe_header { + // propagate the error + Err(e) => Some(Err(e.to_string().into())), + // fewer than `k` ancestors are available + Ok(None) => None, + Ok(Some(header)) => { + hash = header.parent_hash; + Some(Ok(hash)) + } + } + }); + + let result = next_parent.take(k).collect::, _>>(); + subsystem.metrics.on_request(result.is_ok()); + let _ = response_channel.send(result); + }, + } + } + } +} + +#[derive(Clone)] +struct MetricsInner { + chain_api_requests: prometheus::CounterVec, +} + +/// Chain API metrics. +#[derive(Default, Clone)] +pub struct Metrics(Option); + +impl Metrics { + fn on_request(&self, succeeded: bool) { + if let Some(metrics) = &self.0 { + if succeeded { + metrics.chain_api_requests.with_label_values(&["succeeded"]).inc(); + } else { + metrics.chain_api_requests.with_label_values(&["failed"]).inc(); + } + } + } +} + +impl metrics::Metrics for Metrics { + fn try_register(registry: &prometheus::Registry) -> Result { + let metrics = MetricsInner { + chain_api_requests: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "parachain_chain_api_requests_total", + "Number of Chain API requests served.", + ), + &["succeeded", "failed"], + )?, + registry, + )?, + }; + Ok(Metrics(Some(metrics))) + } +} + + +#[cfg(test)] +mod tests { + use super::*; + + use std::collections::BTreeMap; + use futures::{future::BoxFuture, channel::oneshot}; + + use polkadot_primitives::v1::{Hash, BlockNumber, BlockId, Header}; + use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle}; + use sp_blockchain::Info as BlockInfo; + use sp_core::testing::TaskExecutor; + + #[derive(Clone)] + struct TestClient { + blocks: BTreeMap, + finalized_blocks: BTreeMap, + headers: BTreeMap, + } + + const ONE: Hash = Hash::repeat_byte(0x01); + const TWO: Hash = Hash::repeat_byte(0x02); + const THREE: Hash = Hash::repeat_byte(0x03); + const FOUR: Hash = Hash::repeat_byte(0x04); + const ERROR_PATH: Hash = Hash::repeat_byte(0xFF); + + fn default_header() -> Header { + Header { + parent_hash: Hash::zero(), + number: 100500, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + } + } + + impl Default for TestClient { + fn default() -> Self { + Self { + blocks: maplit::btreemap! 
{ + ONE => 1, + TWO => 2, + THREE => 3, + FOUR => 4, + }, + finalized_blocks: maplit::btreemap! { + 1 => ONE, + 3 => THREE, + }, + headers: maplit::btreemap! { + TWO => Header { + parent_hash: ONE, + number: 2, + ..default_header() + }, + THREE => Header { + parent_hash: TWO, + number: 3, + ..default_header() + }, + FOUR => Header { + parent_hash: THREE, + number: 4, + ..default_header() + }, + ERROR_PATH => Header { + ..default_header() + } + } + } + } + } + + fn last_key_value(map: &BTreeMap) -> (K, V) { + assert!(!map.is_empty()); + map.iter() + .last() + .map(|(k, v)| (k.clone(), v.clone())) + .unwrap() + } + + impl HeaderBackend for TestClient { + fn info(&self) -> BlockInfo { + let genesis_hash = self.blocks.iter().next().map(|(h, _)| *h).unwrap(); + let (best_hash, best_number) = last_key_value(&self.blocks); + let (finalized_number, finalized_hash) = last_key_value(&self.finalized_blocks); + + BlockInfo { + best_hash, + best_number, + genesis_hash, + finalized_hash, + finalized_number, + number_leaves: 0, + } + } + fn number(&self, hash: Hash) -> sp_blockchain::Result> { + Ok(self.blocks.get(&hash).copied()) + } + fn hash(&self, number: BlockNumber) -> sp_blockchain::Result> { + Ok(self.finalized_blocks.get(&number).copied()) + } + fn header(&self, id: BlockId) -> sp_blockchain::Result> { + match id { + // for error path testing + BlockId::Hash(hash) if hash.is_zero() => { + Err(sp_blockchain::Error::Backend("Zero hashes are illegal!".into())) + } + BlockId::Hash(hash) => { + Ok(self.headers.get(&hash).cloned()) + } + _ => unreachable!(), + } + } + fn status(&self, _id: BlockId) -> sp_blockchain::Result { + unimplemented!() + } + } + + fn test_harness( + test: impl FnOnce(TestClient, TestSubsystemContextHandle) + -> BoxFuture<'static, ()>, + ) { + let (ctx, ctx_handle) = make_subsystem_context(TaskExecutor::new()); + let client = TestClient::default(); + + let subsystem = ChainApiSubsystem::new(client.clone(), Metrics(None)); + let chain_api_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = test(client, ctx_handle); + + futures::executor::block_on(future::join(chain_api_task, test_task)); + } + + #[test] + fn request_block_number() { + test_harness(|client, mut sender| { + async move { + let zero = Hash::zero(); + let test_cases = [ + (TWO, client.number(TWO).unwrap()), + (zero, client.number(zero).unwrap()), // not here + ]; + for (hash, expected) in &test_cases { + let (tx, rx) = oneshot::channel(); + + sender.send(FromOverseer::Communication { + msg: ChainApiMessage::BlockNumber(*hash, tx), + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), *expected); + } + + sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }.boxed() + }) + } + + #[test] + fn request_finalized_hash() { + test_harness(|client, mut sender| { + async move { + let test_cases = [ + (1, client.hash(1).unwrap()), // not here + (2, client.hash(2).unwrap()), + ]; + for (number, expected) in &test_cases { + let (tx, rx) = oneshot::channel(); + + sender.send(FromOverseer::Communication { + msg: ChainApiMessage::FinalizedBlockHash(*number, tx), + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), *expected); + } + + sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }.boxed() + }) + } + + #[test] + fn request_last_finalized_number() { + test_harness(|client, mut sender| { + async move { + let (tx, rx) = oneshot::channel(); + + let expected = client.info().finalized_number; + sender.send(FromOverseer::Communication { + msg: 
ChainApiMessage::FinalizedBlockNumber(tx), + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), expected); + + sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }.boxed() + }) + } + + #[test] + fn request_ancestors() { + test_harness(|_client, mut sender| { + async move { + let (tx, rx) = oneshot::channel(); + sender.send(FromOverseer::Communication { + msg: ChainApiMessage::Ancestors { hash: THREE, k: 4, response_channel: tx }, + }).await; + assert_eq!(rx.await.unwrap().unwrap(), vec![TWO, ONE]); + + let (tx, rx) = oneshot::channel(); + sender.send(FromOverseer::Communication { + msg: ChainApiMessage::Ancestors { hash: TWO, k: 1, response_channel: tx }, + }).await; + assert_eq!(rx.await.unwrap().unwrap(), vec![ONE]); + + let (tx, rx) = oneshot::channel(); + sender.send(FromOverseer::Communication { + msg: ChainApiMessage::Ancestors { hash: ERROR_PATH, k: 2, response_channel: tx }, + }).await; + assert!(rx.await.unwrap().is_err()); + + sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }.boxed() + }) + } +} diff --git a/node/core/proposer/Cargo.toml b/node/core/proposer/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..7cf7fc28af22253a5d6a5e789a3f266749526710 --- /dev/null +++ b/node/core/proposer/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "polkadot-node-core-proposer" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.4" +futures-timer = "3.0.1" +log = "0.4.8" +parity-scale-codec = "1.3.4" +polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-overseer = { path = "../../overseer" } +polkadot-primitives = { path = "../../../primitives" } +sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } +tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } +wasm-timer = "0.2.4" diff --git a/node/core/proposer/src/lib.rs b/node/core/proposer/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..8fd432a1be90e51c94b269d09d3a1ced037454f3 --- /dev/null +++ b/node/core/proposer/src/lib.rs @@ -0,0 +1,264 @@ +use futures::prelude::*; +use futures::select; +use polkadot_node_subsystem::{messages::{AllMessages, ProvisionerInherentData, ProvisionerMessage}, SubsystemError}; +use polkadot_overseer::OverseerHandler; +use polkadot_primitives::v1::{ + Block, Hash, Header, +}; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::HeaderBackend; +use sp_consensus::{Proposal, RecordProof}; +use sp_inherents::InherentData; +use sp_runtime::traits::{DigestFor, HashFor}; +use 
sp_transaction_pool::TransactionPool; +use std::{fmt, pin::Pin, sync::Arc, time}; + +/// How long proposal can take before we give up and err out +const PROPOSE_TIMEOUT: core::time::Duration = core::time::Duration::from_secs(2); + +/// Custom Proposer factory for Polkadot +pub struct ProposerFactory { + inner: sc_basic_authorship::ProposerFactory, + overseer: OverseerHandler, +} + +impl ProposerFactory { + pub fn new( + client: Arc, + transaction_pool: Arc, + overseer: OverseerHandler, + ) -> Self { + ProposerFactory { + inner: sc_basic_authorship::ProposerFactory::new( + client, + transaction_pool, + None, + ), + overseer, + } + } +} + +impl sp_consensus::Environment + for ProposerFactory +where + TxPool: 'static + TransactionPool, + Client: 'static + + BlockBuilderProvider + + ProvideRuntimeApi + + HeaderBackend + + Send + + Sync, + Client::Api: + BlockBuilderApi + ApiExt, + Backend: + 'static + sc_client_api::Backend>, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + sp_api::StateBackendFor: sp_api::StateBackend> + Send, +{ + type CreateProposer = Pin> + Send + 'static, + >>; + type Proposer = Proposer; + type Error = Error; + + fn init(&mut self, parent_header: &Header) -> Self::CreateProposer { + // create the inner proposer + let proposer = self.inner.init(parent_header).into_inner(); + + // data to be moved into the future + let overseer = self.overseer.clone(); + let parent_header_hash = parent_header.hash(); + + async move { + Ok(Proposer { + inner: proposer?, + overseer, + parent_header_hash, + }) + }.boxed() + } +} + +/// Custom Proposer for Polkadot. +/// +/// This proposer gets the ProvisionerInherentData and injects it into the wrapped +/// proposer's inherent data, then delegates the actual proposal generation. +pub struct Proposer, Backend, Client> { + inner: sc_basic_authorship::Proposer, + overseer: OverseerHandler, + parent_header_hash: Hash, +} + +// This impl has the same generic bounds as the Proposer impl. +impl Proposer +where + TxPool: 'static + TransactionPool, + Client: 'static + + BlockBuilderProvider + + ProvideRuntimeApi + + HeaderBackend + + Send + + Sync, + Client::Api: + BlockBuilderApi + ApiExt, + Backend: + 'static + sc_client_api::Backend>, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + sp_api::StateBackendFor: sp_api::StateBackend> + Send, +{ + /// Get provisioner inherent data + /// + /// This function has a constant timeout: `PROPOSE_TIMEOUT`. + fn get_provisioner_data(&self) -> impl Future> { + // clone this (lightweight) data because we're going to move it into the future + let mut overseer = self.overseer.clone(); + let parent_header_hash = self.parent_header_hash.clone(); + + let mut provisioner_inherent_data = async move { + let (sender, receiver) = futures::channel::oneshot::channel(); + + overseer.wait_for_activation(parent_header_hash, sender).await?; + receiver.await.map_err(Error::ClosedChannelFromProvisioner)?; + + let (sender, receiver) = futures::channel::oneshot::channel(); + // strictly speaking, we don't _have_ to .await this send_msg before opening the + // receiver; it's possible that the response there would be ready slightly before + // this call completes. IMO it's not worth the hassle or overhead of spawning a + // distinct task for that kind of miniscule efficiency improvement. 
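+			// To recap the handshake so far: we first wait for the overseer to signal
+			// that the parent block's leaf has been activated (so a provisioner job for
+			// it exists), and only then request inherent data for that same relay parent.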
+ overseer.send_msg(AllMessages::Provisioner( + ProvisionerMessage::RequestInherentData(parent_header_hash, sender), + )).await?; + + receiver.await.map_err(Error::ClosedChannelFromProvisioner) + } + .boxed() + .fuse(); + + let mut timeout = wasm_timer::Delay::new(PROPOSE_TIMEOUT).fuse(); + + async move { + select! { + pid = provisioner_inherent_data => pid, + _ = timeout => Err(Error::Timeout), + } + } + } +} + +impl sp_consensus::Proposer for Proposer +where + TxPool: 'static + TransactionPool, + Client: 'static + + BlockBuilderProvider + + ProvideRuntimeApi + + HeaderBackend + + Send + + Sync, + Client::Api: + BlockBuilderApi + ApiExt, + Backend: + 'static + sc_client_api::Backend>, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + sp_api::StateBackendFor: sp_api::StateBackend> + Send, +{ + type Transaction = sc_client_api::TransactionFor; + type Proposal = Pin>, Error>> + Send, + >>; + type Error = Error; + + fn propose( + self, + mut inherent_data: InherentData, + inherent_digests: DigestFor, + max_duration: time::Duration, + record_proof: RecordProof, + ) -> Self::Proposal { + let provisioner_data = self.get_provisioner_data(); + + async move { + let provisioner_data = match provisioner_data.await { + Ok(pd) => pd, + Err(err) => { + log::warn!("could not get provisioner inherent data; injecting default data: {}", err); + Default::default() + } + }; + + inherent_data.put_data( + polkadot_primitives::v1::INCLUSION_INHERENT_IDENTIFIER, + &provisioner_data, + )?; + + self.inner + .propose(inherent_data, inherent_digests, max_duration, record_proof) + .await + .map_err(Into::into) + } + .boxed() + } +} + +// It would have been more ergonomic to use thiserror to derive the +// From implementations, Display, and std::error::Error, but unfortunately +// two of the wrapped errors (sp_inherents::Error, SubsystemError) also +// don't impl std::error::Error, which breaks the thiserror derive. 
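+// For illustration only: if those wrapped types gained std::error::Error impls, a
+// thiserror version could hypothetically look roughly like this:
+//
+//     #[derive(Debug, thiserror::Error)]
+//     pub enum Error {
+//         #[error("consensus error: {0}")]
+//         Consensus(#[from] sp_consensus::Error),
+//         #[error("blockchain error: {0}")]
+//         Blockchain(#[from] sp_blockchain::Error),
+//         // ...and so on for the remaining variants.
+//     }
+//
+// Until then, the From/Display/Error impls below are written out by hand.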
+#[derive(Debug)] +pub enum Error { + Consensus(sp_consensus::Error), + Blockchain(sp_blockchain::Error), + Inherent(sp_inherents::Error), + Timeout, + ClosedChannelFromProvisioner(futures::channel::oneshot::Canceled), + Subsystem(SubsystemError) +} + +impl From for Error { + fn from(e: sp_consensus::Error) -> Error { + Error::Consensus(e) + } +} + +impl From for Error { + fn from(e: sp_blockchain::Error) -> Error { + Error::Blockchain(e) + } +} + +impl From for Error { + fn from(e: sp_inherents::Error) -> Error { + Error::Inherent(e) + } +} + +impl From for Error { + fn from(e: SubsystemError) -> Error { + Error::Subsystem(e) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Consensus(err) => write!(f, "consensus error: {}", err), + Self::Blockchain(err) => write!(f, "blockchain error: {}", err), + Self::Inherent(err) => write!(f, "inherent error: {:?}", err), + Self::Timeout => write!(f, "timeout: provisioner did not return inherent data after {:?}", PROPOSE_TIMEOUT), + Self::ClosedChannelFromProvisioner(err) => write!(f, "provisioner closed inherent data channel before sending: {}", err), + Self::Subsystem(err) => write!(f, "subsystem error: {:?}", err), + } + } +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Self::Consensus(err) => Some(err), + Self::Blockchain(err) => Some(err), + Self::ClosedChannelFromProvisioner(err) => Some(err), + _ => None + } + } +} diff --git a/node/core/provisioner/Cargo.toml b/node/core/provisioner/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..c391f8e45a77ddd1a67c305c3afc69b83f92b47d --- /dev/null +++ b/node/core/provisioner/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "polkadot-node-core-provisioner" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } +derive_more = "0.99.9" +futures = "0.3.5" +log = "0.4.8" +polkadot-primitives = { path = "../../../primitives" } +polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-node-subsystem-util = { path = "../../subsystem-util" } + +[dev-dependencies] +lazy_static = "1.4" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +tokio = "0.2" diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..24254734289bd9c6f00f6d44158bb56298f5d8c1 --- /dev/null +++ b/node/core/provisioner/src/lib.rs @@ -0,0 +1,876 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The provisioner is responsible for assembling a relay chain block +//! from a set of available parachain candidates of its choice. 
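+//!
+//! In outline: backed candidates and signed availability bitfields arrive as
+//! `ProvisionableData`, are buffered per relay parent by the job below, and are
+//! assembled into the `ProvisionerInherentData` reply when a block author sends
+//! `ProvisionerMessage::RequestInherentData`.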
+ +#![deny(missing_docs)] + +use bitvec::vec::BitVec; +use futures::{ + channel::{mpsc, oneshot}, + prelude::*, +}; +use polkadot_node_subsystem::{ + errors::{ChainApiError, RuntimeApiError}, + messages::{ + AllMessages, ChainApiMessage, ProvisionableData, ProvisionerInherentData, + ProvisionerMessage, RuntimeApiMessage, + }, + metrics::{self, prometheus}, +}; +use polkadot_node_subsystem_util::{ + self as util, + delegated_subsystem, + request_availability_cores, request_persisted_validation_data, JobTrait, ToJobTrait, +}; +use polkadot_primitives::v1::{ + BackedCandidate, BlockNumber, CoreState, Hash, OccupiedCoreAssumption, + SignedAvailabilityBitfield, +}; +use std::{collections::HashMap, convert::TryFrom, pin::Pin}; + +struct ProvisioningJob { + relay_parent: Hash, + sender: mpsc::Sender, + receiver: mpsc::Receiver, + provisionable_data_channels: Vec>, + backed_candidates: Vec, + signed_bitfields: Vec, + metrics: Metrics, +} + +/// This enum defines the messages that the provisioner is prepared to receive. +pub enum ToJob { + /// The provisioner message is the main input to the provisioner. + Provisioner(ProvisionerMessage), + /// This message indicates that the provisioner should shut itself down. + Stop, +} + +impl ToJobTrait for ToJob { + const STOP: Self = Self::Stop; + + fn relay_parent(&self) -> Option { + match self { + Self::Provisioner(pm) => pm.relay_parent(), + Self::Stop => None, + } + } +} + +impl TryFrom for ToJob { + type Error = (); + + fn try_from(msg: AllMessages) -> Result { + match msg { + AllMessages::Provisioner(pm) => Ok(Self::Provisioner(pm)), + _ => Err(()), + } + } +} + +impl From for ToJob { + fn from(pm: ProvisionerMessage) -> Self { + Self::Provisioner(pm) + } +} + +enum FromJob { + ChainApi(ChainApiMessage), + Runtime(RuntimeApiMessage), +} + +impl From for AllMessages { + fn from(from_job: FromJob) -> AllMessages { + match from_job { + FromJob::ChainApi(cam) => AllMessages::ChainApi(cam), + FromJob::Runtime(ram) => AllMessages::RuntimeApi(ram), + } + } +} + +impl TryFrom for FromJob { + type Error = (); + + fn try_from(msg: AllMessages) -> Result { + match msg { + AllMessages::ChainApi(chain) => Ok(FromJob::ChainApi(chain)), + AllMessages::RuntimeApi(runtime) => Ok(FromJob::Runtime(runtime)), + _ => Err(()), + } + } +} + +#[derive(Debug, derive_more::From)] +enum Error { + #[from] + Sending(mpsc::SendError), + #[from] + Util(util::Error), + #[from] + OneshotRecv(oneshot::Canceled), + #[from] + ChainApi(ChainApiError), + #[from] + Runtime(RuntimeApiError), + OneshotSend, +} + +impl JobTrait for ProvisioningJob { + type ToJob = ToJob; + type FromJob = FromJob; + type Error = Error; + type RunArgs = (); + type Metrics = Metrics; + + const NAME: &'static str = "ProvisioningJob"; + + /// Run a job for the parent block indicated + // + // this function is in charge of creating and executing the job's main loop + fn run( + relay_parent: Hash, + _run_args: Self::RunArgs, + metrics: Self::Metrics, + receiver: mpsc::Receiver, + sender: mpsc::Sender, + ) -> Pin> + Send>> { + async move { + let job = ProvisioningJob::new(relay_parent, metrics, sender, receiver); + + // it isn't necessary to break run_loop into its own function, + // but it's convenient to separate the concerns in this way + job.run_loop().await + } + .boxed() + } +} + +impl ProvisioningJob { + pub fn new( + relay_parent: Hash, + metrics: Metrics, + sender: mpsc::Sender, + receiver: mpsc::Receiver, + ) -> Self { + Self { + relay_parent, + sender, + receiver, + provisionable_data_channels: Vec::new(), 
+ backed_candidates: Vec::new(), + signed_bitfields: Vec::new(), + metrics, + } + } + + async fn run_loop(mut self) -> Result<(), Error> { + while let Some(msg) = self.receiver.next().await { + use ProvisionerMessage::{ + ProvisionableData, RequestBlockAuthorshipData, RequestInherentData, + }; + + match msg { + ToJob::Provisioner(RequestInherentData(_, return_sender)) => { + if let Err(err) = send_inherent_data( + self.relay_parent, + &self.signed_bitfields, + &self.backed_candidates, + return_sender, + self.sender.clone(), + ) + .await + { + log::warn!(target: "provisioner", "failed to assemble or send inherent data: {:?}", err); + self.metrics.on_inherent_data_request(false); + } else { + self.metrics.on_inherent_data_request(true); + } + } + ToJob::Provisioner(RequestBlockAuthorshipData(_, sender)) => { + self.provisionable_data_channels.push(sender) + } + ToJob::Provisioner(ProvisionableData(data)) => { + let mut bad_indices = Vec::new(); + for (idx, channel) in self.provisionable_data_channels.iter_mut().enumerate() { + match channel.send(data.clone()).await { + Ok(_) => {} + Err(_) => bad_indices.push(idx), + } + } + self.note_provisionable_data(data); + + // clean up our list of channels by removing the bad indices + // start by reversing it for efficient pop + bad_indices.reverse(); + // Vec::retain would be nicer here, but it doesn't provide + // an easy API for retaining by index, so we re-collect instead. + self.provisionable_data_channels = self + .provisionable_data_channels + .into_iter() + .enumerate() + .filter(|(idx, _)| { + if bad_indices.is_empty() { + return true; + } + let tail = bad_indices[bad_indices.len() - 1]; + let retain = *idx != tail; + if *idx >= tail { + bad_indices.pop(); + } + retain + }) + .map(|(_, item)| item) + .collect(); + } + ToJob::Stop => break, + } + } + + Ok(()) + } + + fn note_provisionable_data(&mut self, provisionable_data: ProvisionableData) { + match provisionable_data { + ProvisionableData::Bitfield(_, signed_bitfield) => { + self.signed_bitfields.push(signed_bitfield) + } + ProvisionableData::BackedCandidate(backed_candidate) => { + self.backed_candidates.push(backed_candidate) + } + _ => {} + } + } +} + +type CoreAvailability = BitVec; + +// The provisioner is the subsystem best suited to choosing which specific +// backed candidates and availability bitfields should be assembled into the +// block. To engage this functionality, a +// `ProvisionerMessage::RequestInherentData` is sent; the response is a set of +// non-conflicting candidates and the appropriate bitfields. Non-conflicting +// means that there are never two distinct parachain candidates included for +// the same parachain and that new parachain candidates cannot be included +// until the previous one either gets declared available or expired. +// +// The main complication here is going to be around handling +// occupied-core-assumptions. We might have candidates that are only +// includable when some bitfields are included. And we might have candidates +// that are not includable when certain bitfields are included. +// +// When we're choosing bitfields to include, the rule should be simple: +// maximize availability. So basically, include all bitfields. And then +// choose a coherent set of candidates along with that. 
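+//
+// As a concrete (hypothetical) illustration of the availability threshold applied by
+// `bitfields_indicate_availability` later in this file: with 6 validators, a core's
+// transverse bitfield must satisfy `3 * ones >= 2 * 6`, i.e. have at least 4 set bits,
+// before the candidate occupying that core is treated as available.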
+async fn send_inherent_data( + relay_parent: Hash, + bitfields: &[SignedAvailabilityBitfield], + candidates: &[BackedCandidate], + return_sender: oneshot::Sender, + mut from_job: mpsc::Sender, +) -> Result<(), Error> { + let availability_cores = request_availability_cores(relay_parent, &mut from_job) + .await? + .await??; + + let bitfields = select_availability_bitfields(&availability_cores, bitfields); + let candidates = select_candidates( + &availability_cores, + &bitfields, + candidates, + relay_parent, + &mut from_job, + ) + .await?; + + return_sender + .send((bitfields, candidates)) + .map_err(|_| Error::OneshotSend)?; + Ok(()) +} + +// in general, we want to pick all the bitfields. However, we have the following constraints: +// +// - not more than one per validator +// - each must correspond to an occupied core +// +// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing availability, +// we pick the one with the greatest number of 1 bits. +// +// note: this does not enforce any sorting precondition on the output; the ordering there will be unrelated +// to the sorting of the input. +fn select_availability_bitfields( + cores: &[CoreState], + bitfields: &[SignedAvailabilityBitfield], +) -> Vec { + let mut fields_by_core: HashMap<_, Vec<_>> = HashMap::new(); + for bitfield in bitfields.iter() { + let core_idx = bitfield.validator_index() as usize; + if let CoreState::Occupied(_) = cores[core_idx] { + fields_by_core + .entry(core_idx) + // there cannot be a value list in field_by_core with len < 1 + .or_default() + .push(bitfield.clone()); + } + } + + let mut out = Vec::with_capacity(fields_by_core.len()); + for (_, core_bitfields) in fields_by_core.iter_mut() { + core_bitfields.sort_by_key(|bitfield| bitfield.payload().0.count_ones()); + out.push( + core_bitfields + .pop() + .expect("every core bitfield has at least 1 member; qed"), + ); + } + + out +} + +// determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. +// +// follow the candidate selection algorithm from the guide +async fn select_candidates( + availability_cores: &[CoreState], + bitfields: &[SignedAvailabilityBitfield], + candidates: &[BackedCandidate], + relay_parent: Hash, + sender: &mut mpsc::Sender, +) -> Result, Error> { + let block_number = get_block_number_under_construction(relay_parent, sender).await?; + + let mut selected_candidates = + Vec::with_capacity(candidates.len().min(availability_cores.len())); + + for (core_idx, core) in availability_cores.iter().enumerate() { + let (scheduled_core, assumption) = match core { + CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free), + CoreState::Occupied(occupied_core) => { + if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) + { + if let Some(ref scheduled_core) = occupied_core.next_up_on_available { + (scheduled_core, OccupiedCoreAssumption::Included) + } else { + continue; + } + } else { + if occupied_core.time_out_at != block_number { + continue; + } + if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { + (scheduled_core, OccupiedCoreAssumption::TimedOut) + } else { + continue; + } + } + } + _ => continue, + }; + + let validation_data = match request_persisted_validation_data( + relay_parent, + scheduled_core.para_id, + assumption, + sender, + ) + .await? + .await?? 
+		{
+			Some(v) => v,
+			None => continue,
+		};
+
+		let computed_validation_data_hash = validation_data.hash();
+
+		// we arbitrarily pick the first of the backed candidates which match the appropriate selection criteria
+		if let Some(candidate) = candidates.iter().find(|backed_candidate| {
+			let descriptor = &backed_candidate.candidate.descriptor;
+			descriptor.para_id == scheduled_core.para_id
+				&& descriptor.persisted_validation_data_hash == computed_validation_data_hash
+		}) {
+			selected_candidates.push(candidate.clone());
+		}
+	}
+
+	Ok(selected_candidates)
+}
+
+// produces a block number 1 higher than that of the relay parent
+// in the event of an invalid `relay_parent`, returns `Ok(0)`
+async fn get_block_number_under_construction(
+	relay_parent: Hash,
+	sender: &mut mpsc::Sender<FromJob>,
+) -> Result<BlockNumber, Error> {
+	let (tx, rx) = oneshot::channel();
+	sender
+		.send(FromJob::ChainApi(ChainApiMessage::BlockNumber(
+			relay_parent,
+			tx,
+		)))
+		.await
+		.map_err(|_| Error::OneshotSend)?;
+	match rx.await? {
+		Ok(Some(n)) => Ok(n + 1),
+		Ok(None) => Ok(0),
+		Err(err) => Err(err.into()),
+	}
+}
+
+// the availability bitfield for a given core is the transpose
+// of a set of signed availability bitfields. It goes like this:
+//
+// - construct a transverse slice along `core_idx`
+// - bitwise-or it with the availability slice
+// - count the 1 bits, compare to the total length; true on 2/3+
+fn bitfields_indicate_availability(
+	core_idx: usize,
+	bitfields: &[SignedAvailabilityBitfield],
+	availability: &CoreAvailability,
+) -> bool {
+	let mut availability = availability.clone();
+	// we need to pre-compute this to avoid a borrow-immutable-while-borrowing-mutable
+	// error in the error message
+	let availability_len = availability.len();
+
+	for bitfield in bitfields {
+		let validator_idx = bitfield.validator_index() as usize;
+		match availability.get_mut(validator_idx) {
+			None => {
+				// in principle, this function might return a `Result` so that we can more clearly express this error condition
+				// however, in practice, that would just push off an error-handling routine which would look a whole lot like this one.
+				// simpler to just handle the error internally here.
+				log::warn!(target: "provisioner", "attempted to set a transverse bit at idx {} which is greater than bitfield size {}", validator_idx, availability_len);
+				return false;
+			}
+			Some(mut bit_mut) => *bit_mut |= bitfield.payload().0[core_idx],
+		}
+	}
+	3 * availability.count_ones() >= 2 * availability.len()
+}
+
+#[derive(Clone)]
+struct MetricsInner {
+	inherent_data_requests: prometheus::CounterVec,
+}
+
+/// Provisioner metrics.
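+///
+/// `None` inside means metrics are disabled (e.g. no Prometheus registry was supplied),
+/// in which case every recording method is a no-op.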
+#[derive(Default, Clone)]
+pub struct Metrics(Option<MetricsInner>);
+
+impl Metrics {
+	fn on_inherent_data_request(&self, succeeded: bool) {
+		if let Some(metrics) = &self.0 {
+			if succeeded {
+				metrics.inherent_data_requests.with_label_values(&["succeeded"]).inc();
+			} else {
+				metrics.inherent_data_requests.with_label_values(&["failed"]).inc();
+			}
+		}
+	}
+}
+
+impl metrics::Metrics for Metrics {
+	fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
+		let metrics = MetricsInner {
+			inherent_data_requests: prometheus::register(
+				prometheus::CounterVec::new(
+					prometheus::Opts::new(
+						"parachain_inherent_data_requests_total",
+						"Number of InherentData requests served by provisioner.",
+					),
+					// a single label name; `with_label_values` above supplies exactly
+					// one value per call ("succeeded" or "failed")
+					&["success"],
+				)?,
+				registry,
+			)?,
+		};
+		Ok(Metrics(Some(metrics)))
+	}
+}
+
+delegated_subsystem!(ProvisioningJob((), Metrics) <- ToJob as ProvisioningSubsystem);
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use bitvec::bitvec;
+	use polkadot_primitives::v1::{OccupiedCore, ScheduledCore};
+
+	pub fn occupied_core(para_id: u32) -> CoreState {
+		CoreState::Occupied(OccupiedCore {
+			para_id: para_id.into(),
+			group_responsible: para_id.into(),
+			next_up_on_available: None,
+			occupied_since: 100_u32,
+			time_out_at: 200_u32,
+			next_up_on_time_out: None,
+			availability: default_bitvec(),
+		})
+	}
+
+	pub fn build_occupied_core<Builder>(para_id: u32, builder: Builder) -> CoreState
+	where
+		Builder: FnOnce(&mut OccupiedCore),
+	{
+		let mut core = match occupied_core(para_id) {
+			CoreState::Occupied(core) => core,
+			_ => unreachable!(),
+		};
+
+		builder(&mut core);
+
+		CoreState::Occupied(core)
+	}
+
+	pub fn default_bitvec() -> CoreAvailability {
+		bitvec![bitvec::order::Lsb0, u8; 0; 32]
+	}
+
+	pub fn scheduled_core(id: u32) -> ScheduledCore {
+		ScheduledCore {
+			para_id: id.into(),
+			..Default::default()
+		}
+	}
+
+	mod select_availability_bitfields {
+		use super::super::*;
+		use super::{default_bitvec, occupied_core};
+		use lazy_static::lazy_static;
+		use polkadot_primitives::v1::{SigningContext, ValidatorIndex, ValidatorPair};
+		use sp_core::crypto::Pair;
+		use std::sync::Mutex;
+
+		lazy_static! {
+			// we can use a normal mutex here, not a futures-aware one, because we don't use any futures-based
+			// concurrency when accessing this. The risk of contention is that multiple tests are run in parallel,
+			// in independent threads, in which case a standard mutex suffices.
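+			// (Each validator index lazily gets at most one generated keypair, which is
+			// then reused, so signatures for the same index stay comparable across calls.)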
+ static ref VALIDATORS: Mutex> = Mutex::new(HashMap::new()); + } + + fn signed_bitfield( + field: CoreAvailability, + validator_idx: ValidatorIndex, + ) -> SignedAvailabilityBitfield { + let mut lock = VALIDATORS.lock().unwrap(); + let validator = lock + .entry(validator_idx) + .or_insert_with(|| ValidatorPair::generate().0); + SignedAvailabilityBitfield::sign( + field.into(), + &>::default(), + validator_idx, + validator, + ) + } + + #[test] + fn not_more_than_one_per_validator() { + let bitvec = default_bitvec(); + + let cores = vec![occupied_core(0), occupied_core(1)]; + + // we pass in three bitfields with two validators + // this helps us check the postcondition that we get two bitfields back, for which the validators differ + let bitfields = vec![ + signed_bitfield(bitvec.clone(), 0), + signed_bitfield(bitvec.clone(), 1), + signed_bitfield(bitvec, 1), + ]; + + let mut selected_bitfields = select_availability_bitfields(&cores, &bitfields); + selected_bitfields.sort_by_key(|bitfield| bitfield.validator_index()); + + assert_eq!(selected_bitfields.len(), 2); + assert_eq!(selected_bitfields[0], bitfields[0]); + // we don't know which of the (otherwise equal) bitfields will be selected + assert!(selected_bitfields[1] == bitfields[1] || selected_bitfields[1] == bitfields[2]); + } + + #[test] + fn each_corresponds_to_an_occupied_core() { + let bitvec = default_bitvec(); + + let cores = vec![CoreState::Free, CoreState::Scheduled(Default::default())]; + + let bitfields = vec![ + signed_bitfield(bitvec.clone(), 0), + signed_bitfield(bitvec.clone(), 1), + signed_bitfield(bitvec, 1), + ]; + + let mut selected_bitfields = select_availability_bitfields(&cores, &bitfields); + selected_bitfields.sort_by_key(|bitfield| bitfield.validator_index()); + + // bitfields not corresponding to occupied cores are not selected + assert!(selected_bitfields.is_empty()); + } + + #[test] + fn more_set_bits_win_conflicts() { + let bitvec_zero = default_bitvec(); + let bitvec_one = { + let mut bitvec = bitvec_zero.clone(); + bitvec.set(0, true); + bitvec + }; + + let cores = vec![occupied_core(0)]; + + let bitfields = vec![ + signed_bitfield(bitvec_zero, 0), + signed_bitfield(bitvec_one.clone(), 0), + ]; + + // this test is probablistic: chances are excellent that it does what it claims to. + // it cannot fail unless things are broken. + // however, there is a (very small) chance that it passes when things are broken. 
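+        // with two otherwise-conflicting bitfields from the same validator, a broken
+        // (random) selection would still pick the fuller one half the time; repeating
+        // 64 times drives the false-pass probability down to roughly 2^-64.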
+ for _ in 0..64 { + let selected_bitfields = select_availability_bitfields(&cores, &bitfields); + assert_eq!(selected_bitfields.len(), 1); + assert_eq!(selected_bitfields[0].payload().0, bitvec_one); + } + } + } + + mod select_candidates { + use super::super::*; + use super::{build_occupied_core, default_bitvec, occupied_core, scheduled_core}; + use polkadot_node_subsystem::messages::RuntimeApiRequest::{ + AvailabilityCores, PersistedValidationData as PersistedValidationDataReq, + }; + use polkadot_primitives::v1::{ + BlockNumber, CandidateDescriptor, CommittedCandidateReceipt, PersistedValidationData, + }; + use FromJob::{ChainApi, Runtime}; + + const BLOCK_UNDER_PRODUCTION: BlockNumber = 128; + + fn test_harness( + overseer_factory: OverseerFactory, + test_factory: TestFactory, + ) where + OverseerFactory: FnOnce(mpsc::Receiver) -> Overseer, + Overseer: Future, + TestFactory: FnOnce(mpsc::Sender) -> Test, + Test: Future, + { + let (tx, rx) = mpsc::channel(64); + let overseer = overseer_factory(rx); + let test = test_factory(tx); + + futures::pin_mut!(overseer, test); + + tokio::runtime::Runtime::new() + .unwrap() + .block_on(future::select(overseer, test)); + } + + // For test purposes, we always return this set of availability cores: + // + // [ + // 0: Free, + // 1: Scheduled(default), + // 2: Occupied(no next_up set), + // 3: Occupied(next_up_on_available set but not available), + // 4: Occupied(next_up_on_available set and available), + // 5: Occupied(next_up_on_time_out set but not timeout), + // 6: Occupied(next_up_on_time_out set and timeout but available), + // 7: Occupied(next_up_on_time_out set and timeout and not available), + // 8: Occupied(both next_up set, available), + // 9: Occupied(both next_up set, not available, no timeout), + // 10: Occupied(both next_up set, not available, timeout), + // 11: Occupied(next_up_on_available and available, but different successor para_id) + // ] + fn mock_availability_cores() -> Vec { + use std::ops::Not; + use CoreState::{Free, Scheduled}; + + vec![ + // 0: Free, + Free, + // 1: Scheduled(default), + Scheduled(scheduled_core(1)), + // 2: Occupied(no next_up set), + occupied_core(2), + // 3: Occupied(next_up_on_available set but not available), + build_occupied_core(3, |core| { + core.next_up_on_available = Some(scheduled_core(3)); + }), + // 4: Occupied(next_up_on_available set and available), + build_occupied_core(4, |core| { + core.next_up_on_available = Some(scheduled_core(4)); + core.availability = core.availability.clone().not(); + }), + // 5: Occupied(next_up_on_time_out set but not timeout), + build_occupied_core(5, |core| { + core.next_up_on_time_out = Some(scheduled_core(5)); + }), + // 6: Occupied(next_up_on_time_out set and timeout but available), + build_occupied_core(6, |core| { + core.next_up_on_time_out = Some(scheduled_core(6)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.availability = core.availability.clone().not(); + }), + // 7: Occupied(next_up_on_time_out set and timeout and not available), + build_occupied_core(7, |core| { + core.next_up_on_time_out = Some(scheduled_core(7)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + }), + // 8: Occupied(both next_up set, available), + build_occupied_core(8, |core| { + core.next_up_on_available = Some(scheduled_core(8)); + core.next_up_on_time_out = Some(scheduled_core(8)); + core.availability = core.availability.clone().not(); + }), + // 9: Occupied(both next_up set, not available, no timeout), + build_occupied_core(9, |core| { + core.next_up_on_available = 
Some(scheduled_core(9)); + core.next_up_on_time_out = Some(scheduled_core(9)); + }), + // 10: Occupied(both next_up set, not available, timeout), + build_occupied_core(10, |core| { + core.next_up_on_available = Some(scheduled_core(10)); + core.next_up_on_time_out = Some(scheduled_core(10)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + }), + // 11: Occupied(next_up_on_available and available, but different successor para_id) + build_occupied_core(11, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + }), + ] + } + + async fn mock_overseer(mut receiver: mpsc::Receiver) { + use ChainApiMessage::BlockNumber; + use RuntimeApiMessage::Request; + + while let Some(from_job) = receiver.next().await { + match from_job { + ChainApi(BlockNumber(_relay_parent, tx)) => { + tx.send(Ok(Some(BLOCK_UNDER_PRODUCTION - 1))).unwrap() + } + Runtime(Request( + _parent_hash, + PersistedValidationDataReq(_para_id, _assumption, tx), + )) => tx.send(Ok(Some(Default::default()))).unwrap(), + Runtime(Request(_parent_hash, AvailabilityCores(tx))) => { + tx.send(Ok(mock_availability_cores())).unwrap() + } + // non-exhaustive matches are fine for testing + _ => unimplemented!(), + } + } + } + + #[test] + fn handles_overseer_failure() { + let overseer = |rx: mpsc::Receiver| async move { + // drop the receiver so it closes and the sender can't send, then just sleep long enough that + // this is almost certainly not the first of the two futures to complete + std::mem::drop(rx); + tokio::time::delay_for(std::time::Duration::from_secs(1)).await; + }; + + let test = |mut tx: mpsc::Sender| async move { + // wait so that the overseer can drop the rx before we attempt to send + tokio::time::delay_for(std::time::Duration::from_millis(50)).await; + let result = select_candidates(&[], &[], &[], Default::default(), &mut tx).await; + println!("{:?}", result); + assert!(std::matches!(result, Err(Error::OneshotSend))); + }; + + test_harness(overseer, test); + } + + #[test] + fn can_succeed() { + test_harness(mock_overseer, |mut tx: mpsc::Sender| async move { + let result = select_candidates(&[], &[], &[], Default::default(), &mut tx).await; + println!("{:?}", result); + assert!(result.is_ok()); + }) + } + + // this tests that only the appropriate candidates get selected. 
+ // To accomplish this, we supply a candidate list containing one candidate per possible core; + // the candidate selection algorithm must filter them to the appropriate set + #[test] + fn selects_correct_candidates() { + let mock_cores = mock_availability_cores(); + + let empty_hash = PersistedValidationData::::default().hash(); + + let candidate_template = BackedCandidate { + candidate: CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + persisted_validation_data_hash: empty_hash, + ..Default::default() + }, + ..Default::default() + }, + validity_votes: Vec::new(), + validator_indices: default_bitvec(), + }; + + let candidates: Vec<_> = std::iter::repeat(candidate_template) + .take(mock_cores.len()) + .enumerate() + .map(|(idx, mut candidate)| { + candidate.candidate.descriptor.para_id = idx.into(); + candidate + }) + .cycle() + .take(mock_cores.len() * 3) + .enumerate() + .map(|(idx, mut candidate)| { + if idx < mock_cores.len() { + // first go-around: use candidates which should work + candidate + } else if idx < mock_cores.len() * 2 { + // for the second repetition of the candidates, give them the wrong hash + candidate.candidate.descriptor.persisted_validation_data_hash + = Default::default(); + candidate + } else { + // third go-around: right hash, wrong para_id + candidate.candidate.descriptor.para_id = idx.into(); + candidate + } + }) + .collect(); + + // why those particular indices? see the comments on mock_availability_cores() + let expected_candidates: Vec<_> = [1, 4, 7, 8, 10] + .iter() + .map(|&idx| candidates[idx].clone()) + .collect(); + + test_harness(mock_overseer, |mut tx: mpsc::Sender| async move { + let result = + select_candidates(&mock_cores, &[], &candidates, Default::default(), &mut tx) + .await; + + if result.is_err() { + println!("{:?}", result); + } + assert_eq!(result.unwrap(), expected_candidates); + }) + } + } +} diff --git a/node/core/runtime-api/Cargo.toml b/node/core/runtime-api/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..229366627a36e0c052eda056cda29ad2d248b057 --- /dev/null +++ b/node/core/runtime-api/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "polkadot-node-core-runtime-api" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.5" +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } + +polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } + +[dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +futures = { version = "0.3.5", features = ["thread-pool"] } +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..3c05b71dd7000405b8e711c1879f90feed2f2bc3 --- /dev/null +++ b/node/core/runtime-api/src/lib.rs @@ -0,0 +1,539 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implements the Runtime API Subsystem +//! +//! This provides a clean, ownerless wrapper around the parachain-related runtime APIs. This crate +//! can also be used to cache responses from heavy runtime APIs. + +use polkadot_subsystem::{ + Subsystem, SpawnedSubsystem, SubsystemResult, SubsystemContext, + FromOverseer, OverseerSignal, + metrics::{self, prometheus}, +}; +use polkadot_subsystem::messages::{ + RuntimeApiMessage, RuntimeApiRequest as Request, +}; +use polkadot_subsystem::errors::RuntimeApiError; +use polkadot_primitives::v1::{Block, BlockId, Hash, ParachainHost}; + +use sp_api::{ProvideRuntimeApi}; + +use futures::prelude::*; + +/// The `RuntimeApiSubsystem`. See module docs for more details. +pub struct RuntimeApiSubsystem { + client: Client, + metrics: Metrics, +} + +impl RuntimeApiSubsystem { + /// Create a new Runtime API subsystem wrapping the given client and metrics. + pub fn new(client: Client, metrics: Metrics) -> Self { + RuntimeApiSubsystem { client, metrics } + } +} + +impl Subsystem for RuntimeApiSubsystem where + Client: ProvideRuntimeApi + Send + 'static, + Client::Api: ParachainHost, + Context: SubsystemContext +{ + type Metrics = Metrics; + + fn start(self, ctx: Context) -> SpawnedSubsystem { + SpawnedSubsystem { + future: run(ctx, self).map(|_| ()).boxed(), + name: "runtime-api-subsystem", + } + } +} + +async fn run( + mut ctx: impl SubsystemContext, + subsystem: RuntimeApiSubsystem, +) -> SubsystemResult<()> where + Client: ProvideRuntimeApi, + Client::Api: ParachainHost, +{ + loop { + match ctx.recv().await? { + FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => {}, + FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {}, + FromOverseer::Communication { msg } => match msg { + RuntimeApiMessage::Request(relay_parent, request) => make_runtime_api_request( + &subsystem.client, + &subsystem.metrics, + relay_parent, + request, + ), + } + } + } +} + +fn make_runtime_api_request( + client: &Client, + metrics: &Metrics, + relay_parent: Hash, + request: Request, +) where + Client: ProvideRuntimeApi, + Client::Api: ParachainHost, +{ + macro_rules! 
query { + ($api_name:ident ($($param:expr),*), $sender:expr) => {{ + let sender = $sender; + let api = client.runtime_api(); + let res = api.$api_name(&BlockId::Hash(relay_parent), $($param),*) + .map_err(|e| RuntimeApiError::from(format!("{:?}", e))); + metrics.on_request(res.is_ok()); + let _ = sender.send(res); + }} + } + + match request { + Request::Validators(sender) => query!(validators(), sender), + Request::ValidatorGroups(sender) => query!(validator_groups(), sender), + Request::AvailabilityCores(sender) => query!(availability_cores(), sender), + Request::PersistedValidationData(para, assumption, sender) => + query!(persisted_validation_data(para, assumption), sender), + Request::FullValidationData(para, assumption, sender) => + query!(full_validation_data(para, assumption), sender), + Request::SessionIndexForChild(sender) => query!(session_index_for_child(), sender), + Request::ValidationCode(para, assumption, sender) => + query!(validation_code(para, assumption), sender), + Request::CandidatePendingAvailability(para, sender) => + query!(candidate_pending_availability(para), sender), + Request::CandidateEvents(sender) => query!(candidate_events(), sender), + } +} + +#[derive(Clone)] +struct MetricsInner { + chain_api_requests: prometheus::CounterVec, +} + +/// Runtime API metrics. +#[derive(Default, Clone)] +pub struct Metrics(Option); + +impl Metrics { + fn on_request(&self, succeeded: bool) { + if let Some(metrics) = &self.0 { + if succeeded { + metrics.chain_api_requests.with_label_values(&["succeeded"]).inc(); + } else { + metrics.chain_api_requests.with_label_values(&["failed"]).inc(); + } + } + } +} + +impl metrics::Metrics for Metrics { + fn try_register(registry: &prometheus::Registry) -> Result { + let metrics = MetricsInner { + chain_api_requests: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "parachain_runtime_api_requests_total", + "Number of Runtime API requests served.", + ), + &["succeeded", "failed"], + )?, + registry, + )?, + }; + Ok(Metrics(Some(metrics))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use polkadot_primitives::v1::{ + ValidatorId, ValidatorIndex, GroupRotationInfo, CoreState, PersistedValidationData, + Id as ParaId, OccupiedCoreAssumption, ValidationData, SessionIndex, ValidationCode, + CommittedCandidateReceipt, CandidateEvent, + }; + use polkadot_node_subsystem_test_helpers as test_helpers; + use sp_core::testing::TaskExecutor; + + use std::collections::HashMap; + use futures::channel::oneshot; + + #[derive(Default, Clone)] + struct MockRuntimeApi { + validators: Vec, + validator_groups: Vec>, + availability_cores: Vec, + validation_data: HashMap, + session_index_for_child: SessionIndex, + validation_code: HashMap, + candidate_pending_availability: HashMap, + candidate_events: Vec, + } + + impl ProvideRuntimeApi for MockRuntimeApi { + type Api = Self; + + fn runtime_api<'a>(&'a self) -> sp_api::ApiRef<'a, Self::Api> { + self.clone().into() + } + } + + sp_api::mock_impl_runtime_apis! 
{ + impl ParachainHost for MockRuntimeApi { + type Error = String; + + fn validators(&self) -> Vec { + self.validators.clone() + } + + fn validator_groups(&self) -> (Vec>, GroupRotationInfo) { + ( + self.validator_groups.clone(), + GroupRotationInfo { + session_start_block: 1, + group_rotation_frequency: 100, + now: 10, + }, + ) + } + + fn availability_cores(&self) -> Vec { + self.availability_cores.clone() + } + + fn persisted_validation_data( + &self, + para: ParaId, + _assumption: OccupiedCoreAssumption, + ) -> Option { + self.validation_data.get(¶).map(|l| l.persisted.clone()) + } + + fn full_validation_data( + &self, + para: ParaId, + _assumption: OccupiedCoreAssumption, + ) -> Option { + self.validation_data.get(¶).map(|l| l.clone()) + } + + fn session_index_for_child(&self) -> SessionIndex { + self.session_index_for_child.clone() + } + + fn validation_code( + &self, + para: ParaId, + _assumption: OccupiedCoreAssumption, + ) -> Option { + self.validation_code.get(¶).map(|c| c.clone()) + } + + fn candidate_pending_availability( + &self, + para: ParaId, + ) -> Option { + self.candidate_pending_availability.get(¶).map(|c| c.clone()) + } + + fn candidate_events(&self) -> Vec { + self.candidate_events.clone() + } + } + } + + #[test] + fn requests_validators() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let runtime_api = MockRuntimeApi::default(); + let relay_parent = [1; 32].into(); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request(relay_parent, Request::Validators(tx)) + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), runtime_api.validators); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn requests_validator_groups() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let runtime_api = MockRuntimeApi::default(); + let relay_parent = [1; 32].into(); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request(relay_parent, Request::ValidatorGroups(tx)) + }).await; + + assert_eq!(rx.await.unwrap().unwrap().0, runtime_api.validator_groups); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn requests_availability_cores() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let runtime_api = MockRuntimeApi::default(); + let relay_parent = [1; 32].into(); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request(relay_parent, Request::AvailabilityCores(tx)) + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), runtime_api.availability_cores); + + 
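+
+            // Conclude makes the subsystem's `run` loop return, so the joined
+            // subsystem future can complete instead of hanging the test.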
ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn requests_persisted_validation_data() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let mut runtime_api = MockRuntimeApi::default(); + let relay_parent = [1; 32].into(); + let para_a = 5.into(); + let para_b = 6.into(); + + runtime_api.validation_data.insert(para_a, Default::default()); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::PersistedValidationData(para_a, OccupiedCoreAssumption::Included, tx) + ), + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), Some(Default::default())); + + let (tx, rx) = oneshot::channel(); + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::PersistedValidationData(para_b, OccupiedCoreAssumption::Included, tx) + ), + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), None); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn requests_full_validation_data() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let mut runtime_api = MockRuntimeApi::default(); + let relay_parent = [1; 32].into(); + let para_a = 5.into(); + let para_b = 6.into(); + + runtime_api.validation_data.insert(para_a, Default::default()); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::FullValidationData(para_a, OccupiedCoreAssumption::Included, tx) + ), + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), Some(Default::default())); + + let (tx, rx) = oneshot::channel(); + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::FullValidationData(para_b, OccupiedCoreAssumption::Included, tx) + ), + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), None); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn requests_session_index_for_child() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let runtime_api = MockRuntimeApi::default(); + let relay_parent = [1; 32].into(); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request(relay_parent, Request::SessionIndexForChild(tx)) + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), runtime_api.session_index_for_child); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn 
requests_validation_code() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let mut runtime_api = MockRuntimeApi::default(); + let relay_parent = [1; 32].into(); + let para_a = 5.into(); + let para_b = 6.into(); + + runtime_api.validation_code.insert(para_a, Default::default()); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::ValidationCode(para_a, OccupiedCoreAssumption::Included, tx) + ), + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), Some(Default::default())); + + let (tx, rx) = oneshot::channel(); + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::ValidationCode(para_b, OccupiedCoreAssumption::Included, tx) + ), + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), None); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn requests_candidate_pending_availability() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let mut runtime_api = MockRuntimeApi::default(); + let relay_parent = [1; 32].into(); + let para_a = 5.into(); + let para_b = 6.into(); + + runtime_api.candidate_pending_availability.insert(para_a, Default::default()); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::CandidatePendingAvailability(para_a, tx), + ) + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), Some(Default::default())); + + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::CandidatePendingAvailability(para_b, tx), + ) + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), None); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn requests_candidate_events() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let runtime_api = MockRuntimeApi::default(); + let relay_parent = [1; 32].into(); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request(relay_parent, Request::CandidateEvents(tx)) + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), runtime_api.candidate_events); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } +} diff --git a/node/messages/Cargo.toml b/node/messages/Cargo.toml deleted file mode 100644 index 9edb5a0519876b3aecfe907fefe3bcca999e2e3b..0000000000000000000000000000000000000000 --- a/node/messages/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = 
"polkadot-node-messages" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -description = "Message types used by Subsystems" - -[dependencies] -polkadot-primitives = { path = "../../primitives" } -polkadot-statement-table = { path = "../../statement-table" } -polkadot-node-primitives = { path = "../primitives" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -futures = "0.3.5" diff --git a/node/messages/src/lib.rs b/node/messages/src/lib.rs deleted file mode 100644 index 3a413f2c67bbdcc951ed0f820061c4061cad321e..0000000000000000000000000000000000000000 --- a/node/messages/src/lib.rs +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Message types for the overseer and subsystems. -//! -//! These messages are intended to define the protocol by which different subsystems communicate with each -//! other and signals that they receive from an overseer to coordinate their work. -//! This is intended for use with the `polkadot-overseer` crate. -//! -//! Subsystems' APIs are defined separately from their implementation, leading to easier mocking. - -use futures::channel::{mpsc, oneshot}; - -use sc_network::{ObservedRole, ReputationChange, PeerId, config::ProtocolId}; -use polkadot_primitives::{BlockNumber, Hash, Signature}; -use polkadot_primitives::parachain::{ - AbridgedCandidateReceipt, PoVBlock, ErasureChunk, BackedCandidate, Id as ParaId, - SignedAvailabilityBitfield, SigningContext, ValidatorId, ValidationCode, ValidatorIndex, -}; -use polkadot_node_primitives::{ - MisbehaviorReport, SignedFullStatement, -}; - -/// Signals sent by an overseer to a subsystem. -#[derive(PartialEq, Clone, Debug)] -pub enum OverseerSignal { - /// `Subsystem` should start working on block-based work, given by the relay-chain block hash. - StartWork(Hash), - /// `Subsystem` should stop working on block-based work specified by the relay-chain block hash. - StopWork(Hash), - /// Conclude the work of the `Overseer` and all `Subsystem`s. - Conclude, -} - -/// A notification of a new backed candidate. -#[derive(Debug)] -pub struct NewBackedCandidate(pub BackedCandidate); - -/// Messages received by the Candidate Selection subsystem. -#[derive(Debug)] -pub enum CandidateSelectionMessage { - /// We recommended a particular candidate to be seconded, but it was invalid; penalize the collator. - /// The hash is the relay parent. - Invalid(Hash, AbridgedCandidateReceipt), -} - -/// Messages received by the Candidate Backing subsystem. -#[derive(Debug)] -pub enum CandidateBackingMessage { - /// Registers a stream listener for updates to the set of backable candidates that could be backed - /// in a child of the given relay-parent, referenced by its hash. 
- RegisterBackingWatcher(Hash, mpsc::Sender), - /// Note that the Candidate Backing subsystem should second the given candidate in the context of the - /// given relay-parent (ref. by hash). This candidate must be validated. - Second(Hash, AbridgedCandidateReceipt), - /// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated - /// to a broader check by Misbehavior Arbitration. Agreements are simply tallied until a quorum is reached. - Statement(Hash, SignedFullStatement), -} - -/// Blanket error for validation failing. -#[derive(Debug)] -pub struct ValidationFailed; - -/// Messages received by the Validation subsystem -#[derive(Debug)] -pub enum CandidateValidationMessage { - /// Validate a candidate, sending a side-channel response of valid or invalid. - /// - /// Provide the relay-parent in whose context this should be validated, the full candidate receipt, - /// and the PoV. - Validate( - Hash, - AbridgedCandidateReceipt, - PoVBlock, - oneshot::Sender>, - ), -} - -/// Chain heads. -/// -/// Up to `N` (5?) chain heads. -pub struct View(pub Vec); - -/// Events from network. -pub enum NetworkBridgeEvent { - /// A peer has connected. - PeerConnected(PeerId, ObservedRole), - - /// A peer has disconnected. - PeerDisconnected(PeerId), - - /// Peer has sent a message. - PeerMessage(PeerId, Vec), - - /// Peer's `View` has changed. - PeerViewChange(PeerId, View), - - /// Our `View` has changed. - OurViewChange(View), -} - -/// Messages received by the network bridge subsystem. -pub enum NetworkBridgeSubsystemMessage { - /// Register an event producer on startup. - RegisterEventProducer(ProtocolId, fn(NetworkBridgeEvent) -> AllMessages), - - /// Report a peer for their actions. - ReportPeer(PeerId, ReputationChange), - - /// Send a message to multiple peers. - SendMessage(Vec, ProtocolId, Vec), -} - -/// Availability Distribution Message. -pub enum AvailabilityDistributionMessage { - /// Distribute an availability chunk to other validators. - DistributeChunk(Hash, ErasureChunk), - - /// Fetch an erasure chunk from networking by candidate hash and chunk index. - FetchChunk(Hash, u32), - - /// Event from the network bridge. - NetworkBridgeUpdate(NetworkBridgeEvent), -} - -/// Bitfield distribution message. -pub enum BitfieldDistributionMessage { - /// Distribute a bitfield via gossip to other validators. - DistributeBitfield(Hash, SignedAvailabilityBitfield), - - /// Event from the network bridge. - NetworkBridgeUpdate(NetworkBridgeEvent), -} - -/// Availability store subsystem message. -pub enum AvailabilityStoreMessage { - /// Query a `PoVBlock` from the AV store. - QueryPoV(Hash, oneshot::Sender>), - - /// Query an `ErasureChunk` from the AV store. - QueryChunk(Hash, ValidatorIndex, oneshot::Sender), - - /// Store an `ErasureChunk` in the AV store. - StoreChunk(Hash, ValidatorIndex, ErasureChunk), -} - -/// A request to the Runtime API subsystem. -pub enum RuntimeApiRequest { - /// Get the current validator set. - Validators(oneshot::Sender>), - /// Get a signing context for bitfields and statements. - SigningContext(oneshot::Sender), - /// Get the validation code for a specific para, assuming execution under given block number, and - /// an optional block number representing an intermediate parablock executed in the context of - /// that block. - ValidationCode(ParaId, BlockNumber, Option, oneshot::Sender), -} - -/// A message to the Runtime API subsystem. 
-pub enum RuntimeApiMessage { - /// Make a request of the runtime API against the post-state of the given relay-parent. - Request(Hash, RuntimeApiRequest), -} - -/// Statement distribution message. -pub enum StatementDistributionMessage { - /// We have originated a signed statement in the context of - /// given relay-parent hash and it should be distributed to other validators. - Share(Hash, SignedFullStatement), -} - -/// This data becomes intrinsics or extrinsics which should be included in a future relay chain block. -pub enum ProvisionableData { - /// This bitfield indicates the availability of various candidate blocks. - Bitfield(Hash, SignedAvailabilityBitfield), - /// The Candidate Backing subsystem believes that this candidate is valid, pending availability. - BackedCandidate(BackedCandidate), - /// Misbehavior reports are self-contained proofs of validator misbehavior. - MisbehaviorReport(Hash, MisbehaviorReport), - /// Disputes trigger a broad dispute resolution process. - Dispute(Hash, Signature), -} - -/// Message to the Provisioner. -/// -/// In all cases, the Hash is that of the relay parent. -pub enum ProvisionerMessage { - /// This message allows potential block authors to be kept updated with all new authorship data - /// as it becomes available. - RequestBlockAuthorshipData(Hash, mpsc::Sender), - /// This data should become part of a relay chain block - ProvisionableData(ProvisionableData), -} - -/// A message type tying together all message types that are used across Subsystems. -#[derive(Debug)] -pub enum AllMessages { - /// Message for the validation subsystem. - CandidateValidation(CandidateValidationMessage), - /// Message for the candidate backing subsystem. - CandidateBacking(CandidateBackingMessage), -} - -/// A message type that a subsystem receives from an overseer. -/// It wraps signals from an overseer and messages that are circulating -/// between subsystems. -/// -/// It is generic over over the message type `M` that a particular `Subsystem` may use. -#[derive(Debug)] -pub enum FromOverseer { - /// Signal from the `Overseer`. - Signal(OverseerSignal), - - /// Some other `Subsystem`'s message. - Communication { - msg: M, - }, -} diff --git a/node/network/README.md b/node/network/README.md index 64f0f11af529814d449e7db0692d168ed7c4f12b..e035485b85ec8cbf854251d870960ff36500e10e 100644 --- a/node/network/README.md +++ b/node/network/README.md @@ -1 +1 @@ -Stub - This folder will hold networking subsystem implementations, each with their own crate. +This folder holds all networking subsystem implementations, each with their own crate. 
diff --git a/node/network/availability-distribution/Cargo.toml b/node/network/availability-distribution/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..18c1769a52b2cbf8d57c2134f69d353e591c3b67
--- /dev/null
+++ b/node/network/availability-distribution/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+name = "polkadot-availability-distribution"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+
+[dependencies]
+futures = "0.3.5"
+log = "0.4.11"
+streamunordered = "0.5.1"
+codec = { package="parity-scale-codec", version = "1.3.4", features = ["std"] }
+polkadot-primitives = { path = "../../../primitives" }
+polkadot-erasure-coding = { path = "../../../erasure-coding" }
+polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+polkadot-network-bridge = { path = "../../network/bridge" }
+polkadot-node-network-protocol = { path = "../../network/protocol" }
+sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+derive_more = "0.99.9"
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
+
+[dev-dependencies]
+polkadot-subsystem-testhelpers = { package = "polkadot-node-subsystem-test-helpers", path = "../../subsystem-test-helpers" }
+bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
+sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
+parking_lot = "0.11.0"
+futures-timer = "3.0.2"
+env_logger = "0.7.1"
+assert_matches = "1.3.0"
+smallvec = "1"
diff --git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..32b61f5817f3a9f549d489fe7f5511c2694e1494
--- /dev/null
+++ b/node/network/availability-distribution/src/lib.rs
@@ -0,0 +1,1079 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! The availability distribution subsystem
+//!
+//! Transforms `AvailableData` into erasure chunks, which are distributed to peers
+//! who are interested in the relevant candidates.
+//! Gossip messages received from other peers are verified and gossiped to interested
+//! peers. Verified in this context means that the erasure chunk's embedded Merkle
+//! proof is checked.
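+//!
+//! In rough form (a sketch, not the exact code; see `process_incoming_peer_message`
+//! below for the real logic), the check applied to every gossiped chunk is:
+//!
+//! ```ignore
+//! let anticipated_hash = branch_hash(&erasure_root, &chunk.proof, chunk.index as usize)?;
+//! if anticipated_hash != BlakeTwo256::hash(&chunk.chunk) {
+//!     // report the sending peer and drop the message
+//! }
+//! ```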
+
+use codec::{Decode, Encode};
+use futures::{channel::oneshot, FutureExt};
+
+use keystore::KeyStorePtr;
+use sp_core::{
+    crypto::Public,
+    traits::BareCryptoStore,
+};
+use sc_keystore as keystore;
+
+use log::{trace, warn};
+use polkadot_erasure_coding::branch_hash;
+use polkadot_primitives::v1::{
+    PARACHAIN_KEY_TYPE_ID,
+    BlakeTwo256, CommittedCandidateReceipt, CoreState, ErasureChunk,
+    Hash, HashT, Id as ParaId,
+    ValidatorId, ValidatorIndex, SessionIndex,
+};
+use polkadot_subsystem::messages::{
+    AllMessages, AvailabilityDistributionMessage, NetworkBridgeMessage, RuntimeApiMessage,
+    RuntimeApiRequest, AvailabilityStoreMessage, ChainApiMessage,
+};
+use polkadot_subsystem::{
+    errors::{ChainApiError, RuntimeApiError},
+    ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem,
+    SubsystemContext, SubsystemError,
+};
+use polkadot_node_network_protocol::{
+    v1 as protocol_v1, View, ReputationChange as Rep, PeerId,
+    NetworkBridgeEvent,
+};
+use std::collections::{HashMap, HashSet};
+use std::io;
+use std::iter;
+
+const TARGET: &'static str = "avad";
+
+#[derive(Debug, derive_more::From)]
+enum Error {
+    #[from]
+    Erasure(polkadot_erasure_coding::Error),
+    #[from]
+    Io(io::Error),
+    #[from]
+    Oneshot(oneshot::Canceled),
+    #[from]
+    Subsystem(SubsystemError),
+    #[from]
+    RuntimeApi(RuntimeApiError),
+    #[from]
+    ChainApi(ChainApiError),
+}
+
+type Result<T> = std::result::Result<T, Error>;
+
+const COST_MERKLE_PROOF_INVALID: Rep = Rep::new(-100, "Merkle proof was invalid");
+const COST_NOT_A_LIVE_CANDIDATE: Rep = Rep::new(-51, "Candidate is not live");
+const COST_PEER_DUPLICATE_MESSAGE: Rep = Rep::new(-500, "Peer sent identical messages");
+const BENEFIT_VALID_MESSAGE_FIRST: Rep = Rep::new(15, "Valid message with new information");
+const BENEFIT_VALID_MESSAGE: Rep = Rep::new(10, "Valid message");
+
+/// A checked erasure chunk for a live candidate, as distributed to other peers.
+#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq, Hash)]
+pub struct AvailabilityGossipMessage {
+    /// Anchor hash of the candidate the `ErasureChunk` is associated to.
+    pub candidate_hash: Hash,
+    /// The erasure chunk, an encoded piece of the `AvailableData`.
+    pub erasure_chunk: ErasureChunk,
+}
+
+/// Data used to track information of peers and relay parents the
+/// overseer ordered us to work on.
+#[derive(Default, Clone, Debug)]
+struct ProtocolState {
+    /// Track all active peers and their views
+    /// to determine what is relevant to them.
+    peer_views: HashMap<PeerId, View>,
+
+    /// Our own view.
+    view: View,
+
+    /// Caches a mapping of relay parents or ancestors to live candidate receipts.
+    /// Allows fast intersection of live candidates with views and consecutive unioning.
+    /// Maps relay parent / ancestor -> live candidate receipts + their hashes.
+    receipts: HashMap<Hash, HashSet<(Hash, CommittedCandidateReceipt)>>,
+
+    /// Allows reverse caching of view checks.
+    /// Maps candidate hash -> relay parent for extracting meta information from `PerRelayParent`.
+    /// Note that the presence of this is not sufficient to determine if deletion is OK, i.e.
+    /// two histories could cover this.
+    reverse: HashMap<Hash, Hash>,
+
+    /// Keeps track of which candidate receipts are required due to ancestors of which relay parents
+    /// of our view.
+    /// Maps ancestor -> relay parents in view.
+    ancestry: HashMap<Hash, HashSet<Hash>>,
+
+    /// Track things needed to start and stop work on a particular relay parent.
+    per_relay_parent: HashMap<Hash, PerRelayParent>,
+
+    /// Track data that is specific to a candidate.
+    per_candidate: HashMap<Hash, PerCandidate>,
+}
+
+#[derive(Debug, Clone, Default)]
+struct PerCandidate {
+    /// A candidate and a set of known erasure chunks, in the form of messages to be gossiped /
+    /// distributed if a peer's view wants that.
+    /// This is _across_ peers and not specific to a particular one.
+    /// Maps erasure chunk index -> gossip message.
+    message_vault: HashMap<u32, AvailabilityGossipMessage>,
+
+    /// Track received candidate hashes and chunk indices from peers.
+    received_messages: HashMap<PeerId, HashSet<(Hash, ValidatorIndex)>>,
+
+    /// Track already sent candidate hashes and erasure chunk indices per peer.
+    sent_messages: HashMap<PeerId, HashSet<(Hash, ValidatorIndex)>>,
+
+    /// The set of validators.
+    validators: Vec<ValidatorId>,
+
+    /// If this node is a validator, note the index in the validator set.
+    validator_index: Option<ValidatorIndex>,
+}
+
+#[derive(Debug, Clone, Default)]
+struct PerRelayParent {
+    /// Set of `K` ancestors for this relay parent.
+    ancestors: Vec<Hash>,
+}
+
+impl ProtocolState {
+    /// Collect the relay parents' ancestors, including the relay parents themselves.
+    fn extend_with_ancestors<'a>(
+        &'a self,
+        relay_parents: impl IntoIterator<Item = &'a Hash> + 'a,
+    ) -> HashSet<Hash> {
+        relay_parents
+            .into_iter()
+            .map(|relay_parent| {
+                self.per_relay_parent
+                    .get(relay_parent)
+                    .into_iter()
+                    .map(|per_relay_parent| per_relay_parent.ancestors.iter().cloned())
+                    .flatten()
+                    .chain(iter::once(*relay_parent))
+            })
+            .flatten()
+            .collect::<HashSet<Hash>>()
+    }
+
+    /// Union all cached entries for the given relay parents and their ancestors.
+    /// Ignores all non-existent relay parents, so this can be used directly with a peer's view.
+    /// Returns a map from candidate hash -> receipt.
+    fn cached_live_candidates_unioned<'a>(
+        &'a self,
+        relay_parents: impl IntoIterator<Item = &'a Hash> + 'a,
+    ) -> HashMap<Hash, CommittedCandidateReceipt> {
+        let relay_parents_and_ancestors = self.extend_with_ancestors(relay_parents);
+        relay_parents_and_ancestors
+            .into_iter()
+            .filter_map(|relay_parent_or_ancestor| self.receipts.get(&relay_parent_or_ancestor))
+            .map(|receipt_set| receipt_set.into_iter())
+            .flatten()
+            .map(|(receipt_hash, receipt)| (receipt_hash.clone(), receipt.clone()))
+            .collect::<HashMap<Hash, CommittedCandidateReceipt>>()
+    }
+
+    async fn add_relay_parent<Context>(
+        &mut self,
+        ctx: &mut Context,
+        relay_parent: Hash,
+        validators: Vec<ValidatorId>,
+        validator_index: Option<ValidatorIndex>,
+    ) -> Result<()>
+    where
+        Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+    {
+        let candidates =
+            query_live_candidates(ctx, self, std::iter::once(relay_parent)).await?;
+
+        // register the relation of relay_parent to candidate..
+        // ..and the reverse association.
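+        // (`receipts`: relay parent or ancestor -> live candidates; `reverse`:
+        // candidate hash -> a covering relay parent or ancestor; `ancestry`:
+        // ancestor -> the in-view relay parents that keep it alive.)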
+        for (relay_parent_or_ancestor, (receipt_hash, receipt)) in candidates.clone() {
+            self
+                .reverse
+                .insert(receipt_hash.clone(), relay_parent_or_ancestor.clone());
+            let per_candidate = self.per_candidate.entry(receipt_hash.clone())
+                .or_default();
+            per_candidate.validator_index = validator_index.clone();
+            per_candidate.validators = validators.clone();
+
+            self
+                .receipts
+                .entry(relay_parent_or_ancestor)
+                .or_default()
+                .insert((receipt_hash, receipt));
+        }
+
+        // collect the ancestors again from the hash map
+        let ancestors = candidates
+            .iter()
+            .filter_map(|(ancestor_or_relay_parent, _receipt)| {
+                if ancestor_or_relay_parent == &relay_parent {
+                    None
+                } else {
+                    Some(*ancestor_or_relay_parent)
+                }
+            })
+            .collect::<Vec<Hash>>();
+
+        // mark all the ancestors as "needed" by this newly added relay parent
+        for ancestor in ancestors.iter() {
+            self.ancestry
+                .entry(ancestor.clone())
+                .or_default()
+                .insert(relay_parent);
+        }
+
+        self
+            .per_relay_parent
+            .entry(relay_parent)
+            .or_default()
+            .ancestors = ancestors;
+
+        Ok(())
+    }
+
+    fn remove_relay_parent(&mut self, relay_parent: &Hash) -> Result<()> {
+        // we might be an ancestor of some other relay parent
+        if let Some(ref mut descendants) = self.ancestry.get_mut(relay_parent) {
+            // if we were the last user, and it is
+            // not explicitly set to be worked on by the overseer
+            if descendants.is_empty() {
+                // remove from the ancestry index
+                self.ancestry.remove(relay_parent);
+                // and also remove the actual receipt
+                self.receipts.remove(relay_parent);
+                self.per_candidate.remove(relay_parent);
+            }
+        }
+        if let Some(per_relay_parent) = self.per_relay_parent.remove(relay_parent) {
+            // remove all "references" from the hash maps and sets for all ancestors
+            for ancestor in per_relay_parent.ancestors {
+                // one of our descendants might be an ancestor of some other relay parent
+                if let Some(ref mut descendants) = self.ancestry.get_mut(&ancestor) {
+                    // we do not need this descendant anymore
+                    descendants.remove(&relay_parent);
+                    // if we were the last user, and it is
+                    // not explicitly set to be worked on by the overseer
+                    if descendants.is_empty() && !self.per_relay_parent.contains_key(&ancestor) {
+                        // remove from the ancestry index
+                        self.ancestry.remove(&ancestor);
+                        // and also remove the actual receipt
+                        self.receipts.remove(&ancestor);
+                        self.per_candidate.remove(&ancestor);
+                    }
+                }
+            }
+        }
+        Ok(())
+    }
+}
+
+/// Deal with network bridge updates, tracking whatever the received
+/// message type requires.
+async fn handle_network_msg( + ctx: &mut Context, + keystore: KeyStorePtr, + state: &mut ProtocolState, + bridge_message: NetworkBridgeEvent, +) -> Result<()> +where + Context: SubsystemContext, +{ + match bridge_message { + NetworkBridgeEvent::PeerConnected(peerid, _role) => { + // insert if none already present + state.peer_views.entry(peerid).or_default(); + } + NetworkBridgeEvent::PeerDisconnected(peerid) => { + // get rid of superfluous data + state.peer_views.remove(&peerid); + } + NetworkBridgeEvent::PeerViewChange(peerid, view) => { + handle_peer_view_change(ctx, state, peerid, view).await?; + } + NetworkBridgeEvent::OurViewChange(view) => { + handle_our_view_change(ctx, keystore, state, view).await?; + } + NetworkBridgeEvent::PeerMessage(remote, msg) => { + let gossiped_availability = match msg { + protocol_v1::AvailabilityDistributionMessage::Chunk(candidate_hash, chunk) => + AvailabilityGossipMessage { candidate_hash, erasure_chunk: chunk } + }; + + process_incoming_peer_message(ctx, state, remote, gossiped_availability).await?; + } + } + Ok(()) +} + + +/// Handle the changes necessary when our view changes. +async fn handle_our_view_change( + ctx: &mut Context, + keystore: KeyStorePtr, + state: &mut ProtocolState, + view: View, +) -> Result<()> +where + Context: SubsystemContext, +{ + let old_view = std::mem::replace(&mut (state.view), view); + + // needed due to borrow rules + let view = state.view.clone(); + let added = view.difference(&old_view).collect::>(); + + // add all the relay parents and fill the cache + for added in added.iter() { + let added = **added; + let validators = query_validators(ctx, added).await?; + let validator_index = obtain_our_validator_index( + &validators, + keystore.clone(), + ); + state.add_relay_parent(ctx, added, validators, validator_index).await?; + } + + // handle all candidates + for (candidate_hash, _receipt) in state.cached_live_candidates_unioned(added) { + let per_candidate = state + .per_candidate + .entry(candidate_hash) + .or_default(); + + // assure the node has the validator role + if per_candidate.validator_index.is_none() { + continue; + }; + + // check if the availability is present in the store exists + if !query_data_availability(ctx, candidate_hash).await? { + continue; + } + + let validator_count = per_candidate.validators.len(); + + // obtain interested peers in the candidate hash + let peers: Vec = state + .peer_views + .clone() + .into_iter() + .filter(|(_peer, view)| { + // collect all direct interests of a peer w/o ancestors + state + .cached_live_candidates_unioned(view.0.iter()) + .contains_key(&candidate_hash) + }) + .map(|(peer, _view)| peer.clone()) + .collect(); + + // distribute all erasure messages to interested peers + for chunk_index in 0u32..(validator_count as u32) { + + // only the peers which did not receive this particular erasure chunk + let per_candidate = state + .per_candidate + .entry(candidate_hash) + .or_default(); + + // obtain the chunks from the cache, if not fallback + // and query the availability store + let message_id = (candidate_hash, chunk_index); + let erasure_chunk = if let Some(message) = per_candidate.message_vault.get(&chunk_index) { + message.erasure_chunk.clone() + } else if let Some(erasure_chunk) = query_chunk(ctx, candidate_hash, chunk_index as ValidatorIndex).await? 
{ + erasure_chunk + } else { + continue; + }; + + debug_assert_eq!(erasure_chunk.index, chunk_index); + + let peers = peers + .iter() + .filter(|peer| { + // only pick those which were not sent before + !per_candidate + .sent_messages + .get(*peer) + .filter(|set| { + set.contains(&message_id) + }) + .is_some() + }) + .map(|peer| peer.clone()) + .collect::>(); + let message = AvailabilityGossipMessage { + candidate_hash, + erasure_chunk, + }; + + send_tracked_gossip_message_to_peers(ctx, per_candidate, peers, message).await?; + } + } + + // cleanup the removed relay parents and their states + let removed = old_view.difference(&view).collect::>(); + for removed in removed { + state.remove_relay_parent(&removed)?; + } + Ok(()) +} + +#[inline(always)] +async fn send_tracked_gossip_message_to_peers( + ctx: &mut Context, + per_candidate: &mut PerCandidate, + peers: Vec, + message: AvailabilityGossipMessage, +) -> Result<()> +where + Context: SubsystemContext, +{ + send_tracked_gossip_messages_to_peers(ctx, per_candidate, peers, iter::once(message)).await +} + +#[inline(always)] +async fn send_tracked_gossip_messages_to_peer( + ctx: &mut Context, + per_candidate: &mut PerCandidate, + peer: PeerId, + message_iter: impl IntoIterator, +) -> Result<()> +where + Context: SubsystemContext, +{ + send_tracked_gossip_messages_to_peers(ctx, per_candidate, vec![peer], message_iter).await +} + +async fn send_tracked_gossip_messages_to_peers( + ctx: &mut Context, + per_candidate: &mut PerCandidate, + peers: Vec, + message_iter: impl IntoIterator, +) -> Result<()> +where + Context: SubsystemContext, +{ + if peers.is_empty() { + return Ok(()) + } + for message in message_iter { + for peer in peers.iter() { + let message_id = (message.candidate_hash, message.erasure_chunk.index); + per_candidate + .sent_messages + .entry(peer.clone()) + .or_default() + .insert(message_id); + } + + per_candidate + .message_vault + .insert(message.erasure_chunk.index, message.clone()); + + let wire_message = protocol_v1::AvailabilityDistributionMessage::Chunk( + message.candidate_hash, + message.erasure_chunk, + ); + + ctx.send_message(AllMessages::NetworkBridge( + NetworkBridgeMessage::SendValidationMessage( + peers.clone(), + protocol_v1::ValidationProtocol::AvailabilityDistribution(wire_message), + ), + )) + .await + .map_err::(Into::into)?; + } + + Ok(()) +} + +// Send the difference between two views which were not sent +// to that particular peer. +async fn handle_peer_view_change( + ctx: &mut Context, + state: &mut ProtocolState, + origin: PeerId, + view: View, +) -> Result<()> +where + Context: SubsystemContext, +{ + let current = state.peer_views.entry(origin.clone()).or_default(); + + let added: Vec = view.difference(&*current).cloned().collect(); + + *current = view; + + // only contains the intersection of what we are interested and + // the union of all relay parent's candidates. + let added_candidates = state.cached_live_candidates_unioned(added.iter()); + + // Send all messages we've seen before and the peer is now interested + // in to that peer. 
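+    // the per-candidate `message_vault` keeps every chunk message seen so far, so a
+    // peer whose view has just started covering a candidate can be brought up to
+    // date without querying the availability store again.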
+
+	for (candidate_hash, _receipt) in added_candidates {
+		let per_candidate = state.per_candidate.entry(candidate_hash).or_default();
+
+		// obtain the relevant chunk indices not sent yet
+		let messages = ((0 as ValidatorIndex)
+			..(per_candidate.validators.len() as ValidatorIndex))
+			.into_iter()
+			.filter_map(|erasure_chunk_index: ValidatorIndex| {
+				let message_id = (candidate_hash, erasure_chunk_index);
+
+				// try to pick up the message from the message vault
+				// so we send as much as we have
+				per_candidate
+					.message_vault
+					.get(&erasure_chunk_index)
+					.filter(|_| {
+						// check if that erasure chunk was already sent before
+						if let Some(sent_set) = per_candidate.sent_messages.get(&origin) {
+							if sent_set.contains(&message_id) {
+								return false;
+							}
+						}
+						true
+					})
+			})
+			.cloned()
+			.collect::<Vec<_>>();
+
+		send_tracked_gossip_messages_to_peer(ctx, per_candidate, origin.clone(), messages).await?;
+	}
+	Ok(())
+}
+
+/// Obtain our validator index: the first validator in the set for which the
+/// keystore holds a signing key.
+/// Returns the index within the validator set as `ValidatorIndex`, if there exists one,
+/// otherwise `None` is returned.
+fn obtain_our_validator_index(
+	validators: &[ValidatorId],
+	keystore: KeyStorePtr,
+) -> Option<ValidatorIndex> {
+	let keystore = keystore.read();
+	validators.iter().enumerate().find_map(|(idx, validator)| {
+		if keystore.has_keys(&[(validator.to_raw_vec(), PARACHAIN_KEY_TYPE_ID)]) {
+			Some(idx as ValidatorIndex)
+		} else {
+			None
+		}
+	})
+}
+
+/// Handle an incoming message from a peer.
+async fn process_incoming_peer_message<Context>(
+	ctx: &mut Context,
+	state: &mut ProtocolState,
+	origin: PeerId,
+	message: AvailabilityGossipMessage,
+) -> Result<()>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	// obtain the set of candidates we are interested in based on our current view
+	let live_candidates = state.cached_live_candidates_unioned(state.view.0.iter());
+
+	// check if the candidate is of interest
+	let live_candidate = if let Some(live_candidate) = live_candidates.get(&message.candidate_hash)
+	{
+		live_candidate
+	} else {
+		return modify_reputation(ctx, origin, COST_NOT_A_LIVE_CANDIDATE).await;
+	};
+
+	// check the merkle proof
+	let root = &live_candidate.commitments.erasure_root;
+	let anticipated_hash = if let Ok(hash) = branch_hash(
+		root,
+		&message.erasure_chunk.proof,
+		message.erasure_chunk.index as usize,
+	) {
+		hash
+	} else {
+		return modify_reputation(ctx, origin, COST_MERKLE_PROOF_INVALID).await;
+	};
+
+	let erasure_chunk_hash = BlakeTwo256::hash(&message.erasure_chunk.chunk);
+	if anticipated_hash != erasure_chunk_hash {
+		return modify_reputation(ctx, origin, COST_MERKLE_PROOF_INVALID).await;
+	}
+
+	// an internal unique identifier of this message
+	let message_id = (message.candidate_hash, message.erasure_chunk.index);
+
+	{
+		let per_candidate = state.per_candidate.entry(message_id.0.clone()).or_default();
+
+		// check if this particular erasure chunk was already sent by that peer before
+		{
+			let received_set = per_candidate
+				.received_messages
+				.entry(origin.clone())
+				.or_default();
+			if received_set.contains(&message_id) {
+				return modify_reputation(ctx, origin, COST_PEER_DUPLICATE_MESSAGE).await;
+			} else {
+				received_set.insert(message_id.clone());
+			}
+		}
+
+		// insert into known messages and change reputation
+		if per_candidate
+			.message_vault
+			.insert(message_id.1, message.clone())
+			.is_some()
+		{
+			modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE).await?;
+		} else {
+			modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE_FIRST).await?;
+
+			// save the chunk for our index
+			if let Some(validator_index) = per_candidate.validator_index {
+				if message.erasure_chunk.index == validator_index {
+					if let Err(_e) = store_chunk(
+						ctx,
+						message.candidate_hash.clone(),
+						message.erasure_chunk.index,
+						message.erasure_chunk.clone(),
+					).await? {
+						warn!(target: TARGET, "Failed to store erasure chunk to availability store");
+					}
+				}
+			}
+		};
+	}
+	// restrict the peers to those with an interest in the candidate
+	let peers = state
+		.peer_views
+		.clone()
+		.into_iter()
+		.filter(|(_peer, view)| {
+			// the peer's view must contain the candidate hash too
+			state
+				.cached_live_candidates_unioned(view.0.iter())
+				.contains_key(&message_id.0)
+		})
+		.map(|(peer, _)| -> PeerId { peer.clone() })
+		.collect::<Vec<_>>();
+
+	let per_candidate = state.per_candidate.entry(message_id.0.clone()).or_default();
+
+	let peers = peers
+		.into_iter()
+		.filter(|peer| {
+			let peer: PeerId = peer.clone();
+			// avoid sending duplicate messages: only keep peers
+			// which did not receive this chunk from us yet
+			!per_candidate
+				.sent_messages
+				.entry(peer)
+				.or_default()
+				.contains(&message_id)
+		})
+		.collect::<Vec<_>>();
+
+	// gossip that message to interested peers
+	send_tracked_gossip_message_to_peers(ctx, per_candidate, peers, message).await
+}
+
+/// The availability distribution subsystem.
+pub struct AvailabilityDistributionSubsystem {
+	/// Pointer to a keystore, which is required for determining this node's validator index.
+	keystore: KeyStorePtr,
+}
+
+impl AvailabilityDistributionSubsystem {
+	/// Number of ancestors to keep around for the relay-chain heads.
+	const K: usize = 3;
+
+	/// Create a new instance of the availability distribution.
+	pub fn new(keystore: KeyStorePtr) -> Self {
+		Self { keystore }
+	}
+
+	/// Start processing work as passed on from the Overseer.
+	async fn run<Context>(self, mut ctx: Context) -> Result<()>
+	where
+		Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+	{
+		// work: process incoming messages from the overseer.
+		let mut state = ProtocolState::default();
+		loop {
+			let message = ctx.recv().await.map_err::<Error, _>(Into::into)?;
+			match message {
+				FromOverseer::Communication {
+					msg: AvailabilityDistributionMessage::NetworkBridgeUpdateV1(event),
+				} => {
+					if let Err(e) = handle_network_msg(
+						&mut ctx,
+						self.keystore.clone(),
+						&mut state,
+						event,
+					).await {
+						warn!(
+							target: TARGET,
+							"Failed to handle incoming network messages: {:?}", e
+						);
+					}
+				}
+				FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
+					activated: _,
+					deactivated: _,
+				})) => {
+					// handled at view change
+				}
+				FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {}
+				FromOverseer::Signal(OverseerSignal::Conclude) => {
+					return Ok(());
+				}
+			}
+		}
+	}
+}
+
+impl<Context> Subsystem<Context> for AvailabilityDistributionSubsystem
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage> + Sync + Send,
+{
+	type Metrics = ();
+
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
+		SpawnedSubsystem {
+			name: "availability-distribution-subsystem",
+			// `run` must be awaited here; otherwise the subsystem future
+			// resolves immediately without doing any work
+			future: Box::pin(async move { self.run(ctx).await }.map(|_| ())),
+		}
+	}
+}
+
+/// Obtain all live candidates based on an iterator of relay heads.
+async fn query_live_candidates_without_ancestors<Context>(
+	ctx: &mut Context,
+	relay_parents: impl IntoIterator<Item = Hash>,
+) -> Result<HashSet<CommittedCandidateReceipt>>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let iter = relay_parents.into_iter();
+	let hint = iter.size_hint();
+
+	let mut live_candidates = HashSet::with_capacity(hint.1.unwrap_or(hint.0));
+	for relay_parent in iter {
+		let paras = query_para_ids(ctx, relay_parent).await?;
+		for para in paras {
+			if let Some(ccr) = query_pending_availability(ctx, relay_parent, para).await? {
+				live_candidates.insert(ccr);
+			}
+		}
+	}
+	Ok(live_candidates)
+}
+
+/// Obtain all live candidates based on an iterator of relay heads, including `k` ancestors.
+async fn query_live_candidates<Context>(
+	ctx: &mut Context,
+	state: &mut ProtocolState,
+	relay_parents: impl IntoIterator<Item = Hash>,
+) -> Result<HashMap<Hash, (Hash, CommittedCandidateReceipt)>>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let iter = relay_parents.into_iter();
+	let hint = iter.size_hint();
+
+	let capacity = hint.1.unwrap_or(hint.0) * (1 + AvailabilityDistributionSubsystem::K);
+	let mut live_candidates =
+		HashMap::<Hash, (Hash, CommittedCandidateReceipt)>::with_capacity(capacity);
+
+	for relay_parent in iter {
+		// collect the ancestors of the relay parent within the same session,
+		// then include the relay parent itself
+		let mut ancestors = query_up_to_k_ancestors_in_same_session(
+			ctx,
+			relay_parent,
+			AvailabilityDistributionSubsystem::K,
+		)
+		.await?;
+
+		ancestors.push(relay_parent);
+
+		// ancestors might overlap, so check the cache too
+		let unknown = ancestors
+			.into_iter()
+			.filter(|relay_parent_or_ancestor| {
+				// use the ones which we pulled before
+				// but keep the unknown relay parents
+				state
+					.receipts
+					.get(relay_parent_or_ancestor)
+					.and_then(|receipts| {
+						// directly extend the live_candidates with the cached value
+						live_candidates.extend(receipts.into_iter().map(
+							|(receipt_hash, receipt)| {
+								(
+									relay_parent,
+									(receipt_hash.clone(), receipt.clone()),
+								)
+							},
+						));
+						Some(())
+					})
+					.is_none()
+			})
+			.collect::<Vec<_>>();
+
+		// query the ones that were not present in the receipts cache
+		let receipts = query_live_candidates_without_ancestors(ctx, unknown.clone()).await?;
+		live_candidates.extend(
+			unknown.into_iter().zip(
+				receipts
+					.into_iter()
+					.map(|receipt| (receipt.hash(), receipt)),
+			),
+		);
+	}
+	Ok(live_candidates)
+}
+
+/// Query the para IDs of the occupied availability cores.
+async fn query_para_ids<Context>(ctx: &mut Context, relay_parent: Hash) -> Result<Vec<ParaId>>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+		relay_parent,
+		RuntimeApiRequest::AvailabilityCores(tx),
+	)))
+	.await
+	.map_err::<Error, _>(Into::into)?;
+
+	let all_para_ids: Vec<_> = rx.await??;
+
+	let occupied_para_ids = all_para_ids
+		.into_iter()
+		.filter_map(|core_state| {
+			if let CoreState::Occupied(occupied) = core_state {
+				Some(occupied.para_id)
+			} else {
+				None
+			}
+		})
+		.collect();
+	Ok(occupied_para_ids)
+}
+
+/// Modify the reputation of a peer based on its behavior.
+async fn modify_reputation<Context>(ctx: &mut Context, peer: PeerId, rep: Rep) -> Result<()>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	trace!(
+		target: TARGET,
+		"Reputation change of {:?} for peer {:?}",
+		rep,
+		peer
+	);
+	ctx.send_message(AllMessages::NetworkBridge(
+		NetworkBridgeMessage::ReportPeer(peer, rep),
+	))
+	.await
+	.map_err::<Error, _>(Into::into)
+}
+
+/// Query whether the availability store already holds the data for a particular candidate hash.
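+///
+/// Call sketch (illustrative only, not a compiled doctest):
+/// ```ignore
+/// if query_data_availability(ctx, candidate_hash).await? {
+///     // the store holds the full data; individual chunks can be queried
+/// }
+/// ```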
+async fn query_data_availability<Context>(
+	ctx: &mut Context,
+	candidate_hash: Hash,
+) -> Result<bool>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(AllMessages::AvailabilityStore(
+		AvailabilityStoreMessage::QueryDataAvailability(candidate_hash, tx),
+	))
+	.await?;
+	rx.await.map_err::<Error, _>(Into::into)
+}
+
+/// Query an erasure chunk for a particular candidate and validator index.
+async fn query_chunk<Context>(
+	ctx: &mut Context,
+	candidate_hash: Hash,
+	validator_index: ValidatorIndex,
+) -> Result<Option<ErasureChunk>>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(AllMessages::AvailabilityStore(
+		AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx),
+	))
+	.await?;
+	rx.await.map_err::<Error, _>(Into::into)
+}
+
+/// Store an erasure chunk in the availability store.
+async fn store_chunk<Context>(
+	ctx: &mut Context,
+	candidate_hash: Hash,
+	validator_index: ValidatorIndex,
+	erasure_chunk: ErasureChunk,
+) -> Result<std::result::Result<(), ()>>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(AllMessages::AvailabilityStore(
+		AvailabilityStoreMessage::StoreChunk(candidate_hash, validator_index, erasure_chunk, tx),
+	)).await?;
+	rx.await.map_err::<Error, _>(Into::into)
+}
+
+/// Query the candidate pending availability for a particular para.
+async fn query_pending_availability<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+	para: ParaId,
+) -> Result<Option<CommittedCandidateReceipt>>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+		relay_parent,
+		RuntimeApiRequest::CandidatePendingAvailability(para, tx),
+	)))
+	.await?;
+	rx.await?
+		.map_err::<Error, _>(Into::into)
+}
+
+/// Query the validator set.
+async fn query_validators<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+) -> Result<Vec<ValidatorId>>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let (tx, rx) = oneshot::channel();
+	let query_validators = AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+		relay_parent,
+		RuntimeApiRequest::Validators(tx),
+	));
+
+	ctx.send_message(query_validators)
+		.await?;
+	rx.await?
+		.map_err::<Error, _>(Into::into)
+}
+
+/// Query the hashes of the `k` ancestors of a relay parent.
+async fn query_k_ancestors<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+	k: usize,
+) -> Result<Vec<Hash>>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let (tx, rx) = oneshot::channel();
+	let query_ancestors = AllMessages::ChainApi(ChainApiMessage::Ancestors {
+		hash: relay_parent,
+		k,
+		response_channel: tx,
+	});
+
+	ctx.send_message(query_ancestors)
+		.await?;
+	rx.await?
+		.map_err::<Error, _>(Into::into)
+}
+
+/// Query the session index a child of the given relay parent would have.
+async fn query_session_index_for_child<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+) -> Result<SessionIndex>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	let (tx, rx) = oneshot::channel();
+	let query_session_idx_for_child = AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+		relay_parent,
+		RuntimeApiRequest::SessionIndexForChild(tx),
+	));
+
+	ctx.send_message(query_session_idx_for_child)
+		.await?;
+	rx.await?
+		.map_err::<Error, _>(Into::into)
+}
+
+/// Query up to `k` ancestors of a relay parent that are within the same session.
+async fn query_up_to_k_ancestors_in_same_session<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+	k: usize,
+) -> Result<Vec<Hash>>
+where
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+{
+	// k + 1 since we always query the child's session index
+	// ordering is [parent, grandparent, great-grandparent, great-great-grandparent, ...]
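+	//
+	// Worked example (illustrative): with `k = 3` and ancestor sessions
+	// [parent: 3, grandparent: 3, great-grandparent: 2] for a relay parent in
+	// session 3, the walk below keeps `parent` and `grandparent` and stops at
+	// the first ancestor from an older session.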
+ let ancestors = query_k_ancestors(ctx, relay_parent, k + 1).await?; + let desired_session = query_session_index_for_child(ctx, relay_parent).await?; + // we would only need `ancestors.len() - 1`, but the one extra could avoid a re-alloc + // if the consumer wants to push the `relay_parent` onto it too and does not hurt otherwise + let mut acc = Vec::with_capacity(ancestors.len()); + + // iterate from youngest to oldest + let mut iter = ancestors.into_iter().peekable(); + + while let Some(ancestor) = iter.next() { + if let Some(ancestor_parent) = iter.peek() { + let session = query_session_index_for_child(ctx, *ancestor_parent).await?; + if session != desired_session { + break; + } + acc.push(ancestor); + } else { + // either ended up at genesis or the blocks were + // already pruned + break; + } + } + + debug_assert!(acc.len() <= k); + Ok(acc) +} + + +#[cfg(test)] +mod tests; diff --git a/node/network/availability-distribution/src/tests.rs b/node/network/availability-distribution/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..9b2b1b35e5330825f6b28392e4bb1bec9fe20b71 --- /dev/null +++ b/node/network/availability-distribution/src/tests.rs @@ -0,0 +1,968 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::*; +use assert_matches::assert_matches; +use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; +use polkadot_primitives::v1::{ + AvailableData, BlockData, CandidateCommitments, CandidateDescriptor, GroupIndex, + GroupRotationInfo, HeadData, PersistedValidationData, OccupiedCore, + PoV, ScheduledCore, ValidatorPair, +}; +use polkadot_subsystem_testhelpers::{self as test_helpers, TimeoutExt}; +use polkadot_node_network_protocol::ObservedRole; + +use futures::{executor, future, Future}; +use futures_timer::Delay; +use smallvec::smallvec; +use std::time::Duration; + +macro_rules! view { + ( $( $hash:expr ),* $(,)? ) => [ + View(vec![ $( $hash.clone() ),* ]) + ]; + } + +macro_rules! 
delay { + ($delay:expr) => { + Delay::new(Duration::from_millis($delay)).await; + }; +} + +fn chunk_protocol_message(message: AvailabilityGossipMessage) + -> protocol_v1::AvailabilityDistributionMessage +{ + protocol_v1::AvailabilityDistributionMessage::Chunk( + message.candidate_hash, + message.erasure_chunk, + ) +} + +struct TestHarness { + virtual_overseer: test_helpers::TestSubsystemContextHandle, +} + +fn test_harness>( + keystore: KeyStorePtr, + test: impl FnOnce(TestHarness) -> T, +) { + let _ = env_logger::builder() + .is_test(true) + .filter( + Some("polkadot_availability_distribution"), + log::LevelFilter::Trace, + ) + .try_init(); + + let pool = sp_core::testing::TaskExecutor::new(); + + let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + + let subsystem = AvailabilityDistributionSubsystem::new(keystore); + let subsystem = subsystem.run(context); + + let test_fut = test(TestHarness { virtual_overseer }); + + futures::pin_mut!(test_fut); + futures::pin_mut!(subsystem); + + executor::block_on(future::select(test_fut, subsystem)); +} + +const TIMEOUT: Duration = Duration::from_millis(100); + +async fn overseer_signal( + overseer: &mut test_helpers::TestSubsystemContextHandle, + signal: OverseerSignal, +) { + delay!(50); + overseer + .send(FromOverseer::Signal(signal)) + .timeout(TIMEOUT) + .await + .expect("10ms is more than enough for sending signals."); +} + +async fn overseer_send( + overseer: &mut test_helpers::TestSubsystemContextHandle, + msg: AvailabilityDistributionMessage, +) { + log::trace!("Sending message:\n{:?}", &msg); + overseer + .send(FromOverseer::Communication { msg }) + .timeout(TIMEOUT) + .await + .expect("10ms is more than enough for sending messages."); +} + +async fn overseer_recv( + overseer: &mut test_helpers::TestSubsystemContextHandle, +) -> AllMessages { + log::trace!("Waiting for message ..."); + let msg = overseer + .recv() + .timeout(TIMEOUT) + .await + .expect("TIMEOUT is enough to recv."); + log::trace!("Received message:\n{:?}", &msg); + msg +} + +fn dummy_occupied_core(para: ParaId) -> CoreState { + CoreState::Occupied(OccupiedCore { + para_id: para, + next_up_on_available: None, + occupied_since: 0, + time_out_at: 5, + next_up_on_time_out: None, + availability: Default::default(), + group_responsible: GroupIndex::from(0), + }) +} + +use sp_keyring::Sr25519Keyring; + +#[derive(Clone)] +struct TestState { + chain_ids: Vec, + validators: Vec, + validator_public: Vec, + validator_index: Option, + validator_groups: (Vec>, GroupRotationInfo), + head_data: HashMap, + keystore: KeyStorePtr, + relay_parent: Hash, + ancestors: Vec, + availability_cores: Vec, + persisted_validation_data: PersistedValidationData, +} + +fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { + val_ids.iter().map(|v| v.public().into()).collect() +} + +impl Default for TestState { + fn default() -> Self { + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + + let chain_ids = vec![chain_a, chain_b]; + + let validators = vec![ + Sr25519Keyring::Ferdie, // <- this node, role: validator + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + ]; + + let keystore = keystore::Store::new_in_memory(); + + keystore + .write() + .insert_ephemeral_from_seed::(&validators[0].to_seed()) + .expect("Insert key into keystore"); + + let validator_public = validator_pubkeys(&validators); + + let validator_groups = vec![vec![2, 0, 4], vec![1], vec![3]]; + let group_rotation_info = GroupRotationInfo { 
+ session_start_block: 0, + group_rotation_frequency: 100, + now: 1, + }; + let validator_groups = (validator_groups, group_rotation_info); + + let availability_cores = vec![ + CoreState::Scheduled(ScheduledCore { + para_id: chain_ids[0], + collator: None, + }), + CoreState::Scheduled(ScheduledCore { + para_id: chain_ids[1], + collator: None, + }), + ]; + + let mut head_data = HashMap::new(); + head_data.insert(chain_a, HeadData(vec![4, 5, 6])); + head_data.insert(chain_b, HeadData(vec![7, 8, 9])); + + let ancestors = vec![ + Hash::repeat_byte(0x44), + Hash::repeat_byte(0x33), + Hash::repeat_byte(0x22), + ]; + let relay_parent = Hash::repeat_byte(0x05); + + let persisted_validation_data = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + block_number: Default::default(), + hrmp_mqc_heads: Vec::new(), + }; + + let validator_index = Some((validators.len() - 1) as ValidatorIndex); + + Self { + chain_ids, + keystore, + validators, + validator_public, + validator_groups, + availability_cores, + head_data, + persisted_validation_data, + relay_parent, + ancestors, + validator_index, + } + } +} + +fn make_available_data(test: &TestState, pov: PoV) -> AvailableData { + AvailableData { + validation_data: test.persisted_validation_data.clone(), + pov, + } +} + +fn make_erasure_root(test: &TestState, pov: PoV) -> Hash { + let available_data = make_available_data(test, pov); + + let chunks = obtain_chunks(test.validators.len(), &available_data).unwrap(); + branches(&chunks).root() +} + +fn make_valid_availability_gossip( + test: &TestState, + candidate_hash: Hash, + erasure_chunk_index: u32, + pov: PoV, +) -> AvailabilityGossipMessage { + let available_data = make_available_data(test, pov); + + let erasure_chunks = derive_erasure_chunks_with_proofs(test.validators.len(), &available_data); + + let erasure_chunk: ErasureChunk = erasure_chunks + .get(erasure_chunk_index as usize) + .expect("Must be valid or input is oob") + .clone(); + + AvailabilityGossipMessage { + candidate_hash, + erasure_chunk, + } +} + +#[derive(Default)] +struct TestCandidateBuilder { + para_id: ParaId, + head_data: HeadData, + pov_hash: Hash, + relay_parent: Hash, + erasure_root: Hash, +} + +impl TestCandidateBuilder { + fn build(self) -> CommittedCandidateReceipt { + CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id: self.para_id, + pov_hash: self.pov_hash, + relay_parent: self.relay_parent, + ..Default::default() + }, + commitments: CandidateCommitments { + head_data: self.head_data, + erasure_root: self.erasure_root, + ..Default::default() + }, + } + } +} + +#[test] +fn helper_integrity() { + let test_state = TestState::default(); + + let pov_block = PoV { + block_data: BlockData(vec![42, 43, 44]), + }; + + let pov_hash = pov_block.hash(); + + let candidate = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_hash, + erasure_root: make_erasure_root(&test_state, pov_block.clone()), + ..Default::default() + } + .build(); + + let message = + make_valid_availability_gossip(&test_state, dbg!(candidate.hash()), 2, pov_block.clone()); + + let root = dbg!(&candidate.commitments.erasure_root); + + let anticipated_hash = branch_hash( + root, + &message.erasure_chunk.proof, + dbg!(message.erasure_chunk.index as usize), + ) + .expect("Must be able to derive branch hash"); + assert_eq!( + anticipated_hash, + BlakeTwo256::hash(&message.erasure_chunk.chunk) + ); +} + +fn derive_erasure_chunks_with_proofs( + n_validators: usize, + 
available_data: &AvailableData, +) -> Vec { + let chunks: Vec> = obtain_chunks(n_validators, available_data).unwrap(); + + // create proofs for each erasure chunk + let branches = branches(chunks.as_ref()); + + let erasure_chunks = branches + .enumerate() + .map(|(index, (proof, chunk))| ErasureChunk { + chunk: chunk.to_vec(), + index: index as _, + proof, + }) + .collect::>(); + + erasure_chunks +} + +#[test] +fn reputation_verification() { + let test_state = TestState::default(); + + test_harness(test_state.keystore.clone(), |test_harness| async move { + let TestHarness { + mut virtual_overseer, + } = test_harness; + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let pov_block_a = PoV { + block_data: BlockData(vec![42, 43, 44]), + }; + + let pov_block_b = PoV { + block_data: BlockData(vec![45, 46, 47]), + }; + + let pov_block_c = PoV { + block_data: BlockData(vec![48, 49, 50]), + }; + + let pov_hash_a = pov_block_a.hash(); + let pov_hash_b = pov_block_b.hash(); + let pov_hash_c = pov_block_c.hash(); + + let candidates = vec![ + TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_hash_a, + erasure_root: make_erasure_root(&test_state, pov_block_a.clone()), + ..Default::default() + } + .build(), + TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash: pov_hash_b, + erasure_root: make_erasure_root(&test_state, pov_block_b.clone()), + head_data: expected_head_data.clone(), + ..Default::default() + } + .build(), + TestCandidateBuilder { + para_id: test_state.chain_ids[1], + relay_parent: Hash::repeat_byte(0xFA), + pov_hash: pov_hash_c, + erasure_root: make_erasure_root(&test_state, pov_block_c.clone()), + head_data: test_state + .head_data + .get(&test_state.chain_ids[1]) + .unwrap() + .clone(), + ..Default::default() + } + .build(), + ]; + + let TestState { + chain_ids, + keystore: _, + validators: _, + validator_public, + validator_groups, + availability_cores, + head_data: _, + persisted_validation_data: _, + relay_parent: current, + ancestors, + validator_index: _, + } = test_state.clone(); + + let _ = validator_groups; + let _ = availability_cores; + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + assert_ne!(&peer_a, &peer_b); + + log::trace!("peer A: {:?}", peer_a); + log::trace!("peer B: {:?}", peer_b); + + log::trace!("candidate A: {:?}", candidates[0].hash()); + log::trace!("candidate B: {:?}", candidates[1].hash()); + + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + activated: smallvec![current.clone()], + deactivated: smallvec![], + }), + ) + .await; + + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::OurViewChange(view![current,]), + ), + ) + .await; + + // obtain the validators per relay parent + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Validators(tx), + )) => { + assert_eq!(relay_parent, current); + tx.send(Ok(validator_public.clone())).unwrap(); + } + ); + + let genesis = Hash::repeat_byte(0xAA); + // query of k ancestors, we only provide one + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::Ancestors { + hash: relay_parent, + k, + response_channel: tx, + }) => { + assert_eq!(relay_parent, current); + 
assert_eq!(k, AvailabilityDistributionSubsystem::K + 1); + // 0xAA..AA will not be included, since there is no mean to determine + // its session index + tx.send(Ok(vec![ancestors[0].clone(), genesis])).unwrap(); + } + ); + + // state query for each of them + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionIndexForChild(tx) + )) => { + assert_eq!(relay_parent, current); + tx.send(Ok(1 as SessionIndex)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionIndexForChild(tx) + )) => { + assert_eq!(relay_parent, genesis); + tx.send(Ok(1 as SessionIndex)).unwrap(); + } + ); + + // subsystem peer id collection + // which will query the availability cores + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::AvailabilityCores(tx) + )) => { + assert_eq!(relay_parent, ancestors[0]); + // respond with a set of availability core states + tx.send(Ok(vec![ + dummy_occupied_core(chain_ids[0]), + dummy_occupied_core(chain_ids[1]) + ])).unwrap(); + } + ); + + // now each of the relay parents in the view (1) will + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::CandidatePendingAvailability(para, tx) + )) => { + assert_eq!(relay_parent, ancestors[0]); + assert_eq!(para, chain_ids[0]); + tx.send(Ok(Some( + candidates[0].clone() + ))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::CandidatePendingAvailability(para, tx) + )) => { + assert_eq!(relay_parent, ancestors[0]); + assert_eq!(para, chain_ids[1]); + tx.send(Ok(Some( + candidates[1].clone() + ))).unwrap(); + } + ); + + for _ in 0usize..1 { + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) => { + tx.send(Ok(vec![ + CoreState::Occupied(OccupiedCore { + para_id: chain_ids[0].clone(), + next_up_on_available: None, + occupied_since: 0, + time_out_at: 10, + next_up_on_time_out: None, + availability: Default::default(), + group_responsible: GroupIndex::from(0), + }), + CoreState::Free, + CoreState::Free, + CoreState::Occupied(OccupiedCore { + para_id: chain_ids[1].clone(), + next_up_on_available: None, + occupied_since: 1, + time_out_at: 7, + next_up_on_time_out: None, + availability: Default::default(), + group_responsible: GroupIndex::from(0), + }), + CoreState::Free, + CoreState::Free, + ])).unwrap(); + } + ); + + // query the availability cores for each of the paras (2) + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::CandidatePendingAvailability(para, tx), + ) + ) => { + assert_eq!(para, chain_ids[0]); + tx.send(Ok(Some( + candidates[0].clone() + ))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::CandidatePendingAvailability(para, tx), + )) => { + assert_eq!(para, chain_ids[1]); + tx.send(Ok(Some( + 
candidates[1].clone() + ))).unwrap(); + } + ); + } + + let mut candidates2 = candidates.clone(); + // check if the availability store can provide the desired erasure chunks + for i in 0usize..2 { + log::trace!("0000"); + let avail_data = make_available_data(&test_state, pov_block_a.clone()); + let chunks = + derive_erasure_chunks_with_proofs(test_state.validators.len(), &avail_data); + + let expected; + // store the chunk to the av store + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::QueryDataAvailability( + candidate_hash, + tx, + ) + ) => { + let index = candidates2.iter().enumerate().find(|x| { x.1.hash() == candidate_hash }).map(|x| x.0).unwrap(); + expected = dbg!(candidates2.swap_remove(index).hash()); + tx.send( + i == 0 + ).unwrap(); + } + ); + + assert_eq!(chunks.len(), test_state.validators.len()); + + log::trace!("xxxx"); + // retrieve a stored chunk + for (j, chunk) in chunks.into_iter().enumerate() { + log::trace!("yyyy i={}, j={}", i, j); + if i != 0 { + // not a validator, so this never happens + break; + } + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::QueryChunk( + candidate_hash, + idx, + tx, + ) + ) => { + assert_eq!(candidate_hash, expected); + assert_eq!(j as u32, chunk.index); + assert_eq!(idx, j as u32); + tx.send( + Some(chunk.clone()) + ).unwrap(); + } + ); + } + } + // setup peer a with interest in current + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerConnected(peer_a.clone(), ObservedRole::Full), + ), + ) + .await; + + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![current]), + ), + ) + .await; + + // setup peer b with interest in ancestor + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerConnected(peer_b.clone(), ObservedRole::Full), + ), + ) + .await; + + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![ancestors[0]]), + ), + ) + .await; + + delay!(100); + + let valid: AvailabilityGossipMessage = make_valid_availability_gossip( + &test_state, + candidates[0].hash(), + 2, + pov_block_a.clone(), + ); + + { + // valid (first, from b) + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + chunk_protocol_message(valid.clone()), + ), + ), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer( + peer, + rep + ) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, BENEFIT_VALID_MESSAGE_FIRST); + } + ); + } + + { + // valid (duplicate, from b) + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + chunk_protocol_message(valid.clone()), + ), + ), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer( + peer, + rep + ) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, COST_PEER_DUPLICATE_MESSAGE); + } + ); + } + + { + // valid (second, from a) + overseer_send( + 
&mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + chunk_protocol_message(valid.clone()), + ), + ), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer( + peer, + rep + ) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, BENEFIT_VALID_MESSAGE); + } + ); + } + + // peer a is not interested in anything anymore + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![]), + ), + ) + .await; + + { + // send the a message again, so we should detect the duplicate + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + chunk_protocol_message(valid.clone()), + ), + ), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer( + peer, + rep + ) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, COST_PEER_DUPLICATE_MESSAGE); + } + ); + } + + // peer b sends a message before we have the view + // setup peer a with interest in parent x + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerDisconnected(peer_b.clone()), + ), + ) + .await; + + delay!(10); + + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerConnected(peer_b.clone(), ObservedRole::Full), + ), + ) + .await; + + { + // send another message + let valid2: AvailabilityGossipMessage = make_valid_availability_gossip( + &test_state, + candidates[2].hash(), + 1, + pov_block_c.clone(), + ); + + // send the a message before we send a view update + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + chunk_protocol_message(valid2), + ), + ), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer( + peer, + rep + ) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, COST_NOT_A_LIVE_CANDIDATE); + } + ); + } + }); +} + +#[test] +fn k_ancestors_in_session() { + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut virtual_overseer) = + test_helpers::make_subsystem_context::(pool); + + const DATA: &[(Hash, SessionIndex)] = &[ + (Hash::repeat_byte(0x32), 3), // relay parent + (Hash::repeat_byte(0x31), 3), // grand parent + (Hash::repeat_byte(0x30), 3), // great ... 
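+		// older sessions below: the same-session ancestry walk must stop
+		// before reaching these entries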
+ (Hash::repeat_byte(0x20), 2), + (Hash::repeat_byte(0x12), 1), + (Hash::repeat_byte(0x11), 1), + (Hash::repeat_byte(0x10), 1), + ]; + const K: usize = 5; + + const EXPECTED: &[Hash] = &[DATA[1].0, DATA[2].0]; + + let test_fut = async move { + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::Ancestors { + hash: relay_parent, + k, + response_channel: tx, + }) => { + assert_eq!(k, K+1); + assert_eq!(relay_parent, DATA[0].0); + tx.send(Ok(DATA[1..=k].into_iter().map(|x| x.0).collect::>())).unwrap(); + } + ); + + // query the desired session index of the relay parent + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + assert_eq!(relay_parent, DATA[0].0); + let session: SessionIndex = DATA[0].1; + tx.send(Ok(session)).unwrap(); + } + ); + + // query ancestors + for i in 2usize..=(EXPECTED.len() + 1 + 1) { + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + // query is for ancestor_parent + let x = &DATA[i]; + assert_eq!(relay_parent, x.0); + // but needs to yield ancestor_parent's child's session index + let x = &DATA[i-1]; + tx.send(Ok(x.1)).unwrap(); + } + ); + } + }; + + let sut = async move { + let ancestors = query_up_to_k_ancestors_in_same_session(&mut ctx, DATA[0].0, K) + .await + .unwrap(); + assert_eq!(ancestors, EXPECTED.to_vec()); + }; + + futures::pin_mut!(test_fut); + futures::pin_mut!(sut); + + executor::block_on(future::join(test_fut, sut).timeout(Duration::from_millis(1000))); +} diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..67c2671d41b8a146f870ad01f48b3985804b027f --- /dev/null +++ b/node/network/bitfield-distribution/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "polkadot-availability-bitfield-distribution" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.5" +futures-timer = "3.0.2" +log = "0.4.8" +streamunordered = "0.5.1" +codec = { package="parity-scale-codec", version = "1.3.4" } +node-primitives = { package = "polkadot-node-primitives", path = "../../primitives" } +polkadot-primitives = { path = "../../../primitives" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } +polkadot-network-bridge = { path = "../../network/bridge" } +polkadot-node-network-protocol = { path = "../../network/protocol" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[dev-dependencies] +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } +bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +parking_lot = "0.11.0" +maplit = "1.0.2" +smol = "0.3.3" +env_logger = "0.7.1" +assert_matches = "1.3.0" diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..4c832fc216e90fc67f6e470e5c077bea83d98b99 --- /dev/null +++ b/node/network/bitfield-distribution/src/lib.rs @@ -0,0 +1,1083 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! The bitfield distribution subsystem.
+//!
+//! If this node is a validator, it gossips its own signed availability bitfield
+//! for a particular relay parent.
+//! Independently of that, it relays bitfield gossip received from peers to other
+//! interested peers.
+
+use codec::{Decode, Encode};
+use futures::{channel::oneshot, FutureExt};
+
+use log::{trace, warn};
+use polkadot_subsystem::messages::*;
+use polkadot_subsystem::{
+	ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemResult,
+};
+use polkadot_primitives::v1::{Hash, SignedAvailabilityBitfield, SigningContext, ValidatorId};
+use polkadot_node_network_protocol::{v1 as protocol_v1, PeerId, NetworkBridgeEvent, View, ReputationChange};
+use std::collections::{HashMap, HashSet};
+
+const COST_SIGNATURE_INVALID: ReputationChange =
+	ReputationChange::new(-100, "Bitfield signature invalid");
+const COST_VALIDATOR_INDEX_INVALID: ReputationChange =
+	ReputationChange::new(-100, "Bitfield validator index invalid");
+const COST_MISSING_PEER_SESSION_KEY: ReputationChange =
+	ReputationChange::new(-133, "Missing peer session key");
+const COST_NOT_IN_VIEW: ReputationChange =
+	ReputationChange::new(-51, "Not interested in that parent hash");
+const COST_PEER_DUPLICATE_MESSAGE: ReputationChange =
+	ReputationChange::new(-500, "Peer sent the same message multiple times");
+const BENEFIT_VALID_MESSAGE_FIRST: ReputationChange =
+	ReputationChange::new(15, "Valid message with new information");
+const BENEFIT_VALID_MESSAGE: ReputationChange =
+	ReputationChange::new(10, "Valid message");
+
+/// Checked signed availability bitfield that is distributed
+/// to other peers.
+#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq)]
+struct BitfieldGossipMessage {
+	/// The relay parent this message is relative to.
+	relay_parent: Hash,
+	/// The actual signed availability bitfield.
+	signed_availability: SignedAvailabilityBitfield,
+}
+
+impl BitfieldGossipMessage {
+	fn into_validation_protocol(self) -> protocol_v1::ValidationProtocol {
+		protocol_v1::ValidationProtocol::BitfieldDistribution(
+			self.into_network_message()
+		)
+	}
+
+	fn into_network_message(self)
+		-> protocol_v1::BitfieldDistributionMessage
+	{
+		protocol_v1::BitfieldDistributionMessage::Bitfield(
+			self.relay_parent,
+			self.signed_availability,
+		)
+	}
+}
+
+/// Data used to track information about peers and the relay parents the
+/// overseer ordered us to work on.
+#[derive(Default, Clone)]
+struct ProtocolState {
+	/// Track all active peers and their views
+	/// to determine what is relevant to them.
+	peer_views: HashMap<PeerId, View>,
+
+	/// Our current view.
+	view: View,
+
+	/// Additional data particular to a relay parent.
+	per_relay_parent: HashMap<Hash, PerRelayParentData>,
+}
+
+/// Data for a particular relay parent.
+#[derive(Debug, Clone, Default)]
+struct PerRelayParentData {
+	/// Signing context for a particular relay parent.
+	signing_context: SigningContext,
+
+	/// Set of validators for a particular relay parent.
+	validator_set: Vec<ValidatorId>,
+
+	/// Set of validators for a particular relay parent for which we
+	/// received a valid `BitfieldGossipMessage`.
+	/// Also serves as the list of known messages for peers connecting
+	/// after bitfield gossips were already received.
+	one_per_validator: HashMap<ValidatorId, BitfieldGossipMessage>,
+
+	/// Avoid duplicate message transmission to our peers.
+	message_sent_to_peer: HashMap<PeerId, HashSet<ValidatorId>>,
+
+	/// Track messages that were already received by a peer
+	/// to prevent flooding.
+	message_received_from_peer: HashMap<PeerId, HashSet<ValidatorId>>,
+}
+
+impl PerRelayParentData {
+	/// Determines if that particular message signed by a validator is needed by the given peer.
+	fn message_from_validator_needed_by_peer(
+		&self,
+		peer: &PeerId,
+		validator: &ValidatorId,
+	) -> bool {
+		if let Some(set) = self.message_sent_to_peer.get(peer) {
+			!set.contains(validator)
+		} else {
+			// nothing was sent to this peer yet, so the message is needed
+			true
+		}
+	}
+}
+
+/// The bitfield distribution subsystem.
+pub struct BitfieldDistribution;
+
+impl BitfieldDistribution {
+	/// Start processing work as passed on from the Overseer.
+	async fn run<Context>(mut ctx: Context) -> SubsystemResult<()>
+	where
+		Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+	{
+		// work: process incoming messages from the overseer and process accordingly.
+		let mut state = ProtocolState::default();
+		loop {
+			let message = ctx.recv().await?;
+			match message {
+				FromOverseer::Communication {
+					msg: BitfieldDistributionMessage::DistributeBitfield(hash, signed_availability),
+				} => {
+					trace!(target: "bitd", "Processing DistributeBitfield");
+					handle_bitfield_distribution(&mut ctx, &mut state, hash, signed_availability)
+						.await?;
+				}
+				FromOverseer::Communication {
+					msg: BitfieldDistributionMessage::NetworkBridgeUpdateV1(event),
+				} => {
+					trace!(target: "bitd", "Processing NetworkMessage");
+					// a network message was received
+					if let Err(e) = handle_network_msg(&mut ctx, &mut state, event).await {
+						warn!(target: "bitd", "Failed to handle incoming network messages: {:?}", e);
+					}
+				}
+				FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated })) => {
+					for relay_parent in activated {
+						trace!(target: "bitd", "Start {:?}", relay_parent);
+						// query basic system parameters once
+						if let Some((validator_set, signing_context)) =
+							query_basics(&mut ctx, relay_parent).await?
+						{
+							// If our runtime API fails, we don't take down the node,
+							// but we might alter peers' reputations erroneously as a result
+							// of not having the correct bookkeeping. If we have lost a race
+							// with state pruning, it is unlikely that peers will be sending
+							// us anything to do with this relay-parent anyway.
+							let _ = state.per_relay_parent.insert(
+								relay_parent,
+								PerRelayParentData {
+									signing_context,
+									validator_set,
+									..Default::default()
+								},
+							);
+						}
+					}
+
+					for relay_parent in deactivated {
+						trace!(target: "bitd", "Stop {:?}", relay_parent);
+						// defer the cleanup to the view change
+					}
+				}
+				FromOverseer::Signal(OverseerSignal::BlockFinalized(hash)) => {
+					trace!(target: "bitd", "Block finalized {:?}", hash);
+				}
+				FromOverseer::Signal(OverseerSignal::Conclude) => {
+					trace!(target: "bitd", "Conclude");
+					return Ok(());
+				}
+			}
+		}
+	}
+}
+
+/// Modify the reputation of a peer based on its behaviour.
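+///
+/// Call sketch (illustrative only, not a compiled doctest):
+/// ```ignore
+/// modify_reputation(&mut ctx, origin, COST_SIGNATURE_INVALID).await?;
+/// ```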
+async fn modify_reputation<Context>(
+	ctx: &mut Context,
+	peer: PeerId,
+	rep: ReputationChange,
+) -> SubsystemResult<()>
+where
+	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+{
+	trace!(target: "bitd", "Reputation change of {:?} for peer {:?}", rep, peer);
+	ctx.send_message(AllMessages::NetworkBridge(
+		NetworkBridgeMessage::ReportPeer(peer, rep),
+	))
+	.await
+}
+
+/// Distribute a given valid and signature-checked bitfield message.
+///
+/// For this variant the source is this node.
+async fn handle_bitfield_distribution<Context>(
+	ctx: &mut Context,
+	state: &mut ProtocolState,
+	relay_parent: Hash,
+	signed_availability: SignedAvailabilityBitfield,
+) -> SubsystemResult<()>
+where
+	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+{
+	// Ignore anything the overseer did not tell this subsystem to work on
+	let mut job_data = state.per_relay_parent.get_mut(&relay_parent);
+	let job_data: &mut _ = if let Some(ref mut job_data) = job_data {
+		job_data
+	} else {
+		trace!(
+			target: "bitd",
+			"Not supposed to work on relay parent {} related data",
+			relay_parent
+		);
+
+		return Ok(());
+	};
+	let validator_set = &job_data.validator_set;
+	if validator_set.is_empty() {
+		trace!(target: "bitd", "Validator set for {:?} is empty", relay_parent);
+		return Ok(());
+	}
+
+	let validator_index = signed_availability.validator_index() as usize;
+	let validator = if let Some(validator) = validator_set.get(validator_index) {
+		validator.clone()
+	} else {
+		trace!(target: "bitd", "Could not find a validator for index {}", validator_index);
+		return Ok(());
+	};
+
+	let peer_views = &mut state.peer_views;
+	let msg = BitfieldGossipMessage {
+		relay_parent,
+		signed_availability,
+	};
+
+	relay_message(ctx, job_data, peer_views, validator, msg).await?;
+
+	Ok(())
+}
+
+/// Distribute a given valid and signature-checked bitfield message.
+///
+/// Can be originated by another subsystem or received via network from another peer.
+async fn relay_message<Context>(
+	ctx: &mut Context,
+	job_data: &mut PerRelayParentData,
+	peer_views: &mut HashMap<PeerId, View>,
+	validator: ValidatorId,
+	message: BitfieldGossipMessage,
+) -> SubsystemResult<()>
+where
+	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+{
+	// notify the overseer about a new and valid signed bitfield
+	ctx.send_message(AllMessages::Provisioner(
+		ProvisionerMessage::ProvisionableData(ProvisionableData::Bitfield(
+			message.relay_parent.clone(),
+			message.signed_availability.clone(),
+		)),
+	))
+	.await?;
+
+	let message_sent_to_peer = &mut (job_data.message_sent_to_peer);
+
+	// pass on the bitfield distribution to all interested peers
+	let interested_peers = peer_views
+		.iter()
+		.filter_map(|(peer, view)| {
+			// check the interest of the peer in this message's relay parent
+			if view.contains(&message.relay_parent) {
+				// track the message as sent for this peer
+				message_sent_to_peer
+					.entry(peer.clone())
+					.or_default()
+					.insert(validator.clone());
+
+				Some(peer.clone())
+			} else {
+				None
+			}
+		})
+		.collect::<Vec<PeerId>>();
+
+	if interested_peers.is_empty() {
+		trace!(
+			target: "bitd",
+			"No peers are interested in gossip for relay parent {:?}",
+			message.relay_parent
+		);
+	} else {
+		ctx.send_message(AllMessages::NetworkBridge(
+			NetworkBridgeMessage::SendValidationMessage(
+				interested_peers,
+				message.into_validation_protocol(),
+			),
+		))
+		.await?;
+	}
+	Ok(())
+}
+
+/// Handle an incoming message from a peer.
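+///
+/// Call sketch (illustrative only, not a compiled doctest):
+/// ```ignore
+/// let gossiped = BitfieldGossipMessage { relay_parent, signed_availability };
+/// process_incoming_peer_message(&mut ctx, &mut state, origin, gossiped).await?;
+/// ```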
+async fn process_incoming_peer_message<Context>(
+	ctx: &mut Context,
+	state: &mut ProtocolState,
+	origin: PeerId,
+	message: BitfieldGossipMessage,
+) -> SubsystemResult<()>
+where
+	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+{
+	// we don't care about this, not part of our view
+	if !state.view.contains(&message.relay_parent) {
+		return modify_reputation(ctx, origin, COST_NOT_IN_VIEW).await;
+	}
+
+	// Ignore anything the overseer did not tell this subsystem to work on
+	let mut job_data = state.per_relay_parent.get_mut(&message.relay_parent);
+	let job_data: &mut _ = if let Some(ref mut job_data) = job_data {
+		job_data
+	} else {
+		return modify_reputation(ctx, origin, COST_NOT_IN_VIEW).await;
+	};
+
+	let validator_set = &job_data.validator_set;
+	if validator_set.is_empty() {
+		trace!(
+			target: "bitd",
+			"Validator set for relay parent {:?} is empty",
+			&message.relay_parent
+		);
+		return modify_reputation(ctx, origin, COST_MISSING_PEER_SESSION_KEY).await;
+	}
+
+	// Use the (untrusted) validator index provided by the signed payload
+	// and see if that one actually signed the availability bitset.
+	let signing_context = job_data.signing_context.clone();
+	let validator_index = message.signed_availability.validator_index() as usize;
+	let validator = if let Some(validator) = validator_set.get(validator_index) {
+		validator.clone()
+	} else {
+		return modify_reputation(ctx, origin, COST_VALIDATOR_INDEX_INVALID).await;
+	};
+
+	// Check if the peer already sent us a message for the validator denoted in the message earlier.
+	// Must be done after validator index verification, in order to avoid storing an unbounded
+	// number of set entries.
+	let received_set = job_data
+		.message_received_from_peer
+		.entry(origin.clone())
+		.or_default();
+
+	if !received_set.contains(&validator) {
+		received_set.insert(validator.clone());
+	} else {
+		return modify_reputation(ctx, origin, COST_PEER_DUPLICATE_MESSAGE).await;
+	};
+
+	if message
+		.signed_availability
+		.check_signature(&signing_context, &validator)
+		.is_ok()
+	{
+		let one_per_validator = &mut (job_data.one_per_validator);
+
+		// only relay a message of a validator once
+		if one_per_validator.get(&validator).is_some() {
+			trace!(
+				target: "bitd",
+				"Already received a message for validator at index {}",
+				validator_index
+			);
+			modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE).await?;
+			return Ok(());
+		}
+		one_per_validator.insert(validator.clone(), message.clone());
+
+		relay_message(ctx, job_data, &mut state.peer_views, validator, message).await?;
+
+		modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE_FIRST).await
+	} else {
+		modify_reputation(ctx, origin, COST_SIGNATURE_INVALID).await
+	}
+}
+
+/// Deal with network bridge updates and track what needs to be tracked,
+/// which depends on the message type received.
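+///
+/// Call sketch (illustrative only, not a compiled doctest):
+/// ```ignore
+/// handle_network_msg(
+///     &mut ctx,
+///     &mut state,
+///     NetworkBridgeEvent::PeerViewChange(origin, view),
+/// ).await?;
+/// ```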
+async fn handle_network_msg<Context>(
+	ctx: &mut Context,
+	state: &mut ProtocolState,
+	bridge_message: NetworkBridgeEvent<protocol_v1::BitfieldDistributionMessage>,
+) -> SubsystemResult<()>
+where
+	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+{
+	match bridge_message {
+		NetworkBridgeEvent::PeerConnected(peerid, _role) => {
+			// insert if none already present
+			state.peer_views.entry(peerid).or_default();
+		}
+		NetworkBridgeEvent::PeerDisconnected(peerid) => {
+			// get rid of superfluous data
+			state.peer_views.remove(&peerid);
+		}
+		NetworkBridgeEvent::PeerViewChange(peerid, view) => {
+			handle_peer_view_change(ctx, state, peerid, view).await?;
+		}
+		NetworkBridgeEvent::OurViewChange(view) => {
+			handle_our_view_change(state, view)?;
+		}
+		NetworkBridgeEvent::PeerMessage(remote, message) => {
+			match message {
+				protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) => {
+					trace!(target: "bitd", "Received bitfield gossip from peer {:?}", &remote);
+					let gossiped_bitfield = BitfieldGossipMessage {
+						relay_parent,
+						signed_availability: bitfield,
+					};
+					process_incoming_peer_message(ctx, state, remote, gossiped_bitfield).await?;
+				}
+			}
+		}
+	}
+	Ok(())
+}
+
+/// Handle the changes necessary when our view changes.
+fn handle_our_view_change(state: &mut ProtocolState, view: View) -> SubsystemResult<()> {
+	let old_view = std::mem::replace(&mut (state.view), view);
+
+	for added in state.view.difference(&old_view) {
+		if !state.per_relay_parent.contains_key(&added) {
+			warn!(
+				target: "bitd",
+				"Our view contains {} but the overseer never told us we should work on this",
+				&added
+			);
+		}
+	}
+	for removed in old_view.difference(&state.view) {
+		// clean up relay parents we are not interested in any more
+		let _ = state.per_relay_parent.remove(&removed);
+	}
+	Ok(())
+}
+
+
+// Send to the peer all messages for the parts of the view that are new to it,
+// skipping whatever was already sent.
+async fn handle_peer_view_change<Context>(
+	ctx: &mut Context,
+	state: &mut ProtocolState,
+	origin: PeerId,
+	view: View,
+) -> SubsystemResult<()>
+where
+	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+{
+	let current = state.peer_views.entry(origin.clone()).or_default();
+
+	let added: Vec<Hash> = view.difference(&*current).cloned().collect();
+
+	*current = view;
+
+	// Send all messages we've seen before and the peer is now interested
+	// in to that peer.
+
+	let delta_set: Vec<(ValidatorId, BitfieldGossipMessage)> = added
+		.into_iter()
+		.filter_map(|new_relay_parent_interest| {
+			if let Some(job_data) = (&*state).per_relay_parent.get(&new_relay_parent_interest) {
+				// Send all jointly known messages for a validator (given the current relay parent)
+				// to the peer `origin`...
+				let one_per_validator = job_data.one_per_validator.clone();
+				let origin = origin.clone();
+				Some(
+					one_per_validator
+						.into_iter()
+						.filter(move |(validator, _message)| {
+							// ..except for the ones the peer already has
+							job_data.message_from_validator_needed_by_peer(&origin, validator)
+						}),
+				)
+			} else {
+				// A relay parent in the peer's view which is not in ours; ignore those.
+				None
+			}
+		})
+		.flatten()
+		.collect();
+
+	for (validator, message) in delta_set.into_iter() {
+		send_tracked_gossip_message(ctx, state, origin.clone(), validator, message).await?;
+	}
+
+	Ok(())
+}
+
+/// Send a gossip message and track it in the per relay parent data.
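+///
+/// Call sketch (illustrative only, not a compiled doctest):
+/// ```ignore
+/// send_tracked_gossip_message(&mut ctx, &mut state, dest, validator, message).await?;
+/// ```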
+async fn send_tracked_gossip_message<Context>(
+	ctx: &mut Context,
+	state: &mut ProtocolState,
+	dest: PeerId,
+	validator: ValidatorId,
+	message: BitfieldGossipMessage,
+) -> SubsystemResult<()>
+where
+	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+{
+	let job_data = if let Some(job_data) = state.per_relay_parent.get_mut(&message.relay_parent) {
+		job_data
+	} else {
+		return Ok(());
+	};
+
+	let message_sent_to_peer = &mut (job_data.message_sent_to_peer);
+	message_sent_to_peer
+		.entry(dest.clone())
+		.or_default()
+		.insert(validator.clone());
+
+	ctx.send_message(AllMessages::NetworkBridge(
+		NetworkBridgeMessage::SendValidationMessage(
+			vec![dest],
+			message.into_validation_protocol(),
+		),
+	))
+	.await?;
+
+	Ok(())
+}
+
+impl<C> Subsystem<C> for BitfieldDistribution
+where
+	C: SubsystemContext<Message = BitfieldDistributionMessage> + Sync + Send,
+{
+	type Metrics = ();
+
+	fn start(self, ctx: C) -> SpawnedSubsystem {
+		SpawnedSubsystem {
+			name: "bitfield-distribution-subsystem",
+			// `run` must be awaited here; otherwise the subsystem future
+			// resolves immediately without doing any work
+			future: Box::pin(async move { Self::run(ctx).await }.map(|_| ())),
+		}
+	}
+}
+
+/// Query our validator set and signing context for a particular relay parent.
+async fn query_basics<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+) -> SubsystemResult<Option<(Vec<ValidatorId>, SigningContext)>>
+where
+	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+{
+	let (validators_tx, validators_rx) = oneshot::channel();
+	let (session_tx, session_rx) = oneshot::channel();
+
+	let query_validators = AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+		relay_parent.clone(),
+		RuntimeApiRequest::Validators(validators_tx),
+	));
+
+	let query_signing = AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+		relay_parent.clone(),
+		RuntimeApiRequest::SessionIndexForChild(session_tx),
+	));
+
+	ctx.send_messages(std::iter::once(query_validators).chain(std::iter::once(query_signing)))
+		.await?;
+
+	match (validators_rx.await?, session_rx.await?) {
+		(Ok(v), Ok(s)) => Ok(Some((
+			v,
+			SigningContext { parent_hash: relay_parent, session_index: s },
+		))),
+		(Err(e), _) | (_, Err(e)) => {
+			warn!(target: "bitd", "Failed to fetch basics from runtime API: {:?}", e);
+			Ok(None)
+		}
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use super::*;
+	use bitvec::bitvec;
+	use futures::executor;
+	use maplit::hashmap;
+	use polkadot_primitives::v1::{Signed, ValidatorPair, AvailabilityBitfield};
+	use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TimeoutExt};
+	use sp_core::crypto::Pair;
+	use std::time::Duration;
+	use assert_matches::assert_matches;
+	use polkadot_node_network_protocol::ObservedRole;
+
+	macro_rules! view {
+		( $( $hash:expr ),* $(,)? ) => [
+			View(vec![ $( $hash.clone() ),* ])
+		];
+	}
+
+	macro_rules! peers {
+		( $( $peer:expr ),* $(,)? ) => [
+			vec![ $( $peer.clone() ),* ]
+		];
+	}
+
+	macro_rules! launch {
+		($fut:expr) => {
+			$fut
+				.timeout(Duration::from_millis(10))
+				.await
+				.expect("10ms is more than enough for sending messages.")
+				.expect("Error values should really never occur.")
+		};
+	}
+
+	/// A very limited state, only interested in the relay parent of the
+	/// given message, which must be signed by `validator`, and a set of peers
+	/// which are also only interested in that relay parent.
+	fn prewarmed_state(
+		validator: ValidatorId,
+		signing_context: SigningContext,
+		known_message: BitfieldGossipMessage,
+		peers: Vec<PeerId>,
+	) -> ProtocolState {
+		let relay_parent = known_message.relay_parent.clone();
+		ProtocolState {
+			per_relay_parent: hashmap! {
+				relay_parent.clone() =>
+					PerRelayParentData {
+						signing_context,
+						validator_set: vec![validator.clone()],
+						one_per_validator: hashmap! {
+							validator.clone() => known_message.clone(),
+						},
+						message_received_from_peer: hashmap!{},
+						message_sent_to_peer: hashmap!{},
+					},
+			},
+			peer_views: peers
+				.into_iter()
+				.map(|peer| (peer, view!(relay_parent)))
+				.collect(),
+			view: view!(relay_parent),
+		}
+	}
+
+	fn state_with_view(view: View, relay_parent: Hash) -> (ProtocolState, SigningContext, ValidatorPair) {
+		let mut state = ProtocolState::default();
+
+		let (validator_pair, _seed) = ValidatorPair::generate();
+		let validator = validator_pair.public();
+
+		let signing_context = SigningContext {
+			session_index: 1,
+			parent_hash: relay_parent.clone(),
+		};
+
+		state.per_relay_parent = view.0.iter().map(|relay_parent| {(
+			relay_parent.clone(),
+			PerRelayParentData {
+				signing_context: signing_context.clone(),
+				validator_set: vec![validator.clone()],
+				one_per_validator: hashmap!{},
+				message_received_from_peer: hashmap!{},
+				message_sent_to_peer: hashmap!{},
+			})
+		}).collect();
+
+		state.view = view;
+
+		(state, signing_context, validator_pair)
+	}
+
+	#[test]
+	fn receive_invalid_signature() {
+		let _ = env_logger::builder()
+			.filter(None, log::LevelFilter::Trace)
+			.is_test(true)
+			.try_init();
+
+		let hash_a: Hash = [0; 32].into();
+
+		let peer_a = PeerId::random();
+		let peer_b = PeerId::random();
+		assert_ne!(peer_a, peer_b);
+
+		let signing_context = SigningContext {
+			session_index: 1,
+			parent_hash: hash_a.clone(),
+		};
+
+		// validator 0 key pair
+		let (validator_pair, _seed) = ValidatorPair::generate();
+		let validator = validator_pair.public();
+
+		// another validator that is not part of the validator set
+		let (malicious, _seed) = ValidatorPair::generate();
+
+		let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]);
+		let signed =
+			Signed::<AvailabilityBitfield>::sign(payload, &signing_context, 0, &malicious);
+
+		let msg = BitfieldGossipMessage {
+			relay_parent: hash_a.clone(),
+			signed_availability: signed.clone(),
+		};
+
+		let pool = sp_core::testing::TaskExecutor::new();
+		let (mut ctx, mut handle) =
+			make_subsystem_context::<BitfieldDistributionMessage, _>(pool);
+
+		let mut state = prewarmed_state(
+			validator.clone(),
+			signing_context.clone(),
+			msg.clone(),
+			vec![peer_b.clone()],
+		);
+
+		executor::block_on(async move {
+			launch!(handle_network_msg(
+				&mut ctx,
+				&mut state,
+				NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.into_network_message()),
+			));
+
+			// reputation change due to invalid signature
+			assert_matches!(
+				handle.recv().await,
+				AllMessages::NetworkBridge(
+					NetworkBridgeMessage::ReportPeer(peer, rep)
+				) => {
+					assert_eq!(peer, peer_b);
+					assert_eq!(rep, COST_SIGNATURE_INVALID)
+				}
+			);
+		});
+	}
+
+	#[test]
+	fn receive_invalid_validator_index() {
+		let _ = env_logger::builder()
+			.filter(None, log::LevelFilter::Trace)
+			.is_test(true)
+			.try_init();
+
+		let hash_a: Hash = [0; 32].into();
+		let hash_b: Hash = [1; 32].into(); // other
+
+		let peer_a = PeerId::random();
+		let peer_b = PeerId::random();
+		assert_ne!(peer_a, peer_b);
+
+		// validator 0 key pair
+		let (mut state, signing_context, validator_pair) =
+			state_with_view(view![hash_a, hash_b], hash_a.clone());
+
+		state.peer_views.insert(peer_b.clone(), view![hash_a]);
+
+		let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]);
+		let signed =
+			Signed::<AvailabilityBitfield>::sign(payload, &signing_context, 42, &validator_pair);
+
+		let msg = BitfieldGossipMessage {
+			relay_parent: hash_a.clone(),
+			signed_availability: signed.clone(),
+		};
+
+		let pool = sp_core::testing::TaskExecutor::new();
+		let (mut ctx, mut handle) =
make_subsystem_context::(pool); + + executor::block_on(async move { + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.into_network_message()), + )); + + // reputation change due to invalid validator index + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, COST_VALIDATOR_INDEX_INVALID) + } + ); + }); + } + + #[test] + fn receive_duplicate_messages() { + let _ = env_logger::builder() + .filter(None, log::LevelFilter::Trace) + .is_test(true) + .try_init(); + + let hash_a: Hash = [0; 32].into(); + let hash_b: Hash = [1; 32].into(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + assert_ne!(peer_a, peer_b); + + // validator 0 key pair + let (mut state, signing_context, validator_pair) = + state_with_view(view![hash_a, hash_b], hash_a.clone()); + + // create a signed message by validator 0 + let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]); + let signed_bitfield = + Signed::::sign(payload, &signing_context, 0, &validator_pair); + + let msg = BitfieldGossipMessage { + relay_parent: hash_a.clone(), + signed_availability: signed_bitfield.clone(), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = + make_subsystem_context::(pool); + + executor::block_on(async move { + // send a first message + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(), + ), + )); + + // none of our peers has any interest in any messages + // so we do not receive a network send type message here + // but only the one for the next subsystem + assert_matches!( + handle.recv().await, + AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( + ProvisionableData::Bitfield(hash, signed) + )) => { + assert_eq!(hash, hash_a); + assert_eq!(signed, signed_bitfield) + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, BENEFIT_VALID_MESSAGE_FIRST) + } + ); + + // let peer A send the same message again + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + msg.clone().into_network_message(), + ), + )); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, BENEFIT_VALID_MESSAGE) + } + ); + + // let peer B send the initial message again + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(), + ), + )); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, COST_PEER_DUPLICATE_MESSAGE) + } + ); + }); + } + + #[test] + fn changing_view() { + let _ = env_logger::builder() + .filter(None, log::LevelFilter::Trace) + .is_test(true) + .try_init(); + + let hash_a: Hash = [0; 32].into(); + let hash_b: Hash = [1; 32].into(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + assert_ne!(peer_a, peer_b); + + // validator 0 key pair + let (mut state, signing_context, validator_pair) = state_with_view(view![hash_a, hash_b], hash_a.clone()); + + // create a 
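// A condensed model of the reputation bookkeeping that the
// `receive_duplicate_messages` test exercises: first delivery of a fresh
// message earns the top reward, a valid but already-known message earns a
// smaller one, and a repeat from the same peer is penalized. The constant
// names echo those used above; the numeric values and the function itself
// are placeholders for illustration.
mod duplicate_rep_sketch {
	use std::collections::HashSet;

	const BENEFIT_FIRST: i32 = 15; // first peer to deliver a fresh message
	const BENEFIT_VALID: i32 = 10; // valid, but we already knew the message
	const COST_DUPLICATE: i32 = -100; // the same peer sent it to us twice

	fn rep_for(seen_message: bool, peers_that_sent: &mut HashSet<u8>, peer: u8) -> i32 {
		if !peers_that_sent.insert(peer) {
			COST_DUPLICATE
		} else if seen_message {
			BENEFIT_VALID
		} else {
			BENEFIT_FIRST
		}
	}

	pub fn demo() {
		let mut senders = HashSet::new();
		assert_eq!(rep_for(false, &mut senders, 0), BENEFIT_FIRST); // peer B delivers first
		assert_eq!(rep_for(true, &mut senders, 1), BENEFIT_VALID); // peer A, already known
		assert_eq!(rep_for(true, &mut senders, 0), COST_DUPLICATE); // peer B repeats itself
	}
}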
signed message by validator 0 + let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]); + let signed_bitfield = + Signed::::sign(payload, &signing_context, 0, &validator_pair); + + let msg = BitfieldGossipMessage { + relay_parent: hash_a.clone(), + signed_availability: signed_bitfield.clone(), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = + make_subsystem_context::(pool); + + executor::block_on(async move { + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerConnected(peer_b.clone(), ObservedRole::Full), + )); + + // make peer b interested + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![hash_a, hash_b]), + )); + + assert!(state.peer_views.contains_key(&peer_b)); + + // recv a first message from the network + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(), + ), + )); + + // gossip to the overseer + assert_matches!( + handle.recv().await, + AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( + ProvisionableData::Bitfield(hash, signed) + )) => { + assert_eq!(hash, hash_a); + assert_eq!(signed, signed_bitfield) + } + ); + + // gossip to the network + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage ( + peers, out_msg, + )) => { + assert_eq!(peers, peers![peer_b]); + assert_eq!(out_msg, msg.clone().into_validation_protocol()); + } + ); + + // reputation change for peer B + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, BENEFIT_VALID_MESSAGE_FIRST) + } + ); + + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![]), + )); + + assert!(state.peer_views.contains_key(&peer_b)); + assert_eq!( + state.peer_views.get(&peer_b).expect("Must contain value for peer B"), + &view![] + ); + + // on rx of the same message, since we are not interested, + // should give penalty + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(), + ), + )); + + // reputation change for peer B + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, COST_PEER_DUPLICATE_MESSAGE) + } + ); + + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerDisconnected(peer_b.clone()), + )); + + // we are not interested in any peers at all anymore + state.view = view![]; + + // on rx of the same message, since we are not interested, + // should give penalty + launch!(handle_network_msg( + &mut ctx, + &mut state, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + msg.clone().into_network_message(), + ), + )); + + // reputation change for peer B + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, COST_NOT_IN_VIEW) + } + ); + + }); + } +} diff --git a/node/network/bridge/Cargo.toml b/node/network/bridge/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..555f158b782e75f12438345e636d8c3a23f4f3d4 --- /dev/null +++ b/node/network/bridge/Cargo.toml @@ -0,0 
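// An illustrative model of the per-peer view tracking that the
// `changing_view` test above drives: views are recorded on connect, replaced
// on `PeerViewChange`, dropped on disconnect, and gossip only flows for relay
// parents inside a peer's current view. Names and the `u8`/`u32` stand-ins
// for `PeerId`/`Hash` are hypothetical.
mod peer_view_sketch {
	use std::collections::{HashMap, HashSet};

	#[derive(Default)]
	struct Views(HashMap<u8, HashSet<u32>>);

	impl Views {
		fn on_connect(&mut self, peer: u8) { self.0.insert(peer, HashSet::new()); }
		fn on_view_change(&mut self, peer: u8, view: HashSet<u32>) {
			if let Some(v) = self.0.get_mut(&peer) { *v = view; }
		}
		fn on_disconnect(&mut self, peer: u8) { self.0.remove(&peer); }
		// A peer only receives gossip for relay parents inside its view.
		fn wants(&self, peer: u8, relay_parent: u32) -> bool {
			self.0.get(&peer).map_or(false, |v| v.contains(&relay_parent))
		}
	}

	pub fn demo() {
		let mut views = Views::default();
		views.on_connect(7);
		views.on_view_change(7, [1, 2].iter().copied().collect());
		assert!(views.wants(7, 1));
		views.on_view_change(7, HashSet::new()); // peer loses interest
		assert!(!views.wants(7, 1));
		views.on_disconnect(7);
		assert!(!views.wants(7, 1));
	}
}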
+1,24 @@ +[package] +name = "polkadot-network-bridge" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.5" +log = "0.4.8" +futures-timer = "3.0.2" +streamunordered = "0.5.1" +polkadot-primitives = { path = "../../../primitives" } +parity-scale-codec = "1.3.4" +sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } +polkadot-node-network-protocol = { path = "../protocol" } + +[dev-dependencies] +assert_matches = "1.3.0" +parking_lot = "0.10.0" +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..c83e8b7c8ba3779f995366596487c83c1f4c7fc6 --- /dev/null +++ b/node/network/bridge/src/lib.rs @@ -0,0 +1,1389 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The Network Bridge Subsystem - protocol multiplexer for Polkadot. + +use parity_scale_codec::{Encode, Decode}; +use futures::prelude::*; +use futures::future::BoxFuture; +use futures::stream::BoxStream; +use futures::channel::oneshot; + +use sc_network::Event as NetworkEvent; +use sp_runtime::ConsensusEngineId; + +use polkadot_subsystem::{ + ActiveLeavesUpdate, FromOverseer, OverseerSignal, Subsystem, SubsystemContext, SpawnedSubsystem, SubsystemError, + SubsystemResult, +}; +use polkadot_subsystem::messages::{ + NetworkBridgeMessage, AllMessages, AvailabilityDistributionMessage, + BitfieldDistributionMessage, PoVDistributionMessage, StatementDistributionMessage, + CollatorProtocolMessage, +}; +use polkadot_primitives::v1::{Block, Hash, ValidatorId}; +use polkadot_node_network_protocol::{ + ObservedRole, ReputationChange, PeerId, PeerSet, View, NetworkBridgeEvent, v1 as protocol_v1 +}; + +use std::collections::hash_map::{HashMap, Entry as HEntry}; +use std::iter::ExactSizeIterator; +use std::pin::Pin; +use std::sync::Arc; + +/// The maximum amount of heads a peer is allowed to have in their view at any time. +/// +/// We use the same limit to compute the view sent to peers locally. +const MAX_VIEW_HEADS: usize = 5; + +/// The engine ID of the validation protocol. +pub const VALIDATION_PROTOCOL_ID: ConsensusEngineId = *b"pvn1"; +/// The protocol name for the validation peer-set. +pub const VALIDATION_PROTOCOL_NAME: &[u8] = b"/polkadot/validation/1"; +/// The engine ID of the collation protocol. +pub const COLLATION_PROTOCOL_ID: ConsensusEngineId = *b"pcn1"; +/// The protocol name for the collation peer-set. 
+pub const COLLATION_PROTOCOL_NAME: &[u8] = b"/polkadot/collation/1"; + +const MALFORMED_MESSAGE_COST: ReputationChange + = ReputationChange::new(-500, "Malformed Network-bridge message"); +const UNCONNECTED_PEERSET_COST: ReputationChange + = ReputationChange::new(-50, "Message sent to un-connected peer-set"); +const MALFORMED_VIEW_COST: ReputationChange + = ReputationChange::new(-500, "Malformed view"); + +// network bridge log target +const TARGET: &'static str = "network_bridge"; + +/// Messages received on the network. +#[derive(Debug, Encode, Decode, Clone)] +pub enum WireMessage { + /// A message from a peer on a specific protocol. + #[codec(index = "1")] + ProtocolMessage(M), + /// A view update from a peer. + #[codec(index = "2")] + ViewUpdate(View), +} + +/// Information about the notifications protocol. Should be used during network configuration +/// or shortly after startup to register the protocol with the network service. +pub fn notifications_protocol_info() -> Vec<(ConsensusEngineId, std::borrow::Cow<'static, [u8]>)> { + vec![ + (VALIDATION_PROTOCOL_ID, VALIDATION_PROTOCOL_NAME.into()), + (COLLATION_PROTOCOL_ID, COLLATION_PROTOCOL_NAME.into()), + ] +} + +/// An action to be carried out by the network. +#[derive(Debug, PartialEq)] +pub enum NetworkAction { + /// Note a change in reputation for a peer. + ReputationChange(PeerId, ReputationChange), + /// Write a notification to a given peer on the given peer-set. + WriteNotification(PeerId, PeerSet, Vec), +} + +/// An abstraction over networking for the purposes of this subsystem. +pub trait Network: Send + 'static { + /// Get a stream of all events occurring on the network. This may include events unrelated + /// to the Polkadot protocol - the user of this function should filter only for events related + /// to the [`VALIDATION_PROTOCOL_ID`](VALIDATION_PROTOCOL_ID) + /// or [`COLLATION_PROTOCOL_ID`](COLLATION_PROTOCOL_ID) + fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent>; + + /// Get access to an underlying sink for all network actions. + fn action_sink<'a>(&'a mut self) -> Pin< + Box + Send + 'a> + >; + + /// Report a given peer as either beneficial (+) or costly (-) according to the given scalar. + fn report_peer(&mut self, who: PeerId, cost_benefit: ReputationChange) + -> BoxFuture> + { + async move { + self.action_sink().send(NetworkAction::ReputationChange(who, cost_benefit)).await + }.boxed() + } + + /// Write a notification to a peer on the given peer-set's protocol. + fn write_notification(&mut self, who: PeerId, peer_set: PeerSet, message: Vec) + -> BoxFuture> + { + async move { + self.action_sink().send(NetworkAction::WriteNotification(who, peer_set, message)).await + }.boxed() + } +} + +impl Network for Arc> { + fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { + sc_network::NetworkService::event_stream(self, "polkadot-network-bridge").boxed() + } + + fn action_sink<'a>(&'a mut self) + -> Pin + Send + 'a>> + { + use futures::task::{Poll, Context}; + + // wrapper around a NetworkService to make it act like a sink. 
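// A runnable sketch of how `WireMessage` round-trips on the wire, assuming
// only parity-scale-codec with its derive feature. The `#[codec(index)]`
// attribute pins the discriminant byte, so `ViewUpdate` stays decodable even
// if the protocol-message variants evolve. `DemoWireMessage` is a stand-in,
// not the real type above.
mod wire_codec_sketch {
	use parity_scale_codec::{Decode, Encode};

	#[derive(Debug, Clone, PartialEq, Encode, Decode)]
	enum DemoWireMessage {
		#[codec(index = "1")]
		ProtocolMessage(Vec<u8>),
		#[codec(index = "2")]
		ViewUpdate(Vec<[u8; 32]>),
	}

	pub fn demo() {
		let msg = DemoWireMessage::ViewUpdate(vec![[7u8; 32]]);
		let bytes = msg.encode();
		// The first byte on the wire is the fixed codec index.
		assert_eq!(bytes[0], 2);
		let decoded = DemoWireMessage::decode(&mut &bytes[..]).expect("round-trips");
		assert_eq!(decoded, msg);
	}
}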
+ struct ActionSink<'b>(&'b sc_network::NetworkService); + + impl<'b> Sink for ActionSink<'b> { + type Error = SubsystemError; + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, action: NetworkAction) -> SubsystemResult<()> { + match action { + NetworkAction::ReputationChange(peer, cost_benefit) => self.0.report_peer( + peer, + cost_benefit, + ), + NetworkAction::WriteNotification(peer, peer_set, message) => { + match peer_set { + PeerSet::Validation => self.0.write_notification( + peer, + VALIDATION_PROTOCOL_ID, + message, + ), + PeerSet::Collation => self.0.write_notification( + peer, + COLLATION_PROTOCOL_ID, + message, + ), + } + } + } + + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + } + + Box::pin(ActionSink(&**self)) + } +} + +/// The network bridge subsystem. +pub struct NetworkBridge(N); + +impl NetworkBridge { + /// Create a new network bridge subsystem with underlying network service. + /// + /// This assumes that the network service has had the notifications protocol for the network + /// bridge already registered. See [`notifications_protocol_info`](notifications_protocol_info). + pub fn new(net_service: N) -> Self { + NetworkBridge(net_service) + } +} + +impl Subsystem for NetworkBridge + where + Net: Network, + Context: SubsystemContext, +{ + type Metrics = (); + + fn start(self, ctx: Context) -> SpawnedSubsystem { + // Swallow error because failure is fatal to the node and we log with more precision + // within `run_network`. + SpawnedSubsystem { + name: "network-bridge-subsystem", + future: run_network(self.0, ctx).map(|_| ()).boxed(), + } + } +} + +struct PeerData { + /// Latest view sent by the peer. 
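// A minimal, self-contained model of the `ActionSink` pattern above: adapt a
// synchronous, fire-and-forget API into a futures `Sink` by reporting
// readiness immediately and doing all the work in `start_send`. `LogSink`
// is hypothetical; the real sink forwards to `sc_network::NetworkService`.
mod action_sink_sketch {
	use futures::executor;
	use futures::sink::{Sink, SinkExt};
	use std::pin::Pin;
	use std::task::{Context, Poll};

	struct LogSink(Vec<String>);

	impl Sink<String> for LogSink {
		type Error = std::convert::Infallible;

		fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
			Poll::Ready(Ok(()))
		}
		fn start_send(mut self: Pin<&mut Self>, item: String) -> Result<(), Self::Error> {
			self.0.push(item); // the real sink calls into the network service here
			Ok(())
		}
		fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
			Poll::Ready(Ok(()))
		}
		fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
			Poll::Ready(Ok(()))
		}
	}

	pub fn demo() {
		let mut sink = LogSink(Vec::new());
		executor::block_on(sink.send("reputation change".to_string())).unwrap();
		assert_eq!(sink.0.len(), 1);
	}
}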
+ view: View, +} + +#[derive(Debug)] +enum Action { + SendValidationMessage(Vec, protocol_v1::ValidationProtocol), + SendCollationMessage(Vec, protocol_v1::CollationProtocol), + ConnectToValidators(PeerSet, Vec, oneshot::Sender>), + ReportPeer(PeerId, ReputationChange), + + ActiveLeaves(ActiveLeavesUpdate), + + PeerConnected(PeerSet, PeerId, ObservedRole), + PeerDisconnected(PeerSet, PeerId), + PeerMessages( + PeerId, + Vec>, + Vec>, + ), + + Abort, + Nop, +} + +fn action_from_overseer_message( + res: polkadot_subsystem::SubsystemResult>, +) -> Action { + match res { + Ok(FromOverseer::Signal(OverseerSignal::ActiveLeaves(active_leaves))) + => Action::ActiveLeaves(active_leaves), + Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => Action::Abort, + Ok(FromOverseer::Communication { msg }) => match msg { + NetworkBridgeMessage::ReportPeer(peer, rep) => Action::ReportPeer(peer, rep), + NetworkBridgeMessage::SendValidationMessage(peers, msg) + => Action::SendValidationMessage(peers, msg), + NetworkBridgeMessage::SendCollationMessage(peers, msg) + => Action::SendCollationMessage(peers, msg), + NetworkBridgeMessage::ConnectToValidators(peer_set, validators, res) + => Action::ConnectToValidators(peer_set, validators, res), + }, + Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_))) + => Action::Nop, + Err(e) => { + log::warn!(target: TARGET, "Shutting down Network Bridge due to error {:?}", e); + Action::Abort + } + } +} + +fn action_from_network_message(event: Option) -> Action { + match event { + None => { + log::info!(target: TARGET, "Shutting down Network Bridge: underlying event stream concluded"); + Action::Abort + } + Some(NetworkEvent::Dht(_)) => Action::Nop, + Some(NetworkEvent::NotificationStreamOpened { remote, engine_id, role }) => { + let role = role.into(); + match engine_id { + x if x == VALIDATION_PROTOCOL_ID + => Action::PeerConnected(PeerSet::Validation, remote, role), + x if x == COLLATION_PROTOCOL_ID + => Action::PeerConnected(PeerSet::Collation, remote, role), + _ => Action::Nop, + } + } + Some(NetworkEvent::NotificationStreamClosed { remote, engine_id }) => { + match engine_id { + x if x == VALIDATION_PROTOCOL_ID + => Action::PeerDisconnected(PeerSet::Validation, remote), + x if x == COLLATION_PROTOCOL_ID + => Action::PeerDisconnected(PeerSet::Collation, remote), + _ => Action::Nop, + } + } + Some(NetworkEvent::NotificationsReceived { remote, messages }) => { + let v_messages: Result, _> = messages.iter() + .filter(|(engine_id, _)| engine_id == &VALIDATION_PROTOCOL_ID) + .map(|(_, msg_bytes)| WireMessage::decode(&mut msg_bytes.as_ref())) + .collect(); + + let v_messages = match v_messages { + Err(_) => return Action::ReportPeer(remote, MALFORMED_MESSAGE_COST), + Ok(v) => v, + }; + + let c_messages: Result, _> = messages.iter() + .filter(|(engine_id, _)| engine_id == &COLLATION_PROTOCOL_ID) + .map(|(_, msg_bytes)| WireMessage::decode(&mut msg_bytes.as_ref())) + .collect(); + + match c_messages { + Err(_) => Action::ReportPeer(remote, MALFORMED_MESSAGE_COST), + Ok(c_messages) => if v_messages.is_empty() && c_messages.is_empty() { + Action::Nop + } else { + Action::PeerMessages(remote, v_messages, c_messages) + }, + } + } + } +} + +fn construct_view(live_heads: &[Hash]) -> View { + View(live_heads.iter().rev().take(MAX_VIEW_HEADS).cloned().collect()) +} + +async fn update_view( + net: &mut impl Network, + ctx: &mut impl SubsystemContext, + live_heads: &[Hash], + local_view: &mut View, + validation_peers: &HashMap, + collation_peers: &HashMap, +) -> SubsystemResult<()> 
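// A runnable miniature of `construct_view` above: new heads are appended at
// the back of `live_heads`, so reversing before `take` keeps the newest
// `MAX_VIEW_HEADS` entries. `u32` stands in for `Hash` for illustration.
mod construct_view_sketch {
	const MAX_VIEW_HEADS: usize = 5;

	fn construct_view(live_heads: &[u32]) -> Vec<u32> {
		live_heads.iter().rev().take(MAX_VIEW_HEADS).cloned().collect()
	}

	pub fn demo() {
		let live_heads: Vec<u32> = (1..=7).collect(); // 7 is the newest head
		assert_eq!(construct_view(&live_heads), vec![7, 6, 5, 4, 3]);
	}
}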
{ + let new_view = construct_view(live_heads); + if *local_view == new_view { return Ok(()) } + + *local_view = new_view.clone(); + + send_validation_message( + net, + validation_peers.keys().cloned(), + WireMessage::ViewUpdate(new_view.clone()), + ).await?; + + send_collation_message( + net, + collation_peers.keys().cloned(), + WireMessage::ViewUpdate(new_view.clone()), + ).await?; + + if let Err(e) = dispatch_validation_event_to_all( + NetworkBridgeEvent::OurViewChange(new_view.clone()), + ctx, + ).await { + log::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); + return Err(e) + } + + if let Err(e) = dispatch_collation_event_to_all( + NetworkBridgeEvent::OurViewChange(new_view.clone()), + ctx, + ).await { + log::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); + return Err(e) + } + + Ok(()) +} + +// Handle messages on a specific peer-set. The peer is expected to be connected on that +// peer-set. +async fn handle_peer_messages( + peer: PeerId, + peers: &mut HashMap, + messages: Vec>, + net: &mut impl Network, +) -> SubsystemResult>> { + let peer_data = match peers.get_mut(&peer) { + None => { + net.report_peer(peer, UNCONNECTED_PEERSET_COST).await?; + + return Ok(Vec::new()); + }, + Some(d) => d, + }; + + let mut outgoing_messages = Vec::with_capacity(messages.len()); + for message in messages { + outgoing_messages.push(match message { + WireMessage::ViewUpdate(new_view) => { + if new_view.0.len() > MAX_VIEW_HEADS { + net.report_peer( + peer.clone(), + MALFORMED_VIEW_COST, + ).await?; + + continue + } else if new_view == peer_data.view { + continue + } else { + peer_data.view = new_view; + + NetworkBridgeEvent::PeerViewChange( + peer.clone(), + peer_data.view.clone(), + ) + } + } + WireMessage::ProtocolMessage(message) => { + NetworkBridgeEvent::PeerMessage(peer.clone(), message) + } + }) + } + + Ok(outgoing_messages) +} + +async fn send_validation_message( + net: &mut impl Network, + peers: I, + message: WireMessage, +) -> SubsystemResult<()> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + send_message(net, peers, PeerSet::Validation, message).await +} + +async fn send_collation_message( + net: &mut impl Network, + peers: I, + message: WireMessage, +) -> SubsystemResult<()> + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + send_message(net, peers, PeerSet::Collation, message).await +} + +async fn send_message( + net: &mut impl Network, + peers: I, + peer_set: PeerSet, + message: WireMessage, +) -> SubsystemResult<()> + where + M: Encode + Clone, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + let mut message_producer = stream::iter({ + let peers = peers.into_iter(); + let n_peers = peers.len(); + let mut message = Some(message.encode()); + + peers.enumerate().map(move |(i, peer)| { + // optimization: avoid cloning the message for the last peer in the + // list. The message payload can be quite large. If the underlying + // network used `Bytes` this would not be necessary. 
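// A self-contained model of the clone-avoidance trick described in the
// comment above and implemented just below: every recipient but the last
// gets a clone of the payload, and the last takes the original buffer, so an
// N-peer broadcast costs N-1 clones instead of N. The helper names are
// illustrative only.
mod last_take_sketch {
	pub fn fan_out(payload: Vec<u8>, n_peers: usize) -> Vec<Vec<u8>> {
		let mut slot = Some(payload);
		(0..n_peers)
			.map(|i| {
				if i == n_peers - 1 {
					slot.take().expect("taken only on the last iteration; qed")
				} else {
					slot.as_ref().expect("not yet taken; qed").clone()
				}
			})
			.collect()
	}

	pub fn demo() {
		let copies = fan_out(vec![0u8; 1024], 3);
		assert_eq!(copies.len(), 3);
	}
}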
+ let message = if i == n_peers - 1 { + message.take() + .expect("Only taken in last iteration of loop, never afterwards; qed") + } else { + message.as_ref() + .expect("Only taken in last iteration of loop, we are not there yet; qed") + .clone() + }; + + Ok(NetworkAction::WriteNotification(peer, peer_set, message)) + }) + }); + + net.action_sink().send_all(&mut message_producer).await +} + +async fn dispatch_validation_event_to_all( + event: NetworkBridgeEvent, + ctx: &mut impl SubsystemContext, +) -> SubsystemResult<()> { + dispatch_validation_events_to_all(std::iter::once(event), ctx).await +} + +async fn dispatch_collation_event_to_all( + event: NetworkBridgeEvent, + ctx: &mut impl SubsystemContext, +) -> SubsystemResult<()> { + dispatch_collation_events_to_all(std::iter::once(event), ctx).await +} + +async fn dispatch_validation_events_to_all( + events: I, + ctx: &mut impl SubsystemContext, +) -> SubsystemResult<()> + where + I: IntoIterator>, + I::IntoIter: Send, +{ + let messages_for = |event: NetworkBridgeEvent| { + let a = std::iter::once(event.focus().ok().map(|m| AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::NetworkBridgeUpdateV1(m) + ))); + + let b = std::iter::once(event.focus().ok().map(|m| AllMessages::BitfieldDistribution( + BitfieldDistributionMessage::NetworkBridgeUpdateV1(m) + ))); + + let p = std::iter::once(event.focus().ok().map(|m| AllMessages::PoVDistribution( + PoVDistributionMessage::NetworkBridgeUpdateV1(m) + ))); + + let s = std::iter::once(event.focus().ok().map(|m| AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdateV1(m) + ))); + + a.chain(b).chain(p).chain(s).filter_map(|x| x) + }; + + ctx.send_messages(events.into_iter().flat_map(messages_for)).await +} + +async fn dispatch_collation_events_to_all( + events: I, + ctx: &mut impl SubsystemContext, +) -> SubsystemResult<()> + where + I: IntoIterator>, + I::IntoIter: Send, +{ + let messages_for = |event: NetworkBridgeEvent| { + event.focus().ok().map(|m| AllMessages::CollatorProtocol( + CollatorProtocolMessage::NetworkBridgeUpdateV1(m) + )) + }; + + ctx.send_messages(events.into_iter().flat_map(messages_for)).await +} + +async fn run_network( + mut net: N, + mut ctx: impl SubsystemContext, +) -> SubsystemResult<()> { + let mut event_stream = net.event_stream().fuse(); + + // Most recent heads are at the back. + let mut live_heads: Vec = Vec::with_capacity(MAX_VIEW_HEADS); + let mut local_view = View(Vec::new()); + + let mut validation_peers: HashMap = HashMap::new(); + let mut collation_peers: HashMap = HashMap::new(); + + loop { + let action = { + let subsystem_next = ctx.recv().fuse(); + let mut net_event_next = event_stream.next().fuse(); + futures::pin_mut!(subsystem_next); + + futures::select! 
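// A freestanding miniature (futures crate only) of the `select!`-based event
// loop in `run_network` above: both event sources are fused and pinned, and
// whichever yields first determines the next `Action`. The string values are
// stand-ins for overseer messages and network events.
mod select_loop_sketch {
	use futures::{executor, future::FutureExt, pin_mut, select};

	pub fn demo() {
		executor::block_on(async {
			let overseer_msg = async { "overseer" }.fuse();
			let net_event = futures::future::pending::<&str>().fuse();
			pin_mut!(overseer_msg, net_event);

			let action = select! {
				m = overseer_msg => m,
				e = net_event => e,
			};
			assert_eq!(action, "overseer");
		});
	}
}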
{ + subsystem_msg = subsystem_next => action_from_overseer_message(subsystem_msg), + net_event = net_event_next => action_from_network_message(net_event), + } + }; + + match action { + Action::Nop => {} + Action::Abort => return Ok(()), + + Action::SendValidationMessage(peers, msg) => send_message( + &mut net, + peers, + PeerSet::Validation, + WireMessage::ProtocolMessage(msg), + ).await?, + + Action::SendCollationMessage(peers, msg) => send_message( + &mut net, + peers, + PeerSet::Collation, + WireMessage::ProtocolMessage(msg), + ).await?, + + Action::ConnectToValidators(_peer_set, _validators, _res) => { + // TODO: https://github.com/paritytech/polkadot/issues/1461 + } + + Action::ReportPeer(peer, rep) => net.report_peer(peer, rep).await?, + + Action::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated }) => { + live_heads.extend(activated); + live_heads.retain(|h| !deactivated.contains(h)); + + update_view( + &mut net, + &mut ctx, + &live_heads, + &mut local_view, + &validation_peers, + &collation_peers, + ).await?; + } + + Action::PeerConnected(peer_set, peer, role) => { + let peer_map = match peer_set { + PeerSet::Validation => &mut validation_peers, + PeerSet::Collation => &mut collation_peers, + }; + + match peer_map.entry(peer.clone()) { + HEntry::Occupied(_) => continue, + HEntry::Vacant(vacant) => { + vacant.insert(PeerData { + view: View(Vec::new()), + }); + + let res = match peer_set { + PeerSet::Validation => dispatch_validation_events_to_all( + vec![ + NetworkBridgeEvent::PeerConnected(peer.clone(), role), + NetworkBridgeEvent::PeerViewChange( + peer, + View(Default::default()), + ), + ], + &mut ctx, + ).await, + PeerSet::Collation => dispatch_collation_events_to_all( + vec![ + NetworkBridgeEvent::PeerConnected(peer.clone(), role), + NetworkBridgeEvent::PeerViewChange( + peer, + View(Default::default()), + ), + ], + &mut ctx, + ).await, + }; + + if let Err(e) = res { + log::warn!("Aborting - Failure to dispatch messages to overseer"); + return Err(e); + } + } + } + } + Action::PeerDisconnected(peer_set, peer) => { + let peer_map = match peer_set { + PeerSet::Validation => &mut validation_peers, + PeerSet::Collation => &mut collation_peers, + }; + + if peer_map.remove(&peer).is_some() { + let res = match peer_set { + PeerSet::Validation => dispatch_validation_event_to_all( + NetworkBridgeEvent::PeerDisconnected(peer), + &mut ctx, + ).await, + PeerSet::Collation => dispatch_collation_event_to_all( + NetworkBridgeEvent::PeerDisconnected(peer), + &mut ctx, + ).await, + }; + + if let Err(e) = res { + log::warn!( + target: TARGET, + "Aborting - Failure to dispatch messages to overseer", + ); + return Err(e) + } + } + }, + Action::PeerMessages(peer, v_messages, c_messages) => { + if !v_messages.is_empty() { + let events = handle_peer_messages( + peer.clone(), + &mut validation_peers, + v_messages, + &mut net, + ).await?; + + if let Err(e) = dispatch_validation_events_to_all( + events, + &mut ctx, + ).await { + log::warn!( + target: TARGET, + "Aborting - Failure to dispatch messages to overseer", + ); + return Err(e) + } + } + + if !c_messages.is_empty() { + let events = handle_peer_messages( + peer.clone(), + &mut collation_peers, + c_messages, + &mut net, + ).await?; + + if let Err(e) = dispatch_collation_events_to_all( + events, + &mut ctx, + ).await { + log::warn!( + target: TARGET, + "Aborting - Failure to dispatch messages to overseer", + ); + return Err(e) + } + } + }, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures::channel::mpsc; + use 
futures::executor; + + use std::sync::Arc; + use parking_lot::Mutex; + use assert_matches::assert_matches; + + use polkadot_subsystem::messages::{StatementDistributionMessage, BitfieldDistributionMessage}; + use polkadot_node_subsystem_test_helpers::{ + SingleItemSink, SingleItemStream, TestSubsystemContextHandle, + }; + use sp_keyring::Sr25519Keyring; + + // The subsystem's view of the network - only supports a single call to `event_stream`. + struct TestNetwork { + net_events: Arc>>>, + action_tx: mpsc::UnboundedSender, + } + + // The test's view of the network. This receives updates from the subsystem in the form + // of `NetworkAction`s. + struct TestNetworkHandle { + action_rx: mpsc::UnboundedReceiver, + net_tx: SingleItemSink, + } + + fn new_test_network() -> ( + TestNetwork, + TestNetworkHandle, + ) { + let (net_tx, net_rx) = polkadot_node_subsystem_test_helpers::single_item_sink(); + let (action_tx, action_rx) = mpsc::unbounded(); + + ( + TestNetwork { + net_events: Arc::new(Mutex::new(Some(net_rx))), + action_tx, + }, + TestNetworkHandle { + action_rx, + net_tx, + }, + ) + } + + fn peer_set_engine_id(peer_set: PeerSet) -> ConsensusEngineId { + match peer_set { + PeerSet::Validation => VALIDATION_PROTOCOL_ID, + PeerSet::Collation => COLLATION_PROTOCOL_ID, + } + } + + impl Network for TestNetwork { + fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { + self.net_events.lock() + .take() + .expect("Subsystem made more than one call to `event_stream`") + .boxed() + } + + fn action_sink<'a>(&'a mut self) + -> Pin + Send + 'a>> + { + Box::pin((&mut self.action_tx).sink_map_err(Into::into)) + } + } + + impl TestNetworkHandle { + // Get the next network action. + async fn next_network_action(&mut self) -> NetworkAction { + self.action_rx.next().await.expect("subsystem concluded early") + } + + // Wait for the next N network actions. + async fn next_network_actions(&mut self, n: usize) -> Vec { + let mut v = Vec::with_capacity(n); + for _ in 0..n { + v.push(self.next_network_action().await); + } + + v + } + + async fn connect_peer(&mut self, peer: PeerId, peer_set: PeerSet, role: ObservedRole) { + self.send_network_event(NetworkEvent::NotificationStreamOpened { + remote: peer, + engine_id: peer_set_engine_id(peer_set), + role: role.into(), + }).await; + } + + async fn disconnect_peer(&mut self, peer: PeerId, peer_set: PeerSet) { + self.send_network_event(NetworkEvent::NotificationStreamClosed { + remote: peer, + engine_id: peer_set_engine_id(peer_set), + }).await; + } + + async fn peer_message(&mut self, peer: PeerId, peer_set: PeerSet, message: Vec) { + self.send_network_event(NetworkEvent::NotificationsReceived { + remote: peer, + messages: vec![(peer_set_engine_id(peer_set), message.into())], + }).await; + } + + async fn send_network_event(&mut self, event: NetworkEvent) { + self.net_tx.send(event).await.expect("subsystem concluded early"); + } + } + + // network actions are sensitive to ordering of `PeerId`s within a `HashMap`, so + // we need to use this to prevent fragile reliance on peer ordering. 
+ fn network_actions_contains(actions: &[NetworkAction], action: &NetworkAction) -> bool { + actions.iter().find(|&x| x == action).is_some() + } + + struct TestHarness { + network_handle: TestNetworkHandle, + virtual_overseer: TestSubsystemContextHandle, + } + + fn test_harness>(test: impl FnOnce(TestHarness) -> T) { + let pool = sp_core::testing::TaskExecutor::new(); + let (network, network_handle) = new_test_network(); + let (context, virtual_overseer) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + let network_bridge = run_network( + network, + context, + ) + .map_err(|_| panic!("subsystem execution failed")) + .map(|_| ()); + + let test_fut = test(TestHarness { + network_handle, + virtual_overseer, + }); + + futures::pin_mut!(test_fut); + futures::pin_mut!(network_bridge); + + executor::block_on(future::select(test_fut, network_bridge)); + } + + async fn assert_sends_validation_event_to_all( + event: NetworkBridgeEvent, + virtual_overseer: &mut TestSubsystemContextHandle, + ) { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::NetworkBridgeUpdateV1(e) + ) if e == event.focus().expect("could not focus message") + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::BitfieldDistribution( + BitfieldDistributionMessage::NetworkBridgeUpdateV1(e) + ) if e == event.focus().expect("could not focus message") + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::PoVDistribution( + PoVDistributionMessage::NetworkBridgeUpdateV1(e) + ) if e == event.focus().expect("could not focus message") + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdateV1(e) + ) if e == event.focus().expect("could not focus message") + ); + } + + async fn assert_sends_collation_event_to_all( + event: NetworkBridgeEvent, + virtual_overseer: &mut TestSubsystemContextHandle, + ) { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol( + CollatorProtocolMessage::NetworkBridgeUpdateV1(e) + ) if e == event.focus().expect("could not focus message") + ) + } + + #[test] + fn sends_view_updates_to_peers() { + test_harness(|test_harness| async move { + let TestHarness { mut network_handle, mut virtual_overseer } = test_harness; + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + network_handle.connect_peer( + peer_a.clone(), + PeerSet::Validation, + ObservedRole::Full, + ).await; + network_handle.connect_peer( + peer_b.clone(), + PeerSet::Validation, + ObservedRole::Full, + ).await; + + let hash_a = Hash::from([1; 32]); + + virtual_overseer.send( + FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(hash_a))) + ).await; + + let actions = network_handle.next_network_actions(2).await; + let wire_message = WireMessage::::ViewUpdate( + View(vec![hash_a]) + ).encode(); + + assert!(network_actions_contains( + &actions, + &NetworkAction::WriteNotification( + peer_a, + PeerSet::Validation, + wire_message.clone(), + ), + )); + + assert!(network_actions_contains( + &actions, + &NetworkAction::WriteNotification( + peer_b, + PeerSet::Validation, + wire_message.clone(), + ), + )); + }); + } + + #[test] + fn peer_view_updates_sent_via_overseer() { + test_harness(|test_harness| async move { + let TestHarness { + mut network_handle, + mut virtual_overseer, + } = test_harness; + + let peer = PeerId::random(); + + 
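// A freestanding miniature (futures crate only) of the `test_harness` pattern
// above: the subsystem-under-test and the test body run as two futures, and
// the whole thing completes as soon as either finishes, so a passing test
// tears the never-ending subsystem down cleanly.
mod harness_sketch {
	use futures::{executor, future, pin_mut};

	pub fn demo() {
		let subsystem = future::pending::<()>(); // runs "forever", like the bridge
		let test = async { assert_eq!(1 + 1, 2) };

		pin_mut!(subsystem, test);
		// Completes as soon as the test future is done.
		executor::block_on(future::select(subsystem, test));
	}
}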
network_handle.connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full).await; + + let view = View(vec![Hash::from([1u8; 32])]); + + // bridge will inform about all connected peers. + { + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + network_handle.peer_message( + peer.clone(), + PeerSet::Validation, + WireMessage::::ViewUpdate( + view.clone(), + ).encode(), + ).await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), view), + &mut virtual_overseer, + ).await; + }); + } + + #[test] + fn peer_messages_sent_via_overseer() { + test_harness(|test_harness| async move { + let TestHarness { + mut network_handle, + mut virtual_overseer, + } = test_harness; + + let peer = PeerId::random(); + + network_handle.connect_peer( + peer.clone(), + PeerSet::Validation, + ObservedRole::Full, + ).await; + + // bridge will inform about all connected peers. + { + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + let pov_distribution_message = protocol_v1::PoVDistributionMessage::Awaiting( + [0; 32].into(), + vec![[1; 32].into()], + ); + + let message = protocol_v1::ValidationProtocol::PoVDistribution( + pov_distribution_message.clone(), + ); + + network_handle.peer_message( + peer.clone(), + PeerSet::Validation, + WireMessage::ProtocolMessage(message.clone()).encode(), + ).await; + + network_handle.disconnect_peer(peer.clone(), PeerSet::Validation).await; + + // PoV distribution message comes first, and the message is only sent to that subsystem. + // then a disconnection event arises that is sent to all validation networking subsystems. + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::PoVDistribution( + PoVDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage(p, m) + ) + ) => { + assert_eq!(p, peer); + assert_eq!(m, pov_distribution_message); + } + ); + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerDisconnected(peer), + &mut virtual_overseer, + ).await; + }); + } + + #[test] + fn peer_disconnect_from_just_one_peerset() { + test_harness(|test_harness| async move { + let TestHarness { + mut network_handle, + mut virtual_overseer, + } = test_harness; + + let peer = PeerId::random(); + + network_handle.connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full).await; + network_handle.connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full).await; + + // bridge will inform about all connected peers. 
+ { + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + { + assert_sends_collation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_collation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + network_handle.disconnect_peer(peer.clone(), PeerSet::Validation).await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerDisconnected(peer.clone()), + &mut virtual_overseer, + ).await; + + // to show that we're still connected on the collation protocol, send a view update. + + let hash_a = Hash::from([1; 32]); + + virtual_overseer.send( + FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(hash_a))) + ).await; + + let actions = network_handle.next_network_actions(1).await; + let wire_message = WireMessage::::ViewUpdate( + View(vec![hash_a]) + ).encode(); + + assert!(network_actions_contains( + &actions, + &NetworkAction::WriteNotification( + peer.clone(), + PeerSet::Collation, + wire_message.clone(), + ), + )); + }); + } + + #[test] + fn relays_collation_protocol_messages() { + test_harness(|test_harness| async move { + let TestHarness { + mut network_handle, + mut virtual_overseer, + } = test_harness; + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + network_handle.connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full).await; + network_handle.connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full).await; + + // bridge will inform about all connected peers. + { + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer_a.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer_a.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + { + assert_sends_collation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer_b.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_collation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer_b.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + // peer A gets reported for sending a collation message. + + let collator_protocol_message = protocol_v1::CollatorProtocolMessage::Declare( + Sr25519Keyring::Alice.public().into() + ); + + let message = protocol_v1::CollationProtocol::CollatorProtocol( + collator_protocol_message.clone() + ); + + network_handle.peer_message( + peer_a.clone(), + PeerSet::Collation, + WireMessage::ProtocolMessage(message.clone()).encode(), + ).await; + + let actions = network_handle.next_network_actions(1).await; + assert!(network_actions_contains( + &actions, + &NetworkAction::ReputationChange( + peer_a.clone(), + UNCONNECTED_PEERSET_COST, + ), + )); + + // peer B has the message relayed. 
+ + network_handle.peer_message( + peer_b.clone(), + PeerSet::Collation, + WireMessage::ProtocolMessage(message.clone()).encode(), + ).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol( + CollatorProtocolMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage(p, m) + ) + ) => { + assert_eq!(p, peer_b); + assert_eq!(m, collator_protocol_message); + } + ); + }); + } + + #[test] + fn different_views_on_different_peer_sets() { + test_harness(|test_harness| async move { + let TestHarness { + mut network_handle, + mut virtual_overseer, + } = test_harness; + + let peer = PeerId::random(); + + network_handle.connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full).await; + network_handle.connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full).await; + + // bridge will inform about all connected peers. + { + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + { + assert_sends_collation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_collation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + let view_a = View(vec![[1; 32].into()]); + let view_b = View(vec![[2; 32].into()]); + + network_handle.peer_message( + peer.clone(), + PeerSet::Validation, + WireMessage::::ViewUpdate(view_a.clone()).encode(), + ).await; + + network_handle.peer_message( + peer.clone(), + PeerSet::Collation, + WireMessage::::ViewUpdate(view_b.clone()).encode(), + ).await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), view_a.clone()), + &mut virtual_overseer, + ).await; + + assert_sends_collation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), view_b.clone()), + &mut virtual_overseer, + ).await; + }); + } + + #[test] + fn send_messages_to_peers() { + test_harness(|test_harness| async move { + let TestHarness { + mut network_handle, + mut virtual_overseer, + } = test_harness; + + let peer = PeerId::random(); + + network_handle.connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full).await; + network_handle.connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full).await; + + // bridge will inform about all connected peers. + { + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + { + assert_sends_collation_event_to_all( + NetworkBridgeEvent::PeerConnected(peer.clone(), ObservedRole::Full), + &mut virtual_overseer, + ).await; + + assert_sends_collation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View(Default::default())), + &mut virtual_overseer, + ).await; + } + + // send a validation protocol message. 
+ + { + let pov_distribution_message = protocol_v1::PoVDistributionMessage::Awaiting( + [0; 32].into(), + vec![[1; 32].into()], + ); + + let message = protocol_v1::ValidationProtocol::PoVDistribution( + pov_distribution_message.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication { + msg: NetworkBridgeMessage::SendValidationMessage( + vec![peer.clone()], + message.clone(), + ) + }).await; + + assert_eq!( + network_handle.next_network_action().await, + NetworkAction::WriteNotification( + peer.clone(), + PeerSet::Validation, + WireMessage::ProtocolMessage(message).encode(), + ) + ); + } + + // send a collation protocol message. + + { + let collator_protocol_message = protocol_v1::CollatorProtocolMessage::Declare( + Sr25519Keyring::Alice.public().into() + ); + + let message = protocol_v1::CollationProtocol::CollatorProtocol( + collator_protocol_message.clone() + ); + + virtual_overseer.send(FromOverseer::Communication { + msg: NetworkBridgeMessage::SendCollationMessage( + vec![peer.clone()], + message.clone(), + ) + }).await; + + assert_eq!( + network_handle.next_network_action().await, + NetworkAction::WriteNotification( + peer.clone(), + PeerSet::Collation, + WireMessage::ProtocolMessage(message).encode(), + ) + ); + } + }); + } +} diff --git a/node/network/pov-distribution/Cargo.toml b/node/network/pov-distribution/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..cedc94732cfb832cc7206bc977815a078f95d081 --- /dev/null +++ b/node/network/pov-distribution/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "polkadot-pov-distribution" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.5" +log = "0.4.8" +futures-timer = "3.0.2" +streamunordered = "0.5.1" +polkadot-primitives = { path = "../../../primitives" } +node-primitives = { package = "polkadot-node-primitives", path = "../../primitives" } +parity-scale-codec = "1.3.4" +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } +polkadot-node-network-protocol = { path = "../../network/protocol" } + +[dev-dependencies] +parking_lot = "0.10.0" +assert_matches = "1.3.0" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..552981244d2474960eba81673d9dc155cbebe0d9 --- /dev/null +++ b/node/network/pov-distribution/src/lib.rs @@ -0,0 +1,1458 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! PoV Distribution Subsystem of Polkadot. +//! +//! This is a gossip implementation of code that is responsible for distributing PoVs +//! 
among validators. + +use polkadot_primitives::v1::{Hash, PoV, CandidateDescriptor}; +use polkadot_subsystem::{ + ActiveLeavesUpdate, OverseerSignal, SubsystemContext, Subsystem, SubsystemResult, FromOverseer, SpawnedSubsystem, +}; +use polkadot_subsystem::messages::{ + PoVDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, AllMessages, NetworkBridgeMessage, +}; +use polkadot_node_network_protocol::{ + v1 as protocol_v1, ReputationChange as Rep, NetworkBridgeEvent, PeerId, View, +}; + +use futures::prelude::*; +use futures::channel::oneshot; + +use std::collections::{hash_map::{Entry, HashMap}, HashSet}; +use std::sync::Arc; + +const COST_APPARENT_FLOOD: Rep = Rep::new(-500, "Peer appears to be flooding us with PoV requests"); +const COST_UNEXPECTED_POV: Rep = Rep::new(-500, "Peer sent us an unexpected PoV"); +const COST_AWAITED_NOT_IN_VIEW: Rep + = Rep::new(-100, "Peer claims to be awaiting something outside of its view"); + +const BENEFIT_FRESH_POV: Rep = Rep::new(25, "Peer supplied us with an awaited PoV"); +const BENEFIT_LATE_POV: Rep = Rep::new(10, "Peer supplied us with an awaited PoV, \ + but was not the first to do so"); + +/// The PoV Distribution Subsystem. +pub struct PoVDistribution; + +impl Subsystem for PoVDistribution + where C: SubsystemContext +{ + type Metrics = (); + + fn start(self, ctx: C) -> SpawnedSubsystem { + // Swallow error because failure is fatal to the node and we log with more precision + // within `run`. + SpawnedSubsystem { + name: "pov-distribution-subsystem", + future: run(ctx).map(|_| ()).boxed(), + } + } +} + +struct State { + relay_parent_state: HashMap, + peer_state: HashMap, + our_view: View, +} + +struct BlockBasedState { + known: HashMap>, + /// All the PoVs we are or were fetching, coupled with channels expecting the data. + /// + /// This may be an empty list, which indicates that we were once awaiting this PoV but have + /// received it already. + fetching: HashMap>>>, + n_validators: usize, +} + +#[derive(Default)] +struct PeerState { + /// A set of awaited PoV-hashes for each relay-parent in the peer's view. + awaited: HashMap>, +} + +fn awaiting_message(relay_parent: Hash, awaiting: Vec) + -> protocol_v1::ValidationProtocol +{ + protocol_v1::ValidationProtocol::PoVDistribution( + protocol_v1::PoVDistributionMessage::Awaiting(relay_parent, awaiting) + ) +} + +fn send_pov_message(relay_parent: Hash, pov_hash: Hash, pov: PoV) + -> protocol_v1::ValidationProtocol +{ + protocol_v1::ValidationProtocol::PoVDistribution( + protocol_v1::PoVDistributionMessage::SendPoV(relay_parent, pov_hash, pov) + ) +} + +/// Handles the signal. If successful, returns `true` if the subsystem should conclude, +/// `false` otherwise. +async fn handle_signal( + state: &mut State, + ctx: &mut impl SubsystemContext, + signal: OverseerSignal, +) -> SubsystemResult { + match signal { + OverseerSignal::Conclude => Ok(true), + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated }) => { + for relay_parent in activated { + let (vals_tx, vals_rx) = oneshot::channel(); + ctx.send_message(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Validators(vals_tx), + ))).await?; + + let n_validators = match vals_rx.await? { + Ok(v) => v.len(), + Err(e) => { + log::warn!(target: "pov_distribution", + "Error fetching validators from runtime API for active leaf: {:?}", + e + ); + + // Not adding bookkeeping here might make us behave funny, but we + // shouldn't take down the node on spurious runtime API errors. 
+ // + // and this is "behave funny" as in be bad at our job, but not in any + // slashable or security-related way. + continue; + } + }; + + state.relay_parent_state.insert(relay_parent, BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: n_validators, + }); + } + + for relay_parent in deactivated { + state.relay_parent_state.remove(&relay_parent); + } + + Ok(false) + } + OverseerSignal::BlockFinalized(_) => Ok(false), + } +} + +/// Notify peers that we are awaiting a given PoV hash. +/// +/// This only notifies peers who have the relay parent in their view. +async fn notify_all_we_are_awaiting( + peers: &mut HashMap, + ctx: &mut impl SubsystemContext, + relay_parent: Hash, + pov_hash: Hash, +) -> SubsystemResult<()> { + // We use `awaited` as a proxy for which heads are in the peer's view. + let peers_to_send: Vec<_> = peers.iter() + .filter_map(|(peer, state)| if state.awaited.contains_key(&relay_parent) { + Some(peer.clone()) + } else { + None + }) + .collect(); + + if peers_to_send.is_empty() { return Ok(()) } + + let payload = awaiting_message(relay_parent, vec![pov_hash]); + + ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( + peers_to_send, + payload, + ))).await +} + +/// Notify one peer about everything we're awaiting at a given relay-parent. +async fn notify_one_we_are_awaiting_many( + peer: &PeerId, + ctx: &mut impl SubsystemContext, + relay_parent_state: &HashMap, + relay_parent: Hash, +) -> SubsystemResult<()> { + let awaiting_hashes = relay_parent_state.get(&relay_parent).into_iter().flat_map(|s| { + // Send the peer everything we are fetching at this relay-parent + s.fetching.iter() + .filter(|(_, senders)| !senders.is_empty()) // that has not been completed already. + .map(|(pov_hash, _)| *pov_hash) + }).collect::>(); + + if awaiting_hashes.is_empty() { return Ok(()) } + + let payload = awaiting_message(relay_parent, awaiting_hashes); + + ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( + vec![peer.clone()], + payload, + ))).await +} + +/// Distribute a PoV to peers who are awaiting it. +async fn distribute_to_awaiting( + peers: &mut HashMap, + ctx: &mut impl SubsystemContext, + relay_parent: Hash, + pov_hash: Hash, + pov: &PoV, +) -> SubsystemResult<()> { + // Send to all peers who are awaiting the PoV and have that relay-parent in their view. + // + // Also removes it from their awaiting set. + let peers_to_send: Vec<_> = peers.iter_mut() + .filter_map(|(peer, state)| state.awaited.get_mut(&relay_parent).and_then(|awaited| { + if awaited.remove(&pov_hash) { + Some(peer.clone()) + } else { + None + } + })) + .collect(); + + if peers_to_send.is_empty() { return Ok(()) } + + let payload = send_pov_message(relay_parent, pov_hash, pov.clone()); + + ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( + peers_to_send, + payload, + ))).await +} + +/// Handles a `FetchPoV` message. 
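// The core of `distribute_to_awaiting` above is a single pass that both
// selects the peers awaiting a given PoV hash and clears their awaiting flag.
// A self-contained miniature of that `filter_map` pattern, with `&str` and
// `u32` standing in for `PeerId` and `Hash`:
mod distribute_sketch {
	use std::collections::{HashMap, HashSet};

	pub fn demo() {
		// peer -> set of awaited PoV hashes.
		let mut awaited: HashMap<&str, HashSet<u32>> = HashMap::new();
		awaited.insert("peer_a", [10, 11].iter().copied().collect());
		awaited.insert("peer_b", HashSet::new());

		let pov_hash = 10;
		let peers_to_send: Vec<&str> = awaited.iter_mut()
			.filter_map(|(peer, set)| if set.remove(&pov_hash) { Some(*peer) } else { None })
			.collect();

		assert_eq!(peers_to_send, vec!["peer_a"]);
		// The flag is cleared, so a second distribution would send to nobody.
		assert!(!awaited["peer_a"].contains(&pov_hash));
	}
}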
+async fn handle_fetch( + state: &mut State, + ctx: &mut impl SubsystemContext, + relay_parent: Hash, + descriptor: CandidateDescriptor, + response_sender: oneshot::Sender>, +) -> SubsystemResult<()> { + let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) { + Some(s) => s, + None => return Ok(()), + }; + + if let Some(pov) = relay_parent_state.known.get(&descriptor.pov_hash) { + let _ = response_sender.send(pov.clone()); + return Ok(()); + } + + { + match relay_parent_state.fetching.entry(descriptor.pov_hash) { + Entry::Occupied(mut e) => { + // we are already awaiting this PoV if there is an entry. + e.get_mut().push(response_sender); + return Ok(()); + } + Entry::Vacant(e) => { + e.insert(vec![response_sender]); + } + } + } + + if relay_parent_state.fetching.len() > 2 * relay_parent_state.n_validators { + log::warn!("Other subsystems have requested PoV distribution to \ + fetch more PoVs than reasonably expected: {}", relay_parent_state.fetching.len()); + return Ok(()); + } + + // Issue an `Awaiting` message to all peers with this in their view. + notify_all_we_are_awaiting( + &mut state.peer_state, + ctx, + relay_parent, + descriptor.pov_hash + ).await +} + +/// Handles a `DistributePoV` message. +async fn handle_distribute( + state: &mut State, + ctx: &mut impl SubsystemContext, + relay_parent: Hash, + descriptor: CandidateDescriptor, + pov: Arc, +) -> SubsystemResult<()> { + let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) { + None => return Ok(()), + Some(s) => s, + }; + + if let Some(our_awaited) = relay_parent_state.fetching.get_mut(&descriptor.pov_hash) { + // Drain all the senders, but keep the entry in the map around intentionally. + // + // It signals that we were at one point awaiting this, so we will be able to tell + // why peers are sending it to us. + for response_sender in our_awaited.drain(..) { + let _ = response_sender.send(pov.clone()); + } + } + + relay_parent_state.known.insert(descriptor.pov_hash, pov.clone()); + + distribute_to_awaiting( + &mut state.peer_state, + ctx, + relay_parent, + descriptor.pov_hash, + &*pov, + ).await +} + +/// Report a reputation change for a peer. +async fn report_peer( + ctx: &mut impl SubsystemContext, + peer: PeerId, + rep: Rep, +) -> SubsystemResult<()> { + ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::ReportPeer(peer, rep))).await +} + +/// Handle a notification from a peer that they are awaiting some PoVs. +async fn handle_awaiting( + state: &mut State, + ctx: &mut impl SubsystemContext, + peer: PeerId, + relay_parent: Hash, + pov_hashes: Vec, +) -> SubsystemResult<()> { + if !state.our_view.0.contains(&relay_parent) { + report_peer(ctx, peer, COST_AWAITED_NOT_IN_VIEW).await?; + return Ok(()); + } + + let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) { + None => { + log::warn!("PoV Distribution relay parent state out-of-sync with our view"); + return Ok(()); + } + Some(s) => s, + }; + + let peer_awaiting = match + state.peer_state.get_mut(&peer).and_then(|s| s.awaited.get_mut(&relay_parent)) + { + None => { + report_peer(ctx, peer, COST_AWAITED_NOT_IN_VIEW).await?; + return Ok(()); + } + Some(a) => a, + }; + + let will_be_awaited = peer_awaiting.len() + pov_hashes.len(); + if will_be_awaited <= 2 * relay_parent_state.n_validators { + for pov_hash in pov_hashes { + // For all requested PoV hashes, if we have it, we complete the request immediately. + // Otherwise, we note that the peer is awaiting the PoV. 
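// Both `handle_fetch` and `handle_awaiting` cap outstanding work at twice the
// validator count before treating further requests as an apparent flood,
// presumably because honest flows should not need much more than one PoV per
// validator. A miniature of that check (function name is illustrative):
mod flood_limit_sketch {
	fn within_limit(already_awaited: usize, newly_requested: usize, n_validators: usize) -> bool {
		already_awaited + newly_requested <= 2 * n_validators
	}

	pub fn demo() {
		assert!(within_limit(5, 3, 10)); // 8 <= 20: accept
		assert!(!within_limit(18, 5, 10)); // 23 > 20: report COST_APPARENT_FLOOD
	}
}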
+/// Handle a notification from a peer that they are awaiting some PoVs.
+async fn handle_awaiting(
+	state: &mut State,
+	ctx: &mut impl SubsystemContext<Message = PoVDistributionMessage>,
+	peer: PeerId,
+	relay_parent: Hash,
+	pov_hashes: Vec<Hash>,
+) -> SubsystemResult<()> {
+	if !state.our_view.0.contains(&relay_parent) {
+		report_peer(ctx, peer, COST_AWAITED_NOT_IN_VIEW).await?;
+		return Ok(());
+	}
+
+	let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) {
+		None => {
+			log::warn!("PoV Distribution relay parent state out-of-sync with our view");
+			return Ok(());
+		}
+		Some(s) => s,
+	};
+
+	let peer_awaiting = match
+		state.peer_state.get_mut(&peer).and_then(|s| s.awaited.get_mut(&relay_parent))
+	{
+		None => {
+			report_peer(ctx, peer, COST_AWAITED_NOT_IN_VIEW).await?;
+			return Ok(());
+		}
+		Some(a) => a,
+	};
+
+	let will_be_awaited = peer_awaiting.len() + pov_hashes.len();
+	if will_be_awaited <= 2 * relay_parent_state.n_validators {
+		for pov_hash in pov_hashes {
+			// For all requested PoV hashes, if we have it, we complete the request immediately.
+			// Otherwise, we note that the peer is awaiting the PoV.
+			if let Some(pov) = relay_parent_state.known.get(&pov_hash) {
+				let payload = send_pov_message(relay_parent, pov_hash, (&**pov).clone());
+				ctx.send_message(AllMessages::NetworkBridge(
+					NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload)
+				)).await?;
+			} else {
+				peer_awaiting.insert(pov_hash);
+			}
+		}
+	} else {
+		report_peer(ctx, peer, COST_APPARENT_FLOOD).await?;
+	}
+
+	Ok(())
+}
+
+/// Handle an incoming PoV from our peer. Reports them if unexpected, rewards them if not.
+///
+/// Completes any requests awaiting that PoV.
+async fn handle_incoming_pov(
+	state: &mut State,
+	ctx: &mut impl SubsystemContext<Message = PoVDistributionMessage>,
+	peer: PeerId,
+	relay_parent: Hash,
+	pov_hash: Hash,
+	pov: PoV,
+) -> SubsystemResult<()> {
+	let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) {
+		None => {
+			report_peer(ctx, peer, COST_UNEXPECTED_POV).await?;
+			return Ok(());
+		},
+		Some(r) => r,
+	};
+
+	let pov = {
+		// Do validity checks and complete all senders awaiting this PoV.
+		let fetching = match relay_parent_state.fetching.get_mut(&pov_hash) {
+			None => {
+				report_peer(ctx, peer, COST_UNEXPECTED_POV).await?;
+				return Ok(());
+			}
+			Some(f) => f,
+		};
+
+		let hash = pov.hash();
+		if hash != pov_hash {
+			report_peer(ctx, peer, COST_UNEXPECTED_POV).await?;
+			return Ok(());
+		}
+
+		let pov = Arc::new(pov);
+
+		if fetching.is_empty() {
+			// fetching is empty whenever we were awaiting something and
+			// it was completed afterwards.
+			report_peer(ctx, peer.clone(), BENEFIT_LATE_POV).await?;
+		} else {
+			// fetching is non-empty when the peer just provided us with data we needed.
+			report_peer(ctx, peer.clone(), BENEFIT_FRESH_POV).await?;
+		}
+
+		for response_sender in fetching.drain(..) {
+			let _ = response_sender.send(pov.clone());
+		}
+
+		pov
+	};
+
+	// make sure we don't consider this peer as awaiting that PoV anymore.
+	if let Some(peer_state) = state.peer_state.get_mut(&peer) {
+		peer_state.awaited.remove(&pov_hash);
+	}
+
+	// distribute the PoV to all other peers who are awaiting it.
+	distribute_to_awaiting(
+		&mut state.peer_state,
+		ctx,
+		relay_parent,
+		pov_hash,
+		&*pov,
+	).await
+}
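Note the order of checks in `handle_incoming_pov`: the PoV must arrive under a relay-parent we track, under a hash we are actually fetching, and its content must re-hash to the claimed hash before any sender is completed. The last check is the crucial one; a standalone sketch of it, with `std`'s `DefaultHasher` standing in for the BLAKE2-based `PoV::hash()` of the real code:

```rust
// Sketch only: DefaultHasher is an illustrative stand-in, not the real PoV hash.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn content_hash(data: &[u8]) -> u64 {
	let mut h = DefaultHasher::new();
	data.hash(&mut h);
	h.finish()
}

/// Returns whether the peer's response should be accepted (true) or punished (false).
fn accept_pov(expected_hash: u64, data: &[u8]) -> bool {
	content_hash(data) == expected_hash
}

fn main() {
	let good = b"pov bytes".to_vec();
	let expected = content_hash(&good);

	assert!(accept_pov(expected, &good));
	// A peer substituting different data under the same claimed hash is caught
	// here and would be reported with `COST_UNEXPECTED_POV`.
	assert!(!accept_pov(expected, b"forged bytes"));
}
```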
+/// Handles a network bridge update.
+async fn handle_network_update(
+	state: &mut State,
+	ctx: &mut impl SubsystemContext<Message = PoVDistributionMessage>,
+	update: NetworkBridgeEvent<protocol_v1::PoVDistributionMessage>,
+) -> SubsystemResult<()> {
+	match update {
+		NetworkBridgeEvent::PeerConnected(peer, _observed_role) => {
+			state.peer_state.insert(peer, PeerState { awaited: HashMap::new() });
+			Ok(())
+		}
+		NetworkBridgeEvent::PeerDisconnected(peer) => {
+			state.peer_state.remove(&peer);
+			Ok(())
+		}
+		NetworkBridgeEvent::PeerViewChange(peer_id, view) => {
+			if let Some(peer_state) = state.peer_state.get_mut(&peer_id) {
+				// prune anything not in the new view.
+				peer_state.awaited.retain(|relay_parent, _| view.contains(relay_parent));
+
+				// introduce things from the new view.
+				for relay_parent in view.0.iter() {
+					if let Entry::Vacant(entry) = peer_state.awaited.entry(*relay_parent) {
+						entry.insert(HashSet::new());
+
+						// Notify the peer about everything we're awaiting at the new relay-parent.
+						notify_one_we_are_awaiting_many(
+							&peer_id,
+							ctx,
+							&state.relay_parent_state,
+							*relay_parent,
+						).await?;
+					}
+				}
+			}
+
+			Ok(())
+		}
+		NetworkBridgeEvent::PeerMessage(peer, message) => {
+			match message {
+				protocol_v1::PoVDistributionMessage::Awaiting(relay_parent, pov_hashes)
+					=> handle_awaiting(
+						state,
+						ctx,
+						peer,
+						relay_parent,
+						pov_hashes,
+					).await,
+				protocol_v1::PoVDistributionMessage::SendPoV(relay_parent, pov_hash, pov)
+					=> handle_incoming_pov(
+						state,
+						ctx,
+						peer,
+						relay_parent,
+						pov_hash,
+						pov,
+					).await,
+			}
+		}
+		NetworkBridgeEvent::OurViewChange(view) => {
+			state.our_view = view;
+			Ok(())
+		}
+	}
+}
+
+async fn run(
+	mut ctx: impl SubsystemContext<Message = PoVDistributionMessage>,
+) -> SubsystemResult<()> {
+	let mut state = State {
+		relay_parent_state: HashMap::new(),
+		peer_state: HashMap::new(),
+		our_view: View(Vec::new()),
+	};
+
+	loop {
+		match ctx.recv().await? {
+			FromOverseer::Signal(signal) => if handle_signal(&mut state, &mut ctx, signal).await? {
+				return Ok(());
+			},
+			FromOverseer::Communication { msg } => match msg {
+				PoVDistributionMessage::FetchPoV(relay_parent, descriptor, response_sender) =>
+					handle_fetch(
+						&mut state,
+						&mut ctx,
+						relay_parent,
+						descriptor,
+						response_sender,
+					).await?,
+				PoVDistributionMessage::DistributePoV(relay_parent, descriptor, pov) =>
+					handle_distribute(
+						&mut state,
+						&mut ctx,
+						relay_parent,
+						descriptor,
+						pov,
+					).await?,
+				PoVDistributionMessage::NetworkBridgeUpdateV1(event) =>
+					handle_network_update(
+						&mut state,
+						&mut ctx,
+						event,
+					).await?,
+			},
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use futures::executor;
+	use polkadot_primitives::v1::BlockData;
+	use assert_matches::assert_matches;
+
+	fn make_pov(data: Vec<u8>) -> PoV {
+		PoV { block_data: BlockData(data) }
+	}
+
+	fn make_peer_state(awaited: Vec<(Hash, Vec<Hash>)>)
+		-> PeerState
+	{
+		PeerState {
+			awaited: awaited.into_iter().map(|(rp, h)| (rp, h.into_iter().collect())).collect()
+		}
+	}
+
+	#[test]
+	fn distributes_to_those_awaiting_and_completes_local() {
+		let hash_a: Hash = [0; 32].into();
+		let hash_b: Hash = [1; 32].into();
+
+		let peer_a = PeerId::random();
+		let peer_b = PeerId::random();
+		let peer_c = PeerId::random();
+
+		let (pov_send, pov_recv) = oneshot::channel();
+		let pov = make_pov(vec![1, 2, 3]);
+		let pov_hash = pov.hash();
+
+		let mut state = State {
+			relay_parent_state: {
+				let mut s = HashMap::new();
+				let mut b = BlockBasedState {
+					known: HashMap::new(),
+					fetching: HashMap::new(),
+					n_validators: 10,
+				};
+
+				b.fetching.insert(pov_hash, vec![pov_send]);
+				s.insert(hash_a, b);
+				s
+			},
+			peer_state: {
+				let mut s = HashMap::new();
+
+				// peer A has hash_a in its view and is awaiting the PoV.
+				s.insert(
+					peer_a.clone(),
+					make_peer_state(vec![(hash_a, vec![pov_hash])]),
+				);
+
+				// peer B has hash_a in its view but is not awaiting.
+				s.insert(
+					peer_b.clone(),
+					make_peer_state(vec![(hash_a, vec![])]),
+				);
+
+				// peer C doesn't have hash_a in its view but is awaiting the PoV under hash_b.
+ s.insert( + peer_c.clone(), + make_peer_state(vec![(hash_b, vec![pov_hash])]), + ); + + s + }, + our_view: View(vec![hash_a, hash_b]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + let mut descriptor = CandidateDescriptor::default(); + descriptor.pov_hash = pov_hash; + + executor::block_on(async move { + handle_distribute( + &mut state, + &mut ctx, + hash_a, + descriptor, + Arc::new(pov.clone()), + ).await.unwrap(); + + assert!(!state.peer_state[&peer_a].awaited[&hash_a].contains(&pov_hash)); + assert!(state.peer_state[&peer_c].awaited[&hash_b].contains(&pov_hash)); + + // our local sender also completed + assert_eq!(&*pov_recv.await.unwrap(), &pov); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendValidationMessage(peers, message) + ) => { + assert_eq!(peers, vec![peer_a.clone()]); + assert_eq!( + message, + send_pov_message(hash_a, pov_hash, pov.clone()), + ); + } + ) + }); + } + + #[test] + fn we_inform_peers_with_same_view_we_are_awaiting() { + let hash_a: Hash = [0; 32].into(); + let hash_b: Hash = [1; 32].into(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + let (pov_send, _) = oneshot::channel(); + let pov = make_pov(vec![1, 2, 3]); + let pov_hash = pov.hash(); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + let b = BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }; + + s.insert(hash_a, b); + s + }, + peer_state: { + let mut s = HashMap::new(); + + // peer A has hash_a in its view. + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![])]), + ); + + // peer B doesn't have hash_a in its view. + s.insert( + peer_b.clone(), + make_peer_state(vec![(hash_b, vec![])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + let mut descriptor = CandidateDescriptor::default(); + descriptor.pov_hash = pov_hash; + + executor::block_on(async move { + handle_fetch( + &mut state, + &mut ctx, + hash_a, + descriptor, + pov_send, + ).await.unwrap(); + + assert_eq!(state.relay_parent_state[&hash_a].fetching[&pov_hash].len(), 1); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendValidationMessage(peers, message) + ) => { + assert_eq!(peers, vec![peer_a.clone()]); + assert_eq!( + message, + awaiting_message(hash_a, vec![pov_hash]), + ); + } + ) + }); + } + + #[test] + fn peer_view_change_leads_to_us_informing() { + let hash_a: Hash = [0; 32].into(); + let hash_b: Hash = [1; 32].into(); + + let peer_a = PeerId::random(); + + let (pov_a_send, _) = oneshot::channel(); + + let pov_a = make_pov(vec![1, 2, 3]); + let pov_a_hash = pov_a.hash(); + + let pov_b = make_pov(vec![4, 5, 6]); + let pov_b_hash = pov_b.hash(); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + let mut b = BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }; + + // pov_a is still being fetched, whereas the fetch of pov_b has already + // completed, as implied by the empty vector. 
+ b.fetching.insert(pov_a_hash, vec![pov_a_send]); + b.fetching.insert(pov_b_hash, vec![]); + + s.insert(hash_a, b); + s + }, + peer_state: { + let mut s = HashMap::new(); + + // peer A doesn't yet have hash_a in its view. + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_b, vec![])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerViewChange(peer_a.clone(), View(vec![hash_a, hash_b])), + ).await.unwrap(); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendValidationMessage(peers, message) + ) => { + assert_eq!(peers, vec![peer_a.clone()]); + assert_eq!( + message, + awaiting_message(hash_a, vec![pov_a_hash]), + ); + } + ) + }); + } + + #[test] + fn peer_complete_fetch_and_is_rewarded() { + let hash_a: Hash = [0; 32].into(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + let (pov_send, pov_recv) = oneshot::channel(); + + let pov = make_pov(vec![1, 2, 3]); + let pov_hash = pov.hash(); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + let mut b = BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }; + + // pov is being fetched. + b.fetching.insert(pov_hash, vec![pov_send]); + + s.insert(hash_a, b); + s + }, + peer_state: { + let mut s = HashMap::new(); + + // peers A and B are functionally the same. + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![])]), + ); + + s.insert( + peer_b.clone(), + make_peer_state(vec![(hash_a, vec![])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + // Peer A answers our request before peer B. + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + send_pov_message(hash_a, pov_hash, pov.clone()), + ).focus().unwrap(), + ).await.unwrap(); + + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + send_pov_message(hash_a, pov_hash, pov.clone()), + ).focus().unwrap(), + ).await.unwrap(); + + assert_eq!(&*pov_recv.await.unwrap(), &pov); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, BENEFIT_FRESH_POV); + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, BENEFIT_LATE_POV); + } + ); + }); + } + + #[test] + fn peer_punished_for_sending_bad_pov() { + let hash_a: Hash = [0; 32].into(); + + let peer_a = PeerId::random(); + + let (pov_send, _) = oneshot::channel(); + + let pov = make_pov(vec![1, 2, 3]); + let pov_hash = pov.hash(); + + let bad_pov = make_pov(vec![6, 6, 6]); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + let mut b = BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }; + + // pov is being fetched. 
+ b.fetching.insert(pov_hash, vec![pov_send]); + + s.insert(hash_a, b); + s + }, + peer_state: { + let mut s = HashMap::new(); + + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + // Peer A answers our request: right relay parent, awaited hash, wrong PoV. + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + send_pov_message(hash_a, pov_hash, bad_pov.clone()), + ).focus().unwrap(), + ).await.unwrap(); + + // didn't complete our sender. + assert_eq!(state.relay_parent_state[&hash_a].fetching[&pov_hash].len(), 1); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, COST_UNEXPECTED_POV); + } + ); + }); + } + + #[test] + fn peer_punished_for_sending_unexpected_pov() { + let hash_a: Hash = [0; 32].into(); + + let peer_a = PeerId::random(); + + let pov = make_pov(vec![1, 2, 3]); + let pov_hash = pov.hash(); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + let b = BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }; + + s.insert(hash_a, b); + s + }, + peer_state: { + let mut s = HashMap::new(); + + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + // Peer A answers our request: right relay parent, awaited hash, wrong PoV. + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + send_pov_message(hash_a, pov_hash, pov.clone()), + ).focus().unwrap(), + ).await.unwrap(); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, COST_UNEXPECTED_POV); + } + ); + }); + } + + #[test] + fn peer_punished_for_sending_pov_out_of_our_view() { + let hash_a: Hash = [0; 32].into(); + let hash_b: Hash = [1; 32].into(); + + let peer_a = PeerId::random(); + + let pov = make_pov(vec![1, 2, 3]); + let pov_hash = pov.hash(); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + let b = BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }; + + s.insert(hash_a, b); + s + }, + peer_state: { + let mut s = HashMap::new(); + + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + // Peer A answers our request: right relay parent, awaited hash, wrong PoV. 
+ handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + send_pov_message(hash_b, pov_hash, pov.clone()), + ).focus().unwrap(), + ).await.unwrap(); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, COST_UNEXPECTED_POV); + } + ); + }); + } + + #[test] + fn peer_reported_for_awaiting_too_much() { + let hash_a: Hash = [0; 32].into(); + + let peer_a = PeerId::random(); + let n_validators = 10; + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + let b = BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators, + }; + + s.insert(hash_a, b); + s + }, + peer_state: { + let mut s = HashMap::new(); + + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + let max_plausibly_awaited = n_validators * 2; + + // The peer awaits a plausible (albeit unlikely) amount of PoVs. + for i in 0..max_plausibly_awaited { + let pov_hash = make_pov(vec![i as u8; 32]).hash(); + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + awaiting_message(hash_a, vec![pov_hash]), + ).focus().unwrap(), + ).await.unwrap(); + } + + assert_eq!(state.peer_state[&peer_a].awaited[&hash_a].len(), max_plausibly_awaited); + + // The last straw: + let last_pov_hash = make_pov(vec![max_plausibly_awaited as u8; 32]).hash(); + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + awaiting_message(hash_a, vec![last_pov_hash]), + ).focus().unwrap(), + ).await.unwrap(); + + // No more bookkeeping for you! + assert_eq!(state.peer_state[&peer_a].awaited[&hash_a].len(), max_plausibly_awaited); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, COST_APPARENT_FLOOD); + } + ); + }); + } + + #[test] + fn peer_reported_for_awaiting_outside_their_view() { + let hash_a: Hash = [0; 32].into(); + let hash_b: Hash = [1; 32].into(); + + let peer_a = PeerId::random(); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + s.insert(hash_a, BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }); + + s.insert(hash_b, BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }); + + s + }, + peer_state: { + let mut s = HashMap::new(); + + // Peer has only hash A in its view. 
+ s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![])]), + ); + + s + }, + our_view: View(vec![hash_a, hash_b]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + let pov_hash = make_pov(vec![1, 2, 3]).hash(); + + // Hash B is in our view but not the peer's + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + awaiting_message(hash_b, vec![pov_hash]), + ).focus().unwrap(), + ).await.unwrap(); + + assert!(state.peer_state[&peer_a].awaited.get(&hash_b).is_none()); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, COST_AWAITED_NOT_IN_VIEW); + } + ); + }); + } + + #[test] + fn peer_reported_for_awaiting_outside_our_view() { + let hash_a: Hash = [0; 32].into(); + let hash_b: Hash = [1; 32].into(); + + let peer_a = PeerId::random(); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + s.insert(hash_a, BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }); + + s + }, + peer_state: { + let mut s = HashMap::new(); + + // Peer has hashes A and B in their view. + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![]), (hash_b, vec![])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + let pov_hash = make_pov(vec![1, 2, 3]).hash(); + + // Hash B is in peer's view but not ours. + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + awaiting_message(hash_b, vec![pov_hash]), + ).focus().unwrap(), + ).await.unwrap(); + + // Illegal `awaited` is ignored. + assert!(state.peer_state[&peer_a].awaited[&hash_b].is_empty()); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, COST_AWAITED_NOT_IN_VIEW); + } + ); + }); + } + + #[test] + fn peer_complete_fetch_leads_to_us_completing_others() { + let hash_a: Hash = [0; 32].into(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + let (pov_send, pov_recv) = oneshot::channel(); + + let pov = make_pov(vec![1, 2, 3]); + let pov_hash = pov.hash(); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + let mut b = BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }; + + // pov is being fetched. + b.fetching.insert(pov_hash, vec![pov_send]); + + s.insert(hash_a, b); + s + }, + peer_state: { + let mut s = HashMap::new(); + + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![])]), + ); + + // peer B is awaiting peer A's request. 
+ s.insert( + peer_b.clone(), + make_peer_state(vec![(hash_a, vec![pov_hash])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + send_pov_message(hash_a, pov_hash, pov.clone()), + ).focus().unwrap(), + ).await.unwrap(); + + assert_eq!(&*pov_recv.await.unwrap(), &pov); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, BENEFIT_FRESH_POV); + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendValidationMessage(peers, message) + ) => { + assert_eq!(peers, vec![peer_b.clone()]); + assert_eq!( + message, + send_pov_message(hash_a, pov_hash, pov.clone()), + ); + } + ); + + assert!(!state.peer_state[&peer_b].awaited[&hash_a].contains(&pov_hash)); + }); + } + + #[test] + fn peer_completing_request_no_longer_awaiting() { + let hash_a: Hash = [0; 32].into(); + + let peer_a = PeerId::random(); + + let (pov_send, pov_recv) = oneshot::channel(); + + let pov = make_pov(vec![1, 2, 3]); + let pov_hash = pov.hash(); + + let mut state = State { + relay_parent_state: { + let mut s = HashMap::new(); + let mut b = BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators: 10, + }; + + // pov is being fetched. + b.fetching.insert(pov_hash, vec![pov_send]); + + s.insert(hash_a, b); + s + }, + peer_state: { + let mut s = HashMap::new(); + + // peer A is registered as awaiting. + s.insert( + peer_a.clone(), + make_peer_state(vec![(hash_a, vec![pov_hash])]), + ); + + s + }, + our_view: View(vec![hash_a]), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + handle_network_update( + &mut state, + &mut ctx, + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + send_pov_message(hash_a, pov_hash, pov.clone()), + ).focus().unwrap(), + ).await.unwrap(); + + assert_eq!(&*pov_recv.await.unwrap(), &pov); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, BENEFIT_FRESH_POV); + } + ); + + // We received the PoV from peer A, so we do not consider it awaited by peer A anymore. 
+			assert!(!state.peer_state[&peer_a].awaited[&hash_a].contains(&pov_hash));
+		});
+	}
+}
diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..abcb6ae2adda370f3778d9c7ec476d53ae3a6337
--- /dev/null
+++ b/node/network/protocol/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "polkadot-node-network-protocol"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+description = "Primitive types for the Node-side"
+
+[dependencies]
+polkadot-primitives = { path = "../../../primitives" }
+polkadot-node-primitives = { path = "../../primitives" }
+parity-scale-codec = { version = "1.3.4", default-features = false, features = ["derive"] }
+runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7658048ca5c8dbf59e024676744c1a807b7f7a9e
--- /dev/null
+++ b/node/network/protocol/src/lib.rs
@@ -0,0 +1,269 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Network protocol types for parachains.
+
+use polkadot_primitives::v1::Hash;
+use parity_scale_codec::{Encode, Decode};
+use std::convert::TryFrom;
+
+pub use sc_network::{ReputationChange, PeerId};
+
+/// A unique identifier of a request.
+pub type RequestId = u64;
+
+/// A version of the protocol.
+pub type ProtocolVersion = u32;
+
+/// An error indicating that the overarching message type had the wrong variant.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct WrongVariant;
+
+/// The peer-sets that the network manages. Different subsystems will use different peer-sets.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum PeerSet {
+	/// The validation peer-set is responsible for all messages related to candidate validation
+	/// and communication among validators.
+	Validation,
+	/// The collation peer-set is used for validator<>collator communication.
+	Collation,
+}
+/// The advertised role of a node.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum ObservedRole {
+	/// A light node.
+	Light,
+	/// A full node.
+	Full,
+	/// A node claiming to be an authority (unauthenticated).
+	Authority,
+}
+
+impl From<sc_network::ObservedRole> for ObservedRole {
+	fn from(role: sc_network::ObservedRole) -> ObservedRole {
+		match role {
+			sc_network::ObservedRole::Light => ObservedRole::Light,
+			sc_network::ObservedRole::Authority => ObservedRole::Authority,
+			sc_network::ObservedRole::Full
+				| sc_network::ObservedRole::OurSentry
+				| sc_network::ObservedRole::OurGuardedAuthority
+				=> ObservedRole::Full,
+		}
+	}
+}
+
+impl Into<sc_network::ObservedRole> for ObservedRole {
+	fn into(self) -> sc_network::ObservedRole {
+		match self {
+			ObservedRole::Light => sc_network::ObservedRole::Light,
+			ObservedRole::Full => sc_network::ObservedRole::Full,
+			ObservedRole::Authority => sc_network::ObservedRole::Authority,
+		}
+	}
+}
+
+/// Events from network.
+#[derive(Debug, Clone, PartialEq)]
+pub enum NetworkBridgeEvent<M> {
+	/// A peer has connected.
+	PeerConnected(PeerId, ObservedRole),
+
+	/// A peer has disconnected.
+	PeerDisconnected(PeerId),
+
+	/// Peer has sent a message.
+	PeerMessage(PeerId, M),
+
+	/// Peer's `View` has changed.
+	PeerViewChange(PeerId, View),
+
+	/// Our `View` has changed.
+	OurViewChange(View),
+}
+
+macro_rules! impl_try_from {
+	($m_ty:ident, $variant:ident, $out:ty) => {
+		impl TryFrom<$m_ty> for $out {
+			type Error = crate::WrongVariant;
+
+			#[allow(unreachable_patterns)] // when there is only one variant
+			fn try_from(x: $m_ty) -> Result<$out, Self::Error> {
+				match x {
+					$m_ty::$variant(y) => Ok(y),
+					_ => Err(crate::WrongVariant),
+				}
+			}
+		}
+
+		impl<'a> TryFrom<&'a $m_ty> for &'a $out {
+			type Error = crate::WrongVariant;
+
+			fn try_from(x: &'a $m_ty) -> Result<&'a $out, Self::Error> {
+				#[allow(unreachable_patterns)] // when there is only one variant
+				match *x {
+					$m_ty::$variant(ref y) => Ok(y),
+					_ => Err(crate::WrongVariant),
+				}
+			}
+		}
+	}
+}
+
+impl<M> NetworkBridgeEvent<M> {
+	/// Focus an overarching network-bridge event into some more specific variant.
+	///
+	/// This acts as a call to `clone`, except in the case where the event is a message event,
+	/// in which case the clone can be expensive and it only clones if the message type can
+	/// be focused.
+	pub fn focus<'a, T>(&'a self) -> Result<NetworkBridgeEvent<T>, WrongVariant>
+		where T: 'a + Clone, &'a T: TryFrom<&'a M, Error = WrongVariant>
+	{
+		Ok(match *self {
+			NetworkBridgeEvent::PeerConnected(ref peer, ref role)
+				=> NetworkBridgeEvent::PeerConnected(peer.clone(), role.clone()),
+			NetworkBridgeEvent::PeerDisconnected(ref peer)
+				=> NetworkBridgeEvent::PeerDisconnected(peer.clone()),
+			NetworkBridgeEvent::PeerMessage(ref peer, ref msg)
+				=> NetworkBridgeEvent::PeerMessage(peer.clone(), <&'a T>::try_from(msg)?.clone()),
+			NetworkBridgeEvent::PeerViewChange(ref peer, ref view)
+				=> NetworkBridgeEvent::PeerViewChange(peer.clone(), view.clone()),
+			NetworkBridgeEvent::OurViewChange(ref view)
+				=> NetworkBridgeEvent::OurViewChange(view.clone()),
+		})
+	}
+}
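The `impl_try_from!` macro generates exactly the kind of `TryFrom` impl that `focus` relies on to narrow an overarching message to one subsystem's type, cloning only on a match. A toy version of the pattern, with an invented two-variant enum:

```rust
// Sketch only: `Wire`, `focus_text`, and `WrongVariant` here are illustrative.
use std::convert::TryFrom;

#[derive(Debug, Clone, PartialEq)]
struct WrongVariant;

#[derive(Debug, Clone)]
enum Wire {
	Ping(u64),
	Text(String),
}

// The by-reference impl the macro would generate for the `Text` variant.
impl<'a> TryFrom<&'a Wire> for &'a String {
	type Error = WrongVariant;

	fn try_from(x: &'a Wire) -> Result<&'a String, WrongVariant> {
		match x {
			Wire::Text(ref s) => Ok(s),
			_ => Err(WrongVariant),
		}
	}
}

// Only clones the payload when the variant actually matches.
fn focus_text(event: &Wire) -> Result<String, WrongVariant> {
	<&String>::try_from(event).map(|s| s.clone())
}

fn main() {
	assert_eq!(focus_text(&Wire::Text("hello".into())), Ok("hello".to_string()));
	assert_eq!(focus_text(&Wire::Ping(1)), Err(WrongVariant));
}
```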
+/// A succinct representation of a peer's view. This consists of a bounded amount of chain heads.
+///
+/// Up to `N` (5?) chain heads.
+#[derive(Default, Debug, Clone, PartialEq, Eq, Encode, Decode)]
+pub struct View(pub Vec<Hash>);
+
+impl View {
+	/// Returns an iterator of the hashes present in `Self` but not in `other`.
+	pub fn difference<'a>(&'a self, other: &'a View) -> impl Iterator<Item = &'a Hash> + 'a {
+		self.0.iter().filter(move |h| !other.contains(h))
+	}
+
+	/// An iterator containing hashes present in both `Self` and in `other`.
+	pub fn intersection<'a>(&'a self, other: &'a View) -> impl Iterator<Item = &'a Hash> + 'a {
+		self.0.iter().filter(move |h| other.contains(h))
+	}
+
+	/// Whether the view contains a given hash.
+	pub fn contains(&self, hash: &Hash) -> bool {
+		self.0.contains(hash)
+	}
+}
+
+/// v1 protocol types.
+pub mod v1 {
+	use polkadot_primitives::v1::{
+		Hash, CollatorId, Id as ParaId, ErasureChunk, CandidateReceipt,
+		SignedAvailabilityBitfield, PoV,
+	};
+	use polkadot_node_primitives::SignedFullStatement;
+	use parity_scale_codec::{Encode, Decode};
+	use std::convert::TryFrom;
+	use super::RequestId;
+
+	/// Network messages used by the availability distribution subsystem.
+	#[derive(Debug, Clone, Encode, Decode, PartialEq)]
+	pub enum AvailabilityDistributionMessage {
+		/// An erasure chunk for a given candidate hash.
+		#[codec(index = "0")]
+		Chunk(Hash, ErasureChunk),
+	}
+
+	/// Network messages used by the bitfield distribution subsystem.
+	#[derive(Debug, Clone, Encode, Decode, PartialEq)]
+	pub enum BitfieldDistributionMessage {
+		/// A signed availability bitfield for a given relay-parent hash.
+		#[codec(index = "0")]
+		Bitfield(Hash, SignedAvailabilityBitfield),
+	}
+
+	/// Network messages used by the PoV distribution subsystem.
+	#[derive(Debug, Clone, Encode, Decode, PartialEq)]
+	pub enum PoVDistributionMessage {
+		/// Notification that we are awaiting the given PoVs (by hash) against a
+		/// specific relay-parent hash.
+		#[codec(index = "0")]
+		Awaiting(Hash, Vec<Hash>),
+		/// Notification of an awaited PoV, in a given relay-parent context.
+		/// (relay_parent, pov_hash, pov)
+		#[codec(index = "1")]
+		SendPoV(Hash, Hash, PoV),
+	}
+
+	/// Network messages used by the statement distribution subsystem.
+	#[derive(Debug, Clone, Encode, Decode, PartialEq)]
+	pub enum StatementDistributionMessage {
+		/// A signed full statement under a given relay-parent.
+		#[codec(index = "0")]
+		Statement(Hash, SignedFullStatement)
+	}
+
+	/// Network messages used by the collator protocol subsystem.
+	#[derive(Debug, Clone, Encode, Decode, PartialEq)]
+	pub enum CollatorProtocolMessage {
+		/// Declare the intent to advertise collations under a collator ID.
+		#[codec(index = "0")]
+		Declare(CollatorId),
+		/// Advertise a collation to a validator. Can only be sent once the peer has declared
+		/// that they are a collator with given ID.
+		#[codec(index = "1")]
+		AdvertiseCollation(Hash, ParaId),
+		/// Request the advertised collation at that relay-parent.
+		#[codec(index = "2")]
+		RequestCollation(RequestId, Hash, ParaId),
+		/// A requested collation.
+		#[codec(index = "3")]
+		Collation(RequestId, CandidateReceipt, PoV),
+	}
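For these `Encode`/`Decode` derives, the `#[codec(index = "...")]` attribute pins the one-byte discriminant each variant gets on the wire, so variants can be reordered in source without breaking the protocol. A round-trip sketch with an invented enum (assumes `parity-scale-codec` with the `derive` feature, as in the Cargo.toml above):

```rust
// Sketch only: the `Demo` enum is invented for illustration.
use parity_scale_codec::{Decode, Encode};

#[derive(Debug, Clone, PartialEq, Encode, Decode)]
enum Demo {
	#[codec(index = "0")]
	Awaiting(u64, Vec<u64>),
	#[codec(index = "1")]
	Send(u64, Vec<u8>),
}

fn main() {
	let msg = Demo::Send(7, vec![1, 2, 3]);
	let bytes = msg.encode();

	// The first byte on the wire is the variant index declared above.
	assert_eq!(bytes[0], 1);

	// Decoding the same bytes yields the original message.
	assert_eq!(Demo::decode(&mut &bytes[..]).unwrap(), msg);
}
```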
+	/// All network messages on the validation peer-set.
+	#[derive(Debug, Clone, Encode, Decode, PartialEq)]
+	pub enum ValidationProtocol {
+		/// Availability distribution messages
+		#[codec(index = "0")]
+		AvailabilityDistribution(AvailabilityDistributionMessage),
+		/// Bitfield distribution messages
+		#[codec(index = "1")]
+		BitfieldDistribution(BitfieldDistributionMessage),
+		/// PoV Distribution messages
+		#[codec(index = "2")]
+		PoVDistribution(PoVDistributionMessage),
+		/// Statement distribution messages
+		#[codec(index = "3")]
+		StatementDistribution(StatementDistributionMessage),
+	}
+
+	impl_try_from!(ValidationProtocol, AvailabilityDistribution, AvailabilityDistributionMessage);
+	impl_try_from!(ValidationProtocol, BitfieldDistribution, BitfieldDistributionMessage);
+	impl_try_from!(ValidationProtocol, PoVDistribution, PoVDistributionMessage);
+	impl_try_from!(ValidationProtocol, StatementDistribution, StatementDistributionMessage);
+
+	/// All network messages on the collation peer-set.
+	#[derive(Debug, Clone, Encode, Decode, PartialEq)]
+	pub enum CollationProtocol {
+		/// Collator protocol messages
+		#[codec(index = "0")]
+		CollatorProtocol(CollatorProtocolMessage),
+	}
+
+	impl_try_from!(CollationProtocol, CollatorProtocol, CollatorProtocolMessage);
+}
diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..c92fdcf6db902876b2fb271a92d88e197a6b11cd
--- /dev/null
+++ b/node/network/statement-distribution/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "polkadot-statement-distribution"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+description = "Statement Distribution Subsystem"
+edition = "2018"
+
+[dependencies]
+futures = "0.3.5"
+log = "0.4.8"
+futures-timer = "3.0.2"
+streamunordered = "0.5.1"
+polkadot-primitives = { path = "../../../primitives" }
+node-primitives = { package = "polkadot-node-primitives", path = "../../primitives" }
+parity-scale-codec = "1.3.4"
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+polkadot-node-network-protocol = { path = "../../network/protocol" }
+arrayvec = "0.5.1"
+indexmap = "1.4.0"
+
+[dev-dependencies]
+parking_lot = "0.10.0"
+polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
+assert_matches = "1.3.0"
+sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..cbdc8e5845a03d85fc413adeeb3c9f0819c5e544
--- /dev/null
+++ b/node/network/statement-distribution/src/lib.rs
@@ -0,0 +1,1385 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! The Statement Distribution Subsystem.
+//!
+//! This is responsible for distributing signed statements about candidate
+//! validity amongst validators.
+
+use polkadot_subsystem::{
+	Subsystem, SubsystemResult, SubsystemContext, SpawnedSubsystem,
+	ActiveLeavesUpdate, FromOverseer, OverseerSignal,
+};
+use polkadot_subsystem::messages::{
+	AllMessages, NetworkBridgeMessage, StatementDistributionMessage, CandidateBackingMessage,
+	RuntimeApiMessage, RuntimeApiRequest,
+};
+use node_primitives::SignedFullStatement;
+use polkadot_primitives::v1::{
+	Hash, CompactStatement, ValidatorIndex, ValidatorId, SigningContext, ValidatorSignature,
+};
+use polkadot_node_network_protocol::{
+	v1 as protocol_v1, View, PeerId, ReputationChange as Rep, NetworkBridgeEvent,
+};
+
+use futures::prelude::*;
+use futures::channel::oneshot;
+use indexmap::IndexSet;
+
+use std::collections::{HashMap, HashSet};
+
+const COST_UNEXPECTED_STATEMENT: Rep = Rep::new(-100, "Unexpected Statement");
+const COST_INVALID_SIGNATURE: Rep = Rep::new(-500, "Invalid Statement Signature");
+const COST_DUPLICATE_STATEMENT: Rep = Rep::new(-250, "Statement sent more than once by peer");
+const COST_APPARENT_FLOOD: Rep = Rep::new(-1000, "Peer appears to be flooding us with statements");
+
+const BENEFIT_VALID_STATEMENT: Rep = Rep::new(5, "Peer provided a valid statement");
+const BENEFIT_VALID_STATEMENT_FIRST: Rep = Rep::new(
+	25,
+	"Peer was the first to provide a valid statement",
+);
+
+/// The maximum amount of candidates each validator is allowed to second at any relay-parent.
+/// Short for "Validator Candidate Threshold".
+///
+/// This is the amount of candidates we keep per validator at any relay-parent.
+/// Typically we will only keep 1, but when a validator equivocates we will need to track 2.
+const VC_THRESHOLD: usize = 2;
+
+/// The statement distribution subsystem.
+pub struct StatementDistribution;
+
+impl<C> Subsystem<C> for StatementDistribution
+	where C: SubsystemContext<Message = StatementDistributionMessage>
+{
+	type Metrics = ();
+
+	fn start(self, ctx: C) -> SpawnedSubsystem {
+		// Swallow error because failure is fatal to the node and we log with more precision
+		// within `run`.
+		SpawnedSubsystem {
+			name: "statement-distribution-subsystem",
+			future: run(ctx).map(|_| ()).boxed(),
+		}
+	}
+}
+
+/// Tracks our impression of a single peer's view of the candidates a validator has seconded
+/// for a given relay-parent.
+///
+/// It is expected to receive at most `VC_THRESHOLD` from us and be aware of at most `VC_THRESHOLD`
+/// via other means.
+#[derive(Default)]
+struct VcPerPeerTracker {
+	local_observed: arrayvec::ArrayVec<[Hash; VC_THRESHOLD]>,
+	remote_observed: arrayvec::ArrayVec<[Hash; VC_THRESHOLD]>,
+}
+
+impl VcPerPeerTracker {
+	// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
+	// based on a message that we have sent it from our local pool.
+	fn note_local(&mut self, h: Hash) {
+		if !note_hash(&mut self.local_observed, h) {
+			log::warn!("Statement distribution is erroneously attempting to distribute more \
+				than {} candidate(s) per validator index. Ignoring", VC_THRESHOLD);
+		}
+	}
+
+	// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
+	// based on a message that it has sent us.
+	//
+	// Returns `true` if the peer was allowed to send us such a message, `false` otherwise.
+	fn note_remote(&mut self, h: Hash) -> bool {
+		note_hash(&mut self.remote_observed, h)
+	}
+}
+
+fn note_hash(
+	observed: &mut arrayvec::ArrayVec<[Hash; VC_THRESHOLD]>,
+	h: Hash,
+) -> bool {
+	if observed.contains(&h) { return true; }
+
+	if observed.is_full() {
+		false
+	} else {
+		observed.try_push(h).expect("length of storage guarded above; \
+			only panics if length exceeds capacity; qed");
+
+		true
+	}
+}
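`note_hash` is the heart of the tracker: a fixed-capacity set that tolerates exactly one equivocation per validator. The same logic in isolation, using arrayvec 0.5's array-backed `ArrayVec` as above, with `u64` standing in for `Hash`:

```rust
// Sketch only: u64 replaces the real Hash type.
use arrayvec::ArrayVec;

const VC_THRESHOLD: usize = 2;

fn note_hash(observed: &mut ArrayVec<[u64; VC_THRESHOLD]>, h: u64) -> bool {
	if observed.contains(&h) { return true; }

	if observed.is_full() {
		false
	} else {
		observed.push(h); // cannot panic: fullness checked above
		true
	}
}

fn main() {
	let mut observed = ArrayVec::new();

	assert!(note_hash(&mut observed, 1));  // first seconded candidate
	assert!(note_hash(&mut observed, 1));  // repeats are always fine
	assert!(note_hash(&mut observed, 2));  // an equivocation is still tracked
	assert!(!note_hash(&mut observed, 3)); // a third distinct candidate is refused
}
```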
+/// Knowledge that a peer has about goings-on in a relay parent.
+#[derive(Default)]
+struct PeerRelayParentKnowledge {
+	/// candidates that the peer is aware of. This indicates that we can
+	/// send other statements pertaining to that candidate.
+	known_candidates: HashSet<Hash>,
+	/// fingerprints of all statements a peer should be aware of: those that
+	/// were sent to the peer by us.
+	sent_statements: HashSet<(CompactStatement, ValidatorIndex)>,
+	/// fingerprints of all statements a peer should be aware of: those that
+	/// were sent to us by the peer.
+	received_statements: HashSet<(CompactStatement, ValidatorIndex)>,
+	/// How many candidates this peer is aware of for each given validator index.
+	seconded_counts: HashMap<ValidatorIndex, VcPerPeerTracker>,
+	/// How many statements we've received for each candidate that we're aware of.
+	received_message_count: HashMap<Hash, usize>,
+}
+
+impl PeerRelayParentKnowledge {
+	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based
+	/// on something that we would like to send to the peer.
+	///
+	/// This returns `None` if the peer cannot accept this statement, without altering internal
+	/// state.
+	///
+	/// If the peer can accept the statement, this returns `Some` and updates the internal state.
+	/// Once the knowledge has incorporated a statement, it cannot be incorporated again.
+	///
+	/// This returns `Some(true)` if this is the first time the peer has become aware of a
+	/// candidate with the given hash.
+	fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> Option<bool> {
+		let already_known = self.sent_statements.contains(fingerprint)
+			|| self.received_statements.contains(fingerprint);
+
+		if already_known {
+			return None;
+		}
+
+		let new_known = match fingerprint.0 {
+			CompactStatement::Candidate(ref h) => {
+				self.seconded_counts.entry(fingerprint.1)
+					.or_default()
+					.note_local(h.clone());
+
+				self.known_candidates.insert(h.clone())
+			},
+			CompactStatement::Valid(ref h) | CompactStatement::Invalid(ref h) => {
+				// The peer can only accept Valid and Invalid statements for which it is aware
+				// of the corresponding candidate.
+				if !self.known_candidates.contains(h) {
+					return None;
+				}
+
+				false
+			}
+		};
+
+		self.sent_statements.insert(fingerprint.clone());
+
+		Some(new_known)
+	}
+	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on
+	/// a message we are receiving from the peer.
+	///
+	/// Provide the maximum message count that we can receive per candidate. In practice we should
+	/// not receive more statements for any one candidate than there are members in the group assigned
+	/// to that para, but this maximum needs to be lenient to account for equivocations that may be
+	/// cross-group. As such, a maximum of 2 * n_validators is recommended.
+	///
+	/// This returns an error if the peer should not have sent us this message according to protocol
+	/// rules for flood protection.
+	///
+	/// If this returns `Ok`, the internal state has been altered. After `receive`ing a new
+	/// candidate, we are then cleared to send the peer further statements about that candidate.
+	///
+	/// This returns `Ok(true)` if this is the first time the peer has become aware of a
+	/// candidate with given hash.
+	fn receive(
+		&mut self,
+		fingerprint: &(CompactStatement, ValidatorIndex),
+		max_message_count: usize,
+	) -> Result<bool, Rep> {
+		// We don't check `sent_statements` because a statement could be in-flight from both
+		// sides at the same time.
+		if self.received_statements.contains(fingerprint) {
+			return Err(COST_DUPLICATE_STATEMENT);
+		}
+
+		let candidate_hash = match fingerprint.0 {
+			CompactStatement::Candidate(ref h) => {
+				let allowed_remote = self.seconded_counts.entry(fingerprint.1)
+					.or_insert_with(Default::default)
+					.note_remote(h.clone());
+
+				if !allowed_remote {
+					return Err(COST_UNEXPECTED_STATEMENT);
+				}
+
+				h
+			}
+			CompactStatement::Valid(ref h) | CompactStatement::Invalid(ref h) => {
+				if !self.known_candidates.contains(&h) {
+					return Err(COST_UNEXPECTED_STATEMENT);
+				}
+
+				h
+			}
+		};
+
+		{
+			let received_per_candidate = self.received_message_count
+				.entry(candidate_hash.clone())
+				.or_insert(0);
+
+			if *received_per_candidate >= max_message_count {
+				return Err(COST_APPARENT_FLOOD);
+			}
+
+			*received_per_candidate += 1;
+		}
+
+		self.received_statements.insert(fingerprint.clone());
+		Ok(self.known_candidates.insert(candidate_hash.clone()))
+	}
+}
+
+struct PeerData {
+	view: View,
+	view_knowledge: HashMap<Hash, PeerRelayParentKnowledge>,
+}
+
+impl PeerData {
+	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based
+	/// on something that we would like to send to the peer.
+	///
+	/// This returns `None` if the peer cannot accept this statement, without altering internal
+	/// state.
+	///
+	/// If the peer can accept the statement, this returns `Some` and updates the internal state.
+	/// Once the knowledge has incorporated a statement, it cannot be incorporated again.
+	///
+	/// This returns `Some(true)` if this is the first time the peer has become aware of a
+	/// candidate with the given hash.
+	fn send(
+		&mut self,
+		relay_parent: &Hash,
+		fingerprint: &(CompactStatement, ValidatorIndex),
+	) -> Option<bool> {
+		self.view_knowledge.get_mut(relay_parent).map_or(None, |k| k.send(fingerprint))
+	}
+
+	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on
+	/// a message we are receiving from the peer.
+	///
+	/// Provide the maximum message count that we can receive per candidate. In practice we should
+	/// not receive more statements for any one candidate than there are members in the group assigned
+	/// to that para, but this maximum needs to be lenient to account for equivocations that may be
+	/// cross-group. As such, a maximum of 2 * n_validators is recommended.
+	///
+	/// This returns an error if the peer should not have sent us this message according to protocol
+	/// rules for flood protection.
+	///
+	/// If this returns `Ok`, the internal state has been altered. After `receive`ing a new
+	/// candidate, we are then cleared to send the peer further statements about that candidate.
+	///
+	/// This returns `Ok(true)` if this is the first time the peer has become aware of a
+	/// candidate with given hash.
+	fn receive(
+		&mut self,
+		relay_parent: &Hash,
+		fingerprint: &(CompactStatement, ValidatorIndex),
+		max_message_count: usize,
+	) -> Result<bool, Rep> {
+		self.view_knowledge.get_mut(relay_parent).ok_or(COST_UNEXPECTED_STATEMENT)?
+			.receive(fingerprint, max_message_count)
+	}
+}
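The per-candidate counter in `receive` is the flood-protection backstop: even a peer that stays within the seconding rules cannot push unboundedly many statements for one candidate. The counting rule in isolation, with `u64` standing in for `Hash`:

```rust
// Sketch only: names are invented for illustration.
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum Outcome { Accepted, Flood }

fn note_received(
	received: &mut HashMap<u64, usize>,
	candidate: u64,
	max_message_count: usize,
) -> Outcome {
	let count = received.entry(candidate).or_insert(0);
	if *count >= max_message_count {
		Outcome::Flood
	} else {
		*count += 1;
		Outcome::Accepted
	}
}

fn main() {
	// With 3 validators in the group, 2 * 3 = 6 statements per candidate are
	// plausible even under cross-group equivocation.
	let max = 2 * 3;
	let mut received = HashMap::new();

	for _ in 0..max {
		assert_eq!(note_received(&mut received, 42, max), Outcome::Accepted);
	}
	// The seventh statement for the same candidate is refused.
	assert_eq!(note_received(&mut received, 42, max), Outcome::Flood);
}
```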
+// A statement stored while a relay chain head is active.
+#[derive(Debug)]
+struct StoredStatement {
+	comparator: StoredStatementComparator,
+	statement: SignedFullStatement,
+}
+
+// A value used for comparison of stored statements to each other.
+//
+// The compact version of the statement, the validator index, and the signature of the validator
+// are enough to differentiate between all types of equivocations, as long as the signature is
+// actually checked to be valid. The same statement with 2 signatures and 2 statements with
+// different (or same) signatures will all be correctly judged to be unequal with this comparator.
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+struct StoredStatementComparator {
+	compact: CompactStatement,
+	validator_index: ValidatorIndex,
+	signature: ValidatorSignature,
+}
+
+impl StoredStatement {
+	fn compact(&self) -> &CompactStatement {
+		&self.comparator.compact
+	}
+
+	fn fingerprint(&self) -> (CompactStatement, ValidatorIndex) {
+		(self.comparator.compact.clone(), self.statement.validator_index())
+	}
+}
+
+impl std::borrow::Borrow<StoredStatementComparator> for StoredStatement {
+	fn borrow(&self) -> &StoredStatementComparator {
+		&self.comparator
+	}
+}
+
+impl std::hash::Hash for StoredStatement {
+	fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+		self.comparator.hash(state)
+	}
+}
+
+impl std::cmp::PartialEq for StoredStatement {
+	fn eq(&self, other: &Self) -> bool {
+		&self.comparator == &other.comparator
+	}
+}
+
+impl std::cmp::Eq for StoredStatement {}
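The `Borrow`/`Hash`/`PartialEq` trio above is the standard trick for storing a heavy value in a hash container while looking it up by a light key; the contract is that all three must agree on which fields participate. A self-contained sketch with invented types:

```rust
// Sketch only: Key/Stored are illustrative stand-ins for the comparator pattern.
use std::borrow::Borrow;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};

#[derive(PartialEq, Eq, Hash, Clone, Debug)]
struct Key { id: u32, sig: u64 }

#[derive(Debug)]
struct Stored { key: Key, payload: String }

impl Borrow<Key> for Stored {
	fn borrow(&self) -> &Key { &self.key }
}

// Hash/Eq must agree with Borrow: only the key participates.
impl Hash for Stored {
	fn hash<H: Hasher>(&self, state: &mut H) { self.key.hash(state) }
}
impl PartialEq for Stored {
	fn eq(&self, other: &Self) -> bool { self.key == other.key }
}
impl Eq for Stored {}

fn main() {
	let mut set = HashSet::new();
	set.insert(Stored { key: Key { id: 1, sig: 99 }, payload: "full statement".into() });

	// Retrieve the full value with just the comparator, no payload needed.
	let found = set.get(&Key { id: 1, sig: 99 }).expect("was just inserted");
	assert_eq!(found.payload, "full statement");
}
```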
+#[derive(Debug)]
+enum NotedStatement<'a> {
+	NotUseful,
+	Fresh(&'a StoredStatement),
+	UsefulButKnown
+}
+
+struct ActiveHeadData {
+	/// All candidates we are aware of for this head, keyed by hash.
+	candidates: HashSet<Hash>,
+	/// Stored statements for circulation to peers.
+	///
+	/// These are iterable in insertion order, and `Seconded` statements are always
+	/// accepted before dependent statements.
+	statements: IndexSet<StoredStatement>,
+	/// The validators at this head.
+	validators: Vec<ValidatorId>,
+	/// The session index this head is at.
+	session_index: sp_staking::SessionIndex,
+	/// How many `Seconded` statements we've seen per validator.
+	seconded_counts: HashMap<ValidatorIndex, usize>,
+}
+
+impl ActiveHeadData {
+	fn new(validators: Vec<ValidatorId>, session_index: sp_staking::SessionIndex) -> Self {
+		ActiveHeadData {
+			candidates: Default::default(),
+			statements: Default::default(),
+			validators,
+			session_index,
+			seconded_counts: Default::default(),
+		}
+	}
+
+	/// Note the given statement.
+	///
+	/// If it was not already known and can be accepted, returns `NotedStatement::Fresh`,
+	/// with a handle to the statement.
+	///
+	/// If it can be accepted, but we already know it, returns `NotedStatement::UsefulButKnown`.
+	///
+	/// We accept up to `VC_THRESHOLD` (2 at time of writing) `Seconded` statements
+	/// per validator. These will be the first ones we see. The statement is assumed
+	/// to have been checked, including that the validator index is not out-of-bounds and
+	/// the signature is valid.
+	///
+	/// Any other statements or those that reference a candidate we are not aware of cannot be accepted
+	/// and will return `NotedStatement::NotUseful`.
+	fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement {
+		let validator_index = statement.validator_index();
+		let comparator = StoredStatementComparator {
+			compact: statement.payload().to_compact(),
+			validator_index,
+			signature: statement.signature().clone(),
+		};
+
+		let stored = StoredStatement {
+			comparator: comparator.clone(),
+			statement,
+		};
+
+		match comparator.compact {
+			CompactStatement::Candidate(h) => {
+				let seconded_so_far = self.seconded_counts.entry(validator_index).or_insert(0);
+				if *seconded_so_far >= VC_THRESHOLD {
+					return NotedStatement::NotUseful;
+				}
+
+				self.candidates.insert(h);
+				if self.statements.insert(stored) {
+					*seconded_so_far += 1;
+
+					// This will always return `Some` because it was just inserted.
+					NotedStatement::Fresh(self.statements.get(&comparator)
+						.expect("Statement was just inserted; qed"))
+				} else {
+					NotedStatement::UsefulButKnown
+				}
+			}
+			CompactStatement::Valid(h) | CompactStatement::Invalid(h) => {
+				if !self.candidates.contains(&h) {
+					return NotedStatement::NotUseful;
+				}
+
+				if self.statements.insert(stored) {
+					// This will always return `Some` because it was just inserted.
+					NotedStatement::Fresh(self.statements.get(&comparator)
+						.expect("Statement was just inserted; qed"))
+				} else {
+					NotedStatement::UsefulButKnown
+				}
+			}
+		}
+	}
+
+	/// Get an iterator over all statements for the active head. Seconded statements come first.
+	fn statements(&self) -> impl Iterator<Item = &StoredStatement> + '_ {
+		self.statements.iter()
+	}
+
+	/// Get an iterator over all statements for the active head that are for a particular candidate.
+	fn statements_about(&self, candidate_hash: Hash)
+		-> impl Iterator<Item = &StoredStatement> + '_
+	{
+		self.statements().filter(move |s| s.compact().candidate_hash() == &candidate_hash)
+	}
+}
+
+/// Check a statement signature under this parent hash.
+fn check_statement_signature(
+	head: &ActiveHeadData,
+	relay_parent: Hash,
+	statement: &SignedFullStatement,
+) -> Result<(), ()> {
+	let signing_context = SigningContext {
+		session_index: head.session_index,
+		parent_hash: relay_parent,
+	};
+
+	head.validators.get(statement.validator_index() as usize)
+		.ok_or(())
+		.and_then(|v| statement.check_signature(&signing_context, v))
+}
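`IndexSet` (from the indexmap crate) is chosen over `HashSet` precisely for the iteration guarantee the field comment mentions: deduplication plus insertion order, so `Seconded` statements noted first are replayed first. A small demonstration, with a tuple standing in for a statement fingerprint:

```rust
// Sketch only: the tuple is an illustrative stand-in for a real fingerprint.
use indexmap::IndexSet;

fn main() {
	let mut statements: IndexSet<(&str, u32)> = IndexSet::new();

	assert!(statements.insert(("seconded", 0)));
	assert!(statements.insert(("valid", 1)));
	// Duplicates are rejected, exactly like a HashSet...
	assert!(!statements.insert(("seconded", 0)));

	// ...but iteration replays insertion order, dependents after their anchor.
	let order: Vec<_> = statements.iter().collect();
	assert_eq!(order, vec![&("seconded", 0), &("valid", 1)]);
}
```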
+/// Places the statement in storage if it is new, and then
+/// circulates the statement to all peers who have not seen it yet, and
+/// sends all statements dependent on that statement to peers who could previously not receive
+/// them but now can.
+async fn circulate_statement_and_dependents(
+	peers: &mut HashMap<PeerId, PeerData>,
+	active_heads: &mut HashMap<Hash, ActiveHeadData>,
+	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
+	relay_parent: Hash,
+	statement: SignedFullStatement,
+) -> SubsystemResult<()> {
+	if let Some(active_head) = active_heads.get_mut(&relay_parent) {
+
+		// First circulate the statement directly to all peers needing it.
+		// The borrow of `active_head` needs to encompass only this (Rust) statement.
+		let outputs: Option<(Hash, Vec<PeerId>)> = {
+			match active_head.note_statement(statement) {
+				NotedStatement::Fresh(stored) => Some((
+					stored.compact().candidate_hash().clone(),
+					circulate_statement(peers, ctx, relay_parent, stored).await?,
+				)),
+				_ => None,
+			}
+		};
+
+		// Now send dependent statements to all peers needing them, if any.
+		if let Some((candidate_hash, peers_needing_dependents)) = outputs {
+			for peer in peers_needing_dependents {
+				if let Some(peer_data) = peers.get_mut(&peer) {
+					// defensive: the peer data should always be some because the iterator
+					// of peers is derived from the set of peers.
+					send_statements_about(
+						peer,
+						peer_data,
+						ctx,
+						relay_parent,
+						candidate_hash,
+						&*active_head
+					).await?;
+				}
+			}
+		}
+	}
+
+	Ok(())
+}
+
+fn statement_message(relay_parent: Hash, statement: SignedFullStatement)
+	-> protocol_v1::ValidationProtocol
+{
+	protocol_v1::ValidationProtocol::StatementDistribution(
+		protocol_v1::StatementDistributionMessage::Statement(relay_parent, statement)
+	)
+}
+
+/// Circulates a statement to all peers who have not seen it yet, and returns
+/// an iterator over peers who need to have dependent statements sent.
+async fn circulate_statement(
+	peers: &mut HashMap<PeerId, PeerData>,
+	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
+	relay_parent: Hash,
+	stored: &StoredStatement,
+) -> SubsystemResult<Vec<PeerId>> {
+	let fingerprint = stored.fingerprint();
+
+	let mut peers_to_send = HashMap::new();
+
+	for (peer, data) in peers.iter_mut() {
+		if let Some(new_known) = data.send(&relay_parent, &fingerprint) {
+			peers_to_send.insert(peer.clone(), new_known);
+		}
+	}
+
+	// Send all these peers the initial statement.
+	if !peers_to_send.is_empty() {
+		let payload = statement_message(relay_parent, stored.statement.clone());
+		ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage(
+			peers_to_send.keys().cloned().collect(),
+			payload,
+		))).await?;
+	}
+
+	Ok(peers_to_send.into_iter().filter_map(|(peer, needs_dependent)| if needs_dependent {
+		Some(peer)
+	} else {
+		None
+	}).collect())
+}
+
+/// Send all statements about a given candidate hash to a peer.
+async fn send_statements_about(
+	peer: PeerId,
+	peer_data: &mut PeerData,
+	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
+	relay_parent: Hash,
+	candidate_hash: Hash,
+	active_head: &ActiveHeadData,
+) -> SubsystemResult<()> {
+	for statement in active_head.statements_about(candidate_hash) {
+		if peer_data.send(&relay_parent, &statement.fingerprint()).is_some() {
+			let payload = statement_message(
+				relay_parent,
+				statement.statement.clone(),
+			);
+
+			ctx.send_message(AllMessages::NetworkBridge(
+				NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload)
+			)).await?;
+		}
+	}
+
+	Ok(())
+}
+
+/// Send all statements at a given relay-parent to a peer.
+async fn send_statements(
+	peer: PeerId,
+	peer_data: &mut PeerData,
+	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
+	relay_parent: Hash,
+	active_head: &ActiveHeadData
+) -> SubsystemResult<()> {
+	for statement in active_head.statements() {
+		if peer_data.send(&relay_parent, &statement.fingerprint()).is_some() {
+			let payload = statement_message(
+				relay_parent,
+				statement.statement.clone(),
+			);
+
+			ctx.send_message(AllMessages::NetworkBridge(
+				NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload)
+			)).await?;
+		}
+	}
+
+	Ok(())
+}
+
+async fn report_peer(
+	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
+	peer: PeerId,
+	rep: Rep,
+) -> SubsystemResult<()> {
+	ctx.send_message(AllMessages::NetworkBridge(
+		NetworkBridgeMessage::ReportPeer(peer, rep)
+	)).await
+}
+async fn handle_incoming_message<'a>(
+	peer: PeerId,
+	peer_data: &mut PeerData,
+	our_view: &View,
+	active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
+	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
+	message: protocol_v1::StatementDistributionMessage,
+) -> SubsystemResult<Option<(Hash, &'a StoredStatement)>> {
+	let (relay_parent, statement) = match message {
+		protocol_v1::StatementDistributionMessage::Statement(r, s) => (r, s),
+	};
+
+	if !our_view.contains(&relay_parent) {
+		return report_peer(ctx, peer, COST_UNEXPECTED_STATEMENT).await.map(|_| None);
+	}
+
+	let active_head = match active_heads.get_mut(&relay_parent) {
+		Some(h) => h,
+		None => {
+			// This should never be out-of-sync with our view if the view updates
+			// correspond to actual `ActiveLeaves` updates. So we just log and ignore.
+			log::warn!("Our view out-of-sync with active heads. Head {} not found", relay_parent);
+			return Ok(None);
+		}
+	};
+
+	// Check the signature on the statement.
+	if let Err(()) = check_statement_signature(&active_head, relay_parent, &statement) {
+		return report_peer(ctx, peer, COST_INVALID_SIGNATURE).await.map(|_| None);
+	}
+
+	// Ensure the statement is stored in the peer data.
+	//
+	// Note that if the peer is sending us something that is not within their view,
+	// it will not be kept within their log.
+	let fingerprint = (statement.payload().to_compact(), statement.validator_index());
+	let max_message_count = active_head.validators.len() * 2;
+	match peer_data.receive(&relay_parent, &fingerprint, max_message_count) {
+		Err(rep) => {
+			report_peer(ctx, peer, rep).await?;
+			return Ok(None)
+		}
+		Ok(true) => {
+			// Send the peer all statements concerning the candidate that we have,
+			// since it appears to have just learned about the candidate.
+			send_statements_about(
+				peer.clone(),
+				peer_data,
+				ctx,
+				relay_parent,
+				fingerprint.0.candidate_hash().clone(),
+				&*active_head,
+			).await?
+		}
+		Ok(false) => {}
+	}
+
+	// Note: `peer_data.receive` already ensures that the statement is not an unbounded equivocation
+	// or unpinned to a seconded candidate. So it is safe to place it into the storage.
+	match active_head.note_statement(statement) {
+		NotedStatement::NotUseful => Ok(None),
+		NotedStatement::UsefulButKnown => {
+			report_peer(ctx, peer, BENEFIT_VALID_STATEMENT).await?;
+			Ok(None)
+		}
+		NotedStatement::Fresh(statement) => {
+			report_peer(ctx, peer, BENEFIT_VALID_STATEMENT_FIRST).await?;
+			Ok(Some((relay_parent, statement)))
+		}
+	}
+}
+
+/// Update a peer's view. Sends all newly unlocked statements based on the previous view.
+async fn update_peer_view_and_send_unlocked(
+	peer: PeerId,
+	peer_data: &mut PeerData,
+	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
+	active_heads: &HashMap<Hash, ActiveHeadData>,
+	new_view: View,
+) -> SubsystemResult<()> {
+	let old_view = std::mem::replace(&mut peer_data.view, new_view);
+
+	// Remove entries for all relay-parents in the old view but not the new.
+	for removed in old_view.difference(&peer_data.view) {
+		let _ = peer_data.view_knowledge.remove(removed);
+	}
+
+	// Add entries for all relay-parents in the new view but not the old.
+	// Furthermore, send all statements we have for those relay parents.
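+	// A fresh (default) `PeerRelayParentKnowledge` entry is created for each new
+	// relay-parent so the sends below are tracked against it.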
+	let new_view = peer_data.view.difference(&old_view).copied().collect::<Vec<_>>();
+	for new in new_view.iter().copied() {
+		peer_data.view_knowledge.insert(new, Default::default());
+
+		if let Some(active_head) = active_heads.get(&new) {
+			send_statements(
+				peer.clone(),
+				peer_data,
+				ctx,
+				new,
+				active_head,
+			).await?;
+		}
+	}
+
+	Ok(())
+}
+
+async fn handle_network_update(
+	peers: &mut HashMap<PeerId, PeerData>,
+	active_heads: &mut HashMap<Hash, ActiveHeadData>,
+	ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
+	our_view: &mut View,
+	update: NetworkBridgeEvent<protocol_v1::StatementDistributionMessage>,
+) -> SubsystemResult<()> {
+	match update {
+		NetworkBridgeEvent::PeerConnected(peer, _role) => {
+			peers.insert(peer, PeerData {
+				view: Default::default(),
+				view_knowledge: Default::default(),
+			});
+
+			Ok(())
+		}
+		NetworkBridgeEvent::PeerDisconnected(peer) => {
+			peers.remove(&peer);
+			Ok(())
+		}
+		NetworkBridgeEvent::PeerMessage(peer, message) => {
+			match peers.get_mut(&peer) {
+				Some(data) => {
+					let new_stored = handle_incoming_message(
+						peer,
+						data,
+						&*our_view,
+						active_heads,
+						ctx,
+						message,
+					).await?;
+
+					if let Some((relay_parent, new)) = new_stored {
+						// When we receive a new message from a peer, we forward it to the
+						// candidate backing subsystem.
+						let message = AllMessages::CandidateBacking(
+							CandidateBackingMessage::Statement(relay_parent, new.statement.clone())
+						);
+						ctx.send_message(message).await?;
+					}
+
+					Ok(())
+				}
+				None => Ok(()),
+			}
+		}
+		NetworkBridgeEvent::PeerViewChange(peer, view) => {
+			match peers.get_mut(&peer) {
+				Some(data) => {
+					update_peer_view_and_send_unlocked(
+						peer,
+						data,
+						ctx,
+						&*active_heads,
+						view,
+					).await
+				}
+				None => Ok(()),
+			}
+		}
+		NetworkBridgeEvent::OurViewChange(view) => {
+			let old_view = std::mem::replace(our_view, view);
+			active_heads.retain(|head, _| our_view.contains(head));
+
+			for new in our_view.difference(&old_view) {
+				if !active_heads.contains_key(&new) {
+					log::warn!(target: "statement_distribution", "Our network bridge view update \
+						inconsistent with `ActiveLeaves` updates we have received from the overseer. \
+						Contains unknown hash {}", new);
+				}
+			}
+
+			Ok(())
+		}
+	}
+}
+
+async fn run(
+	mut ctx: impl SubsystemContext<Message = StatementDistributionMessage>,
+) -> SubsystemResult<()> {
+	let mut peers: HashMap<PeerId, PeerData> = HashMap::new();
+	let mut our_view = View::default();
+	let mut active_heads: HashMap<Hash, ActiveHeadData> = HashMap::new();
+
+	loop {
+		let message = ctx.recv().await?;
+		match message {
+			FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, .. })) => {
+				for relay_parent in activated {
+					let (validators, session_index) = {
+						let (val_tx, val_rx) = oneshot::channel();
+						let (session_tx, session_rx) = oneshot::channel();
+
+						let val_message = AllMessages::RuntimeApi(
+							RuntimeApiMessage::Request(
+								relay_parent,
+								RuntimeApiRequest::Validators(val_tx),
+							),
+						);
+						let session_message = AllMessages::RuntimeApi(
+							RuntimeApiMessage::Request(
+								relay_parent,
+								RuntimeApiRequest::SessionIndexForChild(session_tx),
+							),
+						);
+
+						ctx.send_messages(
+							std::iter::once(val_message).chain(std::iter::once(session_message))
+						).await?;
+
+						match (val_rx.await?, session_rx.await?) {
+							(Ok(v), Ok(s)) => (v, s),
+							(Err(e), _) | (_, Err(e)) => {
+								log::warn!(
+									target: "statement_distribution",
+									"Failed to fetch runtime API data for active leaf: {:?}",
+									e,
+								);
+
+								// Lacking this bookkeeping might make us behave funny, although
+								// not in any slashable way. But we shouldn't take down the node
+								// on what are likely spurious runtime API errors.
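+								// Skipping the leaf here only means no statements will be noted
+								// or distributed for it.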
+ continue; + } + } + }; + + active_heads.entry(relay_parent) + .or_insert(ActiveHeadData::new(validators, session_index)); + } + } + FromOverseer::Signal(OverseerSignal::BlockFinalized(_block_hash)) => { + // do nothing + } + FromOverseer::Signal(OverseerSignal::Conclude) => break, + FromOverseer::Communication { msg } => match msg { + StatementDistributionMessage::Share(relay_parent, statement) => + circulate_statement_and_dependents( + &mut peers, + &mut active_heads, + &mut ctx, + relay_parent, + statement, + ).await?, + StatementDistributionMessage::NetworkBridgeUpdateV1(event) => + handle_network_update( + &mut peers, + &mut active_heads, + &mut ctx, + &mut our_view, + event, + ).await?, + } + } + } + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_keyring::Sr25519Keyring; + use node_primitives::Statement; + use polkadot_primitives::v1::CommittedCandidateReceipt; + use assert_matches::assert_matches; + use futures::executor; + + #[test] + fn active_head_accepts_only_2_seconded_per_validator() { + let validators = vec![ + Sr25519Keyring::Alice.public().into(), + Sr25519Keyring::Bob.public().into(), + Sr25519Keyring::Charlie.public().into(), + ]; + let parent_hash: Hash = [1; 32].into(); + + let session_index = 1; + let signing_context = SigningContext { + parent_hash, + session_index, + }; + + let candidate_a = { + let mut c = CommittedCandidateReceipt::default(); + c.descriptor.relay_parent = parent_hash; + c.descriptor.para_id = 1.into(); + c + }; + + let candidate_b = { + let mut c = CommittedCandidateReceipt::default(); + c.descriptor.relay_parent = parent_hash; + c.descriptor.para_id = 2.into(); + c + }; + + let candidate_c = { + let mut c = CommittedCandidateReceipt::default(); + c.descriptor.relay_parent = parent_hash; + c.descriptor.para_id = 3.into(); + c + }; + + let mut head_data = ActiveHeadData::new(validators, session_index); + + // note A + let a_seconded_val_0 = SignedFullStatement::sign( + Statement::Seconded(candidate_a.clone()), + &signing_context, + 0, + &Sr25519Keyring::Alice.pair().into(), + ); + let noted = head_data.note_statement(a_seconded_val_0.clone()); + + assert_matches!(noted, NotedStatement::Fresh(_)); + + // note A (duplicate) + let noted = head_data.note_statement(a_seconded_val_0); + + assert_matches!(noted, NotedStatement::UsefulButKnown); + + // note B + let noted = head_data.note_statement(SignedFullStatement::sign( + Statement::Seconded(candidate_b.clone()), + &signing_context, + 0, + &Sr25519Keyring::Alice.pair().into(), + )); + + assert_matches!(noted, NotedStatement::Fresh(_)); + + // note C (beyond 2 - ignored) + let noted = head_data.note_statement(SignedFullStatement::sign( + Statement::Seconded(candidate_c.clone()), + &signing_context, + 0, + &Sr25519Keyring::Alice.pair().into(), + )); + + assert_matches!(noted, NotedStatement::NotUseful); + + // note B (new validator) + let noted = head_data.note_statement(SignedFullStatement::sign( + Statement::Seconded(candidate_b.clone()), + &signing_context, + 1, + &Sr25519Keyring::Bob.pair().into(), + )); + + assert_matches!(noted, NotedStatement::Fresh(_)); + + // note C (new validator) + let noted = head_data.note_statement(SignedFullStatement::sign( + Statement::Seconded(candidate_c.clone()), + &signing_context, + 1, + &Sr25519Keyring::Bob.pair().into(), + )); + + assert_matches!(noted, NotedStatement::Fresh(_)); + } + + #[test] + fn note_local_works() { + let hash_a: Hash = [1; 32].into(); + let hash_b: Hash = [2; 32].into(); + + let mut per_peer_tracker = 
VcPerPeerTracker::default(); + per_peer_tracker.note_local(hash_a.clone()); + per_peer_tracker.note_local(hash_b.clone()); + + assert!(per_peer_tracker.local_observed.contains(&hash_a)); + assert!(per_peer_tracker.local_observed.contains(&hash_b)); + + assert!(!per_peer_tracker.remote_observed.contains(&hash_a)); + assert!(!per_peer_tracker.remote_observed.contains(&hash_b)); + } + + #[test] + fn note_remote_works() { + let hash_a: Hash = [1; 32].into(); + let hash_b: Hash = [2; 32].into(); + let hash_c: Hash = [3; 32].into(); + + let mut per_peer_tracker = VcPerPeerTracker::default(); + assert!(per_peer_tracker.note_remote(hash_a.clone())); + assert!(per_peer_tracker.note_remote(hash_b.clone())); + assert!(!per_peer_tracker.note_remote(hash_c.clone())); + + assert!(per_peer_tracker.remote_observed.contains(&hash_a)); + assert!(per_peer_tracker.remote_observed.contains(&hash_b)); + assert!(!per_peer_tracker.remote_observed.contains(&hash_c)); + + assert!(!per_peer_tracker.local_observed.contains(&hash_a)); + assert!(!per_peer_tracker.local_observed.contains(&hash_b)); + assert!(!per_peer_tracker.local_observed.contains(&hash_c)); + } + + #[test] + fn per_peer_relay_parent_knowledge_send() { + let mut knowledge = PeerRelayParentKnowledge::default(); + + let hash_a: Hash = [1; 32].into(); + + // Sending an un-pinned statement should not work and should have no effect. + assert!(knowledge.send(&(CompactStatement::Valid(hash_a), 0)).is_none()); + assert!(!knowledge.known_candidates.contains(&hash_a)); + assert!(knowledge.sent_statements.is_empty()); + assert!(knowledge.received_statements.is_empty()); + assert!(knowledge.seconded_counts.is_empty()); + assert!(knowledge.received_message_count.is_empty()); + + // Make the peer aware of the candidate. + assert_eq!(knowledge.send(&(CompactStatement::Candidate(hash_a), 0)), Some(true)); + assert_eq!(knowledge.send(&(CompactStatement::Candidate(hash_a), 1)), Some(false)); + assert!(knowledge.known_candidates.contains(&hash_a)); + assert_eq!(knowledge.sent_statements.len(), 2); + assert!(knowledge.received_statements.is_empty()); + assert_eq!(knowledge.seconded_counts.len(), 2); + assert!(knowledge.received_message_count.get(&hash_a).is_none()); + + // And now it should accept the dependent message. + assert_eq!(knowledge.send(&(CompactStatement::Valid(hash_a), 0)), Some(false)); + assert!(knowledge.known_candidates.contains(&hash_a)); + assert_eq!(knowledge.sent_statements.len(), 3); + assert!(knowledge.received_statements.is_empty()); + assert_eq!(knowledge.seconded_counts.len(), 2); + assert!(knowledge.received_message_count.get(&hash_a).is_none()); + } + + #[test] + fn cant_send_after_receiving() { + let mut knowledge = PeerRelayParentKnowledge::default(); + + let hash_a: Hash = [1; 32].into(); + assert!(knowledge.receive(&(CompactStatement::Candidate(hash_a), 0), 3).unwrap()); + assert!(knowledge.send(&(CompactStatement::Candidate(hash_a), 0)).is_none()); + } + + #[test] + fn per_peer_relay_parent_knowledge_receive() { + let mut knowledge = PeerRelayParentKnowledge::default(); + + let hash_a: Hash = [1; 32].into(); + + assert_eq!( + knowledge.receive(&(CompactStatement::Valid(hash_a), 0), 3), + Err(COST_UNEXPECTED_STATEMENT), + ); + + assert_eq!( + knowledge.receive(&(CompactStatement::Candidate(hash_a), 0), 3), + Ok(true), + ); + + // Push statements up to the flood limit. 
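+		// `3` plays the role of `max_message_count` here; in production it is
+		// `validators.len() * 2`.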
+ assert_eq!( + knowledge.receive(&(CompactStatement::Valid(hash_a), 1), 3), + Ok(false), + ); + + assert!(knowledge.known_candidates.contains(&hash_a)); + assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 2); + + assert_eq!( + knowledge.receive(&(CompactStatement::Valid(hash_a), 2), 3), + Ok(false), + ); + + assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 3); + + assert_eq!( + knowledge.receive(&(CompactStatement::Valid(hash_a), 7), 3), + Err(COST_APPARENT_FLOOD), + ); + + assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 3); + assert_eq!(knowledge.received_statements.len(), 3); // number of prior `Ok`s. + + // Now make sure that the seconding limit is respected. + let hash_b: Hash = [2; 32].into(); + let hash_c: Hash = [3; 32].into(); + + assert_eq!( + knowledge.receive(&(CompactStatement::Candidate(hash_b), 0), 3), + Ok(true), + ); + + assert_eq!( + knowledge.receive(&(CompactStatement::Candidate(hash_c), 0), 3), + Err(COST_UNEXPECTED_STATEMENT), + ); + + // Last, make sure that already-known statements are disregarded. + assert_eq!( + knowledge.receive(&(CompactStatement::Valid(hash_a), 2), 3), + Err(COST_DUPLICATE_STATEMENT), + ); + + assert_eq!( + knowledge.receive(&(CompactStatement::Candidate(hash_b), 0), 3), + Err(COST_DUPLICATE_STATEMENT), + ); + } + + #[test] + fn peer_view_update_sends_messages() { + let hash_a = [1; 32].into(); + let hash_b = [2; 32].into(); + let hash_c = [3; 32].into(); + + let candidate = { + let mut c = CommittedCandidateReceipt::default(); + c.descriptor.relay_parent = hash_c; + c.descriptor.para_id = 1.into(); + c + }; + let candidate_hash = candidate.hash(); + + let old_view = View(vec![hash_a, hash_b]); + let new_view = View(vec![hash_b, hash_c]); + + let mut active_heads = HashMap::new(); + let validators = vec![ + Sr25519Keyring::Alice.public().into(), + Sr25519Keyring::Bob.public().into(), + Sr25519Keyring::Charlie.public().into(), + ]; + + let session_index = 1; + let signing_context = SigningContext { + parent_hash: hash_c, + session_index, + }; + + let new_head_data = { + let mut data = ActiveHeadData::new(validators, session_index); + + let noted = data.note_statement(SignedFullStatement::sign( + Statement::Seconded(candidate.clone()), + &signing_context, + 0, + &Sr25519Keyring::Alice.pair().into(), + )); + + assert_matches!(noted, NotedStatement::Fresh(_)); + + let noted = data.note_statement(SignedFullStatement::sign( + Statement::Valid(candidate_hash), + &signing_context, + 1, + &Sr25519Keyring::Bob.pair().into(), + )); + + assert_matches!(noted, NotedStatement::Fresh(_)); + + let noted = data.note_statement(SignedFullStatement::sign( + Statement::Valid(candidate_hash), + &signing_context, + 2, + &Sr25519Keyring::Charlie.pair().into(), + )); + + assert_matches!(noted, NotedStatement::Fresh(_)); + + data + }; + + active_heads.insert(hash_c, new_head_data); + + let mut peer_data = PeerData { + view: old_view, + view_knowledge: { + let mut k = HashMap::new(); + + k.insert(hash_a, Default::default()); + k.insert(hash_b, Default::default()); + + k + }, + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + let peer = PeerId::random(); + + executor::block_on(async move { + update_peer_view_and_send_unlocked( + peer.clone(), + &mut peer_data, + &mut ctx, + &active_heads, + new_view.clone(), + ).await.unwrap(); + + assert_eq!(peer_data.view, new_view); + 
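+			// Knowledge of `hash_a` is pruned with the view change, `hash_b` is
+			// retained, and `hash_c` is freshly added.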
assert!(!peer_data.view_knowledge.contains_key(&hash_a)); + assert!(peer_data.view_knowledge.contains_key(&hash_b)); + + let c_knowledge = peer_data.view_knowledge.get(&hash_c).unwrap(); + + assert!(c_knowledge.known_candidates.contains(&candidate_hash)); + assert!(c_knowledge.sent_statements.contains( + &(CompactStatement::Candidate(candidate_hash), 0) + )); + assert!(c_knowledge.sent_statements.contains( + &(CompactStatement::Valid(candidate_hash), 1) + )); + assert!(c_knowledge.sent_statements.contains( + &(CompactStatement::Valid(candidate_hash), 2) + )); + + // now see if we got the 3 messages from the active head data. + let active_head = active_heads.get(&hash_c).unwrap(); + + // semi-fragile because hashmap iterator ordering is undefined, but in practice + // it will not change between runs of the program. + for statement in active_head.statements_about(candidate_hash) { + let message = handle.recv().await; + let expected_to = vec![peer.clone()]; + let expected_payload + = statement_message(hash_c, statement.statement.clone()); + + assert_matches!( + message, + AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( + to, + payload, + )) => { + assert_eq!(to, expected_to); + assert_eq!(payload, expected_payload) + } + ) + } + }); + } + + #[test] + fn circulated_statement_goes_to_all_peers_with_view() { + let hash_a = [1; 32].into(); + let hash_b = [2; 32].into(); + let hash_c = [3; 32].into(); + + let candidate = { + let mut c = CommittedCandidateReceipt::default(); + c.descriptor.relay_parent = hash_b; + c.descriptor.para_id = 1.into(); + c + }; + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + let peer_a_view = View(vec![hash_a]); + let peer_b_view = View(vec![hash_a, hash_b]); + let peer_c_view = View(vec![hash_b, hash_c]); + + let session_index = 1; + + let peer_data_from_view = |view: View| PeerData { + view: view.clone(), + view_knowledge: view.0.iter().map(|v| (v.clone(), Default::default())).collect(), + }; + + let mut peer_data: HashMap<_, _> = vec![ + (peer_a.clone(), peer_data_from_view(peer_a_view)), + (peer_b.clone(), peer_data_from_view(peer_b_view)), + (peer_c.clone(), peer_data_from_view(peer_c_view)), + ].into_iter().collect(); + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + + executor::block_on(async move { + let statement = { + let signing_context = SigningContext { + parent_hash: hash_b, + session_index, + }; + + let statement = SignedFullStatement::sign( + Statement::Seconded(candidate), + &signing_context, + 0, + &Sr25519Keyring::Alice.pair().into(), + ); + + StoredStatement { + comparator: StoredStatementComparator { + compact: statement.payload().to_compact(), + validator_index: 0, + signature: statement.signature().clone() + }, + statement, + } + }; + + let needs_dependents = circulate_statement( + &mut peer_data, + &mut ctx, + hash_b, + &statement, + ).await.unwrap(); + + { + assert_eq!(needs_dependents.len(), 2); + assert!(needs_dependents.contains(&peer_b)); + assert!(needs_dependents.contains(&peer_c)); + } + + let fingerprint = (statement.compact().clone(), 0); + + assert!( + peer_data.get(&peer_b).unwrap() + .view_knowledge.get(&hash_b).unwrap() + .sent_statements.contains(&fingerprint), + ); + + assert!( + peer_data.get(&peer_c).unwrap() + .view_knowledge.get(&hash_b).unwrap() + .sent_statements.contains(&fingerprint), + ); + + let message = handle.recv().await; + 
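+			// A single broadcast goes out, addressed to both peers with `hash_b`
+			// in their view.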
assert_matches!( + message, + AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( + to, + payload, + )) => { + assert_eq!(to.len(), 2); + assert!(to.contains(&peer_b)); + assert!(to.contains(&peer_c)); + + assert_eq!( + payload, + statement_message(hash_b, statement.statement.clone()), + ); + } + ) + }); + } +} diff --git a/node/overseer/Cargo.toml b/node/overseer/Cargo.toml index 88626e2e05f3e76c13f89d280b14c301a5cb0d37..e21cb93631461247d8416f075e98b232686c9fc1 100644 --- a/node/overseer/Cargo.toml +++ b/node/overseer/Cargo.toml @@ -11,9 +11,13 @@ futures-timer = "3.0.2" streamunordered = "0.5.1" polkadot-primitives = { path = "../../primitives" } client = { package = "sc-client-api", git = "https://github.com/paritytech/substrate", branch = "master" } -messages = { package = "polkadot-node-messages", path = "../messages" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../subsystem" } +polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../primitives" } +async-trait = "0.1" [dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +polkadot-node-network-protocol = { path = "../network/protocol" } futures = { version = "0.3.5", features = ["thread-pool"] } futures-timer = "3.0.2" femme = "2.0.1" diff --git a/node/overseer/examples/minimal-example.rs b/node/overseer/examples/minimal-example.rs index 77b99a3a3b3fab0ab38b4362dbdb5cda35423266..4087429e6dc2d8275b774076f3f314d332e0bca2 100644 --- a/node/overseer/examples/minimal-example.rs +++ b/node/overseer/examples/minimal-example.rs @@ -21,23 +21,27 @@ use std::time::Duration; use futures::{ channel::oneshot, - pending, pin_mut, executor, select, stream, + pending, pin_mut, select, stream, FutureExt, StreamExt, }; use futures_timer::Delay; use kv_log_macro as log; -use polkadot_primitives::parachain::{BlockData, PoVBlock}; -use polkadot_overseer::{Overseer, Subsystem, SubsystemContext, SpawnedSubsystem}; +use polkadot_primitives::v1::{BlockData, PoV}; +use polkadot_overseer::{Overseer, AllSubsystems}; -use messages::{ - AllMessages, CandidateBackingMessage, FromOverseer, CandidateValidationMessage +use polkadot_subsystem::{ + Subsystem, SubsystemContext, DummySubsystem, + SpawnedSubsystem, FromOverseer, +}; +use polkadot_subsystem::messages::{ + CandidateValidationMessage, CandidateBackingMessage, AllMessages, }; struct Subsystem1; impl Subsystem1 { - async fn run(mut ctx: SubsystemContext) { + async fn run(mut ctx: impl SubsystemContext) { loop { match ctx.try_recv().await { Ok(Some(msg)) => { @@ -56,13 +60,12 @@ impl Subsystem1 { Delay::new(Duration::from_secs(1)).await; let (tx, _) = oneshot::channel(); - ctx.send_msg(AllMessages::CandidateValidation( - CandidateValidationMessage::Validate( - Default::default(), + ctx.send_message(AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromChainState( Default::default(), - PoVBlock { + PoV { block_data: BlockData(Vec::new()), - }, + }.into(), tx, ) )).await.unwrap(); @@ -70,24 +73,36 @@ impl Subsystem1 { } } -impl Subsystem for Subsystem1 { - fn start(&mut self, ctx: SubsystemContext) -> SpawnedSubsystem { - SpawnedSubsystem(Box::pin(async move { +impl Subsystem for Subsystem1 + where C: SubsystemContext +{ + type Metrics = (); // no Prometheus metrics + + fn start(self, ctx: C) -> SpawnedSubsystem { + let future = Box::pin(async move { Self::run(ctx).await; - })) + }); + + SpawnedSubsystem { + name: "subsystem-1", + future, + } } } struct Subsystem2; impl 
Subsystem2 { - async fn run(mut ctx: SubsystemContext) { - ctx.spawn(Box::pin(async { - loop { - log::info!("Job tick"); - Delay::new(Duration::from_secs(1)).await; - } - })).await.unwrap(); + async fn run(mut ctx: impl SubsystemContext) { + ctx.spawn( + "subsystem-2-job", + Box::pin(async { + loop { + log::info!("Job tick"); + Delay::new(Duration::from_secs(1)).await; + } + }), + ).await.unwrap(); loop { match ctx.try_recv().await { @@ -105,27 +120,52 @@ impl Subsystem2 { } } -impl Subsystem for Subsystem2 { - fn start(&mut self, ctx: SubsystemContext) -> SpawnedSubsystem { - SpawnedSubsystem(Box::pin(async move { +impl Subsystem for Subsystem2 + where C: SubsystemContext +{ + type Metrics = (); // no Prometheus metrics + + fn start(self, ctx: C) -> SpawnedSubsystem { + let future = Box::pin(async move { Self::run(ctx).await; - })) + }); + + SpawnedSubsystem { + name: "subsystem-2", + future, + } } } fn main() { femme::with_level(femme::LevelFilter::Trace); - let spawner = executor::ThreadPool::new().unwrap(); - + let spawner = sp_core::testing::TaskExecutor::new(); futures::executor::block_on(async { let timer_stream = stream::repeat(()).then(|_| async { Delay::new(Duration::from_secs(1)).await; }); + let all_subsystems = AllSubsystems { + candidate_validation: Subsystem2, + candidate_backing: Subsystem1, + candidate_selection: DummySubsystem, + statement_distribution: DummySubsystem, + availability_distribution: DummySubsystem, + bitfield_signing: DummySubsystem, + bitfield_distribution: DummySubsystem, + provisioner: DummySubsystem, + pov_distribution: DummySubsystem, + runtime_api: DummySubsystem, + availability_store: DummySubsystem, + network_bridge: DummySubsystem, + chain_api: DummySubsystem, + collation_generation: DummySubsystem, + collator_protocol: DummySubsystem, + }; let (overseer, _handler) = Overseer::new( vec![], - Box::new(Subsystem2), - Box::new(Subsystem1), + all_subsystems, + None, spawner, ).unwrap(); let overseer_fut = overseer.run().fuse(); diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 0d3c9b7b509552a92e2a3151183490188032114c..8228682fcdf8bbf0bdfc5b92784e5db7890b3e3e 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -17,7 +17,7 @@ //! # Overseer //! //! `overseer` implements the Overseer architecture described in the -//! [implementors-guide](https://github.com/paritytech/polkadot/blob/master/roadmap/implementors-guide/guide.md). +//! [implementers-guide](https://github.com/paritytech/polkadot/blob/master/roadmap/implementers-guide/guide.md). //! For the motivations behind implementing the overseer itself you should //! check out that guide, documentation in this crate will be mostly discussing //! technical stuff. 
@@ -59,71 +59,43 @@ use std::pin::Pin; use std::sync::Arc; use std::task::Poll; use std::time::Duration; -use std::collections::HashSet; +use std::collections::{hash_map, HashMap}; use futures::channel::{mpsc, oneshot}; use futures::{ pending, poll, select, - future::{BoxFuture, RemoteHandle}, - stream::FuturesUnordered, - task::{Spawn, SpawnError, SpawnExt}, + future::BoxFuture, + stream::{self, FuturesUnordered}, Future, FutureExt, SinkExt, StreamExt, }; use futures_timer::Delay; use streamunordered::{StreamYield, StreamUnordered}; -use polkadot_primitives::{Block, BlockNumber, Hash}; +use polkadot_primitives::v1::{Block, BlockNumber, Hash}; use client::{BlockImportNotification, BlockchainEvents, FinalityNotification}; -pub use messages::{ - OverseerSignal, CandidateValidationMessage, CandidateBackingMessage, AllMessages, - FromOverseer, +use polkadot_subsystem::messages::{ + CandidateValidationMessage, CandidateBackingMessage, + CandidateSelectionMessage, ChainApiMessage, StatementDistributionMessage, + AvailabilityDistributionMessage, BitfieldSigningMessage, BitfieldDistributionMessage, + ProvisionerMessage, PoVDistributionMessage, RuntimeApiMessage, + AvailabilityStoreMessage, NetworkBridgeMessage, AllMessages, CollationGenerationMessage, CollatorProtocolMessage, }; +pub use polkadot_subsystem::{ + Subsystem, SubsystemContext, OverseerSignal, FromOverseer, SubsystemError, SubsystemResult, + SpawnedSubsystem, ActiveLeavesUpdate, + metrics::{self, prometheus}, +}; +use polkadot_node_primitives::SpawnNamed; -/// An error type that describes faults that may happen -/// -/// These are: -/// * Channels being closed -/// * Subsystems dying when they are not expected to -/// * Subsystems not dying when they are told to die -/// * etc. -#[derive(Debug)] -pub struct SubsystemError; - -impl From for SubsystemError { - fn from(_: mpsc::SendError) -> Self { - Self - } -} - -impl From for SubsystemError { - fn from(_: oneshot::Canceled) -> Self { - Self - } -} - -impl From for SubsystemError { - fn from(_: SpawnError) -> Self { - Self - } -} - -/// A `Result` type that wraps [`SubsystemError`]. -/// -/// [`SubsystemError`]: struct.SubsystemError.html -pub type SubsystemResult = Result; - -/// An asynchronous subsystem task that runs inside and being overseen by the [`Overseer`]. -/// -/// In essence it's just a newtype wrapping a `BoxFuture`. -/// -/// [`Overseer`]: struct.Overseer.html -pub struct SpawnedSubsystem(pub BoxFuture<'static, ()>); // A capacity of bounded channels inside the overseer. const CHANNEL_CAPACITY: usize = 1024; // A graceful `Overseer` teardown time delay. const STOP_DELAY: u64 = 1; +// Target for logs. +const LOG_TARGET: &'static str = "overseer"; + /// A type of messages that are sent from [`Subsystem`] to [`Overseer`]. /// @@ -141,8 +113,15 @@ enum ToOverseer { /// spawn on the overseer and a `oneshot::Sender` to signal the result /// of the spawn. SpawnJob { + name: &'static str, + s: BoxFuture<'static, ()>, + }, + + /// Same as `SpawnJob` but for blocking tasks to be executed on a + /// dedicated thread pool. + SpawnBlockingJob { + name: &'static str, s: BoxFuture<'static, ()>, - res: oneshot::Sender>, }, } @@ -186,9 +165,18 @@ enum Event { BlockImported(BlockInfo), BlockFinalized(BlockInfo), MsgToSubsystem(AllMessages), + ExternalRequest(ExternalRequest), Stop, } +/// Some request from outer world. +enum ExternalRequest { + WaitForActivation { + hash: Hash, + response_channel: oneshot::Sender<()>, + }, +} + /// A handler used to communicate with the [`Overseer`]. 
 ///
 /// [`Overseer`]: struct.Overseer.html
@@ -200,30 +188,36 @@ pub struct OverseerHandler {
 impl OverseerHandler {
 	/// Inform the `Overseer` that some block was imported.
 	pub async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> {
-		self.events_tx.send(Event::BlockImported(block)).await?;
-
-		Ok(())
+		self.events_tx.send(Event::BlockImported(block)).await.map_err(Into::into)
 	}
 
 	/// Send some message to one of the `Subsystem`s.
 	pub async fn send_msg(&mut self, msg: AllMessages) -> SubsystemResult<()> {
-		self.events_tx.send(Event::MsgToSubsystem(msg)).await?;
-
-		Ok(())
+		self.events_tx.send(Event::MsgToSubsystem(msg)).await.map_err(Into::into)
 	}
 
 	/// Inform the `Overseer` that some block was finalized.
 	pub async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> {
-		self.events_tx.send(Event::BlockFinalized(block)).await?;
+		self.events_tx.send(Event::BlockFinalized(block)).await.map_err(Into::into)
+	}
 
-		Ok(())
+	/// Wait for a block with the given hash to be in the active-leaves set.
+	/// This method is used by external code, like the `Proposer`, that does not subscribe to the Overseer's signals.
+	///
+	/// The response channel responds if the hash was activated and is closed if the hash was deactivated.
+	/// Note that, because the overseer doesn't store the whole active-leaves set but only deltas,
+	/// the response channel may never return if the hash was deactivated before this call.
+	/// In this case, it's the caller's responsibility to ensure a timeout is set.
+	pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender<()>) -> SubsystemResult<()> {
+		self.events_tx.send(Event::ExternalRequest(ExternalRequest::WaitForActivation {
+			hash,
+			response_channel
+		})).await.map_err(Into::into)
 	}
 
 	/// Tell the `Overseer` to shut down.
 	pub async fn stop(&mut self) -> SubsystemResult<()> {
-		self.events_tx.send(Event::Stop).await?;
-
-		Ok(())
+		self.events_tx.send(Event::Stop).await.map_err(Into::into)
 	}
 }
 
@@ -270,7 +264,8 @@ impl Debug for ToOverseer {
 			ToOverseer::SubsystemMessage(msg) => {
 				write!(f, "OverseerMessage::SubsystemMessage({:?})", msg)
 			}
-			ToOverseer::SpawnJob { .. } => write!(f, "OverseerMessage::Spawn(..)")
+			ToOverseer::SpawnJob { .. } => write!(f, "OverseerMessage::Spawn(..)"),
+			ToOverseer::SpawnBlockingJob { .. } => write!(f, "OverseerMessage::SpawnBlocking(..)")
 		}
 	}
 }
 
@@ -278,7 +273,7 @@
 /// A running instance of some [`Subsystem`].
 ///
 /// [`Subsystem`]: trait.Subsystem.html
-struct SubsystemInstance<M: Debug> {
+struct SubsystemInstance<M> {
 	tx: mpsc::Sender<FromOverseer<M>>,
 }
 
@@ -289,17 +284,17 @@ struct SubsystemInstance<M: Debug> {
 /// [`Overseer`]: struct.Overseer.html
 /// [`Subsystem`]: trait.Subsystem.html
 /// [`SubsystemJob`]: trait.SubsystemJob.html
-pub struct SubsystemContext<M>{
+#[derive(Debug)]
+pub struct OverseerSubsystemContext<M>{
 	rx: mpsc::Receiver<FromOverseer<M>>,
 	tx: mpsc::Sender<ToOverseer>,
 }
 
-impl<M> SubsystemContext<M> {
-	/// Try to asynchronously receive a message.
-	///
-	/// This has to be used with caution: if you loop over this without
-	/// using the `pending!()` macro you will end up with a busy loop!
-	pub async fn try_recv(&mut self) -> Result<Option<FromOverseer<M>>, ()> {
+#[async_trait::async_trait]
+impl<M: Send + 'static> SubsystemContext for OverseerSubsystemContext<M> {
+	type Message = M;
+
+	async fn try_recv(&mut self) -> Result<Option<FromOverseer<M>>, ()> {
 		match poll!(self.rx.next()) {
 			Poll::Ready(Some(msg)) => Ok(Some(msg)),
 			Poll::Ready(None) => Err(()),
@@ -307,48 +302,38 @@ impl<M> SubsystemContext<M> {
 		}
 	}
 
-	/// Receive a message.
- pub async fn recv(&mut self) -> SubsystemResult> { + async fn recv(&mut self) -> SubsystemResult> { self.rx.next().await.ok_or(SubsystemError) } - /// Spawn a child task on the executor. - pub async fn spawn(&mut self, s: Pin + Send>>) -> SubsystemResult<()> { - let (tx, rx) = oneshot::channel(); + async fn spawn(&mut self, name: &'static str, s: Pin + Send>>) + -> SubsystemResult<()> + { self.tx.send(ToOverseer::SpawnJob { + name, s, - res: tx, - }).await?; - - rx.await? + }).await.map_err(Into::into) } - /// Send a direct message to some other `Subsystem`, routed based on message type. - pub async fn send_msg(&mut self, msg: AllMessages) -> SubsystemResult<()> { - self.tx.send(ToOverseer::SubsystemMessage(msg)).await?; - - Ok(()) + async fn spawn_blocking(&mut self, name: &'static str, s: Pin + Send>>) + -> SubsystemResult<()> + { + self.tx.send(ToOverseer::SpawnBlockingJob { + name, + s, + }).await.map_err(Into::into) } - fn new(rx: mpsc::Receiver>, tx: mpsc::Sender) -> Self { - Self { - rx, - tx, - } + async fn send_message(&mut self, msg: AllMessages) -> SubsystemResult<()> { + self.tx.send(ToOverseer::SubsystemMessage(msg)).await.map_err(Into::into) } -} -/// A trait that describes the [`Subsystem`]s that can run on the [`Overseer`]. -/// -/// It is generic over the message type circulating in the system. -/// The idea that we want some type contaning persistent state that -/// can spawn actually running subsystems when asked to. -/// -/// [`Overseer`]: struct.Overseer.html -/// [`Subsystem`]: trait.Subsystem.html -pub trait Subsystem { - /// Start this `Subsystem` and return `SpawnedSubsystem`. - fn start(&mut self, ctx: SubsystemContext) -> SpawnedSubsystem; + async fn send_messages(&mut self, msgs: T) -> SubsystemResult<()> + where T: IntoIterator + Send, T::IntoIter: Send + { + let mut msgs = stream::iter(msgs.into_iter().map(ToOverseer::SubsystemMessage).map(Ok)); + self.tx.send_all(&mut msgs).await.map_err(Into::into) + } } /// A subsystem that we oversee. @@ -358,25 +343,62 @@ pub trait Subsystem { /// for whatever reason). /// /// [`Subsystem`]: trait.Subsystem.html -#[allow(dead_code)] -struct OverseenSubsystem { - subsystem: Box + Send>, +struct OverseenSubsystem { instance: Option>, } /// The `Overseer` itself. -pub struct Overseer { - /// A validation subsystem - validation_subsystem: OverseenSubsystem, +pub struct Overseer { + /// A candidate validation subsystem. + candidate_validation_subsystem: OverseenSubsystem, - /// A candidate backing subsystem + /// A candidate backing subsystem. candidate_backing_subsystem: OverseenSubsystem, + /// A candidate selection subsystem. + candidate_selection_subsystem: OverseenSubsystem, + + /// A statement distribution subsystem. + statement_distribution_subsystem: OverseenSubsystem, + + /// An availability distribution subsystem. + availability_distribution_subsystem: OverseenSubsystem, + + /// A bitfield signing subsystem. + bitfield_signing_subsystem: OverseenSubsystem, + + /// A bitfield distribution subsystem. + bitfield_distribution_subsystem: OverseenSubsystem, + + /// A provisioner subsystem. + provisioner_subsystem: OverseenSubsystem, + + /// A PoV distribution subsystem. + pov_distribution_subsystem: OverseenSubsystem, + + /// A runtime API subsystem. + runtime_api_subsystem: OverseenSubsystem, + + /// An availability store subsystem. + availability_store_subsystem: OverseenSubsystem, + + /// A network bridge subsystem. + network_bridge_subsystem: OverseenSubsystem, + + /// A Chain API subsystem. 
+ chain_api_subsystem: OverseenSubsystem, + + /// A Collation Generation subsystem. + collation_generation_subsystem: OverseenSubsystem, + + /// A Collator Protocol subsystem. + collator_protocol_subsystem: OverseenSubsystem, + /// Spawner to spawn tasks to. s: S, /// Here we keep handles to spawned subsystems to be notified when they terminate. - running_subsystems: FuturesUnordered>, + running_subsystems: FuturesUnordered>, /// Gather running subsystms' outbound streams into one. running_subsystems_rx: StreamUnordered>, @@ -384,31 +406,117 @@ pub struct Overseer { /// Events that are sent to the overseer from the outside world events_rx: mpsc::Receiver, + /// External listeners waiting for a hash to be in the active-leave set. + activation_external_listeners: HashMap>>, + /// A set of leaves that `Overseer` starts working with. /// /// Drained at the beginning of `run` and never used again. leaves: Vec<(Hash, BlockNumber)>, /// The set of the "active leaves". - active_leaves: HashSet<(Hash, BlockNumber)>, + active_leaves: HashMap, + + /// Various Prometheus metrics. + metrics: Metrics, +} + +/// This struct is passed as an argument to create a new instance of an [`Overseer`]. +/// +/// As any entity that satisfies the interface may act as a [`Subsystem`] this allows +/// mocking in the test code: +/// +/// Each [`Subsystem`] is supposed to implement some interface that is generic over +/// message type that is specific to this [`Subsystem`]. At the moment not all +/// subsystems are implemented and the rest can be mocked with the [`DummySubsystem`]. +/// +/// [`Subsystem`]: trait.Subsystem.html +/// [`DummySubsystem`]: struct.DummySubsystem.html +pub struct AllSubsystems { + /// A candidate validation subsystem. + pub candidate_validation: CV, + /// A candidate backing subsystem. + pub candidate_backing: CB, + /// A candidate selection subsystem. + pub candidate_selection: CS, + /// A statement distribution subsystem. + pub statement_distribution: SD, + /// An availability distribution subsystem. + pub availability_distribution: AD, + /// A bitfield signing subsystem. + pub bitfield_signing: BS, + /// A bitfield distribution subsystem. + pub bitfield_distribution: BD, + /// A provisioner subsystem. + pub provisioner: P, + /// A PoV distribution subsystem. + pub pov_distribution: PoVD, + /// A runtime API subsystem. + pub runtime_api: RA, + /// An availability store subsystem. + pub availability_store: AS, + /// A network bridge subsystem. + pub network_bridge: NB, + /// A Chain API subsystem. + pub chain_api: CA, + /// A Collation Generation subsystem. + pub collation_generation: CG, + /// A Collator Protocol subsystem. + pub collator_protocol: CP, +} + +/// Overseer Prometheus metrics. +#[derive(Clone)] +struct MetricsInner { + activated_heads_total: prometheus::Counter, + deactivated_heads_total: prometheus::Counter, +} + +#[derive(Default, Clone)] +struct Metrics(Option); + +impl Metrics { + fn on_head_activated(&self) { + if let Some(metrics) = &self.0 { + metrics.activated_heads_total.inc(); + } + } + + fn on_head_deactivated(&self) { + if let Some(metrics) = &self.0 { + metrics.deactivated_heads_total.inc(); + } + } +} + +impl metrics::Metrics for Metrics { + fn try_register(registry: &prometheus::Registry) -> Result { + let metrics = MetricsInner { + activated_heads_total: prometheus::register( + prometheus::Counter::new( + "parachain_activated_heads_total", + "Number of activated heads." 
+ )?, + registry, + )?, + deactivated_heads_total: prometheus::register( + prometheus::Counter::new( + "parachain_deactivated_heads_total", + "Number of deactivated heads." + )?, + registry, + )?, + }; + Ok(Metrics(Some(metrics))) + } } impl Overseer where - S: Spawn, + S: SpawnNamed, { /// Create a new intance of the `Overseer` with a fixed set of [`Subsystem`]s. /// - /// Each [`Subsystem`] is passed to this function as an explicit parameter - /// and is supposed to implement some interface that is generic over message type - /// that is specific to this [`Subsystem`]. At the moment there are only two - /// subsystems: - /// * Validation - /// * CandidateBacking - /// - /// As any entity that satisfies the interface may act as a [`Subsystem`] this allows - /// mocking in the test code: - /// /// ```text /// +------------------------------------+ /// | Overseer | @@ -435,51 +543,63 @@ where /// # Example /// /// The [`Subsystems`] may be any type as long as they implement an expected interface. - /// Here, we create two mock subsystems and start the `Overseer` with them. For the sake - /// of simplicity the termination of the example is done with a timeout. + /// Here, we create a mock validation subsystem and a few dummy ones and start the `Overseer` with them. + /// For the sake of simplicity the termination of the example is done with a timeout. /// ``` /// # use std::time::Duration; /// # use futures::{executor, pin_mut, select, FutureExt}; /// # use futures_timer::Delay; - /// # use polkadot_overseer::{ - /// # Overseer, Subsystem, SpawnedSubsystem, SubsystemContext, - /// # CandidateValidationMessage, CandidateBackingMessage, + /// # use polkadot_overseer::{Overseer, AllSubsystems}; + /// # use polkadot_subsystem::{ + /// # Subsystem, DummySubsystem, SpawnedSubsystem, SubsystemContext, + /// # messages::CandidateValidationMessage, /// # }; /// /// struct ValidationSubsystem; - /// impl Subsystem for ValidationSubsystem { - /// fn start( - /// &mut self, - /// mut ctx: SubsystemContext, - /// ) -> SpawnedSubsystem { - /// SpawnedSubsystem(Box::pin(async move { - /// loop { - /// Delay::new(Duration::from_secs(1)).await; - /// } - /// })) - /// } - /// } /// - /// struct CandidateBackingSubsystem; - /// impl Subsystem for CandidateBackingSubsystem { + /// impl Subsystem for ValidationSubsystem + /// where C: SubsystemContext + /// { + /// type Metrics = (); + /// /// fn start( - /// &mut self, - /// mut ctx: SubsystemContext, + /// self, + /// mut ctx: C, /// ) -> SpawnedSubsystem { - /// SpawnedSubsystem(Box::pin(async move { - /// loop { - /// Delay::new(Duration::from_secs(1)).await; - /// } - /// })) + /// SpawnedSubsystem { + /// name: "validation-subsystem", + /// future: Box::pin(async move { + /// loop { + /// Delay::new(Duration::from_secs(1)).await; + /// } + /// }), + /// } /// } /// } /// /// # fn main() { executor::block_on(async move { - /// let spawner = executor::ThreadPool::new().unwrap(); + /// let spawner = sp_core::testing::TaskExecutor::new(); + /// let all_subsystems = AllSubsystems { + /// candidate_validation: ValidationSubsystem, + /// candidate_backing: DummySubsystem, + /// candidate_selection: DummySubsystem, + /// statement_distribution: DummySubsystem, + /// availability_distribution: DummySubsystem, + /// bitfield_signing: DummySubsystem, + /// bitfield_distribution: DummySubsystem, + /// provisioner: DummySubsystem, + /// pov_distribution: DummySubsystem, + /// runtime_api: DummySubsystem, + /// availability_store: DummySubsystem, + /// network_bridge: 
DummySubsystem,
	///     chain_api: DummySubsystem,
	///     collation_generation: DummySubsystem,
	///     collator_protocol: DummySubsystem,
	/// };
 	/// let (overseer, _handler) = Overseer::new(
 	///     vec![],
-	///     Box::new(ValidationSubsystem),
-	///     Box::new(CandidateBackingSubsystem),
+	///     all_subsystems,
+	///     None,
 	///     spawner,
 	/// ).unwrap();
 	///
@@ -496,12 +616,29 @@ where
 	/// #
 	/// # }); }
 	/// ```
-	pub fn new(
+	pub fn new<CV, CB, CS, SD, AD, BS, BD, P, PoVD, RA, AS, NB, CA, CG, CP>(
 		leaves: impl IntoIterator<Item = BlockInfo>,
-		validation: Box<dyn Subsystem<CandidateValidationMessage> + Send>,
-		candidate_backing: Box<dyn Subsystem<CandidateBackingMessage> + Send>,
+		all_subsystems: AllSubsystems<CV, CB, CS, SD, AD, BS, BD, P, PoVD, RA, AS, NB, CA, CG, CP>,
+		prometheus_registry: Option<&prometheus::Registry>,
 		mut s: S,
-	) -> SubsystemResult<(Self, OverseerHandler)> {
+	) -> SubsystemResult<(Self, OverseerHandler)>
+	where
+		CV: Subsystem<OverseerSubsystemContext<CandidateValidationMessage>> + Send,
+		CB: Subsystem<OverseerSubsystemContext<CandidateBackingMessage>> + Send,
+		CS: Subsystem<OverseerSubsystemContext<CandidateSelectionMessage>> + Send,
+		SD: Subsystem<OverseerSubsystemContext<StatementDistributionMessage>> + Send,
+		AD: Subsystem<OverseerSubsystemContext<AvailabilityDistributionMessage>> + Send,
+		BS: Subsystem<OverseerSubsystemContext<BitfieldSigningMessage>> + Send,
+		BD: Subsystem<OverseerSubsystemContext<BitfieldDistributionMessage>> + Send,
+		P: Subsystem<OverseerSubsystemContext<ProvisionerMessage>> + Send,
+		PoVD: Subsystem<OverseerSubsystemContext<PoVDistributionMessage>> + Send,
+		RA: Subsystem<OverseerSubsystemContext<RuntimeApiMessage>> + Send,
+		AS: Subsystem<OverseerSubsystemContext<AvailabilityStoreMessage>> + Send,
+		NB: Subsystem<OverseerSubsystemContext<NetworkBridgeMessage>> + Send,
+		CA: Subsystem<OverseerSubsystemContext<ChainApiMessage>> + Send,
+		CG: Subsystem<OverseerSubsystemContext<CollationGenerationMessage>> + Send,
+		CP: Subsystem<OverseerSubsystemContext<CollatorProtocolMessage>> + Send,
+	{
 		let (events_tx, events_rx) = mpsc::channel(CHANNEL_CAPACITY);
 
 		let handler = OverseerHandler {
@@ -511,36 +648,146 @@ where
 		let mut running_subsystems_rx = StreamUnordered::new();
 		let mut running_subsystems = FuturesUnordered::new();
 
-		let validation_subsystem = spawn(
+		let candidate_validation_subsystem = spawn(
 			&mut s,
 			&mut running_subsystems,
 			&mut running_subsystems_rx,
-			validation,
+			all_subsystems.candidate_validation,
 		)?;
 
 		let candidate_backing_subsystem = spawn(
 			&mut s,
 			&mut running_subsystems,
 			&mut running_subsystems_rx,
-			candidate_backing,
+			all_subsystems.candidate_backing,
+		)?;
+
+		let candidate_selection_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.candidate_selection,
+		)?;
+
+		let statement_distribution_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.statement_distribution,
 		)?;
 
-		let active_leaves = HashSet::new();
+		let availability_distribution_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.availability_distribution,
+		)?;
+
+		let bitfield_signing_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.bitfield_signing,
+		)?;
+
+		let bitfield_distribution_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.bitfield_distribution,
+		)?;
+
+		let provisioner_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.provisioner,
+		)?;
+
+		let pov_distribution_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.pov_distribution,
+		)?;
+
+		let runtime_api_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.runtime_api,
+		)?;
+
+		let availability_store_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.availability_store,
+		)?;
+
+		let network_bridge_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.network_bridge,
+		)?;
+
+		let chain_api_subsystem = spawn(
+			&mut s,
+			&mut running_subsystems,
+			&mut running_subsystems_rx,
+			all_subsystems.chain_api,
+		)?;
+
+		let collation_generation_subsystem = spawn(
			&mut s,
			&mut running_subsystems,
			&mut running_subsystems_rx,
+
all_subsystems.collation_generation, + )?; + + + let collator_protocol_subsystem = spawn( + &mut s, + &mut running_subsystems, + &mut running_subsystems_rx, + all_subsystems.collator_protocol, + )?; let leaves = leaves .into_iter() .map(|BlockInfo { hash, parent_hash: _, number }| (hash, number)) .collect(); + let active_leaves = HashMap::new(); + + let metrics = ::register(prometheus_registry); + let activation_external_listeners = HashMap::new(); + let this = Self { - validation_subsystem, + candidate_validation_subsystem, candidate_backing_subsystem, + candidate_selection_subsystem, + statement_distribution_subsystem, + availability_distribution_subsystem, + bitfield_signing_subsystem, + bitfield_distribution_subsystem, + provisioner_subsystem, + pov_distribution_subsystem, + runtime_api_subsystem, + availability_store_subsystem, + network_bridge_subsystem, + chain_api_subsystem, + collation_generation_subsystem, + collator_protocol_subsystem, s, running_subsystems, running_subsystems_rx, events_rx, + activation_external_listeners, leaves, active_leaves, + metrics, }; Ok((this, handler)) @@ -548,7 +795,7 @@ where // Stop the overseer. async fn stop(mut self) { - if let Some(ref mut s) = self.validation_subsystem.instance { + if let Some(ref mut s) = self.candidate_validation_subsystem.instance { let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; } @@ -556,6 +803,58 @@ where let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; } + if let Some(ref mut s) = self.candidate_selection_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.statement_distribution_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.availability_distribution_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.bitfield_signing_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.bitfield_distribution_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.provisioner_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.pov_distribution_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.runtime_api_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.availability_store_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.network_bridge_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.chain_api_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.collator_protocol_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + + if let Some(ref mut s) = self.collation_generation_subsystem.instance { + let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + } + let mut stop_delay = Delay::new(Duration::from_secs(STOP_DELAY)).fuse(); loop { @@ 
-574,12 +873,16 @@ where /// Run the `Overseer`. pub async fn run(mut self) -> SubsystemResult<()> { let leaves = std::mem::take(&mut self.leaves); + let mut update = ActiveLeavesUpdate::default(); - for leaf in leaves.into_iter() { - self.broadcast_signal(OverseerSignal::StartWork(leaf.0)).await?; - self.active_leaves.insert(leaf); + for (hash, number) in leaves.into_iter() { + update.activated.push(hash); + self.active_leaves.insert(hash, number); + self.on_head_activated(&hash); } + self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?; + loop { while let Poll::Ready(Some(msg)) = poll!(&mut self.events_rx.next()) { match msg { @@ -596,6 +899,9 @@ where Event::BlockFinalized(block) => { self.block_finalized(block).await?; } + Event::ExternalRequest(request) => { + self.handle_external_request(request); + } } } @@ -604,17 +910,18 @@ where ) { match msg { ToOverseer::SubsystemMessage(msg) => self.route_message(msg).await, - ToOverseer::SpawnJob { s, res } => { - let s = self.spawn_job(s); - - let _ = res.send(s); + ToOverseer::SpawnJob { name, s } => { + self.spawn_job(name, s); + } + ToOverseer::SpawnBlockingJob { name, s } => { + self.spawn_blocking_job(name, s); } } } // Some subsystem exited? It's time to panic. if let Poll::Ready(Some(finished)) = poll!(self.running_subsystems.next()) { - log::error!("Subsystem finished unexpectedly {:?}", finished); + log::error!(target: LOG_TARGET, "Subsystem finished unexpectedly {:?}", finished); self.stop().await; return Err(SubsystemError); } @@ -625,44 +932,116 @@ where } async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> { - if let Some(parent) = self.active_leaves.take(&(block.parent_hash, block.number - 1)) { - self.broadcast_signal(OverseerSignal::StopWork(parent.0)).await?; + let mut update = ActiveLeavesUpdate::default(); + + if let Some(number) = self.active_leaves.remove(&block.parent_hash) { + if let Some(expected_parent_number) = block.number.checked_sub(1) { + debug_assert_eq!(expected_parent_number, number); + } + update.deactivated.push(block.parent_hash); + self.on_head_deactivated(&block.parent_hash); } - if !self.active_leaves.contains(&(block.hash, block.number)) { - self.broadcast_signal(OverseerSignal::StartWork(block.hash)).await?; - self.active_leaves.insert((block.hash, block.number)); + match self.active_leaves.entry(block.hash) { + hash_map::Entry::Vacant(entry) => { + update.activated.push(block.hash); + entry.insert(block.number); + self.on_head_activated(&block.hash); + }, + hash_map::Entry::Occupied(entry) => { + debug_assert_eq!(*entry.get(), block.number); + } } + self.clean_up_external_listeners(); + + self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?; + Ok(()) } async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> { - let mut stop_these = Vec::new(); + let mut update = ActiveLeavesUpdate::default(); - self.active_leaves.retain(|(h, n)| { + self.active_leaves.retain(|h, n| { if *n <= block.number { - stop_these.push(*h); + update.deactivated.push(*h); false } else { true } }); - for hash in stop_these.into_iter() { - self.broadcast_signal(OverseerSignal::StopWork(hash)).await? 
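+		// Update metrics and close any `wait_for_activation` listeners for each
+		// pruned head.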
+ for deactivated in &update.deactivated { + self.on_head_deactivated(deactivated) } + self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?; + + self.broadcast_signal(OverseerSignal::BlockFinalized(block.hash)).await?; + Ok(()) } async fn broadcast_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> { - if let Some(ref mut s) = self.validation_subsystem.instance { + if let Some(ref mut s) = self.candidate_validation_subsystem.instance { s.tx.send(FromOverseer::Signal(signal.clone())).await?; } if let Some(ref mut s) = self.candidate_backing_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal)).await?; + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.candidate_selection_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.statement_distribution_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.availability_distribution_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.bitfield_distribution_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.bitfield_signing_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.provisioner_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.pov_distribution_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.runtime_api_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.availability_store_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.network_bridge_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.chain_api_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.collator_protocol_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; + } + + if let Some(ref mut s) = self.collation_generation_subsystem.instance { + s.tx.send(FromOverseer::Signal(signal.clone())).await?; } Ok(()) @@ -671,7 +1050,7 @@ where async fn route_message(&mut self, msg: AllMessages) { match msg { AllMessages::CandidateValidation(msg) => { - if let Some(ref mut s) = self.validation_subsystem.instance { + if let Some(ref mut s) = self.candidate_validation_subsystem.instance { let _= s.tx.send(FromOverseer::Communication { msg }).await; } } @@ -680,133 +1059,297 @@ where let _ = s.tx.send(FromOverseer::Communication { msg }).await; } } + AllMessages::CandidateSelection(msg) => { + if let Some(ref mut s) = self.candidate_selection_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::StatementDistribution(msg) => { + if let Some(ref mut s) = self.statement_distribution_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::AvailabilityDistribution(msg) => { + if let Some(ref mut s) = self.availability_distribution_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::BitfieldDistribution(msg) => { + if let Some(ref mut 
s) = self.bitfield_distribution_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::BitfieldSigning(msg) => { + if let Some(ref mut s) = self.bitfield_signing_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication{ msg }).await; + } + } + AllMessages::Provisioner(msg) => { + if let Some(ref mut s) = self.provisioner_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::PoVDistribution(msg) => { + if let Some(ref mut s) = self.pov_distribution_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::RuntimeApi(msg) => { + if let Some(ref mut s) = self.runtime_api_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::AvailabilityStore(msg) => { + if let Some(ref mut s) = self.availability_store_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::NetworkBridge(msg) => { + if let Some(ref mut s) = self.network_bridge_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::ChainApi(msg) => { + if let Some(ref mut s) = self.chain_api_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::CollationGeneration(msg) => { + if let Some(ref mut s) = self.collation_generation_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + AllMessages::CollatorProtocol(msg) => { + if let Some(ref mut s) = self.collator_protocol_subsystem.instance { + let _ = s.tx.send(FromOverseer::Communication { msg }).await; + } + } + } + } + + fn on_head_activated(&mut self, hash: &Hash) { + self.metrics.on_head_activated(); + if let Some(listeners) = self.activation_external_listeners.remove(hash) { + for listener in listeners { + // it's fine if the listener is no longer interested + let _ = listener.send(()); + } } } - fn spawn_job(&mut self, j: BoxFuture<'static, ()>) -> SubsystemResult<()> { - self.s.spawn(j).map_err(|_| SubsystemError) + fn on_head_deactivated(&mut self, hash: &Hash) { + self.metrics.on_head_deactivated(); + if let Some(listeners) = self.activation_external_listeners.remove(hash) { + // clean up and signal to listeners the block is deactivated + drop(listeners); + } + } + + fn clean_up_external_listeners(&mut self) { + self.activation_external_listeners.retain(|_, v| { + // remove dead listeners + v.retain(|c| !c.is_canceled()); + !v.is_empty() + }) + } + + fn handle_external_request(&mut self, request: ExternalRequest) { + match request { + ExternalRequest::WaitForActivation { hash, response_channel } => { + if self.active_leaves.get(&hash).is_some() { + // it's fine if the listener is no longer interested + let _ = response_channel.send(()); + } else { + self.activation_external_listeners.entry(hash).or_default().push(response_channel); + } + } + } + } + + fn spawn_job(&mut self, name: &'static str, j: BoxFuture<'static, ()>) { + self.s.spawn(name, j); + } + + fn spawn_blocking_job(&mut self, name: &'static str, j: BoxFuture<'static, ()>) { + self.s.spawn_blocking(name, j); } } -fn spawn( +fn spawn( spawner: &mut S, - futures: &mut FuturesUnordered>, + futures: &mut FuturesUnordered>, streams: &mut StreamUnordered>, - mut s: Box + Send>, + s: impl Subsystem>, ) -> SubsystemResult> { let (to_tx, to_rx) = mpsc::channel(CHANNEL_CAPACITY); let (from_tx, from_rx) = 
mpsc::channel(CHANNEL_CAPACITY); - let ctx = SubsystemContext::new(to_rx, from_tx); - let f = s.start(ctx); + let ctx = OverseerSubsystemContext { rx: to_rx, tx: from_tx }; + let SpawnedSubsystem { future, name } = s.start(ctx); + + let (tx, rx) = oneshot::channel(); + + let fut = Box::pin(async move { + future.await; + let _ = tx.send(()); + }); - let handle = spawner.spawn_with_handle(f.0)?; + spawner.spawn(name, fut); streams.push(from_rx); - futures.push(handle); + futures.push(Box::pin(rx.map(|_| ()))); let instance = Some(SubsystemInstance { tx: to_tx, }); Ok(OverseenSubsystem { - subsystem: s, instance, }) } + #[cfg(test)] mod tests { + use std::sync::atomic; use futures::{executor, pin_mut, select, channel::mpsc, FutureExt}; - use polkadot_primitives::parachain::{BlockData, PoVBlock}; + use polkadot_primitives::v1::{BlockData, CollatorPair, PoV}; + use polkadot_subsystem::DummySubsystem; + use polkadot_subsystem::messages::RuntimeApiRequest; + use polkadot_node_primitives::{Collation, CollationGenerationConfig}; + use polkadot_node_network_protocol::{PeerId, ReputationChange, NetworkBridgeEvent}; + + use sp_core::crypto::Pair as _; + use super::*; + struct TestSubsystem1(mpsc::Sender); - impl Subsystem for TestSubsystem1 { - fn start(&mut self, mut ctx: SubsystemContext) -> SpawnedSubsystem { - let mut sender = self.0.clone(); - SpawnedSubsystem(Box::pin(async move { - let mut i = 0; - loop { - match ctx.recv().await { - Ok(FromOverseer::Communication { .. }) => { - let _ = sender.send(i).await; - i += 1; - continue; + impl Subsystem for TestSubsystem1 + where C: SubsystemContext + { + type Metrics = (); + + fn start(self, mut ctx: C) -> SpawnedSubsystem { + let mut sender = self.0; + SpawnedSubsystem { + name: "test-subsystem-1", + future: Box::pin(async move { + let mut i = 0; + loop { + match ctx.recv().await { + Ok(FromOverseer::Communication { .. 
}) => { + let _ = sender.send(i).await; + i += 1; + continue; + } + Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return, + Err(_) => return, + _ => (), } - Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return, - Err(_) => return, - _ => (), } - } - })) + }), + } } } struct TestSubsystem2(mpsc::Sender); - impl Subsystem for TestSubsystem2 { - fn start(&mut self, mut ctx: SubsystemContext) -> SpawnedSubsystem { - SpawnedSubsystem(Box::pin(async move { - let mut c: usize = 0; - loop { - if c < 10 { - let (tx, _) = oneshot::channel(); - ctx.send_msg( - AllMessages::CandidateValidation( - CandidateValidationMessage::Validate( - Default::default(), - Default::default(), - PoVBlock { - block_data: BlockData(Vec::new()), - }, - tx, + impl Subsystem for TestSubsystem2 + where C: SubsystemContext + { + type Metrics = (); + + fn start(self, mut ctx: C) -> SpawnedSubsystem { + let sender = self.0.clone(); + SpawnedSubsystem { + name: "test-subsystem-2", + future: Box::pin(async move { + let _sender = sender; + let mut c: usize = 0; + loop { + if c < 10 { + let (tx, _) = oneshot::channel(); + ctx.send_message( + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromChainState( + Default::default(), + PoV { + block_data: BlockData(Vec::new()), + }.into(), + tx, + ) ) - ) - ).await.unwrap(); - c += 1; - continue; - } - match ctx.try_recv().await { - Ok(Some(FromOverseer::Signal(OverseerSignal::Conclude))) => { - break; - } - Ok(Some(_)) => { + ).await.unwrap(); + c += 1; continue; } - Err(_) => return, - _ => (), + match ctx.try_recv().await { + Ok(Some(FromOverseer::Signal(OverseerSignal::Conclude))) => { + break; + } + Ok(Some(_)) => { + continue; + } + Err(_) => return, + _ => (), + } + pending!(); } - pending!(); - } - })) + }), + } } } struct TestSubsystem4; - impl Subsystem for TestSubsystem4 { - fn start(&mut self, mut _ctx: SubsystemContext) -> SpawnedSubsystem { - SpawnedSubsystem(Box::pin(async move { - // Do nothing and exit. - })) + impl Subsystem for TestSubsystem4 + where C: SubsystemContext + { + type Metrics = (); + + fn start(self, mut _ctx: C) -> SpawnedSubsystem { + SpawnedSubsystem { + name: "test-subsystem-4", + future: Box::pin(async move { + // Do nothing and exit. + }), + } } } + // Checks that a minimal configuration of two jobs can run and exchange messages. #[test] fn overseer_works() { - let spawner = executor::ThreadPool::new().unwrap(); + let spawner = sp_core::testing::TaskExecutor::new(); executor::block_on(async move { let (s1_tx, mut s1_rx) = mpsc::channel(64); let (s2_tx, mut s2_rx) = mpsc::channel(64); + let all_subsystems = AllSubsystems { + candidate_validation: TestSubsystem1(s1_tx), + candidate_backing: TestSubsystem2(s2_tx), + candidate_selection: DummySubsystem, + statement_distribution: DummySubsystem, + availability_distribution: DummySubsystem, + bitfield_signing: DummySubsystem, + bitfield_distribution: DummySubsystem, + provisioner: DummySubsystem, + pov_distribution: DummySubsystem, + runtime_api: DummySubsystem, + availability_store: DummySubsystem, + network_bridge: DummySubsystem, + chain_api: DummySubsystem, + collation_generation: DummySubsystem, + collator_protocol: DummySubsystem, + }; let (overseer, mut handler) = Overseer::new( vec![], - Box::new(TestSubsystem1(s1_tx)), - Box::new(TestSubsystem2(s2_tx)), + all_subsystems, + None, spawner, ).unwrap(); let overseer_fut = overseer.run().fuse(); @@ -844,19 +1387,115 @@ mod tests { }); } + // Checks activated/deactivated metrics are updated properly. 
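
The counts asserted in the test below follow from the leaf lifecycle: the starting leaf counts as one activation, and each of the two imports activates the new head while deactivating its parent, giving three activations and two deactivations. Under the hood, `Metrics::on_head_activated`/`on_head_deactivated` follow the usual prometheus counter pattern; a minimal sketch (the help strings here are illustrative, not from the patch):

```rust
use prometheus::{Counter, Registry};

fn main() -> Result<(), prometheus::Error> {
	// A named counter, registered once, bumped on every activation.
	let registry = Registry::new();
	let activated = Counter::new(
		"parachain_activated_heads_total", // the name the test looks for
		"Number of activated heads.",      // assumed help text
	)?;
	registry.register(Box::new(activated.clone()))?;

	activated.inc();
	activated.inc();
	activated.inc();

	// `Registry::gather` is how `extract_metrics` reads the value back.
	let families = registry.gather();
	assert_eq!(families[0].get_name(), "parachain_activated_heads_total");
	assert_eq!(families[0].get_metric()[0].get_counter().get_value() as u64, 3);
	Ok(())
}
```
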
+	#[test]
+	fn overseer_metrics_work() {
+		let spawner = sp_core::testing::TaskExecutor::new();
+
+		executor::block_on(async move {
+			let first_block_hash = [1; 32].into();
+			let second_block_hash = [2; 32].into();
+			let third_block_hash = [3; 32].into();
+
+			let first_block = BlockInfo {
+				hash: first_block_hash,
+				parent_hash: [0; 32].into(),
+				number: 1,
+			};
+			let second_block = BlockInfo {
+				hash: second_block_hash,
+				parent_hash: first_block_hash,
+				number: 2,
+			};
+			let third_block = BlockInfo {
+				hash: third_block_hash,
+				parent_hash: second_block_hash,
+				number: 3,
+			};
+
+			let all_subsystems = AllSubsystems {
+				collation_generation: DummySubsystem,
+				candidate_validation: DummySubsystem,
+				candidate_backing: DummySubsystem,
+				candidate_selection: DummySubsystem,
+				collator_protocol: DummySubsystem,
+				statement_distribution: DummySubsystem,
+				availability_distribution: DummySubsystem,
+				bitfield_signing: DummySubsystem,
+				bitfield_distribution: DummySubsystem,
+				provisioner: DummySubsystem,
+				pov_distribution: DummySubsystem,
+				runtime_api: DummySubsystem,
+				availability_store: DummySubsystem,
+				network_bridge: DummySubsystem,
+				chain_api: DummySubsystem,
+			};
+			let registry = prometheus::Registry::new();
+			let (overseer, mut handler) = Overseer::new(
+				vec![first_block],
+				all_subsystems,
+				Some(&registry),
+				spawner,
+			).unwrap();
+			let overseer_fut = overseer.run().fuse();
+
+			pin_mut!(overseer_fut);
+
+			handler.block_imported(second_block).await.unwrap();
+			handler.block_imported(third_block).await.unwrap();
+			handler.stop().await.unwrap();
+
+			select! {
+				res = overseer_fut => {
+					assert!(res.is_ok());
+					let (activated, deactivated) = extract_metrics(&registry);
+					assert_eq!(activated, 3);
+					assert_eq!(deactivated, 2);
+				},
+				complete => (),
+			}
+		});
+	}
+
+	fn extract_metrics(registry: &prometheus::Registry) -> (u64, u64) {
+		let gather = registry.gather();
+		assert_eq!(gather[0].get_name(), "parachain_activated_heads_total");
+		assert_eq!(gather[1].get_name(), "parachain_deactivated_heads_total");
+		let activated = gather[0].get_metric()[0].get_counter().get_value() as u64;
+		let deactivated = gather[1].get_metric()[0].get_counter().get_value() as u64;
+		(activated, deactivated)
+	}
+
 	// Spawn a subsystem that immediately exits.
 	//
 	// Should immediately conclude the overseer itself with an error.
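
How does the overseer notice such an exit? The `spawn` helper earlier in this file wraps every subsystem future so that a oneshot fires when the future resolves, and parks the receiving end in a `FuturesUnordered`; any watcher completing while the overseer is still running is treated as a subsystem failure. A minimal, self-contained sketch of that watch pattern using only the `futures` crate (all names here are illustrative):

```rust
use futures::{channel::oneshot, future::{BoxFuture, FutureExt}, stream::FuturesUnordered, StreamExt};

// Wrap a task so its termination is observable: when the task finishes,
// the oneshot resolves and the watcher future completes.
fn watch(task: BoxFuture<'static, ()>) -> (BoxFuture<'static, ()>, BoxFuture<'static, ()>) {
	let (tx, rx) = oneshot::channel::<()>();
	let wrapped = async move {
		task.await;
		let _ = tx.send(());
	}.boxed();
	// `rx` also resolves (with `Err(Canceled)`) if the task panics and
	// drops the sender, so both exit paths are caught.
	let watcher = rx.map(|_| ()).boxed();
	(wrapped, watcher)
}

fn main() {
	let mut watchers: FuturesUnordered<BoxFuture<'static, ()>> = FuturesUnordered::new();
	let (wrapped, watcher) = watch(async { /* a subsystem that exits immediately */ }.boxed());
	watchers.push(watcher);

	futures::executor::block_on(async move {
		// The watcher yields as soon as the wrapped task ends.
		futures::join!(wrapped, watchers.next());
	});
}
```
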
#[test] fn overseer_panics_on_subsystem_exit() { - let spawner = executor::ThreadPool::new().unwrap(); + let spawner = sp_core::testing::TaskExecutor::new(); executor::block_on(async move { let (s1_tx, _) = mpsc::channel(64); + let all_subsystems = AllSubsystems { + candidate_validation: TestSubsystem1(s1_tx), + candidate_backing: TestSubsystem4, + candidate_selection: DummySubsystem, + statement_distribution: DummySubsystem, + availability_distribution: DummySubsystem, + bitfield_signing: DummySubsystem, + bitfield_distribution: DummySubsystem, + provisioner: DummySubsystem, + pov_distribution: DummySubsystem, + runtime_api: DummySubsystem, + availability_store: DummySubsystem, + network_bridge: DummySubsystem, + chain_api: DummySubsystem, + collation_generation: DummySubsystem, + collator_protocol: DummySubsystem, + }; let (overseer, _handle) = Overseer::new( vec![], - Box::new(TestSubsystem1(s1_tx)), - Box::new(TestSubsystem4), + all_subsystems, + None, spawner, ).unwrap(); let overseer_fut = overseer.run().fuse(); @@ -871,49 +1510,63 @@ mod tests { struct TestSubsystem5(mpsc::Sender); - impl Subsystem for TestSubsystem5 { - fn start(&mut self, mut ctx: SubsystemContext) -> SpawnedSubsystem { + impl Subsystem for TestSubsystem5 + where C: SubsystemContext + { + type Metrics = (); + + fn start(self, mut ctx: C) -> SpawnedSubsystem { let mut sender = self.0.clone(); - SpawnedSubsystem(Box::pin(async move { - loop { - match ctx.try_recv().await { - Ok(Some(FromOverseer::Signal(OverseerSignal::Conclude))) => break, - Ok(Some(FromOverseer::Signal(s))) => { - sender.send(s).await.unwrap(); - continue; - }, - Ok(Some(_)) => continue, - Err(_) => return, - _ => (), + SpawnedSubsystem { + name: "test-subsystem-5", + future: Box::pin(async move { + loop { + match ctx.try_recv().await { + Ok(Some(FromOverseer::Signal(OverseerSignal::Conclude))) => break, + Ok(Some(FromOverseer::Signal(s))) => { + sender.send(s).await.unwrap(); + continue; + }, + Ok(Some(_)) => continue, + Err(_) => return, + _ => (), + } + pending!(); } - pending!(); - } - })) + }), + } } } struct TestSubsystem6(mpsc::Sender); - impl Subsystem for TestSubsystem6 { - fn start(&mut self, mut ctx: SubsystemContext) -> SpawnedSubsystem { + impl Subsystem for TestSubsystem6 + where C: SubsystemContext + { + type Metrics = (); + + fn start(self, mut ctx: C) -> SpawnedSubsystem { let mut sender = self.0.clone(); - SpawnedSubsystem(Box::pin(async move { - loop { - match ctx.try_recv().await { - Ok(Some(FromOverseer::Signal(OverseerSignal::Conclude))) => break, - Ok(Some(FromOverseer::Signal(s))) => { - sender.send(s).await.unwrap(); - continue; - }, - Ok(Some(_)) => continue, - Err(_) => return, - _ => (), + SpawnedSubsystem { + name: "test-subsystem-6", + future: Box::pin(async move { + loop { + match ctx.try_recv().await { + Ok(Some(FromOverseer::Signal(OverseerSignal::Conclude))) => break, + Ok(Some(FromOverseer::Signal(s))) => { + sender.send(s).await.unwrap(); + continue; + }, + Ok(Some(_)) => continue, + Err(_) => return, + _ => (), + } + pending!(); } - pending!(); - } - })) + }), + } } } @@ -921,7 +1574,7 @@ mod tests { // notifications on imported blocks triggers expected `StartWork` and `StopWork` heartbeats. 
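
The heartbeats under test are now `ActiveLeaves` updates: where the overseer previously emitted paired `StartWork`/`StopWork` signals, it batches the activations and deactivations caused by one event into a single update. A toy model of the block-import case (stand-in `Hash` type and plain `Vec`s instead of the real update type):

```rust
use std::collections::HashSet;

type Hash = [u8; 32]; // stand-in for the real `Hash`

#[derive(Debug, Default, PartialEq)]
struct ActiveLeavesUpdate {
	activated: Vec<Hash>,
	deactivated: Vec<Hash>,
}

/// What block import does to the leaf set: the new head becomes a leaf,
/// and its parent (if it was a leaf) stops being one -- both reported in
/// one update rather than as separate start/stop signals.
fn on_block_imported(leaves: &mut HashSet<Hash>, hash: Hash, parent: Hash) -> ActiveLeavesUpdate {
	let mut update = ActiveLeavesUpdate { activated: vec![hash], ..Default::default() };
	if leaves.remove(&parent) {
		update.deactivated.push(parent);
	}
	leaves.insert(hash);
	update
}

fn main() {
	let mut leaves = HashSet::new();
	leaves.insert([1; 32]);

	// Importing block 2 on top of leaf 1: exactly the second heartbeat
	// that `overseer_start_stop_works` expects.
	let update = on_block_imported(&mut leaves, [2; 32], [1; 32]);
	assert_eq!(update.activated, vec![[2u8; 32]]);
	assert_eq!(update.deactivated, vec![[1u8; 32]]);
}
```
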
#[test] fn overseer_start_stop_works() { - let spawner = executor::ThreadPool::new().unwrap(); + let spawner = sp_core::testing::TaskExecutor::new(); executor::block_on(async move { let first_block_hash = [1; 32].into(); @@ -946,11 +1599,27 @@ mod tests { let (tx_5, mut rx_5) = mpsc::channel(64); let (tx_6, mut rx_6) = mpsc::channel(64); - + let all_subsystems = AllSubsystems { + candidate_validation: TestSubsystem5(tx_5), + candidate_backing: TestSubsystem6(tx_6), + candidate_selection: DummySubsystem, + statement_distribution: DummySubsystem, + availability_distribution: DummySubsystem, + bitfield_signing: DummySubsystem, + bitfield_distribution: DummySubsystem, + provisioner: DummySubsystem, + pov_distribution: DummySubsystem, + runtime_api: DummySubsystem, + availability_store: DummySubsystem, + network_bridge: DummySubsystem, + chain_api: DummySubsystem, + collation_generation: DummySubsystem, + collator_protocol: DummySubsystem, + }; let (overseer, mut handler) = Overseer::new( vec![first_block], - Box::new(TestSubsystem5(tx_5)), - Box::new(TestSubsystem6(tx_6)), + all_subsystems, + None, spawner, ).unwrap(); @@ -964,11 +1633,15 @@ mod tests { handler.block_imported(third_block).await.unwrap(); let expected_heartbeats = vec![ - OverseerSignal::StartWork(first_block_hash), - OverseerSignal::StopWork(first_block_hash), - OverseerSignal::StartWork(second_block_hash), - OverseerSignal::StopWork(second_block_hash), - OverseerSignal::StartWork(third_block_hash), + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(first_block_hash)), + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + activated: [second_block_hash].as_ref().into(), + deactivated: [first_block_hash].as_ref().into(), + }), + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + activated: [third_block_hash].as_ref().into(), + deactivated: [second_block_hash].as_ref().into(), + }), ]; loop { @@ -1005,7 +1678,7 @@ mod tests { // notifications on imported blocks triggers expected `StartWork` and `StopWork` heartbeats. #[test] fn overseer_finalize_works() { - let spawner = executor::ThreadPool::new().unwrap(); + let spawner = sp_core::testing::TaskExecutor::new(); executor::block_on(async move { let first_block_hash = [1; 32].into(); @@ -1031,11 +1704,28 @@ mod tests { let (tx_5, mut rx_5) = mpsc::channel(64); let (tx_6, mut rx_6) = mpsc::channel(64); + let all_subsystems = AllSubsystems { + candidate_validation: TestSubsystem5(tx_5), + candidate_backing: TestSubsystem6(tx_6), + candidate_selection: DummySubsystem, + statement_distribution: DummySubsystem, + availability_distribution: DummySubsystem, + bitfield_signing: DummySubsystem, + bitfield_distribution: DummySubsystem, + provisioner: DummySubsystem, + pov_distribution: DummySubsystem, + runtime_api: DummySubsystem, + availability_store: DummySubsystem, + network_bridge: DummySubsystem, + chain_api: DummySubsystem, + collation_generation: DummySubsystem, + collator_protocol: DummySubsystem, + }; // start with two forks of different height. 
let (overseer, mut handler) = Overseer::new( vec![first_block, second_block], - Box::new(TestSubsystem5(tx_5)), - Box::new(TestSubsystem6(tx_6)), + all_subsystems, + None, spawner, ).unwrap(); @@ -1049,10 +1739,15 @@ mod tests { handler.block_finalized(third_block).await.unwrap(); let expected_heartbeats = vec![ - OverseerSignal::StartWork(first_block_hash), - OverseerSignal::StartWork(second_block_hash), - OverseerSignal::StopWork(first_block_hash), - OverseerSignal::StopWork(second_block_hash), + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + activated: [first_block_hash, second_block_hash].as_ref().into(), + ..Default::default() + }), + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + deactivated: [first_block_hash, second_block_hash].as_ref().into(), + ..Default::default() + }), + OverseerSignal::BlockFinalized(third_block_hash), ]; loop { @@ -1091,4 +1786,230 @@ mod tests { } }); } + + #[derive(Clone)] + struct CounterSubsystem { + stop_signals_received: Arc, + signals_received: Arc, + msgs_received: Arc, + } + + impl CounterSubsystem { + fn new( + stop_signals_received: Arc, + signals_received: Arc, + msgs_received: Arc, + ) -> Self { + Self { + stop_signals_received, + signals_received, + msgs_received, + } + } + } + + impl Subsystem for CounterSubsystem + where + C: SubsystemContext, + M: Send, + { + type Metrics = (); + + fn start(self, mut ctx: C) -> SpawnedSubsystem { + SpawnedSubsystem { + name: "counter-subsystem", + future: Box::pin(async move { + loop { + match ctx.try_recv().await { + Ok(Some(FromOverseer::Signal(OverseerSignal::Conclude))) => { + self.stop_signals_received.fetch_add(1, atomic::Ordering::SeqCst); + break; + }, + Ok(Some(FromOverseer::Signal(_))) => { + self.signals_received.fetch_add(1, atomic::Ordering::SeqCst); + continue; + }, + Ok(Some(FromOverseer::Communication { .. 
})) => { + self.msgs_received.fetch_add(1, atomic::Ordering::SeqCst); + continue; + }, + Err(_) => (), + _ => (), + } + pending!(); + } + }), + } + } + } + + fn test_candidate_validation_msg() -> CandidateValidationMessage { + let (sender, _) = oneshot::channel(); + let pov = Arc::new(PoV { block_data: BlockData(Vec::new()) }); + CandidateValidationMessage::ValidateFromChainState(Default::default(), pov, sender) + } + + fn test_candidate_backing_msg() -> CandidateBackingMessage { + let (sender, _) = oneshot::channel(); + CandidateBackingMessage::GetBackedCandidates(Default::default(), sender) + } + + fn test_candidate_selection_msg() -> CandidateSelectionMessage { + CandidateSelectionMessage::default() + } + + fn test_chain_api_msg() -> ChainApiMessage { + let (sender, _) = oneshot::channel(); + ChainApiMessage::FinalizedBlockNumber(sender) + } + + fn test_collator_generation_msg() -> CollationGenerationMessage { + CollationGenerationMessage::Initialize(CollationGenerationConfig { + key: CollatorPair::generate().0, + collator: Box::new(|_| Box::new(TestCollator)), + para_id: Default::default(), + }) + } + struct TestCollator; + + impl Future for TestCollator { + type Output = Collation; + + fn poll(self: Pin<&mut Self>, _cx: &mut futures::task::Context) -> Poll { + panic!("at the Disco") + } + } + + impl Unpin for TestCollator {} + + fn test_collator_protocol_msg() -> CollatorProtocolMessage { + CollatorProtocolMessage::CollateOn(Default::default()) + } + + fn test_network_bridge_event() -> NetworkBridgeEvent { + NetworkBridgeEvent::PeerDisconnected(PeerId::random()) + } + + fn test_statement_distribution_msg() -> StatementDistributionMessage { + StatementDistributionMessage::NetworkBridgeUpdateV1(test_network_bridge_event()) + } + + fn test_availability_distribution_msg() -> AvailabilityDistributionMessage { + AvailabilityDistributionMessage::NetworkBridgeUpdateV1(test_network_bridge_event()) + } + + fn test_bitfield_distribution_msg() -> BitfieldDistributionMessage { + BitfieldDistributionMessage::NetworkBridgeUpdateV1(test_network_bridge_event()) + } + + fn test_provisioner_msg() -> ProvisionerMessage { + let (sender, _) = oneshot::channel(); + ProvisionerMessage::RequestInherentData(Default::default(), sender) + } + + fn test_pov_distribution_msg() -> PoVDistributionMessage { + PoVDistributionMessage::NetworkBridgeUpdateV1(test_network_bridge_event()) + } + + fn test_runtime_api_msg() -> RuntimeApiMessage { + let (sender, _) = oneshot::channel(); + RuntimeApiMessage::Request(Default::default(), RuntimeApiRequest::Validators(sender)) + } + + fn test_availability_store_msg() -> AvailabilityStoreMessage { + let (sender, _) = oneshot::channel(); + AvailabilityStoreMessage::QueryAvailableData(Default::default(), sender) + } + + fn test_network_bridge_msg() -> NetworkBridgeMessage { + NetworkBridgeMessage::ReportPeer(PeerId::random(), ReputationChange::new(42, "")) + } + + // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly. 
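
The test that follows clones a single `CounterSubsystem` into all fifteen subsystem slots, so its `Arc`'d atomics count events across the whole overseer: fifteen `Conclude` signals on stop, two non-`Conclude` signals per subsystem (the startup broadcast plus the block import), and one message per subsystem except `BitfieldSigning`, whose message cannot be constructed. The sharing boils down to this pattern, in miniature (threads stand in for the spawned subsystem tasks):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
	// One counter, shared by every clone, so a single assertion at the
	// end observes traffic across all of them.
	let signals = Arc::new(AtomicUsize::new(0));

	let handles: Vec<_> = (0..15)
		.map(|_| {
			let signals = Arc::clone(&signals);
			std::thread::spawn(move || {
				signals.fetch_add(1, Ordering::SeqCst);
			})
		})
		.collect();

	for h in handles {
		h.join().unwrap();
	}
	assert_eq!(signals.load(Ordering::SeqCst), 15);
}
```
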
+	#[test]
+	fn overseer_all_subsystems_receive_signals_and_messages() {
+		let spawner = sp_core::testing::TaskExecutor::new();
+
+		executor::block_on(async move {
+			let stop_signals_received = Arc::new(atomic::AtomicUsize::new(0));
+			let signals_received = Arc::new(atomic::AtomicUsize::new(0));
+			let msgs_received = Arc::new(atomic::AtomicUsize::new(0));
+
+			let subsystem = CounterSubsystem::new(
+				stop_signals_received.clone(),
+				signals_received.clone(),
+				msgs_received.clone(),
+			);
+
+			let all_subsystems = AllSubsystems {
+				candidate_validation: subsystem.clone(),
+				candidate_backing: subsystem.clone(),
+				candidate_selection: subsystem.clone(),
+				collation_generation: subsystem.clone(),
+				collator_protocol: subsystem.clone(),
+				statement_distribution: subsystem.clone(),
+				availability_distribution: subsystem.clone(),
+				bitfield_signing: subsystem.clone(),
+				bitfield_distribution: subsystem.clone(),
+				provisioner: subsystem.clone(),
+				pov_distribution: subsystem.clone(),
+				runtime_api: subsystem.clone(),
+				availability_store: subsystem.clone(),
+				network_bridge: subsystem.clone(),
+				chain_api: subsystem.clone(),
+			};
+			let (overseer, mut handler) = Overseer::new(
+				vec![],
+				all_subsystems,
+				None,
+				spawner,
+			).unwrap();
+			let overseer_fut = overseer.run().fuse();
+
+			pin_mut!(overseer_fut);
+
+			// send a signal to each subsystem
+			handler.block_imported(BlockInfo {
+				hash: Default::default(),
+				parent_hash: Default::default(),
+				number: Default::default(),
+			}).await.unwrap();
+
+			// send a msg to each subsystem
+			// except for BitfieldSigning as the message is not instantiable
+			handler.send_msg(AllMessages::CandidateValidation(test_candidate_validation_msg())).await.unwrap();
+			handler.send_msg(AllMessages::CandidateBacking(test_candidate_backing_msg())).await.unwrap();
+			handler.send_msg(AllMessages::CandidateSelection(test_candidate_selection_msg())).await.unwrap();
+			handler.send_msg(AllMessages::CollationGeneration(test_collator_generation_msg())).await.unwrap();
+			handler.send_msg(AllMessages::CollatorProtocol(test_collator_protocol_msg())).await.unwrap();
+			handler.send_msg(AllMessages::StatementDistribution(test_statement_distribution_msg())).await.unwrap();
+			handler.send_msg(AllMessages::AvailabilityDistribution(test_availability_distribution_msg())).await.unwrap();
+			// handler.send_msg(AllMessages::BitfieldSigning(test_bitfield_signing_msg())).await.unwrap();
+			handler.send_msg(AllMessages::BitfieldDistribution(test_bitfield_distribution_msg())).await.unwrap();
+			handler.send_msg(AllMessages::Provisioner(test_provisioner_msg())).await.unwrap();
+			handler.send_msg(AllMessages::PoVDistribution(test_pov_distribution_msg())).await.unwrap();
+			handler.send_msg(AllMessages::RuntimeApi(test_runtime_api_msg())).await.unwrap();
+			handler.send_msg(AllMessages::AvailabilityStore(test_availability_store_msg())).await.unwrap();
+			handler.send_msg(AllMessages::NetworkBridge(test_network_bridge_msg())).await.unwrap();
+			handler.send_msg(AllMessages::ChainApi(test_chain_api_msg())).await.unwrap();
+
+			// send a stop signal to each subsystem
+			handler.stop().await.unwrap();
+
+			select!
{ + res = overseer_fut => { + const NUM_SUBSYSTEMS: usize = 15; + + assert_eq!(stop_signals_received.load(atomic::Ordering::SeqCst), NUM_SUBSYSTEMS); + // x2 because of broadcast_signal on startup + assert_eq!(signals_received.load(atomic::Ordering::SeqCst), 2 * NUM_SUBSYSTEMS); + // -1 for BitfieldSigning + assert_eq!(msgs_received.load(atomic::Ordering::SeqCst), NUM_SUBSYSTEMS - 1); + + assert!(res.is_ok()); + }, + complete => (), + } + }); + } } diff --git a/node/primitives/Cargo.toml b/node/primitives/Cargo.toml index f317565b2e99ddd0b9a47671974f40c12147a262..81e2467b374fd0dbeac1715a6aea7a875e7969c4 100644 --- a/node/primitives/Cargo.toml +++ b/node/primitives/Cargo.toml @@ -6,7 +6,9 @@ edition = "2018" description = "Primitives types for the Node-side" [dependencies] +futures = "0.3.5" polkadot-primitives = { path = "../../primitives" } polkadot-statement-table = { path = "../../statement-table" } -parity-scale-codec = { version = "1.3.0", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "1.3.4", default-features = false, features = ["derive"] } runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index bd43748ab24a0e8b7dce38ad43ba86bce8c4ad53..df8bc22551da479cd8a285b9e7c8c9db0a0fb7a5 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -20,20 +20,35 @@ //! not shared between the node and the runtime. This crate builds on top of the primitives defined //! there. +use futures::Future; use parity_scale_codec::{Decode, Encode}; -use polkadot_primitives::{Hash, - parachain::{ - AbridgedCandidateReceipt, CandidateReceipt, CompactStatement, - EncodeAs, Signed, - } +use polkadot_primitives::v1::{ + Hash, CommittedCandidateReceipt, CandidateReceipt, CompactStatement, + EncodeAs, Signed, SigningContext, ValidatorIndex, ValidatorId, + UpwardMessage, Balance, ValidationCode, PersistedValidationData, ValidationData, + HeadData, PoV, CollatorPair, Id as ParaId, +}; +use polkadot_statement_table::{ + generic::{ + ValidityDoubleVote as TableValidityDoubleVote, + MultipleCandidates as TableMultipleCandidates, + }, + v1::Misbehavior as TableMisbehavior, }; +pub use sp_core::traits::SpawnNamed; + /// A statement, where the candidate receipt is included in the `Seconded` variant. -#[derive(Debug, Clone, PartialEq, Encode, Decode)] +/// +/// This is the committed candidate receipt instead of the bare candidate receipt. As such, +/// it gives access to the commitments to validators who have not executed the candidate. This +/// is necessary to allow a block-producing validator to include candidates from outside of the para +/// it is assigned to. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] pub enum Statement { /// A statement that a validator seconds a candidate. #[codec(index = "1")] - Seconded(AbridgedCandidateReceipt), + Seconded(CommittedCandidateReceipt), /// A statement that a validator has deemed a candidate valid. #[codec(index = "2")] Valid(Hash), @@ -42,16 +57,21 @@ pub enum Statement { Invalid(Hash), } +impl Statement { + /// Transform this statement into its compact version, which references only the hash + /// of the candidate. 
+	pub fn to_compact(&self) -> CompactStatement {
+		match *self {
+			Statement::Seconded(ref c) => CompactStatement::Candidate(c.hash()),
+			Statement::Valid(hash) => CompactStatement::Valid(hash),
+			Statement::Invalid(hash) => CompactStatement::Invalid(hash),
+		}
+	}
+}
+
 impl EncodeAs<CompactStatement> for Statement {
 	fn encode_as(&self) -> Vec<u8> {
-		let statement = match *self {
-			Statement::Seconded(ref c) => {
-				polkadot_primitives::parachain::CompactStatement::Candidate(c.hash())
-			}
-			Statement::Valid(hash) => polkadot_primitives::parachain::CompactStatement::Valid(hash),
-			Statement::Invalid(hash) => polkadot_primitives::parachain::CompactStatement::Invalid(hash),
-		};
-		statement.encode()
+		self.to_compact().encode()
 	}
 }
@@ -64,6 +84,7 @@ impl EncodeAs<CompactStatement> for Statement {
 pub type SignedFullStatement = Signed<Statement, CompactStatement>;

 /// A misbehaviour report.
+#[derive(Debug, Clone)]
 pub enum MisbehaviorReport {
 	/// These validator nodes disagree on this candidate's validity, please figure it out
 	///
@@ -77,5 +98,200 @@ pub enum MisbehaviorReport {
 	/// I've noticed a peer contradicting itself about a particular candidate
 	SelfContradiction(CandidateReceipt, SignedFullStatement, SignedFullStatement),
 	/// This peer has seconded more than one parachain candidate for this relay parent head
-	DoubleVote(CandidateReceipt, SignedFullStatement, SignedFullStatement),
+	DoubleVote(SignedFullStatement, SignedFullStatement),
+}
+
+/// A utility struct used to convert `TableMisbehavior` to `MisbehaviorReport`s.
+pub struct FromTableMisbehavior {
+	/// Index of the validator.
+	pub id: ValidatorIndex,
+	/// The misbehavior reported by the table.
+	pub report: TableMisbehavior,
+	/// Signing context.
+	pub signing_context: SigningContext,
+	/// Misbehaving validator's public key.
+	pub key: ValidatorId,
+}
+
+/// Outputs of validating a candidate.
+#[derive(Debug)]
+pub struct ValidationOutputs {
+	/// The head-data produced by validation.
+	pub head_data: HeadData,
+	/// The persisted validation data.
+	pub validation_data: PersistedValidationData,
+	/// Upward messages to the relay chain.
+	pub upward_messages: Vec<UpwardMessage>,
+	/// Fees paid to the validators of the relay-chain.
+	pub fees: Balance,
+	/// The new validation code submitted by the execution, if any.
+	pub new_validation_code: Option<ValidationCode>,
+}
+
+/// Candidate invalidity details.
+#[derive(Debug)]
+pub enum InvalidCandidate {
+	/// Failed to execute `validate_block`. This includes function panicking.
+	ExecutionError(String),
+	/// Execution timeout.
+	Timeout,
+	/// Validation input is over the limit.
+	ParamsTooLarge(u64),
+	/// Code size is over the limit.
+	CodeTooLarge(u64),
+	/// Validation function returned invalid data.
+	BadReturn,
+	/// Invalid relay chain parent.
+	BadParent,
+	/// POV hash does not match.
+	HashMismatch,
+	/// Bad collator signature.
+	BadSignature,
+	/// Output code is too large.
+	NewCodeTooLarge(u64),
+	/// Head-data is over the limit.
+	HeadDataTooLarge(u64),
+	/// Code upgrade triggered but not allowed.
+	CodeUpgradeNotAllowed,
+}
+
+/// Result of the validation of the candidate.
+#[derive(Debug)]
+pub enum ValidationResult {
+	/// Candidate is valid. The validation process yields these outputs.
+	Valid(ValidationOutputs),
+	/// Candidate is invalid.
+	Invalid(InvalidCandidate),
+}
+
+impl std::convert::TryFrom<FromTableMisbehavior> for MisbehaviorReport {
+	type Error = ();
+
+	fn try_from(f: FromTableMisbehavior) -> Result<Self, Self::Error> {
+		match f.report {
+			TableMisbehavior::ValidityDoubleVote(
+				TableValidityDoubleVote::IssuedAndValidity((c, s1), (d, s2))
+			) => {
+				let receipt = c.clone();
+				let signed_1 = SignedFullStatement::new(
+					Statement::Seconded(c),
+					f.id,
+					s1,
+					&f.signing_context,
+					&f.key,
+				).ok_or(())?;
+				let signed_2 = SignedFullStatement::new(
+					Statement::Valid(d),
+					f.id,
+					s2,
+					&f.signing_context,
+					&f.key,
+				).ok_or(())?;
+
+				Ok(MisbehaviorReport::SelfContradiction(receipt.to_plain(), signed_1, signed_2))
+			}
+			TableMisbehavior::ValidityDoubleVote(
+				TableValidityDoubleVote::IssuedAndInvalidity((c, s1), (d, s2))
+			) => {
+				let receipt = c.clone();
+				let signed_1 = SignedFullStatement::new(
+					Statement::Seconded(c),
+					f.id,
+					s1,
+					&f.signing_context,
+					&f.key,
+				).ok_or(())?;
+				let signed_2 = SignedFullStatement::new(
+					Statement::Invalid(d),
+					f.id,
+					s2,
+					&f.signing_context,
+					&f.key,
+				).ok_or(())?;
+
+				Ok(MisbehaviorReport::SelfContradiction(receipt.to_plain(), signed_1, signed_2))
+			}
+			TableMisbehavior::ValidityDoubleVote(
+				TableValidityDoubleVote::ValidityAndInvalidity(c, s1, s2)
+			) => {
+				let signed_1 = SignedFullStatement::new(
+					Statement::Valid(c.hash()),
+					f.id,
+					s1,
+					&f.signing_context,
+					&f.key,
+				).ok_or(())?;
+				let signed_2 = SignedFullStatement::new(
+					Statement::Invalid(c.hash()),
+					f.id,
+					s2,
+					&f.signing_context,
+					&f.key,
+				).ok_or(())?;
+
+				Ok(MisbehaviorReport::SelfContradiction(c.to_plain(), signed_1, signed_2))
+			}
+			TableMisbehavior::MultipleCandidates(
+				TableMultipleCandidates {
+					first,
+					second,
+				}
+			) => {
+				let signed_1 = SignedFullStatement::new(
+					Statement::Seconded(first.0),
+					f.id,
+					first.1,
+					&f.signing_context,
+					&f.key,
+				).ok_or(())?;
+
+				let signed_2 = SignedFullStatement::new(
+					Statement::Seconded(second.0),
+					f.id,
+					second.1,
+					&f.signing_context,
+					&f.key,
+				).ok_or(())?;
+
+				Ok(MisbehaviorReport::DoubleVote(signed_1, signed_2))
+			}
+			_ => Err(()),
+		}
+	}
+}
+
+/// The output of a collator.
+///
+/// This differs from `CandidateCommitments` in two ways:
+///
+/// - does not contain the erasure root; that's computed at the Polkadot level, not at Cumulus
+/// - contains a proof of validity.
+#[derive(Clone, Encode, Decode)]
+pub struct Collation {
+	/// Fees paid from the chain to the relay chain validators.
+	pub fees: Balance,
+	/// Messages destined to be interpreted by the Relay chain itself.
+	pub upward_messages: Vec<UpwardMessage>,
+	/// New validation code.
+	pub new_validation_code: Option<ValidationCode>,
+	/// The head-data produced as a result of execution.
+	pub head_data: HeadData,
+	/// Proof that this block is valid.
+	pub proof_of_validity: PoV,
+}
+
+/// Configuration for the collation generator
+pub struct CollationGenerationConfig {
+	/// Collator's authentication key, so it can sign things.
+	pub key: CollatorPair,
+	/// Collation function.
+	pub collator: Box<dyn Fn(&ValidationData) -> Box<dyn Future<Output = Collation> + Unpin + Send> + Send + Sync>,
+	/// The parachain that this collator collates for
+	pub para_id: ParaId,
+}
+
+impl std::fmt::Debug for CollationGenerationConfig {
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		write!(f, "CollationGenerationConfig {{ ...
}}") + } } diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 74069f0233afa8d73e79e2f3bf3ed8cba2c1cf9b..cf2dc0549a20b2d7d7e93f71e5461162e79944dc 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -15,10 +15,11 @@ hex-literal = "0.2.1" polkadot-primitives = { path = "../../primitives" } polkadot-runtime = { path = "../../runtime/polkadot" } polkadot-overseer = { path = "../overseer" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../subsystem" } kusama-runtime = { path = "../../runtime/kusama" } westend-runtime = { path = "../../runtime/westend" } -polkadot-network = { path = "../../network", optional = true } polkadot-rpc = { path = "../../rpc" } +polkadot-node-core-proposer = { path = "../core/proposer" } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -30,6 +31,7 @@ sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "mas sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" } grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } grandpa_primitives = { package = "sp-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } @@ -42,20 +44,19 @@ sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master" } -im-online = { package = "pallet-im-online", git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "master" } authority-discovery = { package = "sc-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master" } authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master" } babe = { package = "sc-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" } sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -system_rpc_runtime_api = { package = "frame-system-rpc-runtime-api", git = "https://github.com/paritytech/substrate", branch = "master" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +codec = { package = "parity-scale-codec", version = "1.3.4" } sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-offchain = { package = 
"sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } [dev-dependencies] polkadot-test-runtime-client = { path = "../../runtime/test-runtime/client" } diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index 0659d08083016a953407ad4aa7c532ff4898d76c..6a21478e2f21e7769aeeba1988cde4648d1df517 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -17,7 +17,7 @@ //! Polkadot chain configurations. use sp_core::{Pair, Public, crypto::UncheckedInto, sr25519}; -use polkadot_primitives::{AccountId, AccountPublic, parachain::ValidatorId}; +use polkadot_primitives::v1::{AccountId, AccountPublic, ValidatorId}; use polkadot_runtime as polkadot; use kusama_runtime as kusama; use westend_runtime as westend; @@ -31,7 +31,7 @@ use telemetry::TelemetryEndpoints; use hex_literal::hex; use babe_primitives::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; -use im_online::sr25519::{AuthorityId as ImOnlineId}; +use pallet_im_online::sr25519::{AuthorityId as ImOnlineId}; use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use pallet_staking::Forcing; @@ -48,9 +48,9 @@ const DEFAULT_PROTOCOL_ID: &str = "dot"; #[serde(rename_all = "camelCase")] pub struct Extensions { /// Block numbers with known hashes. - pub fork_blocks: sc_client_api::ForkBlocks, + pub fork_blocks: sc_client_api::ForkBlocks, /// Known bad block hashes. - pub bad_blocks: sc_client_api::BadBlocks, + pub bad_blocks: sc_client_api::BadBlocks, } /// The `ChainSpec parametrised for polkadot runtime`. 
@@ -113,7 +113,7 @@ fn westend_session_keys( westend::SessionKeys { babe, grandpa, im_online, parachain_validator, authority_discovery } } -fn polkadot_staging_testnet_config_genesis() -> polkadot::GenesisConfig { +fn polkadot_staging_testnet_config_genesis(wasm_binary: &[u8]) -> polkadot::GenesisConfig { // subkey inspect "$SECRET" let endowed_accounts = vec![]; @@ -131,27 +131,27 @@ fn polkadot_staging_testnet_config_genesis() -> polkadot::GenesisConfig { const STASH: u128 = 100 * DOTS; polkadot::GenesisConfig { - system: Some(polkadot::SystemConfig { - code: polkadot::WASM_BINARY.to_vec(), + frame_system: Some(polkadot::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - balances: Some(polkadot::BalancesConfig { + pallet_balances: Some(polkadot::BalancesConfig { balances: endowed_accounts.iter() .map(|k: &AccountId| (k.clone(), ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), }), - indices: Some(polkadot::IndicesConfig { + pallet_indices: Some(polkadot::IndicesConfig { indices: vec![], }), - session: Some(polkadot::SessionConfig { + pallet_session: Some(polkadot::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), polkadot_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(polkadot::StakingConfig { + pallet_staking: Some(polkadot::StakingConfig { validator_count: 50, minimum_validator_count: 4, stakers: initial_authorities @@ -163,44 +163,34 @@ fn polkadot_staging_testnet_config_genesis() -> polkadot::GenesisConfig { slash_reward_fraction: Perbill::from_percent(10), .. Default::default() }), - elections_phragmen: Some(Default::default()), - democracy: Some(Default::default()), - collective_Instance1: Some(polkadot::CouncilConfig { + pallet_elections_phragmen: Some(Default::default()), + pallet_democracy: Some(Default::default()), + pallet_collective_Instance1: Some(polkadot::CouncilConfig { members: vec![], phantom: Default::default(), }), - collective_Instance2: Some(polkadot::TechnicalCommitteeConfig { + pallet_collective_Instance2: Some(polkadot::TechnicalCommitteeConfig { members: vec![], phantom: Default::default(), }), - membership_Instance1: Some(Default::default()), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { + pallet_membership_Instance1: Some(Default::default()), + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(polkadot::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(polkadot::RegistrarConfig { - parachains: vec![], - _phdata: Default::default(), - }), claims: Some(polkadot::ClaimsConfig { claims: vec![], vesting: vec![], }), - vesting: Some(polkadot::VestingConfig { + pallet_vesting: Some(polkadot::VestingConfig { vesting: vec![], }), - sudo: Some(polkadot::SudoConfig { - key: endowed_accounts[0].clone(), - }), } } -fn westend_staging_testnet_config_genesis() -> westend::GenesisConfig { +fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::GenesisConfig { // subkey inspect "$SECRET" let endowed_accounts = vec![ // 5ENpP27BrVdJTdUfY6djmcw3d3xEJ6NzSUU52CCPmGpMrdEY @@ -286,27 +276,27 @@ fn westend_staging_testnet_config_genesis() -> 
westend::GenesisConfig { const STASH: u128 = 100 * WND; westend::GenesisConfig { - system: Some(westend::SystemConfig { - code: westend::WASM_BINARY.to_vec(), + frame_system: Some(westend::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - balances: Some(westend::BalancesConfig { + pallet_balances: Some(westend::BalancesConfig { balances: endowed_accounts.iter() .map(|k: &AccountId| (k.clone(), ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), }), - indices: Some(westend::IndicesConfig { + pallet_indices: Some(westend::IndicesConfig { indices: vec![], }), - session: Some(westend::SessionConfig { + pallet_session: Some(westend::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), westend_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(westend::StakingConfig { + pallet_staking: Some(westend::StakingConfig { validator_count: 50, minimum_validator_count: 4, stakers: initial_authorities @@ -318,29 +308,22 @@ fn westend_staging_testnet_config_genesis() -> westend::GenesisConfig { slash_reward_fraction: Perbill::from_percent(10), .. Default::default() }), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(westend::AuthorityDiscoveryConfig { + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(westend::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(westend::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(westend::RegistrarConfig { - parachains: vec![], - _phdata: Default::default(), - }), - vesting: Some(westend::VestingConfig { + pallet_vesting: Some(westend::VestingConfig { vesting: vec![], }), - sudo: Some(westend::SudoConfig { + pallet_sudo: Some(westend::SudoConfig { key: endowed_accounts[0].clone(), }), } } -fn kusama_staging_testnet_config_genesis() -> kusama::GenesisConfig { +fn kusama_staging_testnet_config_genesis(wasm_binary: &[u8]) -> kusama::GenesisConfig { // subkey inspect "$SECRET" let endowed_accounts = vec![ // 5CVFESwfkk7NmhQ6FwHCM9roBvr9BGa4vJHFYU8DnGQxrXvz @@ -426,27 +409,27 @@ fn kusama_staging_testnet_config_genesis() -> kusama::GenesisConfig { const STASH: u128 = 100 * KSM; kusama::GenesisConfig { - system: Some(kusama::SystemConfig { - code: kusama::WASM_BINARY.to_vec(), + frame_system: Some(kusama::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - balances: Some(kusama::BalancesConfig { + pallet_balances: Some(kusama::BalancesConfig { balances: endowed_accounts.iter() .map(|k: &AccountId| (k.clone(), ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), }), - indices: Some(kusama::IndicesConfig { + pallet_indices: Some(kusama::IndicesConfig { indices: vec![], }), - session: Some(kusama::SessionConfig { + pallet_session: Some(kusama::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), kusama_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(kusama::StakingConfig { + pallet_staking: Some(kusama::StakingConfig { validator_count: 50, minimum_validator_count: 4, stakers: initial_authorities @@ -458,89 +441,88 @@ fn kusama_staging_testnet_config_genesis() -> kusama::GenesisConfig { 
slash_reward_fraction: Perbill::from_percent(10), .. Default::default() }), - elections_phragmen: Some(Default::default()), - democracy: Some(Default::default()), - collective_Instance1: Some(kusama::CouncilConfig { + pallet_elections_phragmen: Some(Default::default()), + pallet_democracy: Some(Default::default()), + pallet_collective_Instance1: Some(kusama::CouncilConfig { members: vec![], phantom: Default::default(), }), - collective_Instance2: Some(kusama::TechnicalCommitteeConfig { + pallet_collective_Instance2: Some(kusama::TechnicalCommitteeConfig { members: vec![], phantom: Default::default(), }), - membership_Instance1: Some(Default::default()), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(kusama::AuthorityDiscoveryConfig { + pallet_membership_Instance1: Some(Default::default()), + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(kusama::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(kusama::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(kusama::RegistrarConfig { - parachains: vec![], - _phdata: Default::default(), - }), claims: Some(kusama::ClaimsConfig { claims: vec![], vesting: vec![], }), - vesting: Some(kusama::VestingConfig { + pallet_vesting: Some(kusama::VestingConfig { vesting: vec![], }), } } /// Polkadot staging testnet config. -pub fn polkadot_staging_testnet_config() -> PolkadotChainSpec { +pub fn polkadot_staging_testnet_config() -> Result { + let wasm_binary = polkadot::WASM_BINARY.ok_or("Polkadot development wasm not available")?; let boot_nodes = vec![]; - PolkadotChainSpec::from_genesis( + + Ok(PolkadotChainSpec::from_genesis( "Polkadot Staging Testnet", "polkadot_staging_testnet", ChainType::Live, - polkadot_staging_testnet_config_genesis, + move || polkadot_staging_testnet_config_genesis(wasm_binary), boot_nodes, Some(TelemetryEndpoints::new(vec![(POLKADOT_STAGING_TELEMETRY_URL.to_string(), 0)]) .expect("Polkadot Staging telemetry url is valid; qed")), Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Staging testnet config. -pub fn kusama_staging_testnet_config() -> KusamaChainSpec { +pub fn kusama_staging_testnet_config() -> Result { + let wasm_binary = kusama::WASM_BINARY.ok_or("Kusama development wasm not available")?; let boot_nodes = vec![]; - KusamaChainSpec::from_genesis( + + Ok(KusamaChainSpec::from_genesis( "Kusama Staging Testnet", "kusama_staging_testnet", ChainType::Live, - kusama_staging_testnet_config_genesis, + move || kusama_staging_testnet_config_genesis(wasm_binary), boot_nodes, Some(TelemetryEndpoints::new(vec![(KUSAMA_STAGING_TELEMETRY_URL.to_string(), 0)]) .expect("Kusama Staging telemetry url is valid; qed")), Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Westend staging testnet config. 
-pub fn westend_staging_testnet_config() -> WestendChainSpec { +pub fn westend_staging_testnet_config() -> Result { + let wasm_binary = westend::WASM_BINARY.ok_or("Westend development wasm not available")?; let boot_nodes = vec![]; - WestendChainSpec::from_genesis( + + Ok(WestendChainSpec::from_genesis( "Westend Staging Testnet", "westend_staging_testnet", ChainType::Live, - westend_staging_testnet_config_genesis, + move || westend_staging_testnet_config_genesis(wasm_binary), boot_nodes, Some(TelemetryEndpoints::new(vec![(WESTEND_STAGING_TELEMETRY_URL.to_string(), 0)]) .expect("Westend Staging telemetry url is valid; qed")), Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Helper function to generate a crypto pair from seed @@ -598,8 +580,9 @@ fn testnet_accounts() -> Vec { /// Helper function to create polkadot GenesisConfig for testing pub fn polkadot_testnet_genesis( + wasm_binary: &[u8], initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ImOnlineId, ValidatorId, AuthorityDiscoveryId)>, - root_key: AccountId, + _root_key: AccountId, endowed_accounts: Option>, ) -> polkadot::GenesisConfig { let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); @@ -608,24 +591,24 @@ pub fn polkadot_testnet_genesis( const STASH: u128 = 100 * DOTS; polkadot::GenesisConfig { - system: Some(polkadot::SystemConfig { - code: polkadot::WASM_BINARY.to_vec(), + frame_system: Some(polkadot::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - indices: Some(polkadot::IndicesConfig { + pallet_indices: Some(polkadot::IndicesConfig { indices: vec![], }), - balances: Some(polkadot::BalancesConfig { + pallet_balances: Some(polkadot::BalancesConfig { balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), }), - session: Some(polkadot::SessionConfig { + pallet_session: Some(polkadot::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), polkadot_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(polkadot::StakingConfig { + pallet_staking: Some(polkadot::StakingConfig { minimum_validator_count: 1, validator_count: 2, stakers: initial_authorities.iter() @@ -636,45 +619,36 @@ pub fn polkadot_testnet_genesis( slash_reward_fraction: Perbill::from_percent(10), .. 
Default::default() }), - elections_phragmen: Some(Default::default()), - democracy: Some(polkadot::DemocracyConfig::default()), - collective_Instance1: Some(polkadot::CouncilConfig { + pallet_elections_phragmen: Some(Default::default()), + pallet_democracy: Some(polkadot::DemocracyConfig::default()), + pallet_collective_Instance1: Some(polkadot::CouncilConfig { members: vec![], phantom: Default::default(), }), - collective_Instance2: Some(polkadot::TechnicalCommitteeConfig { + pallet_collective_Instance2: Some(polkadot::TechnicalCommitteeConfig { members: vec![], phantom: Default::default(), }), - membership_Instance1: Some(Default::default()), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { + pallet_membership_Instance1: Some(Default::default()), + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(polkadot::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(polkadot::RegistrarConfig{ - parachains: vec![], - _phdata: Default::default(), - }), claims: Some(polkadot::ClaimsConfig { claims: vec![], vesting: vec![], }), - vesting: Some(polkadot::VestingConfig { + pallet_vesting: Some(polkadot::VestingConfig { vesting: vec![], }), - sudo: Some(polkadot::SudoConfig { - key: root_key, - }), } } /// Helper function to create kusama GenesisConfig for testing pub fn kusama_testnet_genesis( + wasm_binary: &[u8], initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ImOnlineId, ValidatorId, AuthorityDiscoveryId)>, _root_key: AccountId, endowed_accounts: Option>, @@ -685,24 +659,24 @@ pub fn kusama_testnet_genesis( const STASH: u128 = 100 * KSM; kusama::GenesisConfig { - system: Some(kusama::SystemConfig { - code: kusama::WASM_BINARY.to_vec(), + frame_system: Some(kusama::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - indices: Some(kusama::IndicesConfig { + pallet_indices: Some(kusama::IndicesConfig { indices: vec![], }), - balances: Some(kusama::BalancesConfig { + pallet_balances: Some(kusama::BalancesConfig { balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), }), - session: Some(kusama::SessionConfig { + pallet_session: Some(kusama::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), kusama_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(kusama::StakingConfig { + pallet_staking: Some(kusama::StakingConfig { minimum_validator_count: 1, validator_count: 2, stakers: initial_authorities.iter() @@ -713,35 +687,28 @@ pub fn kusama_testnet_genesis( slash_reward_fraction: Perbill::from_percent(10), .. 
Default::default() }), - elections_phragmen: Some(Default::default()), - democracy: Some(kusama::DemocracyConfig::default()), - collective_Instance1: Some(kusama::CouncilConfig { + pallet_elections_phragmen: Some(Default::default()), + pallet_democracy: Some(kusama::DemocracyConfig::default()), + pallet_collective_Instance1: Some(kusama::CouncilConfig { members: vec![], phantom: Default::default(), }), - collective_Instance2: Some(kusama::TechnicalCommitteeConfig { + pallet_collective_Instance2: Some(kusama::TechnicalCommitteeConfig { members: vec![], phantom: Default::default(), }), - membership_Instance1: Some(Default::default()), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(kusama::AuthorityDiscoveryConfig { + pallet_membership_Instance1: Some(Default::default()), + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(kusama::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(kusama::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(kusama::RegistrarConfig{ - parachains: vec![], - _phdata: Default::default(), - }), claims: Some(kusama::ClaimsConfig { claims: vec![], vesting: vec![], }), - vesting: Some(kusama::VestingConfig { + pallet_vesting: Some(kusama::VestingConfig { vesting: vec![], }), } @@ -749,6 +716,7 @@ pub fn kusama_testnet_genesis( /// Helper function to create polkadot GenesisConfig for testing pub fn westend_testnet_genesis( + wasm_binary: &[u8], initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ImOnlineId, ValidatorId, AuthorityDiscoveryId)>, root_key: AccountId, endowed_accounts: Option>, @@ -759,24 +727,24 @@ pub fn westend_testnet_genesis( const STASH: u128 = 100 * DOTS; westend::GenesisConfig { - system: Some(westend::SystemConfig { - code: westend::WASM_BINARY.to_vec(), + frame_system: Some(westend::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - indices: Some(westend::IndicesConfig { + pallet_indices: Some(westend::IndicesConfig { indices: vec![], }), - balances: Some(westend::BalancesConfig { + pallet_balances: Some(westend::BalancesConfig { balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), }), - session: Some(westend::SessionConfig { + pallet_session: Some(westend::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), westend_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(westend::StakingConfig { + pallet_staking: Some(westend::StakingConfig { minimum_validator_count: 1, validator_count: 2, stakers: initial_authorities.iter() @@ -787,30 +755,24 @@ pub fn westend_testnet_genesis( slash_reward_fraction: Perbill::from_percent(10), .. 
Default::default() }), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(westend::AuthorityDiscoveryConfig { + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(westend::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(westend::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(westend::RegistrarConfig{ - parachains: vec![], - _phdata: Default::default(), - }), - vesting: Some(westend::VestingConfig { + pallet_vesting: Some(westend::VestingConfig { vesting: vec![], }), - sudo: Some(westend::SudoConfig { + pallet_sudo: Some(westend::SudoConfig { key: root_key, }), } } -fn polkadot_development_config_genesis() -> polkadot::GenesisConfig { +fn polkadot_development_config_genesis(wasm_binary: &[u8]) -> polkadot::GenesisConfig { polkadot_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), ], @@ -819,8 +781,9 @@ fn polkadot_development_config_genesis() -> polkadot::GenesisConfig { ) } -fn kusama_development_config_genesis() -> kusama::GenesisConfig { +fn kusama_development_config_genesis(wasm_binary: &[u8]) -> kusama::GenesisConfig { kusama_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), ], @@ -829,8 +792,9 @@ fn kusama_development_config_genesis() -> kusama::GenesisConfig { ) } -fn westend_development_config_genesis() -> westend::GenesisConfig { +fn westend_development_config_genesis(wasm_binary: &[u8]) -> westend::GenesisConfig { westend_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), ], @@ -840,52 +804,59 @@ fn westend_development_config_genesis() -> westend::GenesisConfig { } /// Polkadot development config (single validator Alice) -pub fn polkadot_development_config() -> PolkadotChainSpec { - PolkadotChainSpec::from_genesis( +pub fn polkadot_development_config() -> Result { + let wasm_binary = polkadot::WASM_BINARY.ok_or("Polkadot development wasm not available")?; + + Ok(PolkadotChainSpec::from_genesis( "Development", "dev", ChainType::Development, - polkadot_development_config_genesis, + move || polkadot_development_config_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Kusama development config (single validator Alice) -pub fn kusama_development_config() -> KusamaChainSpec { - KusamaChainSpec::from_genesis( +pub fn kusama_development_config() -> Result { + let wasm_binary = kusama::WASM_BINARY.ok_or("Kusama development wasm not available")?; + + Ok(KusamaChainSpec::from_genesis( "Development", "kusama_dev", ChainType::Development, - kusama_development_config_genesis, + move || kusama_development_config_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Westend development config (single validator Alice) -pub fn westend_development_config() -> WestendChainSpec { - WestendChainSpec::from_genesis( +pub fn westend_development_config() -> Result { + let wasm_binary = westend::WASM_BINARY.ok_or("Westend development wasm not available")?; + + Ok(WestendChainSpec::from_genesis( "Development", "westend_dev", ChainType::Development, - westend_development_config_genesis, + move || westend_development_config_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } -fn polkadot_local_testnet_genesis() -> polkadot::GenesisConfig { +fn 
polkadot_local_testnet_genesis(wasm_binary: &[u8]) -> polkadot::GenesisConfig { polkadot_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), @@ -896,22 +867,25 @@ fn polkadot_local_testnet_genesis() -> polkadot::GenesisConfig { } /// Polkadot local testnet config (multivalidator Alice + Bob) -pub fn polkadot_local_testnet_config() -> PolkadotChainSpec { - PolkadotChainSpec::from_genesis( +pub fn polkadot_local_testnet_config() -> Result { + let wasm_binary = polkadot::WASM_BINARY.ok_or("Polkadot development wasm not available")?; + + Ok(PolkadotChainSpec::from_genesis( "Local Testnet", "local_testnet", ChainType::Local, - polkadot_local_testnet_genesis, + move || polkadot_local_testnet_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } -fn kusama_local_testnet_genesis() -> kusama::GenesisConfig { +fn kusama_local_testnet_genesis(wasm_binary: &[u8]) -> kusama::GenesisConfig { kusama_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), @@ -922,22 +896,25 @@ fn kusama_local_testnet_genesis() -> kusama::GenesisConfig { } /// Kusama local testnet config (multivalidator Alice + Bob) -pub fn kusama_local_testnet_config() -> KusamaChainSpec { - KusamaChainSpec::from_genesis( +pub fn kusama_local_testnet_config() -> Result { + let wasm_binary = kusama::WASM_BINARY.ok_or("Kusama development wasm not available")?; + + Ok(KusamaChainSpec::from_genesis( "Kusama Local Testnet", "kusama_local_testnet", ChainType::Local, - kusama_local_testnet_genesis, + move || kusama_local_testnet_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } -fn westend_local_testnet_genesis() -> westend::GenesisConfig { +fn westend_local_testnet_genesis(wasm_binary: &[u8]) -> westend::GenesisConfig { westend_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), @@ -948,16 +925,18 @@ fn westend_local_testnet_genesis() -> westend::GenesisConfig { } /// Westend local testnet config (multivalidator Alice + Bob) -pub fn westend_local_testnet_config() -> WestendChainSpec { - WestendChainSpec::from_genesis( +pub fn westend_local_testnet_config() -> Result { + let wasm_binary = westend::WASM_BINARY.ok_or("Westend development wasm not available")?; + + Ok(WestendChainSpec::from_genesis( "Westend Local Testnet", "westend_local_testnet", ChainType::Local, - westend_local_testnet_genesis, + move || westend_local_testnet_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } diff --git a/node/service/src/grandpa_support.rs b/node/service/src/grandpa_support.rs index a875c4b45a375c9513f9eef3df4c54c89e2ed76a..666595f1027b296a5a09b66cb700aeebbdaf57e5 100644 --- a/node/service/src/grandpa_support.rs +++ b/node/service/src/grandpa_support.rs @@ -16,7 +16,7 @@ //! Polkadot-specific GRANDPA integration utilities. -use polkadot_primitives::Hash; +use polkadot_primitives::v1::Hash; use sp_runtime::traits::{Block as BlockT, NumberFor}; /// A custom GRANDPA voting rule that "pauses" voting (i.e. 
keeps voting for the @@ -98,7 +98,7 @@ impl grandpa::VotingRule for PauseAfterBlockFor Vec<( grandpa_primitives::SetId, - (Hash, polkadot_primitives::BlockNumber), + (Hash, polkadot_primitives::v1::BlockNumber), grandpa_primitives::AuthorityList, )> { use sp_core::crypto::Ss58Codec; @@ -250,17 +250,21 @@ mod tests { let mut push_blocks = { let mut client = client.clone(); + let mut base = 0; + move |n| { - for _ in 0..n { + for i in 0..n { let mut builder = client.new_block(Default::default()).unwrap(); - for extrinsic in polkadot_test_runtime_client::needed_extrinsics(vec![]) { + for extrinsic in polkadot_test_runtime_client::needed_extrinsics(base + i) { builder.push(extrinsic).unwrap() } let block = builder.build().unwrap().block; client.import(BlockOrigin::Own, block).unwrap(); } + + base += n; } }; diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 620850b3bd641463fea9e02d0702c903d985e89e..9c854525c08cbc9665ccd334ea413ef1fb63a7fe 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -22,22 +22,22 @@ mod client; use std::sync::Arc; use std::time::Duration; -use polkadot_primitives::{parachain, AccountId, Nonce, Balance}; -#[cfg(feature = "full-node")] -use service::{error::Error as ServiceError, ServiceBuilder}; +use polkadot_primitives::v1::{AccountId, Nonce, Balance}; +use service::{error::Error as ServiceError}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use sc_executor::native_executor_instance; use log::info; use sp_blockchain::HeaderBackend; -use polkadot_overseer::{ - self as overseer, - BlockInfo, Overseer, OverseerHandler, Subsystem, SubsystemContext, SpawnedSubsystem, - CandidateValidationMessage, CandidateBackingMessage, -}; +use polkadot_overseer::{self as overseer, AllSubsystems, BlockInfo, Overseer, OverseerHandler}; +use polkadot_subsystem::DummySubsystem; +use polkadot_node_core_proposer::ProposerFactory; +use sp_trie::PrefixedMemoryDB; +use sp_core::traits::SpawnNamed; +use sc_client_api::ExecutorProvider; pub use service::{ - AbstractService, Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, + Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor, - Configuration, ChainSpec, ServiceBuilderCommand, + Configuration, ChainSpec, TaskManager, }; pub use service::config::{DatabaseConfig, PrometheusConfig}; pub use sc_executor::NativeExecutionDispatch; @@ -46,8 +46,7 @@ pub use sc_consensus::LongestChain; pub use sp_api::{ApiRef, Core as CoreApi, ConstructRuntimeApi, ProvideRuntimeApi, StateBackend}; pub use sp_runtime::traits::{DigestFor, HashFor, NumberFor}; pub use consensus_common::{Proposal, SelectChain, BlockImport, RecordProof, block_validation::Chain}; -pub use polkadot_primitives::parachain::{CollatorId, ParachainHost}; -pub use polkadot_primitives::{Block, BlockId}; +pub use polkadot_primitives::v1::{Block, BlockId, CollatorId, Id as ParaId}; pub use sp_runtime::traits::{Block as BlockT, self as runtime_traits, BlakeTwo256}; pub use chain_spec::{PolkadotChainSpec, KusamaChainSpec, WestendChainSpec}; #[cfg(feature = "full-node")] @@ -80,47 +79,39 @@ native_executor_instance!( ); /// A set of APIs that polkadot-like runtimes must implement. 
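The rewritten `RuntimeApiCollection` in the hunk below drops the `Extrinsic` parameter but keeps the trait-alias-via-blanket-impl shape: a trait whose only impl is a blanket one over its supertraits, so a long bound list is spelled once and reused. A minimal, self-contained sketch of that pattern with toy bounds (not the real runtime APIs):

```rust
// Sketch of the trait-alias pattern used by `RuntimeApiCollection`:
// toy bounds stand in for the real runtime-API supertraits.
use std::fmt::Debug;

/// Acts as an alias for `Debug + Clone + Send + 'static`.
trait ApiCollection: Debug + Clone + Send + 'static {}

// The only impl is a blanket one: every type meeting the supertraits
// automatically implements the alias.
impl<T> ApiCollection for T where T: Debug + Clone + Send + 'static {}

// Call sites spell one bound instead of the whole list.
fn describe<A: ApiCollection>(api: &A) {
    println!("{:?}", api);
}

fn main() {
    describe(&"some runtime api"); // `&'static str` meets all the supertraits
}
```

Any type that satisfies the supertraits picks the alias up for free, which is why the change only has to touch the two bound lists and not every call site.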
-pub trait RuntimeApiCollection: +pub trait RuntimeApiCollection: sp_transaction_pool::runtime_api::TaggedTransactionQueue + sp_api::ApiExt + babe_primitives::BabeApi + grandpa_primitives::GrandpaApi - + ParachainHost + sp_block_builder::BlockBuilder - + system_rpc_runtime_api::AccountNonceApi - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi + + frame_system_rpc_runtime_api::AccountNonceApi + + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi + sp_api::Metadata + sp_offchain::OffchainWorkerApi + sp_session::SessionKeys + authority_discovery_primitives::AuthorityDiscoveryApi where - Extrinsic: RuntimeExtrinsic, - >::StateBackend: sp_api::StateBackend, + >::StateBackend: sp_api::StateBackend>, {} -impl RuntimeApiCollection for Api +impl RuntimeApiCollection for Api where Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue + sp_api::ApiExt + babe_primitives::BabeApi + grandpa_primitives::GrandpaApi - + ParachainHost + sp_block_builder::BlockBuilder - + system_rpc_runtime_api::AccountNonceApi - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi + + frame_system_rpc_runtime_api::AccountNonceApi + + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi + sp_api::Metadata + sp_offchain::OffchainWorkerApi + sp_session::SessionKeys + authority_discovery_primitives::AuthorityDiscoveryApi, - Extrinsic: RuntimeExtrinsic, - >::StateBackend: sp_api::StateBackend, + >::StateBackend: sp_api::StateBackend>, {} -pub trait RuntimeExtrinsic: codec::Codec + Send + Sync + 'static {} - -impl RuntimeExtrinsic for E where E: codec::Codec + Send + Sync + 'static {} - /// Can be called for a `Configuration` to check if it is a configuration for the `Kusama` network. pub trait IdentifyVariant { /// Returns if this is a configuration for the `Kusama` network. @@ -148,574 +139,629 @@ fn set_prometheus_registry(config: &mut Configuration) -> Result<(), ServiceErro Ok(()) } -/// Starts a `ServiceBuilder` for a full service. -/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -macro_rules! new_full_start { - ($config:expr, $runtime:ty, $executor:ty) => {{ - set_prometheus_registry(&mut $config)?; - - let mut import_setup = None; - let mut rpc_setup = None; - let inherent_data_providers = inherents::InherentDataProviders::new(); - let builder = service::ServiceBuilder::new_full::< - Block, $runtime, $executor - >($config)? - .with_select_chain(|_, backend| { - Ok(sc_consensus::LongestChain::new(backend.clone())) - })? - .with_transaction_pool(|builder| { - let pool_api = sc_transaction_pool::FullChainApi::new(builder.client().clone()); - let pool = sc_transaction_pool::BasicPool::new( - builder.config().transaction_pool.clone(), - std::sync::Arc::new(pool_api), - builder.prometheus_registry(), - ); - Ok(pool) - })? 
- .with_import_queue(| - config, - client, - mut select_chain, - _, - spawn_task_handle, - registry, - | { - let select_chain = select_chain.take() - .ok_or_else(|| service::Error::SelectChainRequired)?; - - let grandpa_hard_forks = if config.chain_spec.is_kusama() { - grandpa_support::kusama_hard_forks() - } else { - Vec::new() - }; - - let (grandpa_block_import, grandpa_link) = - grandpa::block_import_with_authority_set_hard_forks( - client.clone(), - &(client.clone() as Arc<_>), - select_chain, - grandpa_hard_forks, - )?; - - let justification_import = grandpa_block_import.clone(); - - let (block_import, babe_link) = babe::block_import( - babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - let import_queue = babe::import_queue( - babe_link.clone(), - block_import.clone(), - Some(Box::new(justification_import)), - None, - client, - inherent_data_providers.clone(), - spawn_task_handle, - registry, - )?; - - import_setup = Some((block_import, grandpa_link, babe_link)); - Ok(import_queue) - })? - .with_rpc_extensions_builder(|builder| { - let grandpa_link = import_setup.as_ref().map(|s| &s.1) - .expect("GRANDPA LinkHalf is present for full services or set up failed; qed."); - - let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = grandpa::SharedVoterState::empty(); - - rpc_setup = Some((shared_voter_state.clone())); - - let babe_link = import_setup.as_ref().map(|s| &s.2) - .expect("BabeLink is present for full services or set up faile; qed."); - - let babe_config = babe_link.config().clone(); - let shared_epoch_changes = babe_link.epoch_changes().clone(); - - let client = builder.client().clone(); - let pool = builder.pool().clone(); - let select_chain = builder.select_chain().cloned() - .expect("SelectChain is present for full services or set up failed; qed."); - let keystore = builder.keystore().clone(); - - Ok(move |deny_unsafe| -> polkadot_rpc::RpcExtension { - let deps = polkadot_rpc::FullDeps { - client: client.clone(), - pool: pool.clone(), - select_chain: select_chain.clone(), - deny_unsafe, - babe: polkadot_rpc::BabeDeps { - babe_config: babe_config.clone(), - shared_epoch_changes: shared_epoch_changes.clone(), - keystore: keystore.clone(), - }, - grandpa: polkadot_rpc::GrandpaDeps { - shared_voter_state: shared_voter_state.clone(), - shared_authority_set: shared_authority_set.clone(), - }, - }; - - polkadot_rpc::create_full(deps) - }) - })?; - - (builder, import_setup, inherent_data_providers, rpc_setup) - }} -} +type FullBackend = service::TFullBackend; +type FullSelectChain = sc_consensus::LongestChain; +type FullClient = service::TFullClient; +type FullGrandpaBlockImport = grandpa::GrandpaBlockImport< + FullBackend, Block, FullClient, FullSelectChain +>; -struct CandidateValidationSubsystem; +type LightBackend = service::TLightBackendWithHash; -impl Subsystem for CandidateValidationSubsystem { - fn start(&mut self, mut ctx: SubsystemContext) -> SpawnedSubsystem { - SpawnedSubsystem(Box::pin(async move { - while let Ok(_) = ctx.recv().await {} - })) - } -} +type LightClient = + service::TLightClientWithBackend; + +#[cfg(feature = "full-node")] +fn new_partial(config: &mut Configuration) -> Result< + service::PartialComponents< + FullClient, FullBackend, FullSelectChain, + consensus_common::DefaultImportQueue>, + sc_transaction_pool::FullPool>, + ( + impl Fn(polkadot_rpc::DenyUnsafe, polkadot_rpc::SubscriptionManager) -> polkadot_rpc::RpcExtension, + ( + babe::BabeBlockImport< + Block, 
FullClient, FullGrandpaBlockImport + >, + grandpa::LinkHalf, FullSelectChain>, + babe::BabeLink + ), + grandpa::SharedVoterState, + ) + >, + Error +> + where + RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: + RuntimeApiCollection>, + Executor: NativeExecutionDispatch + 'static, +{ + set_prometheus_registry(config)?; -struct CandidateBackingSubsystem; + let inherent_data_providers = inherents::InherentDataProviders::new(); -impl Subsystem for CandidateBackingSubsystem { - fn start(&mut self, mut ctx: SubsystemContext) -> SpawnedSubsystem { - SpawnedSubsystem(Box::pin(async move { - while let Ok(_) = ctx.recv().await {} - })) - } + let (client, backend, keystore, task_manager) = + service::new_full_parts::(&config)?; + let client = Arc::new(client); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + ); + + let grandpa_hard_forks = if config.chain_spec.is_kusama() { + grandpa_support::kusama_hard_forks() + } else { + Vec::new() + }; + + let (grandpa_block_import, grandpa_link) = + grandpa::block_import_with_authority_set_hard_forks( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + grandpa_hard_forks, + )?; + + let justification_import = grandpa_block_import.clone(); + + let (block_import, babe_link) = babe::block_import( + babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let import_queue = babe::import_queue( + babe_link.clone(), + block_import.clone(), + Some(Box::new(justification_import)), + None, + client.clone(), + select_chain.clone(), + inherent_data_providers.clone(), + &task_manager.spawn_handle(), + config.prometheus_registry(), + consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()), + )?; + + let justification_stream = grandpa_link.justification_stream(); + let shared_authority_set = grandpa_link.shared_authority_set().clone(); + let shared_voter_state = grandpa::SharedVoterState::empty(); + + let import_setup = (block_import.clone(), grandpa_link, babe_link.clone()); + let rpc_setup = shared_voter_state.clone(); + + let babe_config = babe_link.config().clone(); + let shared_epoch_changes = babe_link.epoch_changes().clone(); + + let rpc_extensions_builder = { + let client = client.clone(); + let keystore = keystore.clone(); + let transaction_pool = transaction_pool.clone(); + let select_chain = select_chain.clone(); + + move |deny_unsafe, subscriptions| -> polkadot_rpc::RpcExtension { + let deps = polkadot_rpc::FullDeps { + client: client.clone(), + pool: transaction_pool.clone(), + select_chain: select_chain.clone(), + deny_unsafe, + babe: polkadot_rpc::BabeDeps { + babe_config: babe_config.clone(), + shared_epoch_changes: shared_epoch_changes.clone(), + keystore: keystore.clone(), + }, + grandpa: polkadot_rpc::GrandpaDeps { + shared_voter_state: shared_voter_state.clone(), + shared_authority_set: shared_authority_set.clone(), + justification_stream: justification_stream.clone(), + subscriptions, + }, + }; + + polkadot_rpc::create_full(deps) + } + }; + + Ok(service::PartialComponents { + client, backend, task_manager, keystore, select_chain, import_queue, transaction_pool, + inherent_data_providers, + other: (rpc_extensions_builder, import_setup, rpc_setup) + }) } -fn real_overseer( +fn real_overseer( leaves: impl IntoIterator, + 
prometheus_registry: Option<&Registry>, s: S, ) -> Result<(Overseer, OverseerHandler), ServiceError> { - let validation = Box::new(CandidateValidationSubsystem); - let candidate_backing = Box::new(CandidateBackingSubsystem); - Overseer::new(leaves, validation, candidate_backing, s) - .map_err(|e| ServiceError::Other(format!("Failed to create an Overseer: {:?}", e))) + let all_subsystems = AllSubsystems { + candidate_validation: DummySubsystem, + candidate_backing: DummySubsystem, + candidate_selection: DummySubsystem, + statement_distribution: DummySubsystem, + availability_distribution: DummySubsystem, + bitfield_signing: DummySubsystem, + bitfield_distribution: DummySubsystem, + provisioner: DummySubsystem, + pov_distribution: DummySubsystem, + runtime_api: DummySubsystem, + availability_store: DummySubsystem, + network_bridge: DummySubsystem, + chain_api: DummySubsystem, + collation_generation: DummySubsystem, + collator_protocol: DummySubsystem, + }; + + Overseer::new( + leaves, + all_subsystems, + prometheus_registry, + s, + ).map_err(|e| ServiceError::Other(format!("Failed to create an Overseer: {:?}", e))) } -/// Builds a new service for a full client. -#[macro_export] -macro_rules! new_full { - ( - $config:expr, - $collating_for:expr, - $authority_discovery_enabled:expr, - $grandpa_pause:expr, - $runtime:ty, - $dispatch:ty, - ) => {{ - use sc_client_api::ExecutorProvider; - use sp_core::traits::BareCryptoStorePtr; - - let is_collator = $collating_for.is_some(); - let role = $config.role.clone(); - let is_authority = role.is_authority() && !is_collator; - let force_authoring = $config.force_authoring; - let disable_grandpa = $config.disable_grandpa; - let name = $config.network.node_name.clone(); - - let (builder, mut import_setup, inherent_data_providers, mut rpc_setup) = - new_full_start!($config, $runtime, $dispatch); - - let service = builder - .with_finality_proof_provider(|client, backend| { - let provider = client as Arc>; - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) - })? - .build_full()?; - - let (block_import, link_half, babe_link) = import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - - let shared_voter_state = rpc_setup.take() - .expect("The SharedVoterState is present for Full Services or setup failed before. qed"); - - let client = service.client(); - - let overseer_client = service.client(); - let spawner = service.spawn_task_handle(); - let leaves: Vec<_> = service.select_chain().ok_or(ServiceError::SelectChainRequired)? 
- .leaves() - .unwrap_or_else(|_| vec![]) - .into_iter() - .filter_map(|hash| { - let number = client.number(hash).ok()??; - let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash; - - Some(BlockInfo { - hash, - parent_hash, - number, - }) +#[cfg(feature = "full-node")] +fn new_full( + mut config: Configuration, + collating_for: Option<(CollatorId, ParaId)>, + _max_block_data_size: Option, + _authority_discovery_enabled: bool, + _slot_duration: u64, + grandpa_pause: Option<(u32, u32)>, +) -> Result<( + TaskManager, + Arc>, +), Error> + where + RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: + RuntimeApiCollection>, + Executor: NativeExecutionDispatch + 'static, +{ + use sp_core::traits::BareCryptoStorePtr; + + let is_collator = collating_for.is_some(); + let role = config.role.clone(); + let is_authority = role.is_authority() && !is_collator; + let force_authoring = config.force_authoring; + let disable_grandpa = config.disable_grandpa; + let name = config.network.node_name.clone(); + + let service::PartialComponents { + client, backend, mut task_manager, keystore, select_chain, import_queue, transaction_pool, + inherent_data_providers, + other: (rpc_extensions_builder, import_setup, rpc_setup) + } = new_partial::(&mut config)?; + + let prometheus_registry = config.prometheus_registry().cloned(); + + let finality_proof_provider = + GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + service::build_network(service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + finality_proof_request_builder: None, + finality_proof_provider: Some(finality_proof_provider.clone()), + })?; + + if config.offchain_worker.enabled { + service::build_offchain_workers( + &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + ); + } + + let telemetry_connection_sinks = service::TelemetryConnectionSinks::default(); + + service::spawn_tasks(service::SpawnTasksParams { + config, + backend: backend.clone(), + client: client.clone(), + keystore: keystore.clone(), + network: network.clone(), + rpc_extensions_builder: Box::new(rpc_extensions_builder), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + on_demand: None, + remote_blockchain: None, + telemetry_connection_sinks: telemetry_connection_sinks.clone(), + network_status_sinks, system_rpc_tx, + })?; + + let (block_import, link_half, babe_link) = import_setup; + + let shared_voter_state = rpc_setup; + + let overseer_client = client.clone(); + let spawner = task_manager.spawn_handle(); + let leaves: Vec<_> = select_chain.clone() + .leaves() + .unwrap_or_else(|_| vec![]) + .into_iter() + .filter_map(|hash| { + let number = client.number(hash).ok()??; + let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash; + + Some(BlockInfo { + hash, + parent_hash, + number, }) - .collect(); + }) + .collect(); - let (overseer, handler) = real_overseer(leaves, spawner)?; + let (overseer, handler) = real_overseer(leaves, prometheus_registry.as_ref(), spawner)?; + let handler_clone = handler.clone(); - service.spawn_essential_task_handle().spawn("overseer", Box::pin(async move { - use futures::{pin_mut, select, FutureExt}; + 
task_manager.spawn_essential_handle().spawn_blocking("overseer", Box::pin(async move { + use futures::{pin_mut, select, FutureExt}; - let forward = overseer::forward_events(overseer_client, handler); + let forward = overseer::forward_events(overseer_client, handler); - let forward = forward.fuse(); - let overseer_fut = overseer.run().fuse(); + let forward = forward.fuse(); + let overseer_fut = overseer.run().fuse(); - pin_mut!(overseer_fut); - pin_mut!(forward); + pin_mut!(overseer_fut); + pin_mut!(forward); - loop { - select! { - _ = forward => break, - _ = overseer_fut => break, - complete => break, - } + loop { + select! { + _ = forward => break, + _ = overseer_fut => break, + complete => break, } - })); - - if role.is_authority() { - let select_chain = service.select_chain().ok_or(ServiceError::SelectChainRequired)?; - let can_author_with = - consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()); - - // TODO: custom proposer (https://github.com/paritytech/polkadot/issues/1248) - let proposer = sc_basic_authorship::ProposerFactory::new( - client.clone(), - service.transaction_pool(), - None, - ); - - let babe_config = babe::BabeParams { - keystore: service.keystore(), - client: client.clone(), - select_chain, - block_import, - env: proposer, - sync_oracle: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - force_authoring, - babe_link, - can_author_with, - }; - - let babe = babe::start_babe(babe_config)?; - service.spawn_essential_task_handle().spawn_blocking("babe", babe); } - - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. - let keystore = if is_authority { - Some(service.keystore() as BareCryptoStorePtr) - } else { - None + })); + + if role.is_authority() { + let can_author_with = + consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let proposer = ProposerFactory::new( + client.clone(), + transaction_pool, + handler_clone, + ); + + let babe_config = babe::BabeParams { + keystore: keystore.clone(), + client: client.clone(), + select_chain, + block_import, + env: proposer, + sync_oracle: network.clone(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring, + babe_link, + can_author_with, }; - let config = grandpa::Config { - // FIXME substrate#1578 make this available through chainspec - gossip_duration: Duration::from_millis(1000), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - is_authority: role.is_network_authority(), + let babe = babe::start_babe(babe_config)?; + task_manager.spawn_essential_handle().spawn_blocking("babe", babe); + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = if is_authority { + Some(keystore.clone() as BareCryptoStorePtr) + } else { + None + }; + + let config = grandpa::Config { + // FIXME substrate#1578 make this available through chainspec + gossip_duration: Duration::from_millis(1000), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore, + is_authority: role.is_network_authority(), + }; + + let enable_grandpa = !disable_grandpa; + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: unlike in substrate we are currently running the full + // GRANDPA voter protocol for all full nodes (regardless of whether + // they're validators or not). 
at this point the full voter should + // provide better guarantees of block and vote data availability than + // the observer. + + // add a custom voting rule to temporarily stop voting for new blocks + // after the given pause block is finalized and restarting after the + // given delay. + let voting_rule = match grandpa_pause { + Some((block, delay)) => { + info!("GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", + block, + delay, + ); + + grandpa::VotingRulesBuilder::default() + .add(grandpa_support::PauseAfterBlockFor(block, delay)) + .build() + }, + None => + grandpa::VotingRulesBuilder::default() + .build(), }; - let enable_grandpa = !disable_grandpa; - if enable_grandpa { - // start the full GRANDPA voter - // NOTE: unlike in substrate we are currently running the full - // GRANDPA voter protocol for all full nodes (regardless of whether - // they're validators or not). at this point the full voter should - // provide better guarantees of block and vote data availability than - // the observer. - - // add a custom voting rule to temporarily stop voting for new blocks - // after the given pause block is finalized and restarting after the - // given delay. - let voting_rule = match $grandpa_pause { - Some((block, delay)) => { - info!("GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", - block, - delay, - ); - - grandpa::VotingRulesBuilder::default() - .add(grandpa_support::PauseAfterBlockFor(block, delay)) - .build() - }, - None => - grandpa::VotingRulesBuilder::default() - .build(), - }; + let grandpa_config = grandpa::GrandpaParams { + config, + link: link_half, + network: network.clone(), + inherent_data_providers: inherent_data_providers.clone(), + telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), + voting_rule, + prometheus_registry, + shared_voter_state, + }; - let grandpa_config = grandpa::GrandpaParams { - config, - link: link_half, - network: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - telemetry_on_connect: Some(service.telemetry_on_connect_stream()), - voting_rule, - prometheus_registry: service.prometheus_registry(), - shared_voter_state, - }; + task_manager.spawn_essential_handle().spawn_blocking( + "grandpa-voter", + grandpa::run_grandpa_voter(grandpa_config)? + ); + } else { + grandpa::setup_disabled_grandpa( + client.clone(), + &inherent_data_providers, + network.clone(), + )?; + } - service.spawn_essential_task_handle().spawn_blocking( - "grandpa-voter", - grandpa::run_grandpa_voter(grandpa_config)? - ); - } else { - grandpa::setup_disabled_grandpa( - client.clone(), - &inherent_data_providers, - service.network(), - )?; - } + network_starter.start_network(); - (service, client) - }} + Ok((task_manager, client)) } pub struct FullNodeHandles; /// Builds a new service for a light client. -#[macro_export] -macro_rules! new_light { - ($config:expr, $runtime:ty, $dispatch:ty) => {{ - crate::set_prometheus_registry(&mut $config)?; - let inherent_data_providers = inherents::InherentDataProviders::new(); - - ServiceBuilder::new_light::($config)? - .with_select_chain(|_, backend| { - Ok(sc_consensus::LongestChain::new(backend.clone())) - })? 
- .with_transaction_pool(|builder| { - let fetcher = builder.fetcher() - .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; - let pool_api = sc_transaction_pool::LightChainApi::new( - builder.client().clone(), - fetcher, - ); - let pool = sc_transaction_pool::BasicPool::with_revalidation_type( - builder.config().transaction_pool.clone(), - Arc::new(pool_api), - builder.prometheus_registry(), - sc_transaction_pool::RevalidationType::Light, - ); - Ok(pool) - })? - .with_import_queue_and_fprb(| - _config, - client, - backend, - fetcher, - _select_chain, - _, - spawn_task_handle, - registry, - | { - let fetch_checker = fetcher - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = grandpa::light_block_import( - client.clone(), backend, &(client.clone() as Arc<_>), Arc::new(fetch_checker) - )?; - - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); - - let (babe_block_import, babe_link) = babe::block_import( - babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. - let import_queue = babe::import_queue( - babe_link, - babe_block_import, - None, - Some(Box::new(finality_proof_import)), - client, - inherent_data_providers.clone(), - spawn_task_handle, - registry, - )?; - - Ok((import_queue, finality_proof_request_builder)) - })? - .with_finality_proof_provider(|client, backend| { - let provider = client as Arc>; - Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, provider)) as _) - })? - .with_rpc_extensions(|builder| { - let fetcher = builder.fetcher() - .ok_or_else(|| "Trying to start node RPC without active fetcher")?; - let remote_blockchain = builder.remote_backend() - .ok_or_else(|| "Trying to start node RPC without active remote blockchain")?; - - let light_deps = polkadot_rpc::LightDeps { - remote_blockchain, - fetcher, - client: builder.client().clone(), - pool: builder.pool(), - }; - Ok(polkadot_rpc::create_light(light_deps)) - })? 
- .build_light() - }} +fn new_light(mut config: Configuration) -> Result + where + Runtime: 'static + Send + Sync + ConstructRuntimeApi>, + >>::RuntimeApi: + RuntimeApiCollection>, + Dispatch: NativeExecutionDispatch + 'static, +{ + set_prometheus_registry(&mut config)?; + use sc_client_api::backend::RemoteBackend; + + let (client, backend, keystore, mut task_manager, on_demand) = + service::new_light_parts::(&config)?; + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( + config.transaction_pool.clone(), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + on_demand.clone(), + )); + + let grandpa_block_import = grandpa::light_block_import( + client.clone(), backend.clone(), &(client.clone() as Arc<_>), + Arc::new(on_demand.checker().clone()), + )?; + + let finality_proof_import = grandpa_block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + let (babe_block_import, babe_link) = babe::block_import( + babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let inherent_data_providers = inherents::InherentDataProviders::new(); + + // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. + let import_queue = babe::import_queue( + babe_link, + babe_block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + select_chain.clone(), + inherent_data_providers.clone(), + &task_manager.spawn_handle(), + config.prometheus_registry(), + consensus_common::NeverCanAuthor, + )?; + + let finality_proof_provider = + GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + service::build_network(service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, + finality_proof_request_builder: Some(finality_proof_request_builder), + finality_proof_provider: Some(finality_proof_provider), + })?; + + if config.offchain_worker.enabled { + service::build_offchain_workers( + &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + ); + } + + let light_deps = polkadot_rpc::LightDeps { + remote_blockchain: backend.remote_blockchain(), + fetcher: on_demand.clone(), + client: client.clone(), + pool: transaction_pool.clone(), + }; + + let rpc_extensions = polkadot_rpc::create_light(light_deps); + + service::spawn_tasks(service::SpawnTasksParams { + on_demand: Some(on_demand), + remote_blockchain: Some(backend.remote_blockchain()), + rpc_extensions_builder: Box::new(service::NoopRpcExtensionBuilder(rpc_extensions)), + task_manager: &mut task_manager, + telemetry_connection_sinks: service::TelemetryConnectionSinks::default(), + config, keystore, backend, transaction_pool, client, network, network_status_sinks, + system_rpc_tx, + })?; + + network_starter.start_network(); + + Ok(task_manager) } /// Builds a new object suitable for chain operations. 
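`new_chain_ops` below no longer builds a full service; it destructures the `PartialComponents` returned by `new_partial` and keeps only what chain operations need. A sketch of that destructuring with stand-in types (the real `PartialComponents` carries far more):

```rust
// Take only the four components chain operations need; `..` drops the rest.
#[allow(dead_code)]
struct PartialComponents {
    client: &'static str,
    backend: &'static str,
    import_queue: &'static str,
    task_manager: &'static str,
    other: (), // rpc builder, import setup, ... (unused by chain ops)
}

fn new_chain_ops(
    parts: PartialComponents,
) -> (&'static str, &'static str, &'static str, &'static str) {
    // `..` silently discards every field not named here.
    let PartialComponents { client, backend, import_queue, task_manager, .. } = parts;
    (client, backend, import_queue, task_manager)
}

fn main() {
    let parts = PartialComponents {
        client: "client",
        backend: "backend",
        import_queue: "import-queue",
        task_manager: "task-manager",
        other: (),
    };
    let (client, ..) = new_chain_ops(parts);
    assert_eq!(client, "client");
}
```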
-pub fn new_chain_ops(mut config: Configuration) - -> Result, ServiceError> +#[cfg(feature = "full-node")] +pub fn new_chain_ops(mut config: Configuration) -> Result< + ( + Arc>, + Arc, + consensus_common::import_queue::BasicQueue>, + TaskManager, + ), + ServiceError +> where - Runtime: ConstructRuntimeApi> + Send + Sync + 'static, + Runtime: ConstructRuntimeApi> + Send + Sync + 'static, Runtime::RuntimeApi: - RuntimeApiCollection, Block>>, + RuntimeApiCollection>, Dispatch: NativeExecutionDispatch + 'static, - Extrinsic: RuntimeExtrinsic, - >::StateBackend: sp_api::StateBackend, { config.keystore = service::config::KeystoreConfig::InMemory; - Ok(new_full_start!(config, Runtime, Dispatch).0) + let service::PartialComponents { client, backend, import_queue, task_manager, .. } + = new_partial::(&mut config)?; + Ok((client, backend, import_queue, task_manager)) } /// Create a new Polkadot service for a full node. #[cfg(feature = "full-node")] pub fn polkadot_new_full( - mut config: Configuration, - collating_for: Option<(CollatorId, parachain::Id)>, - _max_block_data_size: Option, - _authority_discovery_enabled: bool, - _slot_duration: u64, + config: Configuration, + collating_for: Option<(CollatorId, ParaId)>, + max_block_data_size: Option, + authority_discovery_enabled: bool, + slot_duration: u64, grandpa_pause: Option<(u32, u32)>, ) -> Result<( - impl AbstractService, + TaskManager, Arc, + FullBackend, polkadot_runtime::RuntimeApi >>, FullNodeHandles, ), ServiceError> { - let (service, client) = new_full!( + let (components, client) = new_full::( config, collating_for, + max_block_data_size, authority_discovery_enabled, + slot_duration, grandpa_pause, - polkadot_runtime::RuntimeApi, - PolkadotExecutor, - ); + )?; - Ok((service, client, FullNodeHandles)) + Ok((components, client, FullNodeHandles)) } /// Create a new Kusama service for a full node. #[cfg(feature = "full-node")] pub fn kusama_new_full( - mut config: Configuration, - collating_for: Option<(CollatorId, parachain::Id)>, - _max_block_data_size: Option, - _authority_discovery_enabled: bool, - _slot_duration: u64, + config: Configuration, + collating_for: Option<(CollatorId, ParaId)>, + max_block_data_size: Option, + authority_discovery_enabled: bool, + slot_duration: u64, grandpa_pause: Option<(u32, u32)>, ) -> Result<( - impl AbstractService, + TaskManager, Arc, + FullBackend, kusama_runtime::RuntimeApi > >, FullNodeHandles, ), ServiceError> { - let (service, client) = new_full!( + let (components, client) = new_full::( config, collating_for, + max_block_data_size, authority_discovery_enabled, + slot_duration, grandpa_pause, - kusama_runtime::RuntimeApi, - KusamaExecutor, - ); + )?; - Ok((service, client, FullNodeHandles)) + Ok((components, client, FullNodeHandles)) } /// Create a new Kusama service for a full node. 
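The per-chain full-node constructors are now thin wrappers over the one generic `new_full`, differing only in their runtime and executor type arguments. A toy sketch of that shape, where `Runtime` stands in for the real runtime-API/executor pair:

```rust
// One generic constructor, thin per-chain wrappers that fix the type args.
trait Runtime {
    const NAME: &'static str;
}

#[allow(dead_code)]
struct PolkadotRuntime;
#[allow(dead_code)]
struct WestendRuntime;

impl Runtime for PolkadotRuntime {
    const NAME: &'static str = "polkadot";
}

impl Runtime for WestendRuntime {
    const NAME: &'static str = "westend";
}

fn new_full<R: Runtime>(grandpa_pause: Option<(u32, u32)>) -> String {
    match grandpa_pause {
        Some((block, delay)) => format!(
            "{} full node, GRANDPA pause at #{} for {} blocks",
            R::NAME, block, delay,
        ),
        None => format!("{} full node", R::NAME),
    }
}

// Mirrors `polkadot_new_full` and friends: forward, fixing the runtime.
fn polkadot_new_full(grandpa_pause: Option<(u32, u32)>) -> String {
    new_full::<PolkadotRuntime>(grandpa_pause)
}

fn westend_new_full(grandpa_pause: Option<(u32, u32)>) -> String {
    new_full::<WestendRuntime>(grandpa_pause)
}

fn main() {
    println!("{}", polkadot_new_full(None));
    println!("{}", westend_new_full(Some((100, 10))));
}
```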
#[cfg(feature = "full-node")] pub fn westend_new_full( - mut config: Configuration, - collating_for: Option<(CollatorId, parachain::Id)>, - _max_block_data_size: Option, - _authority_discovery_enabled: bool, - _slot_duration: u64, + config: Configuration, + collating_for: Option<(CollatorId, ParaId)>, + max_block_data_size: Option, + authority_discovery_enabled: bool, + slot_duration: u64, grandpa_pause: Option<(u32, u32)>, ) -> Result<( - impl AbstractService, + TaskManager, Arc, + FullBackend, westend_runtime::RuntimeApi >>, FullNodeHandles, ), ServiceError> { - let (service, client) = new_full!( + let (components, client) = new_full::( config, collating_for, + max_block_data_size, authority_discovery_enabled, + slot_duration, grandpa_pause, - westend_runtime::RuntimeApi, - WestendExecutor, - ); + )?; - Ok((service, client, FullNodeHandles)) + Ok((components, client, FullNodeHandles)) } /// Create a new Polkadot service for a light client. -pub fn polkadot_new_light(mut config: Configuration) -> Result< - impl AbstractService< - Block = Block, - RuntimeApi = polkadot_runtime::RuntimeApi, - Backend = TLightBackend, - SelectChain = LongestChain, Block>, - CallExecutor = TLightCallExecutor, - >, ServiceError> +pub fn polkadot_new_light(config: Configuration) -> Result { - new_light!(config, polkadot_runtime::RuntimeApi, PolkadotExecutor) + new_light::(config) } /// Create a new Kusama service for a light client. -pub fn kusama_new_light(mut config: Configuration) -> Result< - impl AbstractService< - Block = Block, - RuntimeApi = kusama_runtime::RuntimeApi, - Backend = TLightBackend, - SelectChain = LongestChain, Block>, - CallExecutor = TLightCallExecutor, - >, ServiceError> +pub fn kusama_new_light(config: Configuration) -> Result { - new_light!(config, kusama_runtime::RuntimeApi, KusamaExecutor) + new_light::(config) } /// Create a new Westend service for a light client. 
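As on the full-node path, the light-client constructors below trade the old `new_light!` macro for a generic function. A minimal sketch of why that trade is attractive: the macro stamps out code before type checking, while the generic version is a single, independently checkable item (toy types only; the real signatures are far larger):

```rust
// Macro vs. generic function for per-runtime construction.
macro_rules! new_light_macro {
    ($runtime:ty) => {
        format!("light client for {}", std::any::type_name::<$runtime>())
    };
}

#[allow(dead_code)]
struct WestendRuntime;

fn new_light_generic<Runtime>() -> String {
    format!("light client for {}", std::any::type_name::<Runtime>())
}

fn main() {
    // Both spellings produce the same result, but only the generic one is
    // a reusable function the compiler checks once, in one place.
    let from_macro = new_light_macro!(WestendRuntime);
    let from_generic = new_light_generic::<WestendRuntime>();
    assert_eq!(from_macro, from_generic);
    println!("{}", from_generic);
}
```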
-pub fn westend_new_light(mut config: Configuration, ) -> Result< - impl AbstractService< - Block = Block, - RuntimeApi = westend_runtime::RuntimeApi, - Backend = TLightBackend, - SelectChain = LongestChain, Block>, - CallExecutor = TLightCallExecutor - >, - ServiceError> +pub fn westend_new_light(config: Configuration, ) -> Result { - new_light!(config, westend_runtime::RuntimeApi, KusamaExecutor) + new_light::(config) } diff --git a/node/subsystem-test-helpers/Cargo.toml b/node/subsystem-test-helpers/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..869fb74d0691c791aa8aaf7c6127150030d36565 --- /dev/null +++ b/node/subsystem-test-helpers/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "polkadot-node-subsystem-test-helpers" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +description = "Subsystem traits and message definitions" + +[dependencies] +async-trait = "0.1" +derive_more = "0.99.9" +futures = "0.3.5" +futures-timer = "3.0.2" +log = "0.4.8" +parity-scale-codec = "1.3.4" +parking_lot = "0.10.0" +pin-project = "0.4.23" +polkadot-node-primitives = { path = "../primitives" } +polkadot-node-subsystem = { path = "../subsystem" } +polkadot-primitives = { path = "../../primitives" } +polkadot-statement-table = { path = "../../statement-table" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } +smallvec = "1.4.1" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/subsystem-test-helpers/src/lib.rs b/node/subsystem-test-helpers/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..4d5be56cdfd121e3a7c53629e52ae23c6b65a756 --- /dev/null +++ b/node/subsystem-test-helpers/src/lib.rs @@ -0,0 +1,328 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Utilities for testing subsystems. + +use polkadot_node_subsystem::messages::AllMessages; +use polkadot_node_subsystem::{FromOverseer, SubsystemContext, SubsystemError, SubsystemResult}; + +use futures::channel::mpsc; +use futures::poll; +use futures::prelude::*; +use futures_timer::Delay; +use parking_lot::Mutex; +use pin_project::pin_project; +use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; + +use std::convert::Infallible; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll, Waker}; +use std::time::Duration; + +enum SinkState { + Empty { + read_waker: Option, + }, + Item { + item: T, + ready_waker: Option, + flush_waker: Option, + }, +} + +/// The sink half of a single-item sink that does not resolve until the item has been read. +pub struct SingleItemSink(Arc>>); + +/// The stream half of a single-item sink. 
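A hypothetical usage sketch of this sink/stream pair, via the `single_item_sink` constructor defined further down. The property worth noting is the rendezvous behaviour: `send` resolves only at the moment the stream half reads the item, not when the item is buffered. This assumes the crate is imported under its package name, `polkadot-node-subsystem-test-helpers`, and uses the `futures` executor for the demo:

```rust
// Rendezvous semantics: the send future pends until the receiver reads.
use futures::executor::block_on;
use futures::future::join;
use futures::prelude::*;

use polkadot_node_subsystem_test_helpers::single_item_sink;

fn main() {
    let (mut tx, mut rx) = single_item_sink::<u32>();

    let send = async move {
        // Pends until the receiver below actually reads the value.
        tx.send(42).await.expect("infallible");
    };

    let recv = async move {
        assert_eq!(rx.next().await, Some(42));
    };

    // Drive both halves together so the rendezvous can complete.
    block_on(join(send, recv));
}
```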
+pub struct SingleItemStream(Arc>>); + +impl Sink for SingleItemSink { + type Error = Infallible; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut state = self.0.lock(); + match *state { + SinkState::Empty { .. } => Poll::Ready(Ok(())), + SinkState::Item { + ref mut ready_waker, + .. + } => { + *ready_waker = Some(cx.waker().clone()); + Poll::Pending + } + } + } + + fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Infallible> { + let mut state = self.0.lock(); + + match *state { + SinkState::Empty { ref mut read_waker } => { + if let Some(waker) = read_waker.take() { + waker.wake(); + } + } + _ => panic!("start_send called outside of empty sink state ensured by poll_ready"), + } + + *state = SinkState::Item { + item, + ready_waker: None, + flush_waker: None, + }; + + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut state = self.0.lock(); + match *state { + SinkState::Empty { .. } => Poll::Ready(Ok(())), + SinkState::Item { + ref mut flush_waker, + .. + } => { + *flush_waker = Some(cx.waker().clone()); + Poll::Pending + } + } + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + self.poll_flush(cx) + } +} + +impl Stream for SingleItemStream { + type Item = T; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut state = self.0.lock(); + + let read_waker = Some(cx.waker().clone()); + + match std::mem::replace(&mut *state, SinkState::Empty { read_waker }) { + SinkState::Empty { .. } => Poll::Pending, + SinkState::Item { + item, + ready_waker, + flush_waker, + } => { + if let Some(waker) = ready_waker { + waker.wake(); + } + + if let Some(waker) = flush_waker { + waker.wake(); + } + + Poll::Ready(Some(item)) + } + } + } +} + +/// Create a single-item Sink/Stream pair. +/// +/// The sink's send methods resolve at the point which the stream reads the item, +/// not when the item is buffered. +pub fn single_item_sink() -> (SingleItemSink, SingleItemStream) { + let inner = Arc::new(Mutex::new(SinkState::Empty { read_waker: None })); + (SingleItemSink(inner.clone()), SingleItemStream(inner)) +} + +/// A test subsystem context. +pub struct TestSubsystemContext { + tx: mpsc::UnboundedSender, + rx: SingleItemStream>, + spawn: S, +} + +#[async_trait::async_trait] +impl SubsystemContext + for TestSubsystemContext +{ + type Message = M; + + async fn try_recv(&mut self) -> Result>, ()> { + match poll!(self.rx.next()) { + Poll::Ready(Some(msg)) => Ok(Some(msg)), + Poll::Ready(None) => Err(()), + Poll::Pending => Ok(None), + } + } + + async fn recv(&mut self) -> SubsystemResult> { + self.rx.next().await.ok_or(SubsystemError) + } + + async fn spawn( + &mut self, + name: &'static str, + s: Pin + Send>>, + ) -> SubsystemResult<()> { + self.spawn.spawn(name, s); + Ok(()) + } + + async fn spawn_blocking(&mut self, name: &'static str, s: Pin + Send>>) + -> SubsystemResult<()> + { + self.spawn.spawn_blocking(name, s); + Ok(()) + } + + async fn send_message(&mut self, msg: AllMessages) -> SubsystemResult<()> { + self.tx + .send(msg) + .await + .expect("test overseer no longer live"); + Ok(()) + } + + async fn send_messages(&mut self, msgs: T) -> SubsystemResult<()> + where + T: IntoIterator + Send, + T::IntoIter: Send, + { + let mut iter = stream::iter(msgs.into_iter().map(Ok)); + self.tx + .send_all(&mut iter) + .await + .expect("test overseer no longer live"); + + Ok(()) + } +} + +/// A handle for interacting with the subsystem context. 
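The `Stream` impl above leans on `std::mem::replace` to move the current state out of the shared slot without cloning, leaving a fresh state behind, and then wakes whichever wakers were parked. The ownership trick in isolation, on a toy two-state machine:

```rust
// `mem::replace` moves the old state out and installs a new one in place.
use std::mem;

enum State {
    Empty,
    Item(String),
}

fn take_item(state: &mut State) -> Option<String> {
    match mem::replace(state, State::Empty) {
        State::Empty => None,
        State::Item(item) => Some(item),
    }
}

fn main() {
    let mut state = State::Item("hello".to_owned());
    assert_eq!(take_item(&mut state), Some("hello".to_owned()));
    // The slot was left `Empty`, so a second take yields nothing.
    assert_eq!(take_item(&mut state), None);
}
```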
+pub struct TestSubsystemContextHandle { + tx: SingleItemSink>, + rx: mpsc::UnboundedReceiver, +} + +impl TestSubsystemContextHandle { + /// Send a message or signal to the subsystem. This resolves at the point in time where the + /// subsystem has _read_ the message. + pub async fn send(&mut self, from_overseer: FromOverseer) { + self.tx + .send(from_overseer) + .await + .expect("Test subsystem no longer live"); + } + + /// Receive the next message from the subsystem. + pub async fn recv(&mut self) -> AllMessages { + self.try_recv().await.expect("Test subsystem no longer live") + } + + /// Receive the next message from the subsystem, or `None` if the channel has been closed. + pub async fn try_recv(&mut self) -> Option { + self.rx.next().await + } +} + +/// Make a test subsystem context. +pub fn make_subsystem_context( + spawn: S, +) -> (TestSubsystemContext, TestSubsystemContextHandle) { + let (overseer_tx, overseer_rx) = single_item_sink(); + let (all_messages_tx, all_messages_rx) = mpsc::unbounded(); + + ( + TestSubsystemContext { + tx: all_messages_tx, + rx: overseer_rx, + spawn, + }, + TestSubsystemContextHandle { + tx: overseer_tx, + rx: all_messages_rx, + }, + ) +} + +/// Test a subsystem, mocking the overseer +/// +/// Pass in two async closures: one mocks the overseer, the other runs the test from the perspective of a subsystem. +/// +/// Times out in two seconds. +pub fn subsystem_test_harness( + overseer_factory: OverseerFactory, + test_factory: TestFactory, +) where + OverseerFactory: FnOnce(TestSubsystemContextHandle) -> Overseer, + Overseer: Future, + TestFactory: FnOnce(TestSubsystemContext) -> Test, + Test: Future, +{ + let pool = TaskExecutor::new(); + let (context, handle) = make_subsystem_context(pool); + let overseer = overseer_factory(handle); + let test = test_factory(context); + + futures::pin_mut!(overseer, test); + + futures::executor::block_on(async move { + future::join(overseer, test) + .timeout(Duration::from_secs(2)) + .await + .expect("test timed out instead of completing") + }); +} + +/// A future that wraps another future with a `Delay` allowing for time-limited futures. +#[pin_project] +pub struct Timeout { + #[pin] + future: F, + #[pin] + delay: Delay, +} + +/// Extends `Future` to allow time-limited futures. 
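A hypothetical test against the harness just defined: one closure mocks the overseer, the other plays the subsystem, and the two-second timeout bounds the whole exchange. This sketch assumes the unit type `()` is an acceptable message type for the test context and that the crate is imported as `polkadot_node_subsystem_test_helpers`:

```rust
// One closure per side of the overseer/subsystem boundary.
use polkadot_node_subsystem::{FromOverseer, SubsystemContext};
use polkadot_node_subsystem_test_helpers::subsystem_test_harness;

#[test]
fn overseer_and_subsystem_exchange_one_message() {
    subsystem_test_harness(
        // Mocked overseer: deliver a single message; `send` resolves once
        // the subsystem side has read it.
        |mut overseer| async move {
            overseer.send(FromOverseer::Communication { msg: () }).await;
        },
        // Subsystem under test: receive the message and inspect it.
        |mut ctx| async move {
            if let Ok(FromOverseer::Communication { msg }) = ctx.recv().await {
                assert_eq!(msg, ());
            }
        },
    );
}
```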
+pub trait TimeoutExt: Future { + fn timeout(self, duration: Duration) -> Timeout + where + Self: Sized, + { + Timeout { + future: self, + delay: Delay::new(duration), + } + } +} + +impl TimeoutExt for F {} + +impl Future for Timeout { + type Output = Option; + + fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { + let this = self.project(); + + if this.delay.poll(ctx).is_ready() { + return Poll::Ready(None); + } + + if let Poll::Ready(output) = this.future.poll(ctx) { + return Poll::Ready(Some(output)); + } + + Poll::Pending + } +} diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2c189419cc15e88939ab8bfc35fa00d6bf383cb4 --- /dev/null +++ b/node/subsystem-util/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "polkadot-node-subsystem-util" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +description = "Subsystem traits and message definitions" + +[dependencies] +async-trait = "0.1" +derive_more = "0.99.9" +futures = "0.3.5" +futures-timer = "3.0.2" +keystore = { package = "sc-keystore", git = "https://github.com/paritytech/substrate", branch = "master" } +log = "0.4.8" +parity-scale-codec = "1.3.4" +parking_lot = { version = "0.10.0", optional = true } +pin-project = "0.4.22" +polkadot-node-primitives = { path = "../primitives" } +polkadot-node-subsystem = { path = "../subsystem" } +polkadot-primitives = { path = "../../primitives" } +polkadot-statement-table = { path = "../../statement-table" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } +smallvec = "1.4.1" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +streamunordered = "0.5.1" + +[dev-dependencies] +assert_matches = "1.3.0" +async-trait = "0.1" +futures = { version = "0.3.5", features = ["thread-pool"] } +parking_lot = "0.10.0" +polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } +env_logger = "0.7.1" diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..0cd7a05c31853d68187c6af7bd392bc10a5f2325 --- /dev/null +++ b/node/subsystem-util/src/lib.rs @@ -0,0 +1,1273 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Utility module for subsystems +//! +//! Many subsystems have common interests such as canceling a bunch of spawned jobs, +//! or determining what their validator ID is. These common interests are factored into +//! this module. 
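The request helpers that make up much of this module wrap one pattern: bundle a `oneshot::Sender` into a request message, push it over an mpsc channel, and hand the `oneshot::Receiver` back to the caller. A generic sketch of that pattern with toy message types (not the real `RuntimeApiRequest`):

```rust
// Request/response over channels: the reply path travels inside the request.
use futures::channel::{mpsc, oneshot};
use futures::executor::block_on;
use futures::prelude::*;

enum Request {
    Validators(oneshot::Sender<Vec<String>>),
}

async fn request_validators(
    sender: &mut mpsc::Sender<Request>,
) -> oneshot::Receiver<Vec<String>> {
    let (tx, rx) = oneshot::channel();
    sender.send(Request::Validators(tx)).await.expect("service alive");
    rx
}

fn main() {
    let (mut tx, mut rx) = mpsc::channel(8);

    block_on(async {
        let pending = request_validators(&mut tx).await;

        // The serving side answers through the bundled sender.
        if let Some(Request::Validators(reply)) = rx.next().await {
            reply.send(vec!["alice".into(), "bob".into()]).expect("caller alive");
        }

        // The caller resolves its receiver whenever it is ready to.
        assert_eq!(pending.await.expect("answered").len(), 2);
    });
}
```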
+ +use polkadot_node_subsystem::{ + errors::{ChainApiError, RuntimeApiError}, + messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender}, + FromOverseer, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemError, SubsystemResult, + metrics, +}; +use futures::{ + channel::{mpsc, oneshot}, + future::Either, + prelude::*, + select, + stream::Stream, + task, +}; +use futures_timer::Delay; +use keystore::KeyStorePtr; +use parity_scale_codec::Encode; +use pin_project::{pin_project, pinned_drop}; +use polkadot_primitives::v1::{ + CandidateEvent, CommittedCandidateReceipt, CoreState, EncodeAs, PersistedValidationData, + GroupRotationInfo, Hash, Id as ParaId, ValidationData, OccupiedCoreAssumption, + SessionIndex, Signed, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, + ValidatorPair, +}; +use sp_core::{Pair, traits::SpawnNamed}; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + marker::Unpin, + pin::Pin, + time::Duration, +}; +use streamunordered::{StreamUnordered, StreamYield}; + +/// These reexports are required so that external crates can use the `delegated_subsystem` macro properly. +pub mod reexports { + pub use sp_core::traits::SpawnNamed; + pub use polkadot_node_subsystem::{ + SpawnedSubsystem, + Subsystem, + SubsystemContext, + }; +} + + +/// Duration a job will wait after sending a stop signal before hard-aborting. +pub const JOB_GRACEFUL_STOP_DURATION: Duration = Duration::from_secs(1); +/// Capacity of channels to and from individual jobs +pub const JOB_CHANNEL_CAPACITY: usize = 64; + + +/// Utility errors +#[derive(Debug, derive_more::From)] +pub enum Error { + /// Attempted to send or receive on a oneshot channel which had been canceled + #[from] + Oneshot(oneshot::Canceled), + /// Attempted to send on a MPSC channel which has been canceled + #[from] + Mpsc(mpsc::SendError), + /// A subsystem error + #[from] + Subsystem(SubsystemError), + /// An error in the Chain API. + #[from] + ChainApi(ChainApiError), + /// An error in the Runtime API. + #[from] + RuntimeApi(RuntimeApiError), + /// The type system wants this even though it doesn't make sense + #[from] + Infallible(std::convert::Infallible), + /// Attempted to convert from an AllMessages to a FromJob, and failed. + SenderConversion(String), + /// The local node is not a validator. + NotAValidator, + /// The desired job is not present in the jobs list. + JobNotFound(Hash), + /// Already forwarding errors to another sender + AlreadyForwarding, +} + +/// A type alias for Runtime API receivers. +pub type RuntimeApiReceiver = oneshot::Receiver>; + +/// Request some data from the `RuntimeApi`. +pub async fn request_from_runtime( + parent: Hash, + sender: &mut mpsc::Sender, + request_builder: RequestBuilder, +) -> Result, Error> +where + RequestBuilder: FnOnce(RuntimeApiSender) -> RuntimeApiRequest, + FromJob: TryFrom, + >::Error: std::fmt::Debug, +{ + let (tx, rx) = oneshot::channel(); + + sender + .send( + AllMessages::RuntimeApi(RuntimeApiMessage::Request(parent, request_builder(tx))) + .try_into() + .map_err(|err| Error::SenderConversion(format!("{:?}", err)))?, + ) + .await?; + + Ok(rx) +} + +/// Construct specialized request functions for the runtime. +/// +/// These would otherwise get pretty repetitive. +macro_rules! 
specialize_requests { + // expand return type name for documentation purposes + (fn $func_name:ident( $( $param_name:ident : $param_ty:ty ),* ) -> $return_ty:ty ; $request_variant:ident;) => { + specialize_requests!{ + named stringify!($request_variant) ; fn $func_name( $( $param_name : $param_ty ),* ) -> $return_ty ; $request_variant; + } + }; + + // create a single specialized request function + (named $doc_name:expr ; fn $func_name:ident( $( $param_name:ident : $param_ty:ty ),* ) -> $return_ty:ty ; $request_variant:ident;) => { + #[doc = "Request `"] + #[doc = $doc_name] + #[doc = "` from the runtime"] + pub async fn $func_name( + parent: Hash, + $( + $param_name: $param_ty, + )* + sender: &mut mpsc::Sender, + ) -> Result, Error> + where + FromJob: TryFrom, + >::Error: std::fmt::Debug, + { + request_from_runtime(parent, sender, |tx| RuntimeApiRequest::$request_variant( + $( $param_name, )* tx + )).await + } + }; + + // recursive decompose + ( + fn $func_name:ident( $( $param_name:ident : $param_ty:ty ),* ) -> $return_ty:ty ; $request_variant:ident; + $( + fn $t_func_name:ident( $( $t_param_name:ident : $t_param_ty:ty ),* ) -> $t_return_ty:ty ; $t_request_variant:ident; + )+ + ) => { + specialize_requests!{ + fn $func_name( $( $param_name : $param_ty ),* ) -> $return_ty ; $request_variant ; + } + specialize_requests!{ + $( + fn $t_func_name( $( $t_param_name : $t_param_ty ),* ) -> $t_return_ty ; $t_request_variant ; + )+ + } + }; +} + +specialize_requests! { + fn request_validators() -> Vec; Validators; + fn request_validator_groups() -> (Vec>, GroupRotationInfo); ValidatorGroups; + fn request_availability_cores() -> Vec; AvailabilityCores; + fn request_full_validation_data(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; FullValidationData; + fn request_persisted_validation_data(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; PersistedValidationData; + fn request_session_index_for_child() -> SessionIndex; SessionIndexForChild; + fn request_validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; ValidationCode; + fn request_candidate_pending_availability(para_id: ParaId) -> Option; CandidatePendingAvailability; + fn request_candidate_events() -> Vec; CandidateEvents; +} + +/// Request some data from the `RuntimeApi` via a SubsystemContext. +async fn request_from_runtime_ctx( + parent: Hash, + ctx: &mut Context, + request_builder: RequestBuilder, +) -> Result, Error> +where + RequestBuilder: FnOnce(RuntimeApiSender) -> RuntimeApiRequest, + Context: SubsystemContext, +{ + let (tx, rx) = oneshot::channel(); + + ctx + .send_message( + AllMessages::RuntimeApi(RuntimeApiMessage::Request(parent, request_builder(tx))) + .try_into() + .map_err(|err| Error::SenderConversion(format!("{:?}", err)))?, + ) + .await?; + + Ok(rx) +} + + +/// Construct specialized request functions for the runtime. +/// +/// These would otherwise get pretty repetitive. +macro_rules! 
specialize_requests_ctx { + // expand return type name for documentation purposes + (fn $func_name:ident( $( $param_name:ident : $param_ty:ty ),* ) -> $return_ty:ty ; $request_variant:ident;) => { + specialize_requests_ctx!{ + named stringify!($request_variant) ; fn $func_name( $( $param_name : $param_ty ),* ) -> $return_ty ; $request_variant; + } + }; + + // create a single specialized request function + (named $doc_name:expr ; fn $func_name:ident( $( $param_name:ident : $param_ty:ty ),* ) -> $return_ty:ty ; $request_variant:ident;) => { + #[doc = "Request `"] + #[doc = $doc_name] + #[doc = "` from the runtime via a `SubsystemContext`"] + pub async fn $func_name( + parent: Hash, + $( + $param_name: $param_ty, + )* + ctx: &mut Context, + ) -> Result, Error> { + request_from_runtime_ctx(parent, ctx, |tx| RuntimeApiRequest::$request_variant( + $( $param_name, )* tx + )).await + } + }; + + // recursive decompose + ( + fn $func_name:ident( $( $param_name:ident : $param_ty:ty ),* ) -> $return_ty:ty ; $request_variant:ident; + $( + fn $t_func_name:ident( $( $t_param_name:ident : $t_param_ty:ty ),* ) -> $t_return_ty:ty ; $t_request_variant:ident; + )+ + ) => { + specialize_requests_ctx!{ + fn $func_name( $( $param_name : $param_ty ),* ) -> $return_ty ; $request_variant ; + } + specialize_requests_ctx!{ + $( + fn $t_func_name( $( $t_param_name : $t_param_ty ),* ) -> $t_return_ty ; $t_request_variant ; + )+ + } + }; +} + +specialize_requests_ctx! { + fn request_validators_ctx() -> Vec; Validators; + fn request_validator_groups_ctx() -> (Vec>, GroupRotationInfo); ValidatorGroups; + fn request_availability_cores_ctx() -> Vec; AvailabilityCores; + fn request_full_validation_data_ctx(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; FullValidationData; + fn request_persisted_validation_data_ctx(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; PersistedValidationData; + fn request_session_index_for_child_ctx() -> SessionIndex; SessionIndexForChild; + fn request_validation_code_ctx(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; ValidationCode; + fn request_candidate_pending_availability_ctx(para_id: ParaId) -> Option; CandidatePendingAvailability; + fn request_candidate_events_ctx() -> Vec; CandidateEvents; +} + +/// From the given set of validators, find the first key we can sign with, if any. +pub fn signing_key(validators: &[ValidatorId], keystore: &KeyStorePtr) -> Option { + let keystore = keystore.read(); + validators + .iter() + .find_map(|v| keystore.key_pair::(&v).ok()) +} + +/// Local validator information +/// +/// It can be created if the local node is a validator in the context of a particular +/// relay chain block. +pub struct Validator { + signing_context: SigningContext, + key: ValidatorPair, + index: ValidatorIndex, +} + +impl Validator { + /// Get a struct representing this node's validator if this node is in fact a validator in the context of the given block. + pub async fn new( + parent: Hash, + keystore: KeyStorePtr, + mut sender: mpsc::Sender, + ) -> Result + where + FromJob: TryFrom, + >::Error: std::fmt::Debug, + { + // Note: request_validators and request_session_index_for_child do not and cannot + // run concurrently: they both have a mutable handle to the same sender. + // However, each of them returns a oneshot::Receiver, and those are resolved concurrently. 
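		// (Concretely: each `request_*(..).await?` call below completes once the
		// request has been *sent* to the runtime API subsystem and hands back a
		// `oneshot::Receiver`; the `try_join!` then awaits both answers concurrently.)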
+		let (validators, session_index) = futures::try_join!(
+			request_validators(parent, &mut sender).await?,
+			request_session_index_for_child(parent, &mut sender).await?,
+		)?;
+
+		let signing_context = SigningContext {
+			session_index: session_index?,
+			parent_hash: parent,
+		};
+
+		let validators = validators?;
+
+		Self::construct(&validators, signing_context, keystore)
+	}
+
+	/// Construct a validator instance without performing runtime fetches.
+	///
+	/// This can be useful if external code also needs the same data.
+	pub fn construct(
+		validators: &[ValidatorId],
+		signing_context: SigningContext,
+		keystore: KeyStorePtr,
+	) -> Result<Self, Error> {
+		let key = signing_key(validators, &keystore).ok_or(Error::NotAValidator)?;
+		let index = validators
+			.iter()
+			.enumerate()
+			.find(|(_, k)| k == &&key.public())
+			.map(|(idx, _)| idx as ValidatorIndex)
+			.expect("signing_key would have already returned NotAValidator if the item we're searching for isn't in this list; qed");
+
+		Ok(Validator {
+			signing_context,
+			key,
+			index,
+		})
+	}
+
+	/// Get this validator's id.
+	pub fn id(&self) -> ValidatorId {
+		self.key.public()
+	}
+
+	/// Get this validator's local index.
+	pub fn index(&self) -> ValidatorIndex {
+		self.index
+	}
+
+	/// Get the current signing context.
+	pub fn signing_context(&self) -> &SigningContext {
+		&self.signing_context
+	}
+
+	/// Sign a payload with this validator
+	pub fn sign<Payload: EncodeAs<RealPayload>, RealPayload: Encode>(
+		&self,
+		payload: Payload,
+	) -> Signed<Payload, RealPayload> {
+		Signed::sign(payload, &self.signing_context, self.index, &self.key)
+	}
+
+	/// Validate the payload with this validator
+	///
+	/// Validation can only succeed if `signed.validator_index() == self.index()`.
+	/// Normally, this will always be the case for a properly operating program,
+	/// but it's double-checked here anyway.
+	pub fn check_payload<Payload: EncodeAs<RealPayload>, RealPayload: Encode>(
+		&self,
+		signed: Signed<Payload, RealPayload>,
+	) -> Result<(), ()> {
+		if signed.validator_index() != self.index {
+			return Err(());
+		}
+		signed.check_signature(&self.signing_context, &self.id())
+	}
+}
+
+/// ToJob is expected to be an enum declaring the set of messages of interest to a particular job.
+///
+/// Normally, this will be some subset of `AllMessages`, and a `Stop` variant.
+pub trait ToJobTrait: TryFrom<AllMessages> {
+	/// The `Stop` variant of the ToJob enum.
+	const STOP: Self;
+
+	/// If the message variant contains its relay parent, return it here
+	fn relay_parent(&self) -> Option<Hash>;
+}
+
+/// A JobHandle manages a particular job for a subsystem.
+struct JobHandle<ToJob> {
+	abort_handle: future::AbortHandle,
+	to_job: mpsc::Sender<ToJob>,
+	finished: oneshot::Receiver<()>,
+	outgoing_msgs_handle: usize,
+}
+
+impl<ToJob> JobHandle<ToJob> {
+	/// Send a message to the job.
+	async fn send_msg(&mut self, msg: ToJob) -> Result<(), Error> {
+		self.to_job.send(msg).await.map_err(Into::into)
+	}
+}
+
+impl<ToJob: ToJobTrait> JobHandle<ToJob> {
+	/// Stop this job gracefully.
+	///
+	/// If it hasn't shut itself down after `JOB_GRACEFUL_STOP_DURATION`, abort it.
+	async fn stop(mut self) {
+		// we don't actually care if the message couldn't be sent
+		if let Err(_) = self.to_job.send(ToJob::STOP).await {
+			// no need to wait further here: the job is either stalled or
+			// disconnected, and in either case, we can just abort it immediately
+			self.abort_handle.abort();
+			return;
+		}
+		let stop_timer = Delay::new(JOB_GRACEFUL_STOP_DURATION);
+
+		match future::select(stop_timer, self.finished).await {
+			// the grace period elapsed before the job concluded: hard-abort it
+			Either::Left((_, _)) => {
+				self.abort_handle.abort();
+			}
+			// the job shut itself down within the grace period: nothing to do
+			Either::Right((_, _)) => {}
+		}
+	}
+}
+
+/// This trait governs jobs.
+/// +/// Jobs are instantiated and killed automatically on appropriate overseer messages. +/// Other messages are passed along to and from the job via the overseer to other +/// subsystems. +pub trait JobTrait: Unpin { + /// Message type to the job. Typically a subset of AllMessages. + type ToJob: 'static + ToJobTrait + Send; + /// Message type from the job. Typically a subset of AllMessages. + type FromJob: 'static + Into + Send; + /// Job runtime error. + type Error: 'static + std::fmt::Debug + Send; + /// Extra arguments this job needs to run properly. + /// + /// If no extra information is needed, it is perfectly acceptable to set it to `()`. + type RunArgs: 'static + Send; + /// Subsystem-specific Prometheus metrics. + /// + /// Jobs spawned by one subsystem should share the same + /// instance of metrics (use `.clone()`). + /// The `delegate_subsystem!` macro should take care of this. + type Metrics: 'static + metrics::Metrics + Send; + + /// Name of the job, i.e. `CandidateBackingJob` + const NAME: &'static str; + + /// Run a job for the parent block indicated + fn run( + parent: Hash, + run_args: Self::RunArgs, + metrics: Self::Metrics, + receiver: mpsc::Receiver, + sender: mpsc::Sender, + ) -> Pin> + Send>>; + + /// Handle a message which has no relay parent, and therefore can't be dispatched to a particular job + /// + /// By default, this is implemented with a NOP function. However, if + /// ToJob occasionally has messages which do not correspond to a particular + /// parent relay hash, then this function will be spawned as a one-off + /// task to handle those messages. + // TODO: the API here is likely not precisely what we want; figure it out more + // once we're implementing a subsystem which actually needs this feature. + // In particular, we're quite likely to want this to return a future instead of + // interrupting the active thread for the duration of the handler. + fn handle_unanchored_msg(_msg: Self::ToJob) -> Result<(), Self::Error> { + Ok(()) + } +} + +/// Error which can be returned by the jobs manager +/// +/// Wraps the utility error type and the job-specific error +#[derive(Debug, derive_more::From)] +pub enum JobsError { + /// utility error + #[from] + Utility(Error), + /// internal job error + Job(JobError), +} + +/// Jobs manager for a subsystem +/// +/// - Spawns new jobs for a given relay-parent on demand. +/// - Closes old jobs for a given relay-parent on demand. +/// - Dispatches messages to the appropriate job for a given relay-parent. +/// - When dropped, aborts all remaining jobs. +/// - implements `Stream`, collecting all messages from subordinate jobs. +#[pin_project(PinnedDrop)] +pub struct Jobs { + spawner: Spawner, + running: HashMap>, + #[pin] + outgoing_msgs: StreamUnordered>, + job: std::marker::PhantomData, + errors: Option, JobsError)>>, +} + +impl Jobs { + /// Create a new Jobs manager which handles spawning appropriate jobs. + pub fn new(spawner: Spawner) -> Self { + Self { + spawner, + running: HashMap::new(), + outgoing_msgs: StreamUnordered::new(), + job: std::marker::PhantomData, + errors: None, + } + } + + /// Monitor errors which may occur during handling of a spawned job. + /// + /// By default, an error in a job is simply logged. Once this is called, + /// the error is forwarded onto the provided channel. + /// + /// Errors if the error channel already exists. 
+	pub fn forward_errors(
+		&mut self,
+		tx: mpsc::Sender<(Option<Hash>, JobsError<Job::Error>)>,
+	) -> Result<(), Error> {
+		if self.errors.is_some() {
+			return Err(Error::AlreadyForwarding);
+		}
+		self.errors = Some(tx);
+		Ok(())
+	}
+
+	/// Spawn a new job for this `parent_hash`, with whatever args are appropriate.
+	fn spawn_job(&mut self, parent_hash: Hash, run_args: Job::RunArgs, metrics: Job::Metrics) -> Result<(), Error> {
+		let (to_job_tx, to_job_rx) = mpsc::channel(JOB_CHANNEL_CAPACITY);
+		let (from_job_tx, from_job_rx) = mpsc::channel(JOB_CHANNEL_CAPACITY);
+		let (finished_tx, finished) = oneshot::channel();
+
+		// clone the error transmitter to move into the future
+		let err_tx = self.errors.clone();
+
+		let (future, abort_handle) = future::abortable(async move {
+			if let Err(e) = Job::run(parent_hash, run_args, metrics, to_job_rx, from_job_tx).await {
+				log::error!(
+					"{}({}) finished with an error {:?}",
+					Job::NAME,
+					parent_hash,
+					e,
+				);
+
+				if let Some(mut err_tx) = err_tx {
+					// if we can't send the notification of error on the error channel, then
+					// there's no point trying to propagate this error onto the channel too;
+					// all we can do is warn that error propagation has failed
+					if let Err(e) = err_tx.send((Some(parent_hash), JobsError::Job(e))).await {
+						log::warn!("failed to forward error: {:?}", e);
+					}
+				}
+			}
+		});
+
+		// the spawn mechanism requires that the spawned future has no output
+		let future = async move {
+			// job errors are already handled within the future, meaning
+			// that any errors here are due to the abortable mechanism.
+			// failure to abort isn't of interest.
+			let _ = future.await;
+			// transmission failure here is only possible if the receiver is closed,
+			// which means the handle is dropped, which means we don't care anymore
+			let _ = finished_tx.send(());
+		};
+		self.spawner.spawn(Job::NAME, future.boxed());
+
+		// this handle lets us remove the appropriate receiver from self.outgoing_msgs
+		// when it's time to stop the job.
+		let outgoing_msgs_handle = self.outgoing_msgs.push(from_job_rx);
+
+		let handle = JobHandle {
+			abort_handle,
+			to_job: to_job_tx,
+			finished,
+			outgoing_msgs_handle,
+		};
+
+		self.running.insert(parent_hash, handle);
+
+		Ok(())
+	}
+
+	/// Stop the job associated with this `parent_hash`.
+	pub async fn stop_job(&mut self, parent_hash: Hash) -> Result<(), Error> {
+		match self.running.remove(&parent_hash) {
+			Some(handle) => {
+				Pin::new(&mut self.outgoing_msgs).remove(handle.outgoing_msgs_handle);
+				handle.stop().await;
+				Ok(())
+			}
+			None => Err(Error::JobNotFound(parent_hash)),
+		}
+	}
+
+	/// Send a message to the appropriate job for this `parent_hash`.
+	/// Will not return an error if the job is not running.
+	async fn send_msg(&mut self, parent_hash: Hash, msg: Job::ToJob) -> Result<(), Error> {
+		match self.running.get_mut(&parent_hash) {
+			Some(job) => job.send_msg(msg).await?,
+			None => {
+				// don't bring down the subsystem, this can happen due to a race condition
+			},
+		}
+		Ok(())
+	}
+}
+
+// Note that on drop, we don't have the chance to gracefully spin down each of the remaining handles;
+// we just abort them all. Still better than letting them dangle.
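// (Contrast with `stop_job` above, which takes the graceful path: it sends
// `ToJob::STOP` and allows up to `JOB_GRACEFUL_STOP_DURATION` before hard-aborting.)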
+#[pinned_drop] +impl PinnedDrop for Jobs { + fn drop(self: Pin<&mut Self>) { + for job_handle in self.running.values() { + job_handle.abort_handle.abort(); + } + } +} + +impl Stream for Jobs +where + Spawner: SpawnNamed, + Job: JobTrait, +{ + type Item = Job::FromJob; + + fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context) -> task::Poll> { + // pin-project the outgoing messages + let result = self.project().outgoing_msgs.poll_next(cx).map(|opt| { + opt.and_then(|(stream_yield, _)| match stream_yield { + StreamYield::Item(msg) => Some(msg), + StreamYield::Finished(_) => None, + }) + }); + // we don't want the stream to end if the jobs are empty at some point + match result { + task::Poll::Ready(None) => task::Poll::Pending, + otherwise => otherwise, + } + } +} + +/// A basic implementation of a subsystem. +/// +/// This struct is responsible for handling message traffic between +/// this subsystem and the overseer. It spawns and kills jobs on the +/// appropriate Overseer messages, and dispatches standard traffic to +/// the appropriate job the rest of the time. +pub struct JobManager { + spawner: Spawner, + run_args: Job::RunArgs, + metrics: Job::Metrics, + context: std::marker::PhantomData, + job: std::marker::PhantomData, + errors: Option, JobsError)>>, +} + +impl JobManager +where + Spawner: SpawnNamed + Clone + Send + Unpin, + Context: SubsystemContext, + Job: 'static + JobTrait, + Job::RunArgs: Clone, + Job::ToJob: TryFrom + TryFrom<::Message> + Sync, +{ + /// Creates a new `Subsystem`. + pub fn new(spawner: Spawner, run_args: Job::RunArgs, metrics: Job::Metrics) -> Self { + Self { + spawner, + run_args, + metrics, + context: std::marker::PhantomData, + job: std::marker::PhantomData, + errors: None, + } + } + + /// Monitor errors which may occur during handling of a spawned job. + /// + /// By default, an error in a job is simply logged. Once this is called, + /// the error is forwarded onto the provided channel. + /// + /// Errors if the error channel already exists. + pub fn forward_errors( + &mut self, + tx: mpsc::Sender<(Option, JobsError)>, + ) -> Result<(), Error> { + if self.errors.is_some() { + return Err(Error::AlreadyForwarding); + } + self.errors = Some(tx); + Ok(()) + } + + /// Run this subsystem + /// + /// Conceptually, this is very simple: it just loops forever. + /// + /// - On incoming overseer messages, it starts or stops jobs as appropriate. + /// - On other incoming messages, if they can be converted into Job::ToJob and + /// include a hash, then they're forwarded to the appropriate individual job. + /// - On outgoing messages from the jobs, it forwards them to the overseer. + /// + /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur. + /// Otherwise, most are logged and then discarded. + pub async fn run( + mut ctx: Context, + run_args: Job::RunArgs, + metrics: Job::Metrics, + spawner: Spawner, + mut err_tx: Option, JobsError)>>, + ) { + let mut jobs = Jobs::new(spawner.clone()); + if let Some(ref err_tx) = err_tx { + jobs.forward_errors(err_tx.clone()) + .expect("we never call this twice in this context; qed"); + } + + loop { + select! 
{ + incoming = ctx.recv().fuse() => if Self::handle_incoming(incoming, &mut jobs, &run_args, &metrics, &mut err_tx).await { break }, + outgoing = jobs.next().fuse() => Self::handle_outgoing(outgoing, &mut ctx, &mut err_tx).await, + complete => break, + } + } + } + + // if we have a channel on which to forward errors, do so + async fn fwd_err( + hash: Option, + err: JobsError, + err_tx: &mut Option, JobsError)>>, + ) { + if let Some(err_tx) = err_tx { + // if we can't send on the error transmission channel, we can't do anything useful about it + // still, we can at least log the failure + if let Err(e) = err_tx.send((hash, err)).await { + log::warn!("failed to forward error: {:?}", e); + } + } + } + + // handle an incoming message. return true if we should break afterwards. + async fn handle_incoming( + incoming: SubsystemResult>, + jobs: &mut Jobs, + run_args: &Job::RunArgs, + metrics: &Job::Metrics, + err_tx: &mut Option, JobsError)>>, + ) -> bool { + use polkadot_node_subsystem::ActiveLeavesUpdate; + use polkadot_node_subsystem::FromOverseer::{Communication, Signal}; + use polkadot_node_subsystem::OverseerSignal::{ActiveLeaves, BlockFinalized, Conclude}; + + match incoming { + Ok(Signal(ActiveLeaves(ActiveLeavesUpdate { + activated, + deactivated, + }))) => { + for hash in activated { + let metrics = metrics.clone(); + if let Err(e) = jobs.spawn_job(hash, run_args.clone(), metrics) { + log::error!("Failed to spawn a job: {:?}", e); + Self::fwd_err(Some(hash), e.into(), err_tx).await; + return true; + } + } + + for hash in deactivated { + if let Err(e) = jobs.stop_job(hash).await { + log::error!("Failed to stop a job: {:?}", e); + Self::fwd_err(Some(hash), e.into(), err_tx).await; + return true; + } + } + } + Ok(Signal(Conclude)) => { + // Breaking the loop ends fn run, which drops `jobs`, which immediately drops all ongoing work. + // We can afford to wait a little while to shut them all down properly before doing that. + // + // Forwarding the stream to a drain means we wait until all of the items in the stream + // have completed. Contrast with `into_future`, which turns it into a future of `(head, rest_stream)`. + use futures::sink::drain; + use futures::stream::FuturesUnordered; + use futures::stream::StreamExt; + + if let Err(e) = jobs + .running + .drain() + .map(|(_, handle)| handle.stop()) + .collect::>() + .map(Ok) + .forward(drain()) + .await + { + log::error!("failed to stop all jobs on conclude signal: {:?}", e); + Self::fwd_err(None, Error::from(e).into(), err_tx).await; + } + + return true; + } + Ok(Communication { msg }) => { + if let Ok(to_job) = ::try_from(msg) { + match to_job.relay_parent() { + Some(hash) => { + if let Err(err) = jobs.send_msg(hash, to_job).await { + log::error!("Failed to send a message to a job: {:?}", err); + Self::fwd_err(Some(hash), err.into(), err_tx).await; + return true; + } + } + None => { + if let Err(err) = Job::handle_unanchored_msg(to_job) { + log::error!("Failed to handle unhashed message: {:?}", err); + Self::fwd_err(None, JobsError::Job(err), err_tx).await; + return true; + } + } + } + } + } + Ok(Signal(BlockFinalized(_))) => {} + Err(err) => { + log::error!("error receiving message from subsystem context: {:?}", err); + Self::fwd_err(None, Error::from(err).into(), err_tx).await; + return true; + } + } + false + } + + // handle an outgoing message. 
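	// Outgoing messages are yielded by the `Jobs` stream (one merged stream over all
	// running jobs) and forwarded to the overseer after conversion via `Into<AllMessages>`.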
+ async fn handle_outgoing( + outgoing: Option, + ctx: &mut Context, + err_tx: &mut Option, JobsError)>>, + ) { + let msg = outgoing.expect("the Jobs stream never ends; qed"); + if let Err(e) = ctx.send_message(msg.into()).await { + Self::fwd_err(None, Error::from(e).into(), err_tx).await; + } + } +} + +impl Subsystem for JobManager +where + Spawner: SpawnNamed + Send + Clone + Unpin + 'static, + Context: SubsystemContext, + ::Message: Into, + Job: 'static + JobTrait + Send, + Job::RunArgs: Clone + Sync, + Job::ToJob: TryFrom + Sync, + Job::Metrics: Sync, +{ + type Metrics = Job::Metrics; + + fn start(self, ctx: Context) -> SpawnedSubsystem { + let spawner = self.spawner.clone(); + let run_args = self.run_args.clone(); + let metrics = self.metrics.clone(); + let errors = self.errors; + + let future = Box::pin(async move { + Self::run(ctx, run_args, metrics, spawner, errors).await; + }); + + SpawnedSubsystem { + name: Job::NAME.strip_suffix("Job").unwrap_or(Job::NAME), + future, + } + } +} + +/// Create a delegated subsystem +/// +/// It is possible to create a type which implements `Subsystem` by simply doing: +/// +/// ```ignore +/// pub type ExampleSubsystem = JobManager; +/// ``` +/// +/// However, doing this requires that job itself and all types which comprise it (i.e. `ToJob`, `FromJob`, `Error`, `RunArgs`) +/// are public, to avoid exposing private types in public interfaces. It's possible to delegate instead, which +/// can reduce the total number of public types exposed, i.e. +/// +/// ```ignore +/// type Manager = JobManager; +/// pub struct ExampleSubsystem { +/// manager: Manager, +/// } +/// +/// impl Subsystem for ExampleSubsystem { ... } +/// ``` +/// +/// This dramatically reduces the number of public types in the crate; the only things which must be public are now +/// +/// - `struct ExampleSubsystem` (defined by this macro) +/// - `type ToJob` (because it appears in a trait bound) +/// - `type RunArgs` (because it appears in a function signature) +/// +/// Implementing this all manually is of course possible, but it's tedious; why bother? This macro exists for +/// the purpose of doing it automatically: +/// +/// ```ignore +/// delegated_subsystem!(ExampleJob(ExampleRunArgs) <- ExampleToJob as ExampleSubsystem); +/// ``` +#[macro_export] +macro_rules! 
delegated_subsystem { + ($job:ident($run_args:ty, $metrics:ty) <- $to_job:ty as $subsystem:ident) => { + delegated_subsystem!($job($run_args, $metrics) <- $to_job as $subsystem; stringify!($subsystem)); + }; + + ($job:ident($run_args:ty, $metrics:ty) <- $to_job:ty as $subsystem:ident; $subsystem_name:expr) => { + #[doc = "Manager type for the "] + #[doc = $subsystem_name] + type Manager = $crate::JobManager; + + #[doc = "An implementation of the "] + #[doc = $subsystem_name] + pub struct $subsystem { + manager: Manager, + } + + impl $subsystem + where + Spawner: Clone + $crate::reexports::SpawnNamed + Send + Unpin, + Context: $crate::reexports::SubsystemContext, + ::Message: Into<$to_job>, + { + #[doc = "Creates a new "] + #[doc = $subsystem_name] + pub fn new(spawner: Spawner, run_args: $run_args, metrics: $metrics) -> Self { + $subsystem { + manager: $crate::JobManager::new(spawner, run_args, metrics) + } + } + + /// Run this subsystem + pub async fn run(ctx: Context, run_args: $run_args, metrics: $metrics, spawner: Spawner) { + >::run(ctx, run_args, metrics, spawner, None).await + } + } + + impl $crate::reexports::Subsystem for $subsystem + where + Spawner: $crate::reexports::SpawnNamed + Send + Clone + Unpin + 'static, + Context: $crate::reexports::SubsystemContext, + ::Message: Into<$to_job>, + { + type Metrics = $metrics; + + fn start(self, ctx: Context) -> $crate::reexports::SpawnedSubsystem { + self.manager.start(ctx) + } + } + }; +} + +#[cfg(test)] +mod tests { + use super::{Error as UtilError, JobManager, JobTrait, JobsError, ToJobTrait}; + use polkadot_node_subsystem::{ + messages::{AllMessages, CandidateSelectionMessage}, + ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem, + }; + use assert_matches::assert_matches; + use futures::{ + channel::mpsc, + executor, + stream::{self, StreamExt}, + future, Future, FutureExt, SinkExt, + }; + use polkadot_primitives::v1::Hash; + use polkadot_node_subsystem_test_helpers::{self as test_helpers, make_subsystem_context, TimeoutExt as _}; + use std::{collections::HashMap, convert::TryFrom, pin::Pin, time::Duration}; + + // basic usage: in a nutshell, when you want to define a subsystem, just focus on what its jobs do; + // you can leave the subsystem itself to the job manager. + + // for purposes of demonstration, we're going to whip up a fake subsystem. 
+	// this will 'select' candidates which are pre-loaded in the job
+
+	// job structs are constructed within JobTrait::run
+	// most will want to retain the sender and receiver, as well as whatever other data they like
+	struct FakeCandidateSelectionJob {
+		receiver: mpsc::Receiver<ToJob>,
+	}
+
+	// ToJob implementations require the following properties:
+	//
+	// - have a Stop variant (to impl ToJobTrait)
+	// - impl ToJobTrait
+	// - impl TryFrom<AllMessages>
+	// - impl From<CandidateSelectionMessage> (from SubsystemContext::Message)
+	//
+	// Mostly, they are just a type-safe subset of AllMessages that this job is prepared to receive
+	enum ToJob {
+		CandidateSelection(CandidateSelectionMessage),
+		Stop,
+	}
+
+	impl ToJobTrait for ToJob {
+		const STOP: Self = ToJob::Stop;
+
+		fn relay_parent(&self) -> Option<Hash> {
+			match self {
+				Self::CandidateSelection(csm) => csm.relay_parent(),
+				Self::Stop => None,
+			}
+		}
+	}
+
+	impl TryFrom<AllMessages> for ToJob {
+		type Error = ();
+
+		fn try_from(msg: AllMessages) -> Result<Self, Self::Error> {
+			match msg {
+				AllMessages::CandidateSelection(csm) => Ok(ToJob::CandidateSelection(csm)),
+				_ => Err(()),
+			}
+		}
+	}
+
+	impl From<CandidateSelectionMessage> for ToJob {
+		fn from(csm: CandidateSelectionMessage) -> ToJob {
+			ToJob::CandidateSelection(csm)
+		}
+	}
+
+	// FromJob must be infallibly convertible into AllMessages.
+	//
+	// It exists to be a type-safe subset of AllMessages that this job is specified to send.
+	//
+	// Note: the Clone impl here is not generally required; it's just useful for this test context because
+	// we include it in the RunArgs
+	#[derive(Clone)]
+	enum FromJob {
+		Test,
+	}
+
+	impl From<FromJob> for AllMessages {
+		fn from(from_job: FromJob) -> AllMessages {
+			match from_job {
+				FromJob::Test => AllMessages::CandidateSelection(CandidateSelectionMessage::default()),
+			}
+		}
+	}
+
+	// Error will mostly be a wrapper to make the try operator more convenient;
+	// deriving From implementations for most variants is recommended.
+	// It must implement Debug for logging.
+	#[derive(Debug, derive_more::From)]
+	enum Error {
+		#[from]
+		Sending(mpsc::SendError),
+	}
+
+	impl JobTrait for FakeCandidateSelectionJob {
+		type ToJob = ToJob;
+		type FromJob = FromJob;
+		type Error = Error;
+		// RunArgs can be anything that a particular job needs supplied from its external context
+		// in order to create the Job. In this case, they're a hashmap of parents to the mock outputs
+		// expected from that job.
+		//
+		// Note that it's not recommended to use something as heavy as a hashmap in production: the
+		// RunArgs get cloned so that each job gets its own owned copy. If you need that, wrap it in
+		// an Arc. Within a testing context, that efficiency is less important.
+		type RunArgs = HashMap<Hash, Vec<FromJob>>;
+		type Metrics = ();
+
+		const NAME: &'static str = "FakeCandidateSelectionJob";
+
+		/// Run a job for the parent block indicated
+		//
+		// this function is in charge of creating and executing the job's main loop
+		fn run(
+			parent: Hash,
+			mut run_args: Self::RunArgs,
+			_metrics: Self::Metrics,
+			receiver: mpsc::Receiver<ToJob>,
+			mut sender: mpsc::Sender<FromJob>,
+		) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
+			async move {
+				let job = FakeCandidateSelectionJob { receiver };
+
+				// most jobs will have a request-response cycle at the heart of their run loop.
+ // however, in this case, we never receive valid messages, so we may as well + // just send all of our (mock) output messages now + let mock_output = run_args.remove(&parent).unwrap_or_default(); + let mut stream = stream::iter(mock_output.into_iter().map(Ok)); + sender.send_all(&mut stream).await?; + + // it isn't necessary to break run_loop into its own function, + // but it's convenient to separate the concerns in this way + job.run_loop().await + } + .boxed() + } + } + + impl FakeCandidateSelectionJob { + async fn run_loop(mut self) -> Result<(), Error> { + while let Some(msg) = self.receiver.next().await { + match msg { + ToJob::CandidateSelection(_csm) => { + unimplemented!("we'd report the collator to the peer set manager here, but that's not implemented yet"); + } + ToJob::Stop => break, + } + } + + Ok(()) + } + } + + // with the job defined, it's straightforward to get a subsystem implementation. + type FakeCandidateSelectionSubsystem = + JobManager; + + // this type lets us pretend to be the overseer + type OverseerHandle = test_helpers::TestSubsystemContextHandle; + + fn test_harness>( + run_args: HashMap>, + test: impl FnOnce(OverseerHandle, mpsc::Receiver<(Option, JobsError)>) -> T, + ) { + let _ = env_logger::builder() + .is_test(true) + .filter( + None, + log::LevelFilter::Trace, + ) + .try_init(); + + let pool = sp_core::testing::TaskExecutor::new(); + let (context, overseer_handle) = make_subsystem_context(pool.clone()); + let (err_tx, err_rx) = mpsc::channel(16); + + let subsystem = FakeCandidateSelectionSubsystem::run(context, run_args, (), pool, Some(err_tx)); + let test_future = test(overseer_handle, err_rx); + + futures::pin_mut!(subsystem, test_future); + + executor::block_on(async move { + future::join(subsystem, test_future) + .timeout(Duration::from_secs(2)) + .await + .expect("test timed out instead of completing") + }); + } + + #[test] + fn starting_and_stopping_job_works() { + let relay_parent: Hash = [0; 32].into(); + let mut run_args = HashMap::new(); + run_args.insert( + relay_parent.clone(), + vec![FromJob::Test], + ); + + test_harness(run_args, |mut overseer_handle, err_rx| async move { + overseer_handle + .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::start_work(relay_parent), + ))) + .await; + assert_matches!( + overseer_handle.recv().await, + AllMessages::CandidateSelection(_) + ); + overseer_handle + .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(relay_parent), + ))) + .await; + + overseer_handle + .send(FromOverseer::Signal(OverseerSignal::Conclude)) + .await; + + let errs: Vec<_> = err_rx.collect().await; + assert_eq!(errs.len(), 0); + }); + } + + #[test] + fn stopping_non_running_job_fails() { + let relay_parent: Hash = [0; 32].into(); + let run_args = HashMap::new(); + + test_harness(run_args, |mut overseer_handle, err_rx| async move { + overseer_handle + .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(relay_parent), + ))) + .await; + + let errs: Vec<_> = err_rx.collect().await; + assert_eq!(errs.len(), 1); + assert_eq!(errs[0].0, Some(relay_parent)); + assert_matches!( + errs[0].1, + JobsError::Utility(UtilError::JobNotFound(match_relay_parent)) if relay_parent == match_relay_parent + ); + }); + } + + #[test] + fn sending_to_a_non_running_job_do_not_stop_the_subsystem() { + let relay_parent = Hash::repeat_byte(0x01); + let mut run_args = HashMap::new(); + run_args.insert( + relay_parent.clone(), + vec![FromJob::Test], + ); + + 
test_harness(run_args, |mut overseer_handle, err_rx| async move { + overseer_handle + .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::start_work(relay_parent), + ))) + .await; + + // send to a non running job + overseer_handle + .send(FromOverseer::Communication { + msg: Default::default(), + }) + .await; + + // the subsystem is still alive + assert_matches!( + overseer_handle.recv().await, + AllMessages::CandidateSelection(_) + ); + + overseer_handle + .send(FromOverseer::Signal(OverseerSignal::Conclude)) + .await; + + let errs: Vec<_> = err_rx.collect().await; + assert_eq!(errs.len(), 0); + }); + } + + #[test] + fn test_subsystem_impl_and_name_derivation() { + let pool = sp_core::testing::TaskExecutor::new(); + let (context, _) = make_subsystem_context::(pool.clone()); + + let SpawnedSubsystem { name, .. } = + FakeCandidateSelectionSubsystem::new(pool, HashMap::new(), ()).start(context); + assert_eq!(name, "FakeCandidateSelection"); + } +} diff --git a/node/subsystem/Cargo.toml b/node/subsystem/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..f7283d40aadd4ae0f8851e876a58375f9c817276 --- /dev/null +++ b/node/subsystem/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "polkadot-node-subsystem" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +description = "Subsystem traits and message definitions" + +[dependencies] +async-trait = "0.1" +derive_more = "0.99.9" +futures = "0.3.5" +futures-timer = "3.0.2" +log = "0.4.8" +parity-scale-codec = "1.3.4" +parking_lot = { version = "0.10.0", optional = true } +pin-project = "0.4.22" +polkadot-node-primitives = { path = "../primitives" } +polkadot-node-network-protocol = { path = "../network/protocol" } +polkadot-primitives = { path = "../../primitives" } +polkadot-statement-table = { path = "../../statement-table" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } +smallvec = "1.4.1" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[dev-dependencies] +assert_matches = "1.3.0" +async-trait = "0.1" +futures = { version = "0.3.5", features = ["thread-pool"] } +parking_lot = "0.10.0" +polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } diff --git a/node/subsystem/src/errors.rs b/node/subsystem/src/errors.rs new file mode 100644 index 0000000000000000000000000000000000000000..40edb1b3c148014f179c942c0723cbf7106352f3 --- /dev/null +++ b/node/subsystem/src/errors.rs @@ -0,0 +1,57 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Error types for the subsystem requests. + +/// A description of an error causing the runtime API request to be unservable. 
+#[derive(Debug, Clone)] +pub struct RuntimeApiError(String); + +impl From for RuntimeApiError { + fn from(s: String) -> Self { + RuntimeApiError(s) + } +} + +impl core::fmt::Display for RuntimeApiError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { + write!(f, "{}", self.0) + } +} + +/// A description of an error causing the chain API request to be unservable. +#[derive(Debug, Clone)] +pub struct ChainApiError { + msg: String, +} + +impl From<&str> for ChainApiError { + fn from(s: &str) -> Self { + s.to_owned().into() + } +} + +impl From for ChainApiError { + fn from(msg: String) -> Self { + Self { msg } + } +} + +impl core::fmt::Display for ChainApiError { + fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { + write!(f, "{}", self.msg) + } +} diff --git a/node/subsystem/src/lib.rs b/node/subsystem/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..77ad27a2c1d46b3443c0a41f111446a957002892 --- /dev/null +++ b/node/subsystem/src/lib.rs @@ -0,0 +1,274 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Subsystem trait definitions and message types. +//! +//! Node-side logic for Polkadot is mostly comprised of Subsystems, which are discrete components +//! that communicate via message-passing. They are coordinated by an overseer, provided by a +//! separate crate. +//! +//! This crate also reexports Prometheus metric types which are expected to be implemented by subsystems. + +#![warn(missing_docs)] + +use std::pin::Pin; + +use futures::prelude::*; +use futures::channel::{mpsc, oneshot}; +use futures::future::BoxFuture; + +use polkadot_primitives::v1::Hash; +use async_trait::async_trait; +use smallvec::SmallVec; + +use crate::messages::AllMessages; + +pub mod errors; +pub mod messages; + +/// How many slots are stack-reserved for active leaves updates +/// +/// If there are fewer than this number of slots, then we've wasted some stack space. +/// If there are greater than this number of slots, then we fall back to a heap vector. +const ACTIVE_LEAVES_SMALLVEC_CAPACITY: usize = 8; + +/// Changes in the set of active leaves: the parachain heads which we care to work on. +/// +/// Note that the activated and deactivated fields indicate deltas, not complete sets. +#[derive(Clone, Debug, Default, Eq)] +pub struct ActiveLeavesUpdate { + /// New relay chain block hashes of interest. + pub activated: SmallVec<[Hash; ACTIVE_LEAVES_SMALLVEC_CAPACITY]>, + /// Relay chain block hashes no longer of interest. 
+	pub deactivated: SmallVec<[Hash; ACTIVE_LEAVES_SMALLVEC_CAPACITY]>,
+}
+
+impl ActiveLeavesUpdate {
+	/// Create an ActiveLeavesUpdate with a single activated hash
+	pub fn start_work(hash: Hash) -> Self {
+		Self { activated: [hash].as_ref().into(), ..Default::default() }
+	}
+
+	/// Create an ActiveLeavesUpdate with a single deactivated hash
+	pub fn stop_work(hash: Hash) -> Self {
+		Self { deactivated: [hash].as_ref().into(), ..Default::default() }
+	}
+}
+
+impl PartialEq for ActiveLeavesUpdate {
+	/// Equality for `ActiveLeavesUpdate` doesn't imply bitwise equality.
+	///
+	/// Instead, it means equality when `activated` and `deactivated` are considered as sets.
+	fn eq(&self, other: &Self) -> bool {
+		use std::collections::HashSet;
+		self.activated.iter().collect::<HashSet<_>>() == other.activated.iter().collect::<HashSet<_>>() &&
+			self.deactivated.iter().collect::<HashSet<_>>() == other.deactivated.iter().collect::<HashSet<_>>()
+	}
+}
+
+/// Signals sent by an overseer to a subsystem.
+#[derive(PartialEq, Clone, Debug)]
+pub enum OverseerSignal {
+	/// Subsystems should adjust their jobs to start and stop work on appropriate block hashes.
+	ActiveLeaves(ActiveLeavesUpdate),
+	/// `Subsystem` is informed of a finalized block by its block hash.
+	BlockFinalized(Hash),
+	/// Conclude the work of the `Overseer` and all `Subsystem`s.
+	Conclude,
+}
+
+/// A message type that a subsystem receives from an overseer.
+/// It wraps signals from an overseer and messages that are circulating
+/// between subsystems.
+///
+/// It is generic over the message type `M` that a particular `Subsystem` may use.
+#[derive(Debug)]
+pub enum FromOverseer<M> {
+	/// Signal from the `Overseer`.
+	Signal(OverseerSignal),
+
+	/// Some other `Subsystem`'s message.
+	Communication {
+		/// Contained message
+		msg: M,
+	},
+}
+
+/// An error type that describes faults that may happen.
+///
+/// These are:
+/// * Channels being closed
+/// * Subsystems dying when they are not expected to
+/// * Subsystems not dying when they are told to die
+/// * etc.
+#[derive(Debug, PartialEq, Eq)]
+pub struct SubsystemError;
+
+impl From<mpsc::SendError> for SubsystemError {
+	fn from(_: mpsc::SendError) -> Self {
+		Self
+	}
+}
+
+impl From<oneshot::Canceled> for SubsystemError {
+	fn from(_: oneshot::Canceled) -> Self {
+		Self
+	}
+}
+
+impl From<futures::task::SpawnError> for SubsystemError {
+	fn from(_: futures::task::SpawnError) -> Self {
+		Self
+	}
+}
+
+impl From<std::convert::Infallible> for SubsystemError {
+	fn from(e: std::convert::Infallible) -> Self {
+		match e {}
+	}
+}
+
+/// An asynchronous subsystem task.
+///
+/// In essence it's just a newtype wrapping a `BoxFuture`.
+pub struct SpawnedSubsystem {
+	/// Name of the subsystem being spawned.
+	pub name: &'static str,
+	/// The task of the subsystem being spawned.
+	pub future: BoxFuture<'static, ()>,
+}
+
+/// A `Result` type that wraps [`SubsystemError`].
+///
+/// [`SubsystemError`]: struct.SubsystemError.html
+pub type SubsystemResult<T> = Result<T, SubsystemError>;
+
+/// A context type that is given to the [`Subsystem`] upon spawning.
+/// It can be used by [`Subsystem`] to communicate with other [`Subsystem`]s
+/// or spawn jobs.
+///
+/// [`Overseer`]: struct.Overseer.html
+/// [`SubsystemJob`]: trait.SubsystemJob.html
+#[async_trait]
+pub trait SubsystemContext: Send + 'static {
+	/// The message type of this context. Subsystems launched with this context will expect
+	/// to receive messages of this type.
+	type Message: Send;
+
+	/// Try to asynchronously receive a message.
+	///
+	/// This has to be used with caution: if you loop over this without
+	/// using the `pending!()` macro you will end up with a busy loop!
+	async fn try_recv(&mut self) -> Result<Option<FromOverseer<Self::Message>>, ()>;
+
+	/// Receive a message.
+	async fn recv(&mut self) -> SubsystemResult<FromOverseer<Self::Message>>;
+
+	/// Spawn a child task on the executor.
+	async fn spawn(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>) -> SubsystemResult<()>;
+
+	/// Spawn a blocking child task on the executor's dedicated thread pool.
+	async fn spawn_blocking(
+		&mut self,
+		name: &'static str,
+		s: Pin<Box<dyn Future<Output = ()> + Send>>,
+	) -> SubsystemResult<()>;
+
+	/// Send a direct message to some other `Subsystem`, routed based on message type.
+	async fn send_message(&mut self, msg: AllMessages) -> SubsystemResult<()>;
+
+	/// Send multiple direct messages to other `Subsystem`s, routed based on message type.
+	async fn send_messages<T>(&mut self, msgs: T) -> SubsystemResult<()>
+		where T: IntoIterator<Item = AllMessages> + Send, T::IntoIter: Send;
+}
+
+/// A trait that describes the [`Subsystem`]s that can run on the [`Overseer`].
+///
+/// It is generic over the message type circulating in the system.
+/// The idea is that we want some type containing persistent state that
+/// can spawn actually running subsystems when asked to.
+///
+/// [`Overseer`]: struct.Overseer.html
+/// [`Subsystem`]: trait.Subsystem.html
+pub trait Subsystem<C: SubsystemContext> {
+	/// Subsystem-specific Prometheus metrics.
+	type Metrics: metrics::Metrics;
+
+	/// Start this `Subsystem` and return `SpawnedSubsystem`.
+	fn start(self, ctx: C) -> SpawnedSubsystem;
+}
+
+/// A dummy subsystem that implements [`Subsystem`] for all
+/// types of messages. Used for tests or as a placeholder.
+pub struct DummySubsystem;
+
+impl<C: SubsystemContext> Subsystem<C> for DummySubsystem {
+	type Metrics = ();
+
+	fn start(self, mut ctx: C) -> SpawnedSubsystem {
+		let future = Box::pin(async move {
+			loop {
+				match ctx.recv().await {
+					Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return,
+					Err(_) => return,
+					_ => continue,
+				}
+			}
+		});
+
+		SpawnedSubsystem {
+			name: "DummySubsystem",
+			future,
+		}
+	}
+}
+
+/// This module reexports Prometheus types and defines the [`Metrics`] trait.
+pub mod metrics {
+	/// Reexport Prometheus types.
+	pub use substrate_prometheus_endpoint as prometheus;
+
+	/// Subsystem- or job-specific Prometheus metrics.
+	///
+	/// Usually implemented as a wrapper for `Option`
+	/// to ensure `Default` bounds or as a dummy type ().
+	/// Prometheus metrics internally hold an `Arc` reference, so cloning them is fine.
+	pub trait Metrics: Default + Clone {
+		/// Try to register metrics in the Prometheus registry.
+		fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError>;
+
+		/// Convenience method to register metrics in the optional Prometheus registry.
+		/// If the registration fails, prints a warning and returns `Default::default()`.
+ fn register(registry: Option<&prometheus::Registry>) -> Self { + registry.map(|r| { + match Self::try_register(r) { + Err(e) => { + log::warn!("Failed to register metrics: {:?}", e); + Default::default() + }, + Ok(metrics) => metrics, + } + }).unwrap_or_default() + } + } + + // dummy impl + impl Metrics for () { + fn try_register(_registry: &prometheus::Registry) -> Result<(), prometheus::PrometheusError> { + Ok(()) + } + } +} diff --git a/node/subsystem/src/messages.rs b/node/subsystem/src/messages.rs new file mode 100644 index 0000000000000000000000000000000000000000..39f3dc923f777294f4529f98be8bb2081c24e1f0 --- /dev/null +++ b/node/subsystem/src/messages.rs @@ -0,0 +1,548 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Message types for the overseer and subsystems. +//! +//! These messages are intended to define the protocol by which different subsystems communicate with each +//! other and signals that they receive from an overseer to coordinate their work. +//! This is intended for use with the `polkadot-overseer` crate. +//! +//! Subsystems' APIs are defined separately from their implementation, leading to easier mocking. + +use futures::channel::{mpsc, oneshot}; + +use polkadot_node_network_protocol::{ + v1 as protocol_v1, NetworkBridgeEvent, ReputationChange, PeerId, PeerSet, +}; +use polkadot_node_primitives::{ + CollationGenerationConfig, MisbehaviorReport, SignedFullStatement, ValidationResult, +}; +use polkadot_primitives::v1::{ + AvailableData, BackedCandidate, BlockNumber, CandidateDescriptor, CandidateEvent, + CandidateReceipt, CollatorId, CommittedCandidateReceipt, + CoreState, ErasureChunk, GroupRotationInfo, Hash, Id as ParaId, + OccupiedCoreAssumption, PersistedValidationData, PoV, SessionIndex, SignedAvailabilityBitfield, + TransientValidationData, ValidationCode, ValidatorId, ValidationData, ValidatorIndex, + ValidatorSignature, +}; +use std::sync::Arc; + +/// A notification of a new backed candidate. +#[derive(Debug)] +pub struct NewBackedCandidate(pub BackedCandidate); + +/// Messages received by the Candidate Selection subsystem. +#[derive(Debug)] +pub enum CandidateSelectionMessage { + /// We recommended a particular candidate to be seconded, but it was invalid; penalize the collator. + /// The hash is the relay parent. + Invalid(Hash, CandidateReceipt), +} + +impl CandidateSelectionMessage { + /// If the current variant contains the relay parent hash, return it. + pub fn relay_parent(&self) -> Option { + match self { + Self::Invalid(hash, _) => Some(*hash), + } + } +} + +impl Default for CandidateSelectionMessage { + fn default() -> Self { + CandidateSelectionMessage::Invalid(Default::default(), Default::default()) + } +} + +/// Messages received by the Candidate Backing subsystem. 
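// A sketch of how the request/response messages in this file are used in practice
// (`ctx` and `relay_parent` are illustrative, not defined here): the caller creates
// a `oneshot` channel, embeds the sender in the message, and awaits the receiver:
//
//     let (tx, rx) = oneshot::channel();
//     ctx.send_message(AllMessages::CandidateBacking(
//         CandidateBackingMessage::GetBackedCandidates(relay_parent, tx),
//     )).await?;
//     let backed: Vec<NewBackedCandidate> = rx.await?;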
+#[derive(Debug)] +pub enum CandidateBackingMessage { + /// Requests a set of backable candidates that could be backed in a child of the given + /// relay-parent, referenced by its hash. + GetBackedCandidates(Hash, oneshot::Sender>), + /// Note that the Candidate Backing subsystem should second the given candidate in the context of the + /// given relay-parent (ref. by hash). This candidate must be validated. + Second(Hash, CandidateReceipt, PoV), + /// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated + /// to a broader check by Misbehavior Arbitration. Agreements are simply tallied until a quorum is reached. + Statement(Hash, SignedFullStatement), +} + +impl CandidateBackingMessage { + /// If the current variant contains the relay parent hash, return it. + pub fn relay_parent(&self) -> Option { + match self { + Self::GetBackedCandidates(hash, _) => Some(*hash), + Self::Second(hash, _, _) => Some(*hash), + Self::Statement(hash, _) => Some(*hash), + } + } +} + +/// Blanket error for validation failing for internal reasons. +#[derive(Debug)] +pub struct ValidationFailed(pub String); + +/// Messages received by the Validation subsystem. +/// +/// ## Validation Requests +/// +/// Validation requests made to the subsystem should return an error only on internal error. +/// Otherwise, they should return either `Ok(ValidationResult::Valid(_))` +/// or `Ok(ValidationResult::Invalid)`. +#[derive(Debug)] +pub enum CandidateValidationMessage { + /// Validate a candidate with provided parameters using relay-chain state. + /// + /// This will implicitly attempt to gather the `PersistedValidationData` and `ValidationCode` + /// from the runtime API of the chain, based on the `relay_parent` + /// of the `CandidateDescriptor`. + /// + /// If there is no state available which can provide this data or the core for + /// the para is not free at the relay-parent, an error is returned. + ValidateFromChainState( + CandidateDescriptor, + Arc, + oneshot::Sender>, + ), + /// Validate a candidate with provided, exhaustive parameters for validation. + /// + /// Explicitly provide the `PersistedValidationData` and `ValidationCode` so this can do full + /// validation without needing to access the state of the relay-chain. Optionally provide the + /// `TransientValidationData` for further checks on the outputs. + ValidateFromExhaustive( + PersistedValidationData, + Option, + ValidationCode, + CandidateDescriptor, + Arc, + oneshot::Sender>, + ), +} + +impl CandidateValidationMessage { + /// If the current variant contains the relay parent hash, return it. + pub fn relay_parent(&self) -> Option { + match self { + Self::ValidateFromChainState(_, _, _) => None, + Self::ValidateFromExhaustive(_, _, _, _, _, _) => None, + } + } +} + + +/// Messages received by the Collator Protocol subsystem. +#[derive(Debug)] +pub enum CollatorProtocolMessage { + /// Signal to the collator protocol that it should connect to validators with the expectation + /// of collating on the given para. This is only expected to be called once, early on, if at all, + /// and only by the Collation Generation subsystem. As such, it will overwrite the value of + /// the previous signal. + /// + /// This should be sent before any `DistributeCollation` message. + CollateOn(ParaId), + /// Provide a collation to distribute to validators. + DistributeCollation(CandidateReceipt, PoV), + /// Fetch a collation under the given relay-parent for the given ParaId. 
+ FetchCollation(Hash, ParaId, oneshot::Sender<(CandidateReceipt, PoV)>), + /// Report a collator as having provided an invalid collation. This should lead to disconnect + /// and blacklist of the collator. + ReportCollator(CollatorId), + /// Note a collator as having provided a good collation. + NoteGoodCollation(CollatorId), + /// Get a network bridge update. + NetworkBridgeUpdateV1(NetworkBridgeEvent), +} + +impl CollatorProtocolMessage { + /// If the current variant contains the relay parent hash, return it. + pub fn relay_parent(&self) -> Option { + match self { + Self::CollateOn(_) => None, + Self::DistributeCollation(receipt, _) => Some(receipt.descriptor().relay_parent), + Self::FetchCollation(relay_parent, _, _) => Some(*relay_parent), + Self::ReportCollator(_) => None, + Self::NoteGoodCollation(_) => None, + Self::NetworkBridgeUpdateV1(_) => None, + } + } +} + +/// Messages received by the network bridge subsystem. +#[derive(Debug)] +pub enum NetworkBridgeMessage { + /// Report a peer for their actions. + ReportPeer(PeerId, ReputationChange), + + /// Send a message to one or more peers on the validation peer-set. + SendValidationMessage(Vec, protocol_v1::ValidationProtocol), + + /// Send a message to one or more peers on the collation peer-set. + SendCollationMessage(Vec, protocol_v1::CollationProtocol), + + /// Connect to peers who represent the given `ValidatorId`s at the given relay-parent. + /// + /// Also accepts a response channel by which the issuer can learn the `PeerId`s of those + /// validators. + ConnectToValidators(PeerSet, Vec, oneshot::Sender>), +} + +impl NetworkBridgeMessage { + /// If the current variant contains the relay parent hash, return it. + pub fn relay_parent(&self) -> Option { + match self { + Self::ReportPeer(_, _) => None, + Self::SendValidationMessage(_, _) => None, + Self::SendCollationMessage(_, _) => None, + Self::ConnectToValidators(_, _, _) => None, + } + } +} + +/// Availability Distribution Message. +#[derive(Debug)] +pub enum AvailabilityDistributionMessage { + /// Event from the network bridge. + NetworkBridgeUpdateV1(NetworkBridgeEvent), +} + +impl AvailabilityDistributionMessage { + /// If the current variant contains the relay parent hash, return it. + pub fn relay_parent(&self) -> Option { + match self { + Self::NetworkBridgeUpdateV1(_) => None, + } + } +} + +/// Bitfield distribution message. +#[derive(Debug)] +pub enum BitfieldDistributionMessage { + /// Distribute a bitfield via gossip to other validators. + DistributeBitfield(Hash, SignedAvailabilityBitfield), + + /// Event from the network bridge. + NetworkBridgeUpdateV1(NetworkBridgeEvent), +} + +impl BitfieldDistributionMessage { + /// If the current variant contains the relay parent hash, return it. + pub fn relay_parent(&self) -> Option { + match self { + Self::DistributeBitfield(hash, _) => Some(*hash), + Self::NetworkBridgeUpdateV1(_) => None, + } + } +} + +/// Bitfield signing message. +/// +/// Currently non-instantiable. +#[derive(Debug)] +pub enum BitfieldSigningMessage {} + +impl BitfieldSigningMessage { + /// If the current variant contains the relay parent hash, return it. + pub fn relay_parent(&self) -> Option { + None + } +} + +/// Availability store subsystem message. +#[derive(Debug)] +pub enum AvailabilityStoreMessage { + /// Query a `AvailableData` from the AV store. + QueryAvailableData(Hash, oneshot::Sender>), + + /// Query whether a `AvailableData` exists within the AV Store. 
+
+/// Availability store subsystem message.
+#[derive(Debug)]
+pub enum AvailabilityStoreMessage {
+	/// Query an `AvailableData` from the AV store.
+	QueryAvailableData(Hash, oneshot::Sender<Option<AvailableData>>),
+
+	/// Query whether an `AvailableData` exists within the AV Store.
+	///
+	/// This is useful in cases when existence
+	/// matters, but we don't want to necessarily pass around multiple
+	/// megabytes of data to get a single bit of information.
+	QueryDataAvailability(Hash, oneshot::Sender<bool>),
+
+	/// Query an `ErasureChunk` from the AV store by the candidate hash and validator index.
+	QueryChunk(Hash, ValidatorIndex, oneshot::Sender<Option<ErasureChunk>>),
+
+	/// Query whether an `ErasureChunk` exists within the AV Store.
+	///
+	/// This is useful in cases like bitfield signing, when existence
+	/// matters, but we don't want to necessarily pass around large
+	/// quantities of data to get a single bit of information.
+	QueryChunkAvailability(Hash, ValidatorIndex, oneshot::Sender<bool>),
+
+	/// Store an `ErasureChunk` in the AV store.
+	///
+	/// Return `Ok(())` if the store operation succeeded, `Err(())` if it failed.
+	StoreChunk(Hash, ValidatorIndex, ErasureChunk, oneshot::Sender<Result<(), ()>>),
+
+	/// Store an `AvailableData` in the AV store.
+	/// If `ValidatorIndex` is present store corresponding chunk also.
+	///
+	/// Return `Ok(())` if the store operation succeeded, `Err(())` if it failed.
+	StoreAvailableData(Hash, Option<ValidatorIndex>, u32, AvailableData, oneshot::Sender<Result<(), ()>>),
+}
+
+impl AvailabilityStoreMessage {
+	/// If the current variant contains the relay parent hash, return it.
+	pub fn relay_parent(&self) -> Option<Hash> {
+		match self {
+			Self::QueryAvailableData(hash, _) => Some(*hash),
+			Self::QueryDataAvailability(hash, _) => Some(*hash),
+			Self::QueryChunk(hash, _, _) => Some(*hash),
+			Self::QueryChunkAvailability(hash, _, _) => Some(*hash),
+			Self::StoreChunk(hash, _, _, _) => Some(*hash),
+			Self::StoreAvailableData(hash, _, _, _, _) => Some(*hash),
+		}
+	}
+}
+
+/// A response channel for the result of a chain API request.
+pub type ChainApiResponseChannel<T> = oneshot::Sender<Result<T, ChainApiError>>;
+
+/// Chain API request subsystem message.
+#[derive(Debug)]
+pub enum ChainApiMessage {
+	/// Request the block number by hash.
+	/// Returns `None` if a block with the given hash is not present in the db.
+	BlockNumber(Hash, ChainApiResponseChannel<Option<BlockNumber>>),
+	/// Request the finalized block hash by number.
+	/// Returns `None` if a block with the given number is not present in the db.
+	/// Note: the caller must ensure the block is finalized.
+	FinalizedBlockHash(BlockNumber, ChainApiResponseChannel<Option<Hash>>),
+	/// Request the last finalized block number.
+	/// This request always succeeds.
+	FinalizedBlockNumber(ChainApiResponseChannel<BlockNumber>),
+	/// Request the `k` ancestors block hashes of a block with the given hash.
+	/// The response channel may return a `Vec` of size up to `k`
+	/// filled with ancestors hashes with the following order:
+	/// `parent`, `grandparent`, ...
+	Ancestors {
+		/// The hash of the block in question.
+		hash: Hash,
+		/// The number of ancestors to request.
+		k: usize,
+		/// The response channel.
+		response_channel: ChainApiResponseChannel<Vec<Hash>>,
+	},
+}
+
+impl ChainApiMessage {
+	/// If the current variant contains the relay parent hash, return it.
+	pub fn relay_parent(&self) -> Option<Hash> {
+		None
+	}
+}
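
For example, a subsystem wanting the recent ancestry of a leaf might send (sketch; `chain_api_tx` and `leaf` are assumed names):

	let (tx, rx) = futures::channel::oneshot::channel();
	chain_api_tx.send(ChainApiMessage::Ancestors {
		hash: leaf,
		k: 4,
		response_channel: tx,
	}).await.expect("Chain API subsystem is running");
	// Ordered `parent`, `grandparent`, ...; may be shorter than `k` near genesis.
	let ancestors: Vec<Hash> = rx.await
		.expect("sender not dropped")
		.expect("DB lookup succeeds");
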
+
+/// A sender for the result of a runtime API request.
+pub type RuntimeApiSender<T> = oneshot::Sender<Result<T, RuntimeApiError>>;
+
+/// A request to the Runtime API subsystem.
+#[derive(Debug)]
+pub enum RuntimeApiRequest {
+	/// Get the current validator set.
+	Validators(RuntimeApiSender<Vec<ValidatorId>>),
+	/// Get the validator groups and group rotation info.
+	ValidatorGroups(RuntimeApiSender<(Vec<Vec<ValidatorIndex>>, GroupRotationInfo)>),
+	/// Get information on all availability cores.
+	AvailabilityCores(RuntimeApiSender<Vec<CoreState>>),
+	/// Get the persisted validation data for a particular para, taking the given
+	/// `OccupiedCoreAssumption`, which will inform on how the validation data should be computed
+	/// if the para currently occupies a core.
+	PersistedValidationData(
+		ParaId,
+		OccupiedCoreAssumption,
+		RuntimeApiSender<Option<PersistedValidationData>>,
+	),
+	/// Get the full validation data for a particular para, taking the given
+	/// `OccupiedCoreAssumption`, which will inform on how the validation data should be computed
+	/// if the para currently occupies a core.
+	FullValidationData(
+		ParaId,
+		OccupiedCoreAssumption,
+		RuntimeApiSender<Option<ValidationData>>,
+	),
+	/// Get the session index that a child of the block will have.
+	SessionIndexForChild(RuntimeApiSender<SessionIndex>),
+	/// Get the validation code for a para, taking the given `OccupiedCoreAssumption`, which
+	/// will inform on how the validation data should be computed if the para currently
+	/// occupies a core.
+	ValidationCode(ParaId, OccupiedCoreAssumption, RuntimeApiSender<Option<ValidationCode>>),
+	/// Get the candidate pending availability for a particular parachain by parachain / core index.
+	CandidatePendingAvailability(ParaId, RuntimeApiSender<Option<CommittedCandidateReceipt>>),
+	/// Get all events concerning candidates (backing, inclusion, time-out) in the parent of
+	/// the block in whose state this request is executed.
+	CandidateEvents(RuntimeApiSender<Vec<CandidateEvent>>),
+}
+
+/// A message to the Runtime API subsystem.
+#[derive(Debug)]
+pub enum RuntimeApiMessage {
+	/// Make a request of the runtime API against the post-state of the given relay-parent.
+	Request(Hash, RuntimeApiRequest),
+}
+
+impl RuntimeApiMessage {
+	/// If the current variant contains the relay parent hash, return it.
+	pub fn relay_parent(&self) -> Option<Hash> {
+		match self {
+			Self::Request(hash, _) => Some(*hash),
+		}
+	}
+}
+
+/// Statement distribution message.
+#[derive(Debug)]
+pub enum StatementDistributionMessage {
+	/// We have originated a signed statement in the context of
+	/// given relay-parent hash and it should be distributed to other validators.
+	Share(Hash, SignedFullStatement),
+	/// Event from the network bridge.
+	NetworkBridgeUpdateV1(NetworkBridgeEvent<protocol_v1::StatementDistributionMessage>),
+}
+
+impl StatementDistributionMessage {
+	/// If the current variant contains the relay parent hash, return it.
+	pub fn relay_parent(&self) -> Option<Hash> {
+		match self {
+			Self::Share(hash, _) => Some(*hash),
+			Self::NetworkBridgeUpdateV1(_) => None,
+		}
+	}
+}
+
+/// This data becomes intrinsics or extrinsics which should be included in a future relay chain block.
+// It needs to be cloneable because multiple potential block authors can request copies.
+#[derive(Debug, Clone)]
+pub enum ProvisionableData {
+	/// This bitfield indicates the availability of various candidate blocks.
+	Bitfield(Hash, SignedAvailabilityBitfield),
+	/// The Candidate Backing subsystem believes that this candidate is valid, pending availability.
+	BackedCandidate(BackedCandidate),
+	/// Misbehavior reports are self-contained proofs of validator misbehavior.
+	MisbehaviorReport(Hash, MisbehaviorReport),
+	/// Disputes trigger a broad dispute resolution process.
+	Dispute(Hash, ValidatorSignature),
+}
+
+/// This data needs to make its way from the provisioner into the InherentData.
+///
+/// There, it is used to construct the InclusionInherent.
+pub type ProvisionerInherentData = (Vec<SignedAvailabilityBitfield>, Vec<BackedCandidate>);
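
A typical runtime API round-trip pairs a relay-parent with one of the request variants above; for example, fetching persisted validation data under an explicit occupancy assumption (sketch; `runtime_api_tx`, `relay_parent`, and `para_id` are assumed names):

	let (tx, rx) = futures::channel::oneshot::channel();
	runtime_api_tx.send(RuntimeApiMessage::Request(
		relay_parent,
		RuntimeApiRequest::PersistedValidationData(para_id, OccupiedCoreAssumption::TimedOut, tx),
	)).await.expect("runtime API subsystem is running");
	// `None` if the para is not scheduled at this relay-parent.
	let data: Option<PersistedValidationData> = rx.await
		.expect("sender not dropped")
		.expect("runtime API request succeeds");
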
+
+/// Message to the Provisioner.
+///
+/// In all cases, the Hash is that of the relay parent.
+#[derive(Debug)]
+pub enum ProvisionerMessage {
+	/// This message allows potential block authors to be kept updated with all new authorship data
+	/// as it becomes available.
+	RequestBlockAuthorshipData(Hash, mpsc::Sender<ProvisionableData>),
+	/// This message allows external subsystems to request the set of bitfields and backed candidates
+	/// associated with a particular potential block hash.
+	///
+	/// This is expected to be used by a proposer, to inject that information into the InherentData
+	/// where it can be assembled into the InclusionInherent.
+	RequestInherentData(Hash, oneshot::Sender<ProvisionerInherentData>),
+	/// This data should become part of a relay chain block.
+	ProvisionableData(ProvisionableData),
+}
+
+impl ProvisionerMessage {
+	/// If the current variant contains the relay parent hash, return it.
+	pub fn relay_parent(&self) -> Option<Hash> {
+		match self {
+			Self::RequestBlockAuthorshipData(hash, _) => Some(*hash),
+			Self::RequestInherentData(hash, _) => Some(*hash),
+			Self::ProvisionableData(_) => None,
+		}
+	}
+}
+
+/// Message to the PoV Distribution Subsystem.
+#[derive(Debug)]
+pub enum PoVDistributionMessage {
+	/// Fetch a PoV from the network.
+	///
+	/// This `CandidateDescriptor` should correspond to a candidate seconded under the provided
+	/// relay-parent hash.
+	FetchPoV(Hash, CandidateDescriptor, oneshot::Sender<Arc<PoV>>),
+	/// Distribute a PoV for the given relay-parent and CandidateDescriptor.
+	/// The PoV should correctly hash to the PoV hash mentioned in the CandidateDescriptor.
+	DistributePoV(Hash, CandidateDescriptor, Arc<PoV>),
+	/// An update from the network bridge.
+	NetworkBridgeUpdateV1(NetworkBridgeEvent<protocol_v1::PoVDistributionMessage>),
+}
+
+impl PoVDistributionMessage {
+	/// If the current variant contains the relay parent hash, return it.
+	pub fn relay_parent(&self) -> Option<Hash> {
+		match self {
+			Self::FetchPoV(hash, _, _) => Some(*hash),
+			Self::DistributePoV(hash, _, _) => Some(*hash),
+			Self::NetworkBridgeUpdateV1(_) => None,
+		}
+	}
+}
+
+/// Message to the Collation Generation Subsystem.
+#[derive(Debug)]
+pub enum CollationGenerationMessage {
+	/// Initialize the collation generation subsystem.
+	Initialize(CollationGenerationConfig),
+}
+
+impl CollationGenerationMessage {
+	/// If the current variant contains the relay parent hash, return it.
+	pub fn relay_parent(&self) -> Option<Hash> {
+		None
+	}
+}
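
The proposer-facing flow is the `RequestInherentData` round-trip; roughly (sketch; `provisioner_tx` and `parent_hash` are assumed names):

	let (tx, rx) = futures::channel::oneshot::channel();
	provisioner_tx.send(ProvisionerMessage::RequestInherentData(parent_hash, tx))
		.await.expect("provisioner is running");
	// Signed bitfields plus backed candidates, ready to be placed into the
	// InherentData from which the InclusionInherent is constructed.
	let (bitfields, backed_candidates): ProvisionerInherentData =
		rx.await.expect("provisioner answers before shutdown");
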
+
+/// A message type tying together all message types that are used across Subsystems.
+#[derive(Debug)]
+pub enum AllMessages {
+	/// Message for the validation subsystem.
+	CandidateValidation(CandidateValidationMessage),
+	/// Message for the candidate backing subsystem.
+	CandidateBacking(CandidateBackingMessage),
+	/// Message for the candidate selection subsystem.
+	CandidateSelection(CandidateSelectionMessage),
+	/// Message for the Chain API subsystem.
+	ChainApi(ChainApiMessage),
+	/// Message for the Collator Protocol subsystem.
+	CollatorProtocol(CollatorProtocolMessage),
+	/// Message for the statement distribution subsystem.
+	StatementDistribution(StatementDistributionMessage),
+	/// Message for the availability distribution subsystem.
+	AvailabilityDistribution(AvailabilityDistributionMessage),
+	/// Message for the bitfield distribution subsystem.
+	BitfieldDistribution(BitfieldDistributionMessage),
+	/// Message for the bitfield signing subsystem.
+	BitfieldSigning(BitfieldSigningMessage),
+	/// Message for the Provisioner subsystem.
+	Provisioner(ProvisionerMessage),
+	/// Message for the PoV Distribution subsystem.
+	PoVDistribution(PoVDistributionMessage),
+	/// Message for the Runtime API subsystem.
+	RuntimeApi(RuntimeApiMessage),
+	/// Message for the availability store subsystem.
+	AvailabilityStore(AvailabilityStoreMessage),
+	/// Message for the network bridge subsystem.
+	NetworkBridge(NetworkBridgeMessage),
+	/// Message for the Collation Generation subsystem.
+	CollationGeneration(CollationGenerationMessage),
+}
diff --git a/node/test-service/Cargo.toml b/node/test-service/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..d1bf0aed1d1993e5964dd44542fa33e8939c80f8
--- /dev/null
+++ b/node/test-service/Cargo.toml
@@ -0,0 +1,54 @@
+[package]
+name = "polkadot-test-service"
+version = "0.8.2"
+authors = ["Parity Technologies "]
+edition = "2018"
+
+[dependencies]
+futures = "0.3.4"
+futures01 = { package = "futures", version = "0.1.29" }
+hex = "0.4"
+log = "0.4.8"
+rand = "0.7.3"
+tempfile = "3.1.0"
+
+# Polkadot dependencies
+polkadot-primitives = { path = "../../primitives" }
+polkadot-rpc = { path = "../../rpc" }
+polkadot-runtime-common = { path = "../../runtime/common" }
+polkadot-service = { path = "../../service" }
+polkadot-test-runtime = { path = "../../runtime/test-runtime" }
+
+# Substrate dependencies
+authority-discovery = { package = "sc-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master" }
+babe = { package = "sc-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" }
+babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" }
+consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" }
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
+grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" }
+grandpa_primitives = { package = "sp-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" }
+inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-informant = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" }
+service = { package = "sc-service", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" }
+substrate-test-client = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+[dev-dependencies]
+pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+serde_json = "1.0"
+substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "master" }
+tokio = { version = "0.2", features = ["macros"] }
diff --git a/node/test-service/src/chain_spec.rs b/node/test-service/src/chain_spec.rs
new file mode 100644
index 0000000000000000000000000000000000000000..93a614b7d3a788be2d9131a3bc06e0a9e3ae9bb5
--- /dev/null
+++ b/node/test-service/src/chain_spec.rs
@@ -0,0 +1,177 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use babe_primitives::AuthorityId as BabeId;
+use grandpa::AuthorityId as GrandpaId;
+use pallet_staking::Forcing;
+use polkadot_primitives::v0::{ValidatorId, AccountId};
+use polkadot_service::chain_spec::{get_account_id_from_seed, get_from_seed, Extensions};
+use polkadot_test_runtime::constants::currency::DOTS;
+use sc_chain_spec::{ChainSpec, ChainType};
+use sp_core::{sr25519, ChangesTrieConfiguration};
+use sp_runtime::Perbill;
+
+const DEFAULT_PROTOCOL_ID: &str = "dot";
+
+/// The `ChainSpec` parametrised for the polkadot runtime.
+pub type PolkadotChainSpec =
+	service::GenericChainSpec<polkadot_test_runtime::GenesisConfig, Extensions>;
+
+/// Polkadot local testnet config (multivalidator Alice + Bob)
+pub fn polkadot_local_testnet_config() -> PolkadotChainSpec {
+	PolkadotChainSpec::from_genesis(
+		"Local Testnet",
+		"local_testnet",
+		ChainType::Local,
+		|| polkadot_local_testnet_genesis(None),
+		vec![],
+		None,
+		Some(DEFAULT_PROTOCOL_ID),
+		None,
+		Default::default(),
+	)
+}
+
+/// Polkadot local testnet genesis config (multivalidator Alice + Bob)
+pub fn polkadot_local_testnet_genesis(
+	changes_trie_config: Option<ChangesTrieConfiguration>,
+) -> polkadot_test_runtime::GenesisConfig {
+	polkadot_testnet_genesis(
+		vec![
+			get_authority_keys_from_seed("Alice"),
+			get_authority_keys_from_seed("Bob"),
+			get_authority_keys_from_seed("Charlie"),
+		],
+		get_account_id_from_seed::<sr25519::Public>("Alice"),
+		None,
+		changes_trie_config,
+	)
+}
+
+/// Helper function to generate stash, controller and session key from seed
+fn get_authority_keys_from_seed(
+	seed: &str,
+) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId) {
+	(
+		get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)),
+		get_account_id_from_seed::<sr25519::Public>(seed),
+		get_from_seed::<BabeId>(seed),
+		get_from_seed::<GrandpaId>(seed),
+		get_from_seed::<ValidatorId>(seed),
+	)
+}
+
+fn testnet_accounts() -> Vec<AccountId> {
+	vec![
+		get_account_id_from_seed::<sr25519::Public>("Alice"),
+		get_account_id_from_seed::<sr25519::Public>("Bob"),
+		get_account_id_from_seed::<sr25519::Public>("Charlie"),
+		get_account_id_from_seed::<sr25519::Public>("Dave"),
+		get_account_id_from_seed::<sr25519::Public>("Eve"),
+		get_account_id_from_seed::<sr25519::Public>("Ferdie"),
+		get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
+		get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
+		get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
+		get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
+		get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
+		get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
+	]
+}
+
+/// Helper function to create polkadot GenesisConfig for testing
+fn polkadot_testnet_genesis(
+	initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ValidatorId)>,
+	root_key: AccountId,
+	endowed_accounts: Option<Vec<AccountId>>,
+	changes_trie_config: Option<ChangesTrieConfiguration>,
+) -> polkadot_test_runtime::GenesisConfig {
+	use polkadot_test_runtime as polkadot;
+
+	let endowed_accounts: Vec<AccountId> = endowed_accounts.unwrap_or_else(testnet_accounts);
+
+	const ENDOWMENT: u128 = 1_000_000 * DOTS;
+	const STASH: u128 = 100 * DOTS;
+
+	polkadot::GenesisConfig {
+		frame_system: Some(polkadot::SystemConfig {
+			code: polkadot::WASM_BINARY.expect("Wasm binary must be built for testing").to_vec(),
+			changes_trie_config,
+		}),
+		pallet_indices: Some(polkadot::IndicesConfig { indices: vec![] }),
+		pallet_balances: Some(polkadot::BalancesConfig {
+			balances: endowed_accounts
+				.iter()
+				.map(|k| (k.clone(), ENDOWMENT))
+				.collect(),
+		}),
+		pallet_session: Some(polkadot::SessionConfig {
+			keys: initial_authorities
+				.iter()
+				.map(|x| {
+					(
+						x.0.clone(),
+						x.0.clone(),
+						polkadot_test_runtime::SessionKeys {
+							babe: x.2.clone(),
+							grandpa: x.3.clone(),
+							parachain_validator: x.4.clone(),
+						},
+					)
+				})
+				.collect::<Vec<_>>(),
+		}),
+		pallet_staking: Some(polkadot::StakingConfig {
+			minimum_validator_count: 1,
+			validator_count: 2,
+			stakers: initial_authorities
+				.iter()
+				.map(|x| {
+					(
+						x.0.clone(),
+						x.1.clone(),
+						STASH,
+						polkadot::StakerStatus::Validator,
+					)
+				})
+				.collect(),
+			invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(),
+			force_era: Forcing::NotForcing,
+			slash_reward_fraction: Perbill::from_percent(10),
+			..Default::default()
+		}),
+		pallet_babe: Some(Default::default()),
+		pallet_grandpa: Some(Default::default()),
+		pallet_authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { keys: vec![] }),
+		claims: Some(polkadot::ClaimsConfig {
+			claims: vec![],
+			vesting: vec![],
+		}),
+		pallet_vesting: Some(polkadot::VestingConfig { vesting: vec![] }),
+		pallet_sudo: Some(polkadot::SudoConfig { key: root_key }),
+	}
+}
+
+/// Can be called for a `Configuration` to check if it is a configuration for the `Test` network.
+pub trait IdentifyVariant {
+	/// Returns if this is a configuration for the `Test` network.
+	fn is_test(&self) -> bool;
+}
+
+impl IdentifyVariant for Box<dyn ChainSpec> {
+	fn is_test(&self) -> bool {
+		self.id().starts_with("test")
+	}
+}
diff --git a/node/test-service/src/lib.rs b/node/test-service/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..1f6e45879c785cf860de0b011d27e1b9b22e8a92
--- /dev/null
+++ b/node/test-service/src/lib.rs
@@ -0,0 +1,301 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Polkadot test service only.
+
+#![warn(missing_docs)]
+
+mod chain_spec;
+
+pub use chain_spec::*;
+use futures::future::Future;
+use polkadot_primitives::v0::{
+	Block, Hash, CollatorId, Id as ParaId,
+};
+use polkadot_runtime_common::BlockHashCount;
+use polkadot_service::{
+	new_full, FullNodeHandles, AbstractClient, ClientHandle, ExecuteWithClient,
+};
+use polkadot_test_runtime::{Runtime, SignedExtra, SignedPayload, VERSION};
+use sc_chain_spec::ChainSpec;
+use sc_client_api::{execution_extensions::ExecutionStrategies, BlockchainEvents};
+use sc_executor::native_executor_instance;
+use sc_informant::OutputFormat;
+use sc_network::{
+	config::{NetworkConfiguration, TransportConfig},
+	multiaddr, NetworkService,
+};
+use service::{
+	config::{DatabaseConfig, KeystoreConfig, MultiaddrWithPeerId, WasmExecutionMethod},
+	error::Error as ServiceError,
+	RpcHandlers, TaskExecutor, TaskManager,
+};
+use service::{BasePath, Configuration, Role, TFullBackend};
+use sp_arithmetic::traits::SaturatedConversion;
+use sp_blockchain::HeaderBackend;
+use sp_keyring::Sr25519Keyring;
+use sp_runtime::{codec::Encode, generic};
+use sp_state_machine::BasicExternalities;
+use std::sync::Arc;
+use substrate_test_client::{BlockchainEventsExt, RpcHandlersExt, RpcTransactionOutput, RpcTransactionError};
+
+native_executor_instance!(
+	pub PolkadotTestExecutor,
+	polkadot_test_runtime::api::dispatch,
+	polkadot_test_runtime::native_version,
+	frame_benchmarking::benchmarking::HostFunctions,
+);
+
+/// Create a new Polkadot test service for a full node.
+pub fn polkadot_test_new_full(
+	config: Configuration,
+	collating_for: Option<(CollatorId, ParaId)>,
+	authority_discovery_enabled: bool,
+) -> Result<
+	(
+		TaskManager,
+		Arc<impl AbstractClient<Block, TFullBackend<Block>>>,
+		FullNodeHandles,
+		Arc<NetworkService<Block, Hash>>,
+		RpcHandlers,
+	),
+	ServiceError,
+> {
+	let (task_manager, client, handles, network, rpc_handlers) =
+		new_full::<polkadot_test_runtime::RuntimeApi, PolkadotTestExecutor>(
+			config,
+			collating_for,
+			authority_discovery_enabled,
+			None,
+			true,
+		)?;
+
+	Ok((task_manager, client, handles, network, rpc_handlers))
+}
+
+/// A wrapper for the test client that implements `ClientHandle`.
+pub struct TestClient(pub Arc<polkadot_service::FullClient<polkadot_test_runtime::RuntimeApi, PolkadotTestExecutor>>);
+
+impl ClientHandle for TestClient {
+	fn execute_with<T: ExecuteWithClient>(&self, t: T) -> T::Output {
+		T::execute_with_client::<_, _, polkadot_service::FullBackend>(t, self.0.clone())
+	}
+}
+
+/// Create a Polkadot `Configuration`. By default an in-memory socket will be used, therefore you need to provide boot
+/// nodes if you want the future node to be connected to other nodes. The `storage_update_func` can be used to make
+/// adjustments to the runtime before the node starts.
+pub fn node_config(
+	storage_update_func: impl Fn(),
+	task_executor: TaskExecutor,
+	key: Sr25519Keyring,
+	boot_nodes: Vec<MultiaddrWithPeerId>,
+) -> Configuration {
+	let base_path = BasePath::new_temp_dir().expect("could not create temporary directory");
+	let root = base_path.path();
+	let role = Role::Authority {
+		sentry_nodes: Vec::new(),
+	};
+	let key_seed = key.to_seed();
+	let mut spec = polkadot_local_testnet_config();
+	let mut storage = spec
+		.as_storage_builder()
+		.build_storage()
+		.expect("could not build storage");
+
+	BasicExternalities::execute_with_storage(&mut storage, storage_update_func);
+	spec.set_storage(storage);
+
+	let mut network_config = NetworkConfiguration::new(
+		format!("Polkadot Test Node for: {}", key_seed),
+		"network/test/0.1",
+		Default::default(),
+		None,
+	);
+	let informant_output_format = OutputFormat {
+		enable_color: false,
+		prefix: format!("[{}] ", key_seed),
+	};
+
+	network_config.boot_nodes = boot_nodes;
+
+	network_config.allow_non_globals_in_dht = true;
+
+	network_config
+		.listen_addresses
+		.push(multiaddr::Protocol::Memory(rand::random()).into());
+
+	network_config.transport = TransportConfig::MemoryOnly;
+
+	Configuration {
+		impl_name: "polkadot-test-node".to_string(),
+		impl_version: "0.1".to_string(),
+		role,
+		task_executor,
+		transaction_pool: Default::default(),
+		network: network_config,
+		keystore: KeystoreConfig::Path {
+			path: root.join("key"),
+			password: None,
+		},
+		database: DatabaseConfig::RocksDb {
+			path: root.join("db"),
+			cache_size: 128,
+		},
+		state_cache_size: 16777216,
+		state_cache_child_ratio: None,
+		pruning: Default::default(),
+		chain_spec: Box::new(spec),
+		wasm_method: WasmExecutionMethod::Interpreted,
+		// NOTE: we enforce the use of the native runtime to make the errors more debuggable
+		execution_strategies: ExecutionStrategies {
+			syncing: sc_client_api::ExecutionStrategy::NativeWhenPossible,
+			importing: sc_client_api::ExecutionStrategy::NativeWhenPossible,
+			block_construction: sc_client_api::ExecutionStrategy::NativeWhenPossible,
+			offchain_worker: sc_client_api::ExecutionStrategy::NativeWhenPossible,
+			other: sc_client_api::ExecutionStrategy::NativeWhenPossible,
+		},
+		rpc_http: None,
+		rpc_ws: None,
+		rpc_ipc: None,
+		rpc_ws_max_connections: None,
+		rpc_cors: None,
+		rpc_methods: Default::default(),
+		prometheus_config: None,
+		telemetry_endpoints: None,
+		telemetry_external_transport: None,
+		default_heap_pages: None,
+		offchain_worker: Default::default(),
+		force_authoring: false,
+		disable_grandpa: false,
+		dev_key_seed: Some(key_seed),
+		tracing_targets: None,
+		tracing_receiver: Default::default(),
+		max_runtime_instances: 8,
+		announce_block: true,
+		base_path: Some(base_path),
+		informant_output_format,
+	}
+}
+
+/// Run a Polkadot test node using the Polkadot test runtime. The node will be using an in-memory socket, therefore you
+/// need to provide boot nodes if you want it to be connected to other nodes. The `storage_update_func` can be used to
+/// make adjustments to the runtime before the node starts.
+pub fn run_test_node(
+	task_executor: TaskExecutor,
+	key: Sr25519Keyring,
+	storage_update_func: impl Fn(),
+	boot_nodes: Vec<MultiaddrWithPeerId>,
+) -> PolkadotTestNode<
+	TaskManager,
+	impl AbstractClient<Block, TFullBackend<Block>>,
+> {
+	let config = node_config(storage_update_func, task_executor, key, boot_nodes);
+	let multiaddr = config.network.listen_addresses[0].clone();
+	let authority_discovery_enabled = false;
+	let (task_manager, client, handles, network, rpc_handlers) =
+		polkadot_test_new_full(config, None, authority_discovery_enabled)
+			.expect("could not create Polkadot test service");
+
+	let peer_id = network.local_peer_id().clone();
+	let addr = MultiaddrWithPeerId { multiaddr, peer_id };
+
+	PolkadotTestNode {
+		task_manager,
+		client,
+		handles,
+		addr,
+		rpc_handlers,
+	}
+}
+
+/// A Polkadot test node instance used for testing.
+pub struct PolkadotTestNode<S, C> {
+	/// TaskManager's instance.
+	pub task_manager: S,
+	/// Client's instance.
+	pub client: Arc<C>,
+	/// Node's handles.
+	pub handles: FullNodeHandles,
+	/// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot node" to other nodes.
+	pub addr: MultiaddrWithPeerId,
+	/// RPCHandlers to make RPC queries.
+	pub rpc_handlers: RpcHandlers,
+}
+
+impl<S, C> PolkadotTestNode<S, C>
+where
+	C: HeaderBackend<Block>,
+{
+	/// Send a transaction through RPCHandlers to call a function.
+	pub async fn call_function(
+		&self,
+		function: polkadot_test_runtime::Call,
+		caller: Sr25519Keyring,
+	) -> Result<RpcTransactionOutput, RpcTransactionError> {
+		let current_block_hash = self.client.info().best_hash;
+		let current_block = self.client.info().best_number.saturated_into();
+		let genesis_block = self.client.hash(0).unwrap().unwrap();
+		let nonce = 0;
+		let period = BlockHashCount::get()
+			.checked_next_power_of_two()
+			.map(|c| c / 2)
+			.unwrap_or(2) as u64;
+		let tip = 0;
+		let extra: SignedExtra = (
+			frame_system::CheckSpecVersion::<Runtime>::new(),
+			frame_system::CheckTxVersion::<Runtime>::new(),
+			frame_system::CheckGenesis::<Runtime>::new(),
+			frame_system::CheckEra::<Runtime>::from(generic::Era::mortal(period, current_block)),
+			frame_system::CheckNonce::<Runtime>::from(nonce),
+			frame_system::CheckWeight::<Runtime>::new(),
+			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
+		);
+		let raw_payload = SignedPayload::from_raw(
+			function.clone(),
+			extra.clone(),
+			(
+				VERSION.spec_version,
+				VERSION.transaction_version,
+				genesis_block,
+				current_block_hash,
+				(),
+				(),
+				(),
+			),
+		);
+		let signature = raw_payload.using_encoded(|e| caller.sign(e));
+		let extrinsic = polkadot_test_runtime::UncheckedExtrinsic::new_signed(
+			function.clone(),
+			polkadot_test_runtime::Address::Id(caller.public().into()),
+			polkadot_primitives::v0::Signature::Sr25519(signature.clone()),
+			extra.clone(),
+		);
+
+		self.rpc_handlers.send_transaction(extrinsic.into()).await
+	}
+}
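
The `period` computed above is the transaction's mortality window: a power of two no larger than `BlockHashCount`, so the signed era can always be checked against a block hash the chain still stores. A worked example with a hypothetical `BlockHashCount` of 250 (illustrative only):

	let block_hash_count: u64 = 250; // assumed value for illustration
	let period = block_hash_count
		.checked_next_power_of_two() // Some(256)
		.map(|c| c / 2)              // Some(128)
		.unwrap_or(2);
	assert_eq!(period, 128); // a power of two, and 128 <= 250
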
+
+impl<S, C> PolkadotTestNode<S, C>
+where
+	C: BlockchainEvents<Block>,
+{
+	/// Wait for `count` blocks to be imported in the node and then exit. This function will not return if no blocks
+	/// are ever created, so you should bound the maximum run time of the test.
+	pub fn wait_for_blocks(&self, count: usize) -> impl Future<Output = ()> {
+		self.client.wait_for_blocks(count)
+	}
+}
diff --git a/node/test-service/tests/build-blocks.rs b/node/test-service/tests/build-blocks.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b809f188aafc9fff50e5cbfbdb521efe54c54cc8
--- /dev/null
+++ b/node/test-service/tests/build-blocks.rs
@@ -0,0 +1,53 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use futures::{future, pin_mut, select};
+use polkadot_test_service::*;
+use service::TaskExecutor;
+use sp_keyring::Sr25519Keyring;
+
+#[substrate_test_utils::test]
+async fn ensure_test_service_build_blocks(task_executor: TaskExecutor) {
+	let mut alice = run_test_node(
+		task_executor.clone(),
+		Sr25519Keyring::Alice,
+		|| {},
+		Vec::new(),
+	);
+	let mut bob = run_test_node(
+		task_executor.clone(),
+		Sr25519Keyring::Bob,
+		|| {},
+		vec![alice.addr.clone()],
+	);
+
+	{
+		let t1 = future::join(alice.wait_for_blocks(3), bob.wait_for_blocks(3)).fuse();
+		let t2 = alice.task_manager.future().fuse();
+		let t3 = bob.task_manager.future().fuse();
+
+		pin_mut!(t1, t2, t3);
+
+		select! {
+			_ = t1 => {},
+			_ = t2 => panic!("service Alice failed"),
+			_ = t3 => panic!("service Bob failed"),
+		}
+	}
+
+	alice.task_manager.clean_shutdown().await;
+	bob.task_manager.clean_shutdown().await;
+}
diff --git a/node/test-service/tests/call-function.rs b/node/test-service/tests/call-function.rs
new file mode 100644
index 0000000000000000000000000000000000000000..af62bcf5dbff584c29d6ab95202da0b237d3e46d
--- /dev/null
+++ b/node/test-service/tests/call-function.rs
@@ -0,0 +1,44 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use polkadot_test_service::*;
+use service::TaskExecutor;
+use sp_keyring::Sr25519Keyring::{Alice, Bob};
+
+#[substrate_test_utils::test]
+async fn call_function_actually_work(task_executor: TaskExecutor) {
+	let alice = run_test_node(task_executor, Alice, || {}, Vec::new());
+
+	let function = polkadot_test_runtime::Call::Balances(pallet_balances::Call::transfer(
+		Default::default(),
+		1,
+	));
+	let output = alice.call_function(function, Bob).await.unwrap();
+
+	let res = output.result.expect("return value expected");
+	let json = serde_json::from_str::<serde_json::Value>(res.as_str()).expect("valid JSON");
+	let object = json.as_object().expect("JSON is an object");
+	assert!(object.contains_key("jsonrpc"), "key jsonrpc exists");
+	let result = object.get("result");
+	let result = result.expect("key result exists");
+	assert_eq!(
+		result.as_str().map(|x| x.starts_with("0x")),
+		Some(true),
+		"result starts with 0x"
+	);
+
+	alice.task_manager.clean_shutdown().await;
+}
diff --git a/parachain/Cargo.toml b/parachain/Cargo.toml
index 5846105a9375a17b1ec0ec95c7269e12b90e1487..bb8a340ecaea9b473558086c05f288702177318a 100644
--- a/parachain/Cargo.toml
+++ b/parachain/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "polkadot-parachain"
-version = "0.8.12"
+version = "0.8.22"
 authors = ["Parity Technologies "]
 description = "Types and utilities for creating and working with parachains"
 edition = "2018"
@@ -9,27 +9,28 @@ edition = "2018"
 # note: special care is taken to avoid inclusion of `sp-io` externals when compiling
 # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing
 # various unnecessary Substrate-specific endpoints.
-codec = { package = "parity-scale-codec", version = "1.1.0", default-features = false, features = [ "derive" ] }
+codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = [ "derive" ] }
 sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-wasm-interface = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+polkadot-core-primitives = { path = "../core-primitives", default-features = false }
 
 # all optional crates.
 derive_more = { version = "0.99.2", optional = true }
 serde = { version = "1.0.102", default-features = false, features = [ "derive" ], optional = true }
-sp-runtime-interface = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true, default-features = false }
 sp-externalities = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
 sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
 sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
 parking_lot = { version = "0.10.0", optional = true }
 log = { version = "0.4.8", optional = true }
+futures = { version = "0.3.4", optional = true }
 
 [target.'cfg(not(any(target_os = "android", target_os = "unknown")))'.dependencies]
 shared_memory = { version = "0.10.0", optional = true }
 
 [features]
 default = ["std"]
-wasm-api = ["sp-runtime-interface"]
+wasm-api = []
 std = [
 	"codec/std",
 	"derive_more",
@@ -39,8 +40,9 @@ std = [
 	"sp-core/std",
 	"parking_lot",
 	"log",
-	"sp-runtime-interface/std",
 	"sp-externalities",
 	"sc-executor",
 	"sp-io",
+	"polkadot-core-primitives/std",
+	"futures",
 ]
diff --git a/parachain/src/primitives.rs b/parachain/src/primitives.rs
index 54c6907f4201f7b1ad2e2ba0fa13e50460c79215..3a1ee81bcdc31451b897d140b590d2994c91c15c 100644
--- a/parachain/src/primitives.rs
+++ b/parachain/src/primitives.rs
@@ -28,13 +28,14 @@ use serde::{Serialize, Deserialize};
 #[cfg(feature = "std")]
 use sp_core::bytes;
 
-/// The block number of the relay chain.
-/// 32-bits will allow for 136 years of blocks assuming 1 block per second.
-pub type RelayChainBlockNumber = u32;
+use polkadot_core_primitives::Hash;
+
+/// Block number type used by the relay chain.
+pub use polkadot_core_primitives::BlockNumber as RelayChainBlockNumber;
 
 /// Parachain head data included in the chain.
 #[derive(PartialEq, Eq, Clone, PartialOrd, Ord, Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Default))]
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Default, Hash))]
 pub struct HeadData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec<u8>);
 
 impl From<Vec<u8>> for HeadData {
@@ -45,7 +46,7 @@ impl From<Vec<u8>> for HeadData {
 
 /// Parachain validation code.
 #[derive(Default, PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))]
 pub struct ValidationCode(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec<u8>);
 
 impl From<Vec<u8>> for ValidationCode {
@@ -81,6 +82,36 @@ impl From<u32> for Id {
 	fn from(x: u32) -> Self { Id(x) }
 }
 
+impl From<usize> for Id {
+	fn from(x: usize) -> Self {
+		use sp_std::convert::TryInto;
+		// can't panic, so need to truncate
+		let x = x.try_into().unwrap_or(u32::MAX);
+		Id(x)
+	}
+}
+
+// When we added a second From impl for Id, type inference could no longer
+// determine which impl should apply for things like `5.into()`. It therefore
+// raised a bunch of errors in our test code, scattered throughout the
+// various modules' tests, that there is no impl of `From<i32>` (`i32` being
+// the default numeric type).
+//
+// We can't use `cfg(test)` here, because that configuration directive does not
+// propagate between crates, which would fail to fix tests in crates other than
+// this one.
+//
+// Instead, let's take advantage of the observation that what really matters for a
+// ParaId within a test context is that it is unique and constant. I believe that
+// there is no case where someone does `(-1).into()` anyway, but if they do, it
+// never matters whether the actual contained ID is `-1` or `4294967295`. Nobody
+// does arithmetic on a `ParaId`; doing so would be a bug.
+impl From<i32> for Id {
+	fn from(x: i32) -> Self {
+		Id(x as u32)
+	}
+}
+
 const USER_INDEX_START: u32 = 1000;
 
 /// The ID of the first user (non-system) parachain.
@@ -157,7 +188,7 @@ impl<T: Encode + Decode + Default> AccountIdConversion<T> for Id {
 
 /// Which origin a parachain's message to the relay chain should be dispatched from.
 #[derive(Clone, PartialEq, Eq, Encode, Decode)]
-#[cfg_attr(feature = "std", derive(Debug))]
+#[cfg_attr(feature = "std", derive(Debug, Hash))]
 #[repr(u8)]
 pub enum ParachainDispatchOrigin {
 	/// As a simple `Origin::Signed`, using `ParaId::account_id` as its value. This is good when
@@ -186,11 +217,7 @@ impl sp_std::convert::TryFrom<u8> for ParachainDispatchOrigin {
 
 /// A message from a parachain to its Relay Chain.
 #[derive(Clone, PartialEq, Eq, Encode, Decode)]
-#[cfg_attr(
-	any(feature = "std", feature = "wasm-api"),
-	derive(sp_runtime_interface::pass_by::PassByCodec,
-))]
-#[cfg_attr(feature = "std", derive(Debug))]
+#[cfg_attr(feature = "std", derive(Debug, Hash))]
 pub struct UpwardMessage {
 	/// The origin for the message to be sent from.
 	pub origin: ParachainDispatchOrigin,
@@ -203,22 +230,16 @@ pub struct UpwardMessage {
 #[derive(PartialEq, Eq, Decode)]
 #[cfg_attr(feature = "std", derive(Debug, Encode))]
 pub struct ValidationParams {
-	/// The collation body.
-	pub block_data: BlockData,
 	/// Previous head-data.
 	pub parent_head: HeadData,
-	/// The maximum code size permitted, in bytes.
-	pub max_code_size: u32,
-	/// The maximum head-data size permitted, in bytes.
-	pub max_head_data_size: u32,
+	/// The collation body.
+	pub block_data: BlockData,
 	/// The current relay-chain block number.
 	pub relay_chain_height: RelayChainBlockNumber,
-	/// Whether a code upgrade is allowed or not, and at which height the upgrade
-	/// would be applied after, if so. The parachain logic should apply any upgrade
-	/// issued in this block after the first block
-	/// with `relay_chain_height` at least this value, if `Some`. if `None`, issue
-	/// no upgrade.
-	pub code_upgrade_allowed: Option<RelayChainBlockNumber>,
+	/// The list of MQC heads for the inbound HRMP channels paired with the sender para ids. This
+	/// vector is sorted ascending by the para id and doesn't contain multiple entries with the same
+	/// sender.
+	pub hrmp_mqc_heads: Vec<(Id, Hash)>,
 }
 
 /// The result of parachain validation.
@@ -230,4 +251,10 @@ pub struct ValidationResult {
 	pub head_data: HeadData,
 	/// An update to the validation code that should be scheduled in the relay chain.
 	pub new_validation_code: Option<ValidationCode>,
+	/// Upward messages sent by the Parachain.
+	pub upward_messages: Vec<UpwardMessage>,
+	/// Number of downward messages that were processed by the Parachain.
+	///
+	/// It is expected that the Parachain processes them from first to last.
+	pub processed_downward_messages: u32,
 }
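
As the comment above explains, the `i32` impl exists purely for numeric-literal inference in tests, and the `usize` impl saturates rather than panics. Illustrative assertions (not part of the patch):

	use polkadot_parachain::primitives::Id;

	let a: Id = 5.into();    // resolves via the `i32` impl (default integer type)
	let b: Id = 5u32.into(); // resolves via the `u32` impl
	assert_eq!(a, b);
	// `usize` values that do not fit are saturated to `u32::MAX` instead of panicking.
	assert_eq!(Id::from(usize::MAX), Id::from(u32::MAX));
	// `-1` is reinterpreted as `4294967295`; ParaIds are opaque, so this is harmless.
	assert_eq!(Id::from(-1), Id::from(4_294_967_295u32));
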
diff --git a/parachain/src/wasm_api.rs b/parachain/src/wasm_api.rs
index f5a82f57819ca6634ea6fb2423bf7b9f456b75b6..9c7eac25f1e57516c31c62a117bfe48f632e0d5f 100644
--- a/parachain/src/wasm_api.rs
+++ b/parachain/src/wasm_api.rs
@@ -16,29 +16,6 @@
 
 //! Utilities for writing parachain WASM.
 
-#[cfg(any(feature = "std", all(not(feature = "std"), feature = "wasm-api")))]
-use crate::primitives::UpwardMessage;
-#[cfg(any(feature = "std", all(not(feature = "std"), feature = "wasm-api")))]
-use sp_runtime_interface::runtime_interface;
-#[cfg(feature = "std")]
-use sp_externalities::ExternalitiesExt;
-
-/// The parachain api for posting messages.
-// Either activate on `std` to get access to the `HostFunctions` or when `wasm-api` is given and on
-// `no_std`.
-#[cfg(any(feature = "std", all(not(feature = "std"), feature = "wasm-api")))]
-#[runtime_interface]
-pub trait Parachain {
-	/// Post a message to this parachain's relay chain.
-	#[allow(dead_code)]
-	fn post_upward_message(&mut self, msg: UpwardMessage) {
-		self.extension::<ParachainExt>()
-			.expect("No `ParachainExt` associated with the current context.")
-			.post_upward_message(msg)
-			.expect("Failed to post upward message")
-	}
-}
-
 /// Load the validation params from memory when implementing a Rust parachain.
 ///
 /// Offset and length must have been provided by the validation
diff --git a/parachain/src/wasm_executor/mod.rs b/parachain/src/wasm_executor/mod.rs
index 2410da844faa1dc4048df42806afbf89ce5892a4..c9ab7587bfaa9921d4f9eea42d2edff0540a805d 100644
--- a/parachain/src/wasm_executor/mod.rs
+++ b/parachain/src/wasm_executor/mod.rs
@@ -21,12 +21,11 @@
 //! a WASM VM for re-execution of a parachain candidate.
 
 use std::any::{TypeId, Any};
-use crate::primitives::{ValidationParams, ValidationResult, UpwardMessage};
+use crate::primitives::{ValidationParams, ValidationResult};
 use codec::{Decode, Encode};
-use sp_core::storage::ChildInfo;
-use sp_core::traits::CallInWasm;
-use sp_wasm_interface::HostFunctions as _;
+use sp_core::{storage::{ChildInfo, TrackedStorageKey}, traits::{CallInWasm, SpawnNamed}};
 use sp_externalities::Extensions;
+use sp_wasm_interface::HostFunctions as _;
 
 #[cfg(not(any(target_os = "android", target_os = "unknown")))]
 pub use validation_host::{run_worker, ValidationPool, EXECUTION_TIMEOUT_SEC};
@@ -36,18 +35,7 @@ mod validation_host;
 
 // maximum memory in bytes
 const MAX_RUNTIME_MEM: usize = 1024 * 1024 * 1024; // 1 GiB
 const MAX_CODE_MEM: usize = 16 * 1024 * 1024; // 16 MiB
-
-sp_externalities::decl_extension! {
-	/// The extension that is registered at the `Externalities` when validating a parachain state
-	/// transition.
-	pub(crate) struct ParachainExt(Box<dyn Externalities>);
-}
-
-impl ParachainExt {
-	pub fn new<T: Externalities + 'static>(ext: T) -> Self {
-		Self(Box::new(ext))
-	}
-}
+const MAX_VALIDATION_RESULT_HEADER_MEM: usize = MAX_CODE_MEM + 1024; // 16.001 MiB
 
 /// A stub validation-pool defined when compiling for Android or WASM.
 #[cfg(any(target_os = "android", target_os = "unknown"))]
@@ -82,9 +70,18 @@ pub enum ExecutionMode<'a> {
 	RemoteTest(&'a ValidationPool),
 }
 
-/// Error type for the wasm executor
 #[derive(Debug, derive_more::Display, derive_more::From)]
-pub enum Error {
+/// Candidate validation error.
+pub enum ValidationError {
+	/// Validation failed due to internal reasons. The candidate might still be valid.
+	Internal(InternalError),
+	/// Candidate is invalid.
+	InvalidCandidate(InvalidCandidate),
+}
+
+/// Error type that indicates invalid candidate.
+#[derive(Debug, derive_more::Display, derive_more::From)]
+pub enum InvalidCandidate {
 	/// Wasm executor error.
 	#[display(fmt = "WASM executor error: {:?}", _0)]
 	WasmExecutor(sc_executor::error::Error),
@@ -95,95 +92,91 @@ pub enum Error {
 	/// Code size is too large.
#[display(fmt = "WASM code is {} bytes, max allowed is {}", _0, MAX_CODE_MEM)] CodeTooLarge(usize), - /// Bad return data or type. + /// Error decoding returned data. #[display(fmt = "Validation function returned invalid data.")] BadReturn, #[display(fmt = "Validation function timeout.")] Timeout, + #[display(fmt = "External WASM execution error: {}", _0)] + ExternalWasmExecutor(String), +} + +/// Host error during candidate validation. This does not indicate an invalid candidate. +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum InternalError { #[display(fmt = "IO error: {}", _0)] Io(std::io::Error), #[display(fmt = "System error: {}", _0)] System(Box), - #[display(fmt = "WASM worker error: {}", _0)] - External(String), #[display(fmt = "Shared memory error: {}", _0)] #[cfg(not(any(target_os = "android", target_os = "unknown")))] SharedMem(shared_memory::SharedMemError), + #[display(fmt = "WASM worker error: {}", _0)] + WasmWorker(String), } -impl std::error::Error for Error { +impl std::error::Error for ValidationError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { - Error::WasmExecutor(ref err) => Some(err), - Error::Io(ref err) => Some(err), - Error::System(ref err) => Some(&**err), + ValidationError::Internal(InternalError::Io(ref err)) => Some(err), + ValidationError::Internal(InternalError::System(ref err)) => Some(&**err), #[cfg(not(any(target_os = "android", target_os = "unknown")))] - Error::SharedMem(ref err) => Some(err), + ValidationError::Internal(InternalError::SharedMem(ref err)) => Some(err), + ValidationError::InvalidCandidate(InvalidCandidate::WasmExecutor(ref err)) => Some(err), _ => None, } } } -/// Externalities for parachain validation. -pub trait Externalities: Send { - /// Called when a message is to be posted to the parachain's relay chain. - fn post_upward_message(&mut self, message: UpwardMessage) -> Result<(), String>; -} - /// Validate a candidate under the given validation code. /// /// This will fail if the validation code is not a proper parachain validation module. 
 /// Validate a candidate under the given validation code.
 ///
 /// This will fail if the validation code is not a proper parachain validation module.
-pub fn validate_candidate<E: Externalities + 'static>(
+pub fn validate_candidate(
 	validation_code: &[u8],
 	params: ValidationParams,
-	ext: E,
 	options: ExecutionMode<'_>,
-) -> Result<ValidationResult, Error> {
+	spawner: impl SpawnNamed + 'static,
+) -> Result<ValidationResult, ValidationError> {
 	match options {
 		ExecutionMode::Local => {
-			validate_candidate_internal(validation_code, &params.encode(), ext)
+			validate_candidate_internal(validation_code, &params.encode(), spawner)
 		},
 		#[cfg(not(any(target_os = "android", target_os = "unknown")))]
 		ExecutionMode::Remote(pool) => {
-			pool.validate_candidate(validation_code, params, ext, false)
+			pool.validate_candidate(validation_code, params, false)
 		},
 		#[cfg(not(any(target_os = "android", target_os = "unknown")))]
 		ExecutionMode::RemoteTest(pool) => {
-			pool.validate_candidate(validation_code, params, ext, true)
+			pool.validate_candidate(validation_code, params, true)
 		},
 		#[cfg(any(target_os = "android", target_os = "unknown"))]
-		ExecutionMode::Remote(pool) =>
-			Err(Error::System(Box::<dyn std::error::Error>::from(
-				"Remote validator not available".to_string()
-			) as Box<_>)),
+		ExecutionMode::Remote(_pool) =>
+			Err(ValidationError::Internal(InternalError::System(
+				Box::<dyn std::error::Error>::from(
+					"Remote validator not available".to_string()
+				) as Box<_>
+			))),
 		#[cfg(any(target_os = "android", target_os = "unknown"))]
-		ExecutionMode::RemoteTest(pool) =>
-			Err(Error::System(Box::<dyn std::error::Error>::from(
-				"Remote validator not available".to_string()
-			) as Box<_>)),
+		ExecutionMode::RemoteTest(_pool) =>
+			Err(ValidationError::Internal(InternalError::System(
+				Box::<dyn std::error::Error>::from(
+					"Remote validator not available".to_string()
+				) as Box<_>
+			))),
 	}
 }
 
 /// The host functions provided by the wasm executor to the parachain wasm blob.
-type HostFunctions = (
-	sp_io::SubstrateHostFunctions,
-	crate::wasm_api::parachain::HostFunctions,
-);
+type HostFunctions = sp_io::SubstrateHostFunctions;
 
 /// Validate a candidate under the given validation code.
 ///
 /// This will fail if the validation code is not a proper parachain validation module.
-pub fn validate_candidate_internal<E: Externalities + 'static>(
+pub fn validate_candidate_internal(
 	validation_code: &[u8],
 	encoded_call_data: &[u8],
-	externalities: E,
-) -> Result<ValidationResult, Error> {
-	let mut extensions = Extensions::new();
-	extensions.register(ParachainExt::new(externalities));
-	extensions.register(sp_core::traits::TaskExecutorExt(sp_core::tasks::executor()));
-
-	let mut ext = ValidationExternalities(extensions);
-
+	spawner: impl SpawnNamed + 'static,
+) -> Result<ValidationResult, ValidationError> {
 	let executor = sc_executor::WasmExecutor::new(
 		sc_executor::WasmExecutionMethod::Interpreted,
 		// TODO: Make sure we don't use more than 1GB: https://github.com/paritytech/polkadot/issues/699
@@ -191,6 +184,13 @@ pub fn validate_candidate_internal(
 		HostFunctions::host_functions(),
 		8
 	);
+
+	let mut extensions = Extensions::new();
+	extensions.register(sp_core::traits::TaskExecutorExt::new(spawner));
+	extensions.register(sp_core::traits::CallInWasmExt::new(executor.clone()));
+
+	let mut ext = ValidationExternalities(extensions);
+
 	let res = executor.call_in_wasm(
 		validation_code,
 		None,
@@ -198,9 +198,10 @@ pub fn validate_candidate_internal(
 		encoded_call_data,
 		&mut ext,
 		sp_core::traits::MissingHostFunctions::Allow,
-	)?;
+	).map_err(|e| ValidationError::InvalidCandidate(e.into()))?;
 
-	ValidationResult::decode(&mut &res[..]).map_err(|_| Error::BadReturn.into())
+	ValidationResult::decode(&mut &res[..])
+		.map_err(|_| ValidationError::InvalidCandidate(InvalidCandidate::BadReturn).into())
 }
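
Callers pick where execution happens via `ExecutionMode`. A rough usage sketch, assuming a `ValidationPool::new()`-style constructor and an in-scope `SpawnNamed` spawner (both assumptions for illustration):

	let pool = ValidationPool::new();
	let outcome = validate_candidate(
		&validation_code,
		params,
		ExecutionMode::Remote(&pool), // or `ExecutionMode::Local` in tests
		spawner.clone(),
	);
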
 
 /// The validation externalities that will panic on any storage related access. They just provide
@@ -304,7 +305,11 @@ impl sp_externalities::Externalities for ValidationExternalities {
 		panic!("reset_read_write_count: unsupported feature for parachain validation")
 	}
 
-	fn set_whitelist(&mut self, _: Vec<Vec<u8>>) {
+	fn get_whitelist(&self) -> Vec<TrackedStorageKey> {
+		panic!("get_whitelist: unsupported feature for parachain validation")
+	}
+
+	fn set_whitelist(&mut self, _: Vec<TrackedStorageKey>) {
 		panic!("set_whitelist: unsupported feature for parachain validation")
 	}
diff --git a/parachain/src/wasm_executor/validation_host.rs b/parachain/src/wasm_executor/validation_host.rs
index 0f123349e3edefa86bde982f13b33945fb16fb66..238ed65c7a76ab50bc9bd655d260b6e35afd1f5b 100644
--- a/parachain/src/wasm_executor/validation_host.rs
+++ b/parachain/src/wasm_executor/validation_host.rs
@@ -16,17 +16,18 @@
 
 #![cfg(not(any(target_os = "android", target_os = "unknown")))]
 
-use std::{process, env, sync::Arc, sync::atomic, mem};
-use codec::{Decode, Encode, EncodeAppend};
-use crate::primitives::{ValidationParams, ValidationResult, UpwardMessage};
-use super::{validate_candidate_internal, Error, Externalities};
-use super::{MAX_CODE_MEM, MAX_RUNTIME_MEM};
+use std::{process, env, sync::Arc, sync::atomic};
+use codec::{Decode, Encode};
+use crate::primitives::{ValidationParams, ValidationResult};
+use super::{
+	validate_candidate_internal, ValidationError, InvalidCandidate, InternalError,
+	MAX_CODE_MEM, MAX_RUNTIME_MEM, MAX_VALIDATION_RESULT_HEADER_MEM,
+};
 use shared_memory::{SharedMem, SharedMemConf, EventState, WriteLockable, EventWait, EventSet};
 use parking_lot::Mutex;
 use log::{debug, trace};
-
-// Message data limit
-const MAX_MESSAGE_MEM: usize = 16 * 1024 * 1024; // 16 MiB
+use futures::executor::ThreadPool;
+use sp_core::traits::SpawnNamed;
 
 const WORKER_ARGS_TEST: &[&'static str] = &["--nocapture", "validation_worker"];
 /// CLI Argument to start in validation worker mode.
@@ -40,31 +41,29 @@ pub const EXECUTION_TIMEOUT_SEC: u64 = 30;
 #[cfg(not(debug_assertions))]
 pub const EXECUTION_TIMEOUT_SEC: u64 = 5;
 
-#[derive(Default)]
-struct WorkerExternalitiesInner {
-	up_data: Vec<u8>,
+enum Event {
+	CandidateReady = 0,
+	ResultReady = 1,
+	WorkerReady = 2,
 }
 
-#[derive(Default, Clone)]
-struct WorkerExternalities {
-	inner: Arc<Mutex<WorkerExternalitiesInner>>,
-}
+#[derive(Clone)]
+struct TaskExecutor(ThreadPool);
 
-impl Externalities for WorkerExternalities {
-	fn post_upward_message(&mut self, message: UpwardMessage) -> Result<(), String> {
-		let mut inner = self.inner.lock();
-		inner.up_data = <Vec<UpwardMessage> as EncodeAppend>::append_or_new(
-			mem::replace(&mut inner.up_data, Vec::new()),
-			std::iter::once(message),
-		).map_err(|e| e.what())?;
-		Ok(())
+impl TaskExecutor {
+	fn new() -> Result<Self, String> {
+		ThreadPool::new().map_err(|e| e.to_string()).map(Self)
 	}
 }
 
-enum Event {
-	CandidateReady = 0,
-	ResultReady = 1,
-	WorkerReady = 2,
+impl SpawnNamed for TaskExecutor {
+	fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
+		self.0.spawn_ok(future);
+	}
+
+	fn spawn(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
+		self.0.spawn_ok(future);
+	}
 }
 
 /// A pool of hosts.
@@ -87,21 +86,20 @@ impl ValidationPool {
 	/// free validation host.
 	///
 	/// This will fail if the validation code is not a proper parachain validation module.
-	pub fn validate_candidate<E: Externalities + 'static>(
+	pub fn validate_candidate(
 		&self,
 		validation_code: &[u8],
 		params: ValidationParams,
-		externalities: E,
 		test_mode: bool,
-	) -> Result<ValidationResult, Error> {
+	) -> Result<ValidationResult, ValidationError> {
 		for host in self.hosts.iter() {
 			if let Some(mut host) = host.try_lock() {
-				return host.validate_candidate(validation_code, params, externalities, test_mode);
+				return host.validate_candidate(validation_code, params, test_mode);
 			}
 		}
 
 		// all workers are busy, just wait for the first one
-		self.hosts[0].lock().validate_candidate(validation_code, params, externalities, test_mode)
+		self.hosts[0].lock().validate_candidate(validation_code, params, test_mode)
 	}
 }
 
@@ -116,9 +114,8 @@ pub fn run_worker(mem_id: &str) -> Result<(), String> {
 		}
 	};
 
-	let worker_ext = WorkerExternalities::default();
-
 	let exit = Arc::new(atomic::AtomicBool::new(false));
+	let task_executor = TaskExecutor::new()?;
 	// spawn parent monitor thread
 	let watch_exit = exit.clone();
 	std::thread::spawn(move || {
@@ -166,22 +163,15 @@ pub fn run_worker(mem_id: &str) -> Result<(), String> {
 		let (call_data, _) = rest.split_at_mut(MAX_RUNTIME_MEM);
 		let (call_data, _) = call_data.split_at_mut(header.params_size as usize);
 
-		let result = validate_candidate_internal(code, call_data, worker_ext.clone());
+		let result = validate_candidate_internal(code, call_data, task_executor.clone());
 		debug!("{} Candidate validated: {:?}", process::id(), result);
 
 		match result {
-			Ok(r) => {
-				let inner = worker_ext.inner.lock();
-				let up_data = &inner.up_data;
-				let up_len = up_data.len();
-
-				if up_len > MAX_MESSAGE_MEM {
-					ValidationResultHeader::Error("Message data is too large".into())
-				} else {
-					ValidationResultHeader::Ok(r)
-				}
-			},
-			Err(e) => ValidationResultHeader::Error(e.to_string()),
+			Ok(r) => ValidationResultHeader::Ok(r),
+			Err(ValidationError::Internal(e)) =>
+				ValidationResultHeader::Error(WorkerValidationError::InternalError(e.to_string())),
+			Err(ValidationError::InvalidCandidate(e)) =>
+				ValidationResultHeader::Error(WorkerValidationError::ValidationError(e.to_string())),
 		}
 	};
 
 	let mut data: &mut[u8] = &mut **slice;
@@ -202,9 +192,15 @@ struct ValidationHeader {
 }
 
 #[derive(Encode, Decode, Debug)]
-pub enum ValidationResultHeader {
+enum WorkerValidationError {
+	InternalError(String),
+	ValidationError(String),
+}
+
+#[derive(Encode, Decode, Debug)]
+enum ValidationResultHeader {
 	Ok(ValidationResult),
-	Error(String),
+	Error(WorkerValidationError),
 }
 
 unsafe impl Send for ValidationHost {}
@@ -225,8 +221,8 @@ impl Drop for ValidationHost {
 }
 
 impl ValidationHost {
-	fn create_memory() -> Result<SharedMem, Error> {
-		let mem_size = MAX_RUNTIME_MEM + MAX_CODE_MEM + MAX_MESSAGE_MEM + 1024;
+	fn create_memory() -> Result<SharedMem, InternalError> {
+		let mem_size = MAX_RUNTIME_MEM + MAX_CODE_MEM + MAX_VALIDATION_RESULT_HEADER_MEM;
 		let mem_config = SharedMemConf::default()
 			.set_size(mem_size)
 			.add_lock(shared_memory::LockType::Mutex, 0, mem_size)?
@@ -237,7 +233,7 @@ impl ValidationHost {
 		Ok(mem_config.create()?)
 	}
 
-	fn start_worker(&mut self, test_mode: bool) -> Result<(), Error> {
+	fn start_worker(&mut self, test_mode: bool) -> Result<(), InternalError> {
 		if let Some(ref mut worker) = self.worker {
 			// Check if still alive
 			if let Ok(None) = worker.try_wait() {
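
The segment sizing above can be sanity-checked by hand: the request phase lays out a 1 KiB header, `MAX_CODE_MEM` of code, and `MAX_RUNTIME_MEM` of encoded params, while the response phase reuses the front of the segment and must fit a `ValidationResult` that may itself carry up to `MAX_CODE_MEM` of new validation code, hence `MAX_CODE_MEM + 1024`. A standalone check (illustrative only, not part of the patch):

	const MAX_RUNTIME_MEM: usize = 1024 * 1024 * 1024; // params region, 1 GiB
	const MAX_CODE_MEM: usize = 16 * 1024 * 1024;      // code region, 16 MiB
	const MAX_VALIDATION_RESULT_HEADER_MEM: usize = MAX_CODE_MEM + 1024;

	fn main() {
		let mem_size = MAX_RUNTIME_MEM + MAX_CODE_MEM + MAX_VALIDATION_RESULT_HEADER_MEM;
		// The result-header region can hold a result embedding a maximum-size
		// code upgrade, plus a small envelope.
		assert!(MAX_VALIDATION_RESULT_HEADER_MEM > MAX_CODE_MEM);
		println!("shared segment: {} bytes", mem_size);
	}
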
-	pub fn validate_candidate<E: Externalities + 'static>(
+	pub fn validate_candidate(
 		&mut self,
 		validation_code: &[u8],
 		params: ValidationParams,
-		mut externalities: E,
 		test_mode: bool,
-	) -> Result<ValidationResult, Error> {
+	) -> Result<ValidationResult, ValidationError> {
 		if validation_code.len() > MAX_CODE_MEM {
-			return Err(Error::CodeTooLarge(validation_code.len()));
+			return Err(ValidationError::InvalidCandidate(InvalidCandidate::CodeTooLarge(validation_code.len())));
 		}
 		// First, check if need to spawn the child process
 		self.start_worker(test_mode)?;
@@ -284,7 +279,8 @@ impl ValidationHost {
 			.expect("memory is always `Some` after `start_worker` completes successfully");
 		{
 			// Put data in shared mem
-			let data: &mut[u8] = &mut **memory.wlock_as_slice(0)?;
+			let data: &mut[u8] = &mut **memory.wlock_as_slice(0)
+				.map_err(|e| ValidationError::Internal(e.into()))?;
 			let (mut header_buf, rest) = data.split_at_mut(1024);
 			let (code, rest) = rest.split_at_mut(MAX_CODE_MEM);
 			let (code, _) = code.split_at_mut(validation_code.len());
@@ -292,7 +288,7 @@ impl ValidationHost {
 			code[..validation_code.len()].copy_from_slice(validation_code);
 			let encoded_params = params.encode();
 			if encoded_params.len() >= MAX_RUNTIME_MEM {
-				return Err(Error::ParamsTooLarge(MAX_RUNTIME_MEM));
+				return Err(ValidationError::InvalidCandidate(InvalidCandidate::ParamsTooLarge(MAX_RUNTIME_MEM)));
 			}
 			call_data[..encoded_params.len()].copy_from_slice(&encoded_params);
 
@@ -305,7 +301,8 @@ impl ValidationHost {
 		}
 
 		debug!("{} Signaling candidate", self.id);
-		memory.set(Event::CandidateReady as usize, EventState::Signaled)?;
+		memory.set(Event::CandidateReady as usize, EventState::Signaled)
+			.map_err(|e| ValidationError::Internal(e.into()))?;
 
 		debug!("{} Waiting for results", self.id);
 		match memory.wait(Event::ResultReady as usize, shared_memory::Timeout::Sec(EXECUTION_TIMEOUT_SEC as usize)) {
@@ -314,35 +311,34 @@ impl ValidationHost {
 				if let Some(mut worker) = self.worker.take() {
 					worker.kill().ok();
 				}
-				return Err(Error::Timeout.into());
+				return Err(ValidationError::InvalidCandidate(InvalidCandidate::Timeout));
 			}
 			Ok(()) => {}
 		}
 
 		{
 			debug!("{} Reading results", self.id);
-			let data: &[u8] = &**memory.wlock_as_slice(0)?;
-			let (header_buf, rest) = data.split_at(1024);
-			let (_, rest) = rest.split_at(MAX_CODE_MEM);
-			let (_, message_data) = rest.split_at(MAX_RUNTIME_MEM);
+			let data: &[u8] = &**memory.wlock_as_slice(0)
+				.map_err(|e| ValidationError::Internal(e.into()))?;
+			let (header_buf, _) = data.split_at(MAX_VALIDATION_RESULT_HEADER_MEM);
 			let mut header_buf: &[u8] = header_buf;
-			let mut message_data: &[u8] = message_data;
-			let header = ValidationResultHeader::decode(&mut header_buf).unwrap();
+			let header = ValidationResultHeader::decode(&mut header_buf)
+				.map_err(|e|
+					InternalError::System(
+						Box::<dyn std::error::Error + Send + Sync>::from(
+							format!("Failed to decode `ValidationResultHeader`: {:?}", e)
+						) as Box<_>
+					)
+				)?;
 			match header {
-				ValidationResultHeader::Ok(result) => {
-					let upwards = Vec::<UpwardMessage>::decode(&mut message_data)
-						.map_err(|e|
-							Error::External(
-								format!("Could not decode upward messages: {}", e.what())
-							)
-						)?;
-					upwards.into_iter().try_for_each(|msg| externalities.post_upward_message(msg))?;
-
-					Ok(result)
-				}
-				ValidationResultHeader::Error(message) => {
-					debug!("{} Validation error: {}", self.id, message);
-					Err(Error::External(message).into())
+				ValidationResultHeader::Ok(result) => Ok(result),
+				ValidationResultHeader::Error(WorkerValidationError::InternalError(e)) => {
+					debug!("{} Internal validation error: {}", self.id, e);
+					Err(ValidationError::Internal(InternalError::WasmWorker(e)))
+				},
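+				// Worker-reported failures come in two flavours: `InternalError`
+				// above means the validation host itself misbehaved and no verdict
+				// on the candidate can be given, while the arm that follows maps
+				// candidate-caused failures onto `InvalidCandidate`.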
ValidationResultHeader::Error(WorkerValidationError::ValidationError(e)) => { + debug!("{} External validation error: {}", self.id, e); + Err(ValidationError::InvalidCandidate(InvalidCandidate::ExternalWasmExecutor(e))) } } } diff --git a/parachain/test-parachains/Cargo.toml b/parachain/test-parachains/Cargo.toml index 8b903b85734e0de13c45d75df81cbfcec571ccdb..d391c1b2314dc70e256b796ad127adb4235cf376 100644 --- a/parachain/test-parachains/Cargo.toml +++ b/parachain/test-parachains/Cargo.toml @@ -7,17 +7,18 @@ edition = "2018" [dependencies] tiny-keccak = "1.5.0" -codec = { package = "parity-scale-codec", version = "1.1.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } parachain = { package = "polkadot-parachain", path = ".." } adder = { package = "test-parachain-adder", path = "adder" } halt = { package = "test-parachain-halt", path = "halt" } -code-upgrader = { package = "test-parachain-code-upgrader", path = "code-upgrader" } + +[dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] default = [ "std" ] std = [ "adder/std", "halt/std", - "code-upgrader/std", ] diff --git a/parachain/test-parachains/adder/Cargo.toml b/parachain/test-parachains/adder/Cargo.toml index 3a636fca68a8affb94042e5f3294ddfd834acd56..5b2e79257f1d5afe1130c933f376567e0974127d 100644 --- a/parachain/test-parachains/adder/Cargo.toml +++ b/parachain/test-parachains/adder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-adder" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] description = "Test parachain which adds to a number as its state transition" edition = "2018" @@ -8,7 +8,8 @@ build = "build.rs" [dependencies] parachain = { package = "polkadot-parachain", path = "../../", default-features = false, features = [ "wasm-api" ] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } tiny-keccak = "1.5.0" dlmalloc = { version = "0.1.3", features = [ "global" ] } @@ -20,4 +21,7 @@ wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "1. 
[features] default = [ "std" ] -std = ["parachain/std"] +std = [ + "parachain/std", + "sp-std/std", +] diff --git a/parachain/test-parachains/adder/build.rs b/parachain/test-parachains/adder/build.rs index 9a2e2c8fddbe3fe521d58a223f621eaaee6ac93a..2e407bbef3876e3ff3cc1183533545fb74952acf 100644 --- a/parachain/test-parachains/adder/build.rs +++ b/parachain/test-parachains/adder/build.rs @@ -19,7 +19,7 @@ use wasm_builder_runner::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("1.0.11") + .with_wasm_builder_from_crates("2.0.0") .export_heap_base() .build() } diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml deleted file mode 100644 index 3363a908a943cfe937e75c5519b41d0062688b2d..0000000000000000000000000000000000000000 --- a/parachain/test-parachains/adder/collator/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "test-parachain-adder-collator" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" - -[dependencies] -adder = { package = "test-parachain-adder", path = ".." } -parachain = { package = "polkadot-parachain", path = "../../.." } -collator = { package = "polkadot-collator", path = "../../../../collator" } -primitives = { package = "polkadot-primitives", path = "../../../../primitives" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -client-api = { package = "sc-client-api", git = "https://github.com/paritytech/substrate", branch = "master" } -parking_lot = "0.10.0" -codec = { package = "parity-scale-codec", version = "1.2.0" } -futures = "0.3.4" diff --git a/parachain/test-parachains/adder/collator/src/main.rs b/parachain/test-parachains/adder/collator/src/main.rs deleted file mode 100644 index a32b6e4ca4e19c01e1b855e7716c20b4d33c5bc2..0000000000000000000000000000000000000000 --- a/parachain/test-parachains/adder/collator/src/main.rs +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! 
Collator for polkadot - -use std::collections::HashMap; -use std::sync::Arc; - -use adder::{HeadData as AdderHead, BlockData as AdderBody}; -use sp_core::Pair; -use codec::{Encode, Decode}; -use primitives::{ - Hash, - parachain::{HeadData, BlockData, Id as ParaId, LocalValidationData, GlobalValidationSchedule}, -}; -use collator::{ParachainContext, Network, BuildParachainContext, Cli, SubstrateCli}; -use parking_lot::Mutex; -use futures::future::{Ready, ready, TryFutureExt}; - -const GENESIS: AdderHead = AdderHead { - number: 0, - parent_hash: [0; 32], - post_state: [ - 1, 27, 77, 3, 221, 140, 1, 241, 4, 145, 67, 207, 156, 76, 129, 126, 75, - 22, 127, 29, 27, 131, 229, 198, 240, 241, 13, 137, 186, 30, 123, 206 - ], -}; - -const GENESIS_BODY: AdderBody = AdderBody { - state: 0, - add: 0, -}; - -#[derive(Clone)] -struct AdderContext { - db: Arc>>, - /// We store it here to make sure that our interfaces require the correct bounds. - _network: Option>, -} - -/// The parachain context. -impl ParachainContext for AdderContext { - type ProduceCandidate = Ready>; - - fn produce_candidate( - &mut self, - _relay_parent: Hash, - _global_validation: GlobalValidationSchedule, - local_validation: LocalValidationData, - ) -> Self::ProduceCandidate - { - let adder_head = match AdderHead::decode(&mut &local_validation.parent_head.0[..]).ok() { - Some(res) => res, - None => return ready(None), - }; - - let mut db = self.db.lock(); - - let last_body = if adder_head == GENESIS { - GENESIS_BODY - } else { - db.get(&adder_head) - .expect("All past bodies stored since this is the only collator") - .clone() - }; - - let next_body = AdderBody { - state: last_body.state.overflowing_add(last_body.add).0, - add: adder_head.number % 100, - }; - - let next_head = adder::execute(adder_head.hash(), adder_head, &next_body) - .expect("good execution params; qed"); - - let encoded_head = HeadData(next_head.encode()); - let encoded_body = BlockData(next_body.encode()); - - println!("Created collation for #{}, post-state={}", - next_head.number, next_body.state.overflowing_add(next_body.add).0); - - db.insert(next_head.clone(), next_body); - ready(Some((encoded_body, encoded_head))) - } -} - -impl BuildParachainContext for AdderContext { - type ParachainContext = Self; - - fn build( - self, - _: Arc, - _: SP, - network: impl Network + Clone + 'static, - ) -> Result { - Ok(Self { _network: Some(Arc::new(network)), ..self }) - } -} - -fn main() -> Result<(), Box> { - let key = Arc::new(Pair::from_seed(&[1; 32])); - let id: ParaId = 100.into(); - - println!("Starting adder collator with genesis: "); - - { - let encoded = GENESIS.encode(); - println!("Dec: {:?}", encoded); - print!("Hex: 0x"); - for byte in encoded { - print!("{:02x}", byte); - } - - println!(); - } - - let context = AdderContext { - db: Arc::new(Mutex::new(HashMap::new())), - _network: None, - }; - - let cli = Cli::from_iter(&["-dev"]); - let runner = cli.create_runner(&cli.run.base)?; - runner.async_run(|config| { - collator::start_collator( - context, - id, - key, - config, - ).map_err(|e| e.into()) - })?; - - Ok(()) -} diff --git a/parachain/test-parachains/adder/src/lib.rs b/parachain/test-parachains/adder/src/lib.rs index d910eb0fc1afb0032bb1fa58f30b2f3ed20572d6..7ccba8400efbb07eaadddfcd284ab0b60e94858b 100644 --- a/parachain/test-parachains/adder/src/lib.rs +++ b/parachain/test-parachains/adder/src/lib.rs @@ -33,6 +33,13 @@ static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), 
"/wasm_binary.rs")); +#[cfg(feature = "std")] +/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. +pub fn wasm_binary_unwrap() -> &'static [u8] { + WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ + supported with the flag disabled.") +} + /// Head data for this parachain. #[derive(Default, Clone, Hash, Eq, PartialEq, Encode, Decode)] pub struct HeadData { diff --git a/parachain/test-parachains/adder/src/wasm_validation.rs b/parachain/test-parachains/adder/src/wasm_validation.rs index eaa5101ba927f9b5b0c075791c389a00dde94872..c0f3b56dc8e49bfa588a7f75099c952e62eff07b 100644 --- a/parachain/test-parachains/adder/src/wasm_validation.rs +++ b/parachain/test-parachains/adder/src/wasm_validation.rs @@ -21,22 +21,6 @@ use core::{intrinsics, panic}; use parachain::primitives::{ValidationResult, HeadData as GenericHeadData}; use codec::{Encode, Decode}; -#[panic_handler] -#[no_mangle] -pub fn panic(_info: &panic::PanicInfo) -> ! { - unsafe { - intrinsics::abort() - } -} - -#[alloc_error_handler] -#[no_mangle] -pub fn oom(_: core::alloc::Layout) -> ! { - unsafe { - intrinsics::abort(); - } -} - #[no_mangle] pub extern fn validate_block(params: *const u8, len: usize) -> u64 { let params = unsafe { parachain::load_params(params, len) }; @@ -53,6 +37,8 @@ pub extern fn validate_block(params: *const u8, len: usize) -> u64 { &ValidationResult { head_data: GenericHeadData(new_head.encode()), new_validation_code: None, + upward_messages: sp_std::vec::Vec::new(), + processed_downward_messages: 0, } ), Err(_) => panic!("execution failure"), diff --git a/parachain/test-parachains/code-upgrader/Cargo.toml b/parachain/test-parachains/code-upgrader/Cargo.toml deleted file mode 100644 index 4dfcde274a73b298782f544d9be10a53132d04b4..0000000000000000000000000000000000000000 --- a/parachain/test-parachains/code-upgrader/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "test-parachain-code-upgrader" -version = "0.7.22" -authors = ["Parity Technologies "] -description = "Test parachain which can upgrade code" -edition = "2018" -build = "build.rs" - -[dependencies] -parachain = { package = "polkadot-parachain", path = "../../", default-features = false, features = [ "wasm-api" ] } -codec = { package = "parity-scale-codec", version = "1.1.0", default-features = false, features = ["derive"] } -tiny-keccak = "1.5.0" -dlmalloc = { version = "0.1.3", features = [ "global" ] } - -# We need to make sure the global allocator is disabled until we have support of full substrate externalities -runtime-io = { package = "sp-io", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, features = [ "disable_allocator" ] } - -[build-dependencies] -wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "1.0.6" } - -[features] -default = [ "std" ] -std = ["parachain/std"] diff --git a/parachain/test-parachains/code-upgrader/src/lib.rs b/parachain/test-parachains/code-upgrader/src/lib.rs deleted file mode 100644 index 4a717af0084cdbc6ba21df1cf130e8561d06b304..0000000000000000000000000000000000000000 --- a/parachain/test-parachains/code-upgrader/src/lib.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Test parachain WASM which implements code ugprades. - -#![no_std] - -#![cfg_attr(not(feature = "std"), feature(core_intrinsics, lang_items, core_panic_info, alloc_error_handler))] - -use codec::{Encode, Decode}; -use parachain::primitives::{RelayChainBlockNumber, ValidationCode}; - -#[cfg(not(feature = "std"))] -mod wasm_validation; - -#[cfg(not(feature = "std"))] -#[global_allocator] -static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; - -// Make the WASM binary available. -#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -#[derive(Encode, Decode, Clone, Default)] -pub struct State { - /// The current code that is "active" in this chain. - pub code: ValidationCode, - /// Code upgrade that is pending. - pub pending_code: Option<(ValidationCode, RelayChainBlockNumber)>, -} - -/// Head data for this parachain. -#[derive(Default, Clone, Hash, Eq, PartialEq, Encode, Decode)] -pub struct HeadData { - /// Block number - pub number: u64, - /// parent block keccak256 - pub parent_hash: [u8; 32], - /// hash of post-execution state. - pub post_state: [u8; 32], -} - -impl HeadData { - pub fn hash(&self) -> [u8; 32] { - tiny_keccak::keccak256(&self.encode()) - } -} - -/// Block data for this parachain. -#[derive(Default, Clone, Encode, Decode)] -pub struct BlockData { - /// State to begin from. - pub state: State, - /// Code to upgrade to. - pub new_validation_code: Option, -} - -pub fn hash_state(state: &State) -> [u8; 32] { - tiny_keccak::keccak256(state.encode().as_slice()) -} - -#[derive(Debug)] -pub enum Error { - /// Start state mismatched with parent header's state hash. - StateMismatch, - /// New validation code too large. - NewCodeTooLarge, - /// Code upgrades not allowed at this time. - CodeUpgradeDisallowed, -} - -pub struct ValidationResult { - /// The new head data. - pub head_data: HeadData, - /// The new validation code. - pub new_validation_code: Option, -} - -pub struct RelayChainParams { - /// Whether a code upgrade is allowed and at what relay-chain block number - /// to process it after. - pub code_upgrade_allowed: Option, - /// The maximum code size allowed for an upgrade. - pub max_code_size: u32, - /// The relay-chain block number. - pub relay_chain_block_number: RelayChainBlockNumber, -} - -/// Execute a block body on top of given parent head, producing new parent head -/// if valid. -pub fn execute( - parent_hash: [u8; 32], - parent_head: HeadData, - block_data: BlockData, - relay_params: &RelayChainParams, -) -> Result { - debug_assert_eq!(parent_hash, parent_head.hash()); - - if hash_state(&block_data.state) != parent_head.post_state { - return Err(Error::StateMismatch); - } - - let mut new_state = block_data.state; - - if let Some((pending_code, after)) = new_state.pending_code.take() { - if after <= relay_params.relay_chain_block_number { - // code applied. - new_state.code = pending_code; - } else { - // reinstate. 
- new_state.pending_code = Some((pending_code, after)); - } - } - - let new_validation_code = if let Some(ref new_validation_code) = block_data.new_validation_code { - if new_validation_code.0.len() as u32 > relay_params.max_code_size { - return Err(Error::NewCodeTooLarge); - } - - // replace the code if allowed and we don't have an upgrade pending. - match (new_state.pending_code.is_some(), relay_params.code_upgrade_allowed) { - (_, None) => return Err(Error::CodeUpgradeDisallowed), - (false, Some(after)) => { - new_state.pending_code = Some((new_validation_code.clone(), after)); - Some(new_validation_code.clone()) - } - (true, Some(_)) => None, - } - } else { - None - }; - - let head_data = HeadData { - number: parent_head.number + 1, - parent_hash, - post_state: hash_state(&new_state), - }; - - Ok(ValidationResult { - head_data, - new_validation_code: new_validation_code, - }) -} diff --git a/parachain/test-parachains/code-upgrader/src/wasm_validation.rs b/parachain/test-parachains/code-upgrader/src/wasm_validation.rs deleted file mode 100644 index 8ebc3ae3c6f31052f471a8fdea136a916cc8f115..0000000000000000000000000000000000000000 --- a/parachain/test-parachains/code-upgrader/src/wasm_validation.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! WASM validation for adder parachain. - -use crate::{HeadData, BlockData, RelayChainParams}; -use core::{intrinsics, panic}; -use parachain::primitives::{ValidationResult, HeadData as GenericHeadData}; -use codec::{Encode, Decode}; - -#[panic_handler] -#[no_mangle] -pub fn panic(_info: &panic::PanicInfo) -> ! { - unsafe { - intrinsics::abort() - } -} - -#[alloc_error_handler] -#[no_mangle] -pub fn oom(_: core::alloc::Layout) -> ! 
{ - unsafe { - intrinsics::abort(); - } -} - -#[no_mangle] -pub extern fn validate_block(params: *const u8, len: usize) -> u64 { - let params = unsafe { parachain::load_params(params, len) }; - let parent_head = HeadData::decode(&mut ¶ms.parent_head.0[..]) - .expect("invalid parent head format."); - - let block_data = BlockData::decode(&mut ¶ms.block_data.0[..]) - .expect("invalid block data format."); - - let parent_hash = tiny_keccak::keccak256(¶ms.parent_head.0[..]); - - let res = crate::execute( - parent_hash, - parent_head, - block_data, - &RelayChainParams { - code_upgrade_allowed: params.code_upgrade_allowed, - max_code_size: params.max_code_size, - relay_chain_block_number: params.relay_chain_height, - }, - ); - - match res { - Ok(output) => parachain::write_result( - &ValidationResult { - head_data: GenericHeadData(output.head_data.encode()), - new_validation_code: output.new_validation_code, - } - ), - Err(_) => panic!("execution failure"), - } -} diff --git a/parachain/test-parachains/code-upgrader/wasm/Cargo.toml b/parachain/test-parachains/code-upgrader/wasm/Cargo.toml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/parachain/test-parachains/halt/Cargo.toml b/parachain/test-parachains/halt/Cargo.toml index cf3fb265554402cd3924812119ed1fa411515952..9d1af0e64623d472e33c034847dc9a3dacadb2b2 100644 --- a/parachain/test-parachains/halt/Cargo.toml +++ b/parachain/test-parachains/halt/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-halt" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] description = "Test parachain which executes forever" edition = "2018" diff --git a/parachain/test-parachains/halt/build.rs b/parachain/test-parachains/halt/build.rs index 9a2e2c8fddbe3fe521d58a223f621eaaee6ac93a..2e407bbef3876e3ff3cc1183533545fb74952acf 100644 --- a/parachain/test-parachains/halt/build.rs +++ b/parachain/test-parachains/halt/build.rs @@ -19,7 +19,7 @@ use wasm_builder_runner::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("1.0.11") + .with_wasm_builder_from_crates("2.0.0") .export_heap_base() .build() } diff --git a/parachain/test-parachains/halt/src/lib.rs b/parachain/test-parachains/halt/src/lib.rs index fe2778ae783ba5925c6cb3efbc85dfec3ae95d32..b82a649e3f1ea781565e5369367cd16eeedb916e 100644 --- a/parachain/test-parachains/halt/src/lib.rs +++ b/parachain/test-parachains/halt/src/lib.rs @@ -23,6 +23,13 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +#[cfg(feature = "std")] +/// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. +pub fn wasm_binary_unwrap() -> &'static [u8] { + WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ + supported with the flag disabled.") +} + #[cfg(not(feature = "std"))] #[panic_handler] #[no_mangle] @@ -46,4 +53,3 @@ pub fn oom(_: core::alloc::Layout) -> ! { pub extern fn validate_block(params: *const u8, len: usize) -> usize { loop {} } - diff --git a/parachain/test-parachains/tests/adder/mod.rs b/parachain/test-parachains/tests/adder/mod.rs index d97db998fd541c46cfa6f82de66f441ca87b0662..76924551ba9a33e35ad90516bd604bfee7c7b5d6 100644 --- a/parachain/test-parachains/tests/adder/mod.rs +++ b/parachain/test-parachains/tests/adder/mod.rs @@ -16,15 +16,11 @@ //! Basic parachain that adds a number as part of its state. 
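+// These tests drive the out-of-process WASM executor end-to-end:
+// `ExecutionMode::RemoteTest(&pool)` re-spawns the current test binary as a
+// validation worker (see `run_worker` in `tests/lib.rs`) and validates real
+// `adder` blocks against `adder::wasm_binary_unwrap()`.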
-use crate::{ - DummyExt, - parachain, - parachain::primitives::{ - RelayChainBlockNumber, - BlockData as GenericBlockData, - HeadData as GenericHeadData, - ValidationParams, - }, +use parachain::primitives::{ + RelayChainBlockNumber, + BlockData as GenericBlockData, + HeadData as GenericHeadData, + ValidationParams, }; use codec::{Decode, Encode}; @@ -48,8 +44,6 @@ struct BlockData { add: u64, } -const TEST_CODE: &[u8] = adder::WASM_BINARY; - fn hash_state(state: u64) -> [u8; 32] { tiny_keccak::keccak256(state.encode().as_slice()) } @@ -74,17 +68,15 @@ pub fn execute_good_on_parent() { let pool = parachain::wasm_executor::ValidationPool::new(); let ret = parachain::wasm_executor::validate_candidate( - TEST_CODE, + adder::wasm_binary_unwrap(), ValidationParams { parent_head: GenericHeadData(parent_head.encode()), block_data: GenericBlockData(block_data.encode()), - max_code_size: 1024, - max_head_data_size: 1024, relay_chain_height: 1, - code_upgrade_allowed: None, + hrmp_mqc_heads: Vec::new(), }, - DummyExt, parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), + sp_core::testing::TaskExecutor::new(), ).unwrap(); let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); @@ -114,17 +106,15 @@ fn execute_good_chain_on_parent() { }; let ret = parachain::wasm_executor::validate_candidate( - TEST_CODE, + adder::wasm_binary_unwrap(), ValidationParams { parent_head: GenericHeadData(parent_head.encode()), block_data: GenericBlockData(block_data.encode()), - max_code_size: 1024, - max_head_data_size: 1024, relay_chain_height: number as RelayChainBlockNumber + 1, - code_upgrade_allowed: None, + hrmp_mqc_heads: Vec::new(), }, - DummyExt, parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), + sp_core::testing::TaskExecutor::new(), ).unwrap(); let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); @@ -155,16 +145,14 @@ fn execute_bad_on_parent() { }; let _ret = parachain::wasm_executor::validate_candidate( - TEST_CODE, + adder::wasm_binary_unwrap(), ValidationParams { parent_head: GenericHeadData(parent_head.encode()), block_data: GenericBlockData(block_data.encode()), - max_code_size: 1024, - max_head_data_size: 1024, relay_chain_height: 1, - code_upgrade_allowed: None, + hrmp_mqc_heads: Vec::new(), }, - DummyExt, parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), + sp_core::testing::TaskExecutor::new(), ).unwrap_err(); } diff --git a/parachain/test-parachains/tests/code_upgrader/mod.rs b/parachain/test-parachains/tests/code_upgrader/mod.rs deleted file mode 100644 index c59e44fc122f078abe36308f45ed3531c1bd9eef..0000000000000000000000000000000000000000 --- a/parachain/test-parachains/tests/code_upgrader/mod.rs +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Basic parachain that adds a number as part of its state. 
- -use parachain; - -use crate::{ - DummyExt, - parachain::primitives::{ - BlockData as GenericBlockData, - HeadData as GenericHeadData, - ValidationParams, ValidationCode, - }, -}; -use codec::{Decode, Encode}; -use code_upgrader::{hash_state, HeadData, BlockData, State}; - -const TEST_CODE: &[u8] = code_upgrader::WASM_BINARY; - -#[test] -pub fn execute_good_no_upgrade() { - let pool = parachain::wasm_executor::ValidationPool::new(); - - let parent_head = HeadData { - number: 0, - parent_hash: [0; 32], - post_state: hash_state(&State::default()), - }; - - let block_data = BlockData { - state: State::default(), - new_validation_code: None, - }; - - let ret = parachain::wasm_executor::validate_candidate( - TEST_CODE, - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - max_code_size: 1024, - max_head_data_size: 1024, - relay_chain_height: 1, - code_upgrade_allowed: None, - }, - DummyExt, - parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), - ).unwrap(); - - let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); - - assert!(ret.new_validation_code.is_none()); - assert_eq!(new_head.number, 1); - assert_eq!(new_head.parent_hash, parent_head.hash()); - assert_eq!(new_head.post_state, hash_state(&State::default())); -} - -#[test] -pub fn execute_good_with_upgrade() { - let pool = parachain::wasm_executor::ValidationPool::new(); - - let parent_head = HeadData { - number: 0, - parent_hash: [0; 32], - post_state: hash_state(&State::default()), - }; - - let block_data = BlockData { - state: State::default(), - new_validation_code: Some(ValidationCode(vec![1, 2, 3])), - }; - - let ret = parachain::wasm_executor::validate_candidate( - TEST_CODE, - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - max_code_size: 1024, - max_head_data_size: 1024, - relay_chain_height: 1, - code_upgrade_allowed: Some(20), - }, - DummyExt, - parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), - ).unwrap(); - - let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); - - assert_eq!(ret.new_validation_code.unwrap(), ValidationCode(vec![1, 2, 3])); - assert_eq!(new_head.number, 1); - assert_eq!(new_head.parent_hash, parent_head.hash()); - assert_eq!( - new_head.post_state, - hash_state(&State { - code: ValidationCode::default(), - pending_code: Some((ValidationCode(vec![1, 2, 3]), 20)), - }), - ); -} - -#[test] -#[should_panic] -pub fn code_upgrade_not_allowed() { - let pool = parachain::wasm_executor::ValidationPool::new(); - - let parent_head = HeadData { - number: 0, - parent_hash: [0; 32], - post_state: hash_state(&State::default()), - }; - - let block_data = BlockData { - state: State::default(), - new_validation_code: Some(ValidationCode(vec![1, 2, 3])), - }; - - parachain::wasm_executor::validate_candidate( - TEST_CODE, - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - max_code_size: 1024, - max_head_data_size: 1024, - relay_chain_height: 1, - code_upgrade_allowed: None, - }, - DummyExt, - parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), - ).unwrap(); -} - -#[test] -pub fn applies_code_upgrade_after_delay() { - let pool = parachain::wasm_executor::ValidationPool::new(); - - let (new_head, state) = { - let parent_head = HeadData { - number: 0, - parent_hash: [0; 32], - post_state: hash_state(&State::default()), - }; - - let block_data 
= BlockData { - state: State::default(), - new_validation_code: Some(ValidationCode(vec![1, 2, 3])), - }; - - let ret = parachain::wasm_executor::validate_candidate( - TEST_CODE, - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - max_code_size: 1024, - max_head_data_size: 1024, - relay_chain_height: 1, - code_upgrade_allowed: Some(2), - }, - DummyExt, - parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), - ).unwrap(); - - let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); - - let parent_hash = parent_head.hash(); - let state = State { - code: ValidationCode::default(), - pending_code: Some((ValidationCode(vec![1, 2, 3]), 2)), - }; - assert_eq!(ret.new_validation_code.unwrap(), ValidationCode(vec![1, 2, 3])); - assert_eq!(new_head.number, 1); - assert_eq!(new_head.parent_hash, parent_hash); - assert_eq!(new_head.post_state, hash_state(&state)); - - (new_head, state) - }; - - { - let parent_head = new_head; - let block_data = BlockData { - state, - new_validation_code: None, - }; - - let ret = parachain::wasm_executor::validate_candidate( - TEST_CODE, - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - max_code_size: 1024, - max_head_data_size: 1024, - relay_chain_height: 2, - code_upgrade_allowed: None, - }, - DummyExt, - parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), - ).unwrap(); - - let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); - - assert!(ret.new_validation_code.is_none()); - assert_eq!(new_head.number, 2); - assert_eq!(new_head.parent_hash, parent_head.hash()); - assert_eq!( - new_head.post_state, - hash_state(&State { - code: ValidationCode(vec![1, 2, 3]), - pending_code: None, - }), - ); - } -} diff --git a/parachain/test-parachains/tests/lib.rs b/parachain/test-parachains/tests/lib.rs index cf6d63fd2acfb3eddf71411715302a20ec3f7707..3ad021a16e7035b16702af59cfbd40ed44329be7 100644 --- a/parachain/test-parachains/tests/lib.rs +++ b/parachain/test-parachains/tests/lib.rs @@ -15,19 +15,9 @@ // along with Polkadot. If not, see . mod adder; -mod code_upgrader; mod wasm_executor; -use parachain::{ - self, primitives::UpwardMessage, wasm_executor::{Externalities, run_worker}, -}; - -struct DummyExt; -impl Externalities for DummyExt { - fn post_upward_message(&mut self, _: UpwardMessage) -> Result<(), String> { - Ok(()) - } -} +use parachain::wasm_executor::run_worker; // This is not an actual test, but rather an entry point for out-of process WASM executor. // When executing tests the executor spawns currently executing binary, which happens to be test binary. diff --git a/parachain/test-parachains/tests/wasm_executor/mod.rs b/parachain/test-parachains/tests/wasm_executor/mod.rs index c6cf2407b3af92bfb0bfdcc3e8aaa29ee9c28f7c..b4f2211baa56d8175b4840beb63b4d8dd4b56c78 100644 --- a/parachain/test-parachains/tests/wasm_executor/mod.rs +++ b/parachain/test-parachains/tests/wasm_executor/mod.rs @@ -16,35 +16,29 @@ //! Basic parachain that adds a number as part of its state. 
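+// Timeout behaviour is exercised with the `halt` parachain, whose
+// `validate_block` never returns: remote validation must be killed after
+// `EXECUTION_TIMEOUT_SEC` and surface as
+// `ValidationError::InvalidCandidate(InvalidCandidate::Timeout)`.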
-use parachain; -use crate::{adder, DummyExt}; -use crate::parachain::{ +use crate::adder; +use parachain::{ primitives::{BlockData, ValidationParams}, - wasm_executor::EXECUTION_TIMEOUT_SEC, + wasm_executor::{ValidationError, InvalidCandidate, EXECUTION_TIMEOUT_SEC}, }; -// Code that exposes `validate_block` and loops infinitely -const INFINITE_LOOP_CODE: &[u8] = halt::WASM_BINARY; - #[test] fn terminates_on_timeout() { let pool = parachain::wasm_executor::ValidationPool::new(); let result = parachain::wasm_executor::validate_candidate( - INFINITE_LOOP_CODE, + halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), - max_code_size: 1024, - max_head_data_size: 1024, relay_chain_height: 1, - code_upgrade_allowed: None, + hrmp_mqc_heads: Vec::new(), }, - DummyExt, parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), + sp_core::testing::TaskExecutor::new(), ); match result { - Err(parachain::wasm_executor::Error::Timeout) => {}, + Err(ValidationError::InvalidCandidate(InvalidCandidate::Timeout)) => {}, r => panic!("{:?}", r), } @@ -61,30 +55,26 @@ fn parallel_execution() { let pool2 = pool.clone(); let thread = std::thread::spawn(move || parachain::wasm_executor::validate_candidate( - INFINITE_LOOP_CODE, + halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), - max_code_size: 1024, - max_head_data_size: 1024, relay_chain_height: 1, - code_upgrade_allowed: None, + hrmp_mqc_heads: Vec::new(), }, - DummyExt, parachain::wasm_executor::ExecutionMode::RemoteTest(&pool2), + sp_core::testing::TaskExecutor::new(), ).ok()); let _ = parachain::wasm_executor::validate_candidate( - INFINITE_LOOP_CODE, + halt::wasm_binary_unwrap(), ValidationParams { block_data: BlockData(Vec::new()), parent_head: Default::default(), - max_code_size: 1024, - max_head_data_size: 1024, relay_chain_height: 1, - code_upgrade_allowed: None, + hrmp_mqc_heads: Vec::new(), }, - DummyExt, parachain::wasm_executor::ExecutionMode::RemoteTest(&pool), + sp_core::testing::TaskExecutor::new(), ); thread.join().unwrap(); // total time should be < 2 x EXECUTION_TIMEOUT_SEC diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index b38d5cc97d91967a2082708d151db49777a3537e..797b21df1209c8bc9ba4e9eb94341b4e00e4e6a9 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -1,24 +1,26 @@ [package] name = "polkadot-primitives" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" [dependencies] serde = { version = "1.0.102", optional = true, features = ["derive"] } -parity-scale-codec = { version = "1.3.0", default-features = false, features = ["bit-vec", "derive"] } +parity-scale-codec = { version = "1.3.4", default-features = false, features = ["bit-vec", "derive"] } primitives = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -system = { package = "frame-system", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } application-crypto = { package = "sp-application-crypto", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-version = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } polkadot-parachain = { path = "../parachain", default-features = false } +polkadot-core-primitives = { path = "../core-primitives", default-features = false } trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [dev-dependencies] sp-serializer = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -27,17 +29,20 @@ pretty_assertions = "0.5.1" [features] default = ["std"] std = [ + "application-crypto/std", "parity-scale-codec/std", "primitives/std", - "system/std", "inherents/std", "trie/std", "sp-api/std", "sp-std/std", "sp-version/std", "sp-staking/std", + "sp-arithmetic/std", "runtime_primitives/std", "serde", "polkadot-parachain/std", + "polkadot-core-primitives/std", "bitvec/std", + "frame-system/std", ] diff --git a/primitives/src/lib.rs b/primitives/src/lib.rs index 9167d9910e37ca7b72af1fa58f3e3314523ef298..82a5e7ca2e03988dd94d5d50616e7bfd31e11739 100644 --- a/primitives/src/lib.rs +++ b/primitives/src/lib.rs @@ -20,106 +20,5 @@ #![cfg_attr(not(feature = "std"), no_std)] -use runtime_primitives::{generic, MultiSignature}; -pub use runtime_primitives::traits::{BlakeTwo256, Hash as HashT, Verify, IdentifyAccount}; - -pub mod parachain; - -pub use parity_scale_codec::Compact; - -/// An index to a block. -pub type BlockNumber = polkadot_parachain::primitives::RelayChainBlockNumber; - -/// An instant or duration in time. -pub type Moment = u64; - -/// Alias to type for a signature for a transaction on the relay chain. This allows one of several -/// kinds of underlying crypto to be used, so isn't a fixed size when encoded. -pub type Signature = MultiSignature; - -/// Alias to the public key used for this chain, actually a `MultiSigner`. Like the signature, this -/// also isn't a fixed size when encoded, as different cryptos have different size public keys. -pub type AccountPublic = ::Signer; - -/// Alias to the opaque account ID type for this chain, actually a `AccountId32`. This is always -/// 32 bytes. -pub type AccountId = ::AccountId; - -/// The type for looking up accounts. We don't expect more than 4 billion of them. -pub type AccountIndex = u32; - -/// Identifier for a chain. 32-bit should be plenty. -pub type ChainId = u32; - -/// A hash of some data used by the relay chain. -pub type Hash = primitives::H256; - -/// Index of a transaction in the relay chain. 32-bit should be plenty. -pub type Nonce = u32; - -/// The balance of an account. 
-/// 128-bits (or 38 significant decimal figures) will allow for 10m currency (10^7) at a resolution -/// to all for one second's worth of an annualised 50% reward be paid to a unit holder (10^11 unit -/// denomination), or 10^18 total atomic units, to grow at 50%/year for 51 years (10^9 multiplier) -/// for an eventual total of 10^27 units (27 significant decimal figures). -/// We round denomination to 10^12 (12 sdf), and leave the other redundancy at the upper end so -/// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow. -pub type Balance = u128; - -/// Header type. -pub type Header = generic::Header; -/// Block type. -pub type Block = generic::Block; -/// Block ID. -pub type BlockId = generic::BlockId; - -/// Opaque, encoded, unchecked extrinsic. -pub use runtime_primitives::OpaqueExtrinsic as UncheckedExtrinsic; - -/// Custom validity errors used in Polkadot while validating transactions. -#[repr(u8)] -pub enum ValidityError { - /// The Ethereum signature is invalid. - InvalidEthereumSignature = 0, - /// The signer has no claim. - SignerHasNoClaim = 1, - /// No permission to execute the call. - NoPermission = 2, - /// An invalid statement was made for a claim. - InvalidStatement = 3, -} - -impl From for u8 { - fn from(err: ValidityError) -> Self { - err as u8 - } -} - -/// App-specific crypto used for reporting equivocation/misbehavior in BABE, -/// GRANDPA and Parachains, described in the white paper as the fisherman role. -/// Any rewards for misbehavior reporting will be paid out to this account. -pub mod fisherman { - use super::{Signature, Verify}; - use primitives::crypto::KeyTypeId; - - /// Key type for the reporting module. Used for reporting BABE, GRANDPA - /// and Parachain equivocations. - pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"fish"); - - mod app { - use application_crypto::{app_crypto, sr25519}; - app_crypto!(sr25519, super::KEY_TYPE); - } - - /// Identity of the equivocation/misbehavior reporter. - pub type FishermanId = app::Public; - - /// An `AppCrypto` type to allow submitting signed transactions using the fisherman - /// application key as signer. 
-	pub struct FishermanAppCrypto;
-	impl system::offchain::AppCrypto<<Signature as Verify>::Signer, Signature> for FishermanAppCrypto {
-		type RuntimeAppPublic = FishermanId;
-		type GenericSignature = primitives::sr25519::Signature;
-		type GenericPublic = primitives::sr25519::Public;
-	}
-}
+pub mod v0;
+pub mod v1;
diff --git a/primitives/src/parachain.rs b/primitives/src/v0.rs
similarity index 86%
rename from primitives/src/parachain.rs
rename to primitives/src/v0.rs
index a24cb5b612de83bc8dffa688639bd1bf2bed2667..9f014c80c2650356ea7910557c6cd7c005d2fcbd 100644
--- a/primitives/src/parachain.rs
+++ b/primitives/src/v0.rs
@@ -21,18 +21,21 @@
 use sp_std::prelude::*;
 use sp_std::cmp::Ordering;
 use parity_scale_codec::{Encode, Decode};
 use bitvec::vec::BitVec;
-use super::{Hash, Balance, BlockNumber};
 
 #[cfg(feature = "std")]
 use serde::{Serialize, Deserialize};
 #[cfg(feature = "std")]
-use primitives::{bytes, crypto::Pair};
+use primitives::crypto::Pair;
 use primitives::RuntimeDebug;
 use runtime_primitives::traits::{AppVerify, Block as BlockT};
 use inherents::InherentIdentifier;
 use application_crypto::KeyTypeId;
 
+pub use runtime_primitives::traits::{BlakeTwo256, Hash as HashT, Verify, IdentifyAccount};
+pub use polkadot_core_primitives::*;
+pub use parity_scale_codec::Compact;
+
 pub use polkadot_parachain::primitives::{
 	Id, ParachainDispatchOrigin, LOWEST_USER_ID, UpwardMessage,
 	HeadData, BlockData, ValidationCode,
@@ -176,7 +179,7 @@ pub struct DutyRoster {
 /// These are global parameters that apply to all parachain candidates in a block.
 #[derive(PartialEq, Eq, Clone, Encode, Decode)]
 #[cfg_attr(feature = "std", derive(Debug, Default))]
-pub struct GlobalValidationSchedule {
+pub struct GlobalValidationData {
 	/// The maximum code size permitted, in bytes.
 	pub max_code_size: u32,
 	/// The maximum head-data size permitted, in bytes.
@@ -220,6 +223,10 @@ pub struct CandidateCommitments<H = Hash> {
 	pub erasure_root: H,
 	/// New validation code.
 	pub new_validation_code: Option<ValidationCode>,
+	/// Number of `DownwardMessage`s that were processed by the Parachain.
+	///
+	/// It is expected that the Parachain processes them from first to last.
+	pub processed_downward_messages: u32,
 }
 
 /// Get a collator signature payload on a relay-parent, block-data combo.
@@ -271,7 +278,7 @@ pub struct CandidateReceipt<H = Hash> {
 	/// The hash of the PoV-block.
 	pub pov_block_hash: H,
 	/// The global validation schedule.
-	pub global_validation: GlobalValidationSchedule,
+	pub global_validation: GlobalValidationData,
 	/// The local validation data.
 	pub local_validation: LocalValidationData,
 	/// Commitments made as a result of validation.
@@ -345,7 +352,7 @@ impl Ord for CandidateReceipt {
 #[cfg_attr(feature = "std", derive(Debug, Default))]
 pub struct OmittedValidationData {
 	/// The global validation schedule.
-	pub global_validation: GlobalValidationSchedule,
+	pub global_validation: GlobalValidationData,
 	/// The local validation data.
 	pub local_validation: LocalValidationData,
 }
@@ -377,6 +384,15 @@ pub struct AbridgedCandidateReceipt<H = Hash> {
 	pub commitments: CandidateCommitments,
 }
 
+/// A candidate-receipt with commitments directly included.
+pub struct CommitedCandidateReceipt {
+	/// The descriptor of the candidate.
+	pub descriptor: CandidateDescriptor,
+
+	/// The commitments of the candidate receipt.
+	pub commitments: CandidateCommitments
+}
+
 impl<H: AsRef<[u8]> + Encode> AbridgedCandidateReceipt<H> {
 	/// Check integrity vs. provided block data.
 	pub fn check_signature(&self) -> Result<(), ()> {
@@ -397,7 +413,6 @@ impl<H: AsRef<[u8]> + Encode> AbridgedCandidateReceipt<H> {
 	/// the relay-chain block in which context it should be executed, which implies
 	/// any blockchain state that must be referenced.
 	pub fn hash(&self) -> Hash {
-		use runtime_primitives::traits::{BlakeTwo256, Hash};
 		BlakeTwo256::hash_of(self)
 	}
 }
@@ -455,8 +470,18 @@ impl AbridgedCandidateReceipt {
 			pov_block_hash: *pov_block_hash,
 		}
 	}
-}
 
+	/// Clone the relevant portions of the `AbridgedCandidateReceipt` to form a `CandidateDescriptor`.
+	pub fn to_descriptor(&self) -> CandidateDescriptor {
+		CandidateDescriptor {
+			para_id: self.parachain_index,
+			relay_parent: self.relay_parent,
+			collator: self.collator.clone(),
+			signature: self.signature.clone(),
+			pov_hash: self.pov_block_hash.clone(),
+		}
+	}
+}
 
 impl PartialOrd for AbridgedCandidateReceipt {
 	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
@@ -473,6 +498,26 @@ impl Ord for AbridgedCandidateReceipt {
 	}
 }
 
+/// A unique descriptor of the candidate receipt, in a lightweight format.
+#[derive(PartialEq, Eq, Clone, Encode, Decode)]
+#[cfg_attr(feature = "std", derive(Debug, Default))]
+pub struct CandidateDescriptor<H = Hash> {
+	/// The ID of the para this is a candidate for.
+	pub para_id: Id,
+	/// The hash of the relay-chain block this should be executed in
+	/// the context of.
+	// NOTE: the fact that the hash includes this value means that code depends
+	// on this for deduplication. Removing this field is likely to break things.
+	pub relay_parent: H,
+	/// The collator's relay-chain account ID
+	pub collator: CollatorId,
+	/// Signature on blake2-256 of components of this receipt:
+	/// The para ID, the relay parent, and the pov_hash.
+	pub signature: CollatorSignature,
+	/// The hash of the pov-block.
+	pub pov_hash: H,
+}
+
 /// A collation sent by a collator.
 #[derive(PartialEq, Eq, Clone, Encode, Decode)]
 #[cfg_attr(feature = "std", derive(Debug, Default))]
@@ -549,7 +594,6 @@ impl PoVBlock {
 	/// Compute hash of block data.
 	#[cfg(feature = "std")]
 	pub fn hash(&self) -> Hash {
-		use runtime_primitives::traits::{BlakeTwo256, Hash};
 		BlakeTwo256::hash_of(&self)
 	}
 }
@@ -568,7 +612,7 @@ pub struct AvailableData {
 
 /// A chunk of erasure-encoded block data.
 #[derive(PartialEq, Eq, Clone, Encode, Decode, Default)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))]
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash))]
 pub struct ErasureChunk {
 	/// The erasure-encoded chunk of data belonging to the candidate block.
 	pub chunk: Vec<u8>,
 	/// The index of this erasure-encoded chunk of data.
 	pub index: u32,
 	/// Proof for this chunk's branch.
 	pub proof: Vec<Vec<u8>>,
 }
 
-/// Parachain header raw bytes wrapper type.
-#[derive(PartialEq, Eq)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))]
-pub struct Header(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec<u8>);
-
-/// Activity bit field.
-#[derive(PartialEq, Eq, Clone, Default, Encode, Decode)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))]
-pub struct Activity(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec<u8>);
-
 /// Statements that can be made about parachain candidates. These are the
 /// actual values that are signed.
 #[derive(Clone, PartialEq, Eq, Encode, Decode)]
-#[cfg_attr(feature = "std", derive(Debug))]
+#[cfg_attr(feature = "std", derive(Debug, Hash))]
 pub enum CompactStatement {
 	/// Proposal of a parachain candidate.
#[codec(index = "1")] @@ -604,6 +638,18 @@ pub enum CompactStatement { Invalid(Hash), } +impl CompactStatement { + /// Get the underlying candidate hash this references. + pub fn candidate_hash(&self) -> &Hash { + match *self { + CompactStatement::Candidate(ref h) + | CompactStatement::Valid(ref h) + | CompactStatement::Invalid(ref h) + => h + } + } +} + /// A signed compact statement, suitable to be sent to the chain. pub type SignedStatement = Signed; @@ -704,88 +750,6 @@ impl FeeSchedule { } } -/// A bitfield concerning availability of backed candidates. -#[derive(PartialEq, Eq, Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Debug))] -pub struct AvailabilityBitfield(pub BitVec); - -impl From> for AvailabilityBitfield { - fn from(inner: BitVec) -> Self { - AvailabilityBitfield(inner) - } -} - -/// A bitfield signed by a particular validator about the availability of pending candidates. -pub type SignedAvailabilityBitfield = Signed; - -/// A set of signed availability bitfields. Should be sorted by validator index, ascending. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct SignedAvailabilityBitfields(pub Vec); - -/// A backed (or backable, depending on context) candidate. -// TODO: yes, this is roughly the same as AttestedCandidate. -// After https://github.com/paritytech/polkadot/issues/1250 -// they should be unified to this type. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct BackedCandidate { - /// The candidate referred to. - pub candidate: AbridgedCandidateReceipt, - /// The validity votes themselves, expressed as signatures. - pub validity_votes: Vec, - /// The indices of the validators within the group, expressed as a bitfield. - pub validator_indices: BitVec, -} - -/// Verify the backing of the given candidate. -/// -/// Provide a lookup from the index of a validator within the group assigned to this para, -/// as opposed to the index of the validator within the overall validator set, as well as -/// the number of validators in the group. -/// -/// Also provide the signing context. -/// -/// Returns either an error, indicating that one of the signatures was invalid or that the index -/// was out-of-bounds, or the number of signatures checked. -pub fn check_candidate_backing + Encode>( - backed: &BackedCandidate, - signing_context: &SigningContext, - group_len: usize, - validator_lookup: impl Fn(usize) -> Option, -) -> Result { - if backed.validator_indices.len() != group_len { - return Err(()) - } - - if backed.validity_votes.len() > group_len { - return Err(()) - } - - // this is known, even in runtime, to be blake2-256. - let hash: Hash = backed.candidate.hash(); - - let mut signed = 0; - for ((val_in_group_idx, _), attestation) in backed.validator_indices.iter().enumerate() - .filter(|(_, signed)| **signed) - .zip(backed.validity_votes.iter()) - { - let validator_id = validator_lookup(val_in_group_idx).ok_or(())?; - let payload = attestation.signed_payload(hash.clone(), signing_context); - let sig = attestation.signature(); - - if sig.verify(&payload[..], &validator_id) { - signed += 1; - } else { - return Err(()) - } - } - - if signed != backed.validity_votes.len() { - return Err(()) - } - - Ok(signed) -} - sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. #[api_version(3)] @@ -798,7 +762,7 @@ sp_api::decl_runtime_apis! 
 		fn active_parachains() -> Vec<(Id, Option<(CollatorId, Retriable)>)>;
 		/// Get the global validation schedule that all parachains should
 		/// be validated under.
-		fn global_validation_schedule() -> GlobalValidationSchedule;
+		fn global_validation_data() -> GlobalValidationData;
 		/// Get the local validation data for a particular parachain.
 		fn local_validation_data(id: Id) -> Option<LocalValidationData>;
 		/// Get the given parachain's head code blob.
@@ -808,6 +772,8 @@ sp_api::decl_runtime_apis! {
 			-> Option<Vec<AbridgedCandidateReceipt>>;
 		/// Get a `SigningContext` with current `SessionIndex` and parent hash.
 		fn signing_context() -> SigningContext;
+		/// Get the `DownwardMessage`s for the given parachain.
+		fn downward_messages(id: Id) -> Vec<DownwardMessage>;
 	}
 }
@@ -849,7 +815,7 @@ impl<T: Encode> EncodeAs<T> for T {
 ///
 /// Note that the internal fields are not public; they are all accessible by immutable getters.
 /// This reduces the chance that they are accidentally mutated, invalidating the signature.
-#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
+#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)]
 pub struct Signed<Payload, RealPayload = Payload> {
 	/// The payload is part of the signed data. The rest is the signing context,
 	/// which is known both at signing and at validation.
 	payload: Payload,
@@ -874,6 +840,27 @@ impl<Payload: EncodeAs<RealPayload>, RealPayload: Encode> Signed<Payload, RealPayload> {
+	/// Create a new `Signed` by checking the given signature against the payload.
+	#[cfg(feature = "std")]
+	pub fn new(
+		payload: Payload,
+		validator_index: ValidatorIndex,
+		signature: ValidatorSignature,
+		context: &SigningContext,
+		key: &ValidatorId,
+	) -> Option<Self> {
+		let s = Self {
+			payload,
+			validator_index,
+			signature,
+			real_payload: std::marker::PhantomData,
+		};
+
+		s.check_signature(context, key).ok()?;
+
+		Some(s)
+	}
+
 	/// Sign this payload with the given context and key, storing the validator index.
 	#[cfg(feature = "std")]
 	pub fn sign(
@@ -926,6 +913,55 @@ impl<Payload: EncodeAs<RealPayload>, RealPayload: Encode> Signed<Payload, RealPayload> {
 
+/// Custom validity errors used in Polkadot while validating transactions.
+#[repr(u8)]
+pub enum ValidityError {
+	/// The Ethereum signature is invalid.
+	InvalidEthereumSignature = 0,
+	/// The signer has no claim.
+	SignerHasNoClaim = 1,
+	/// No permission to execute the call.
+	NoPermission = 2,
+	/// An invalid statement was made for a claim.
+	InvalidStatement = 3,
+}
+
+impl From<ValidityError> for u8 {
+	fn from(err: ValidityError) -> Self {
+		err as u8
+	}
+}
+
+/// App-specific crypto used for reporting equivocation/misbehavior in BABE,
+/// GRANDPA and Parachains, described in the white paper as the fisherman role.
+/// Any rewards for misbehavior reporting will be paid out to this account.
+pub mod fisherman {
+	use super::{Signature, Verify};
+	use primitives::crypto::KeyTypeId;
+
+	/// Key type for the reporting module. Used for reporting BABE, GRANDPA
+	/// and Parachain equivocations.
+	pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"fish");
+
+	mod app {
+		use application_crypto::{app_crypto, sr25519};
+		app_crypto!(sr25519, super::KEY_TYPE);
+	}
+
+	/// Identity of the equivocation/misbehavior reporter.
+	pub type FishermanId = app::Public;
+
+	/// An `AppCrypto` type to allow submitting signed transactions using the fisherman
+	/// application key as signer.
+	pub struct FishermanAppCrypto;
+	impl frame_system::offchain::AppCrypto<<Signature as Verify>::Signer, Signature> for FishermanAppCrypto {
+		type RuntimeAppPublic = FishermanId;
+		type GenericSignature = primitives::sr25519::Signature;
+		type GenericPublic = primitives::sr25519::Public;
+	}
+}
+
+
 #[cfg(test)]
 mod tests {
 	use super::*;
diff --git a/primitives/src/v1.rs b/primitives/src/v1.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c2a8ec43d5a7131950248e377cb26efc882c0d31
--- /dev/null
+++ b/primitives/src/v1.rs
@@ -0,0 +1,749 @@
+// Copyright 2017-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! V1 Primitives. + +use sp_std::prelude::*; +use parity_scale_codec::{Encode, Decode}; +use bitvec::vec::BitVec; + +use primitives::RuntimeDebug; +use runtime_primitives::traits::AppVerify; +use inherents::InherentIdentifier; +use sp_arithmetic::traits::{BaseArithmetic, Saturating, Zero}; + +pub use runtime_primitives::traits::{BlakeTwo256, Hash as HashT}; + +// Export some core primitives. +pub use polkadot_core_primitives::v1::{ + BlockNumber, Moment, Signature, AccountPublic, AccountId, AccountIndex, + ChainId, Hash, Nonce, Balance, Header, Block, BlockId, UncheckedExtrinsic, + Remark, DownwardMessage, +}; + +// Export some polkadot-parachain primitives +pub use polkadot_parachain::primitives::{ + Id, ParachainDispatchOrigin, LOWEST_USER_ID, UpwardMessage, HeadData, BlockData, + ValidationCode, +}; + +// Export some basic parachain primitives from v0. +pub use crate::v0::{ + CollatorId, CollatorSignature, PARACHAIN_KEY_TYPE_ID, ValidatorId, ValidatorIndex, + ValidatorSignature, SigningContext, Signed, ValidityAttestation, + CompactStatement, SignedStatement, ErasureChunk, EncodeAs, +}; + +// More exports from v0 for std. +#[cfg(feature = "std")] +pub use crate::v0::{ValidatorPair, CollatorPair}; + +pub use sp_staking::SessionIndex; + +/// Unique identifier for the Inclusion Inherent +pub const INCLUSION_INHERENT_IDENTIFIER: InherentIdentifier = *b"inclusn0"; + +/// Get a collator signature payload on a relay-parent, block-data combo. +pub fn collator_signature_payload>( + relay_parent: &H, + para_id: &Id, + persisted_validation_data_hash: &Hash, + pov_hash: &Hash, +) -> [u8; 100] { + // 32-byte hash length is protected in a test below. + let mut payload = [0u8; 100]; + + payload[0..32].copy_from_slice(relay_parent.as_ref()); + u32::from(*para_id).using_encoded(|s| payload[32..32 + s.len()].copy_from_slice(s)); + payload[36..68].copy_from_slice(persisted_validation_data_hash.as_ref()); + payload[68..100].copy_from_slice(pov_hash.as_ref()); + + payload +} + +fn check_collator_signature>( + relay_parent: &H, + para_id: &Id, + persisted_validation_data_hash: &Hash, + pov_hash: &Hash, + collator: &CollatorId, + signature: &CollatorSignature, +) -> Result<(),()> { + let payload = collator_signature_payload( + relay_parent, + para_id, + persisted_validation_data_hash, + pov_hash, + ); + + if signature.verify(&payload[..], collator) { + Ok(()) + } else { + Err(()) + } +} + +/// A unique descriptor of the candidate receipt. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default, Hash))] +pub struct CandidateDescriptor { + /// The ID of the para this is a candidate for. + pub para_id: Id, + /// The hash of the relay-chain block this is executed in the context of. + pub relay_parent: H, + /// The collator's sr25519 public key. + pub collator: CollatorId, + /// The blake2-256 hash of the persisted validation data. This is extra data derived from + /// relay-chain state which may vary based on bitfields included before the candidate. + /// Thus it cannot be derived entirely from the relay-parent. + pub persisted_validation_data_hash: Hash, + /// The blake2-256 hash of the pov. 
+ pub pov_hash: Hash, + /// Signature on blake2-256 of components of this receipt: + /// The parachain index, the relay parent, the validation data hash, and the pov_hash. + pub signature: CollatorSignature, +} + +impl> CandidateDescriptor { + /// Check the signature of the collator within this descriptor. + pub fn check_collator_signature(&self) -> Result<(), ()> { + check_collator_signature( + &self.relay_parent, + &self.para_id, + &self.persisted_validation_data_hash, + &self.pov_hash, + &self.collator, + &self.signature, + ) + } +} + +/// A candidate-receipt. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default))] +pub struct CandidateReceipt { + /// The descriptor of the candidate. + pub descriptor: CandidateDescriptor, + /// The hash of the encoded commitments made as a result of candidate execution. + pub commitments_hash: Hash, +} + +impl CandidateReceipt { + /// Get a reference to the candidate descriptor. + pub fn descriptor(&self) -> &CandidateDescriptor { + &self.descriptor + } + + /// Computes the blake2-256 hash of the receipt. + pub fn hash(&self) -> Hash where H: Encode { + BlakeTwo256::hash_of(self) + } +} + +/// All data pertaining to the execution of a para candidate. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default))] +pub struct FullCandidateReceipt { + /// The inner candidate receipt. + pub inner: CandidateReceipt, + /// The validation data derived from the relay-chain state at that + /// point. The hash of the persisted validation data should + /// match the `persisted_validation_data_hash` in the descriptor + /// of the receipt. + pub validation_data: ValidationData, +} + +/// A candidate-receipt with commitments directly included. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default, Hash))] +pub struct CommittedCandidateReceipt { + /// The descriptor of the candidate. + pub descriptor: CandidateDescriptor, + /// The commitments of the candidate receipt. + pub commitments: CandidateCommitments, +} + +impl CommittedCandidateReceipt { + /// Get a reference to the candidate descriptor. + pub fn descriptor(&self) -> &CandidateDescriptor { + &self.descriptor + } +} + +impl CommittedCandidateReceipt { + /// Transforms this into a plain CandidateReceipt. + pub fn to_plain(&self) -> CandidateReceipt { + CandidateReceipt { + descriptor: self.descriptor.clone(), + commitments_hash: self.commitments.hash(), + } + } + + /// Computes the hash of the committed candidate receipt. + /// + /// This computes the canonical hash, not the hash of the directly encoded data. + /// Thus this is a shortcut for `candidate.to_plain().hash()`. + pub fn hash(&self) -> Hash where H: Encode { + self.to_plain().hash() + } +} + +impl PartialOrd for CommittedCandidateReceipt { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for CommittedCandidateReceipt { + fn cmp(&self, other: &Self) -> sp_std::cmp::Ordering { + // TODO: compare signatures or something more sane + // https://github.com/paritytech/polkadot/issues/222 + self.descriptor().para_id.cmp(&other.descriptor().para_id) + .then_with(|| self.commitments.head_data.cmp(&other.commitments.head_data)) + } +} + +/// The validation data provide information about how to validate both the inputs and +/// outputs of a candidate. +/// +/// There are two types of validation data: persisted and transient. 
+/// Their respective sections of the guide elaborate on their functionality in more detail. +/// +/// This information is derived from the chain state and will vary from para to para, +/// although some of the fields may be the same for every para. +/// +/// Persisted validation data are generally derived from some relay-chain state to form inputs +/// to the validation function, and as such need to be persisted by the availability system to +/// avoid dependence on availability of the relay-chain state. The backing phase of the +/// inclusion pipeline ensures that everything that is included in a valid fork of the +/// relay-chain already adheres to the transient constraints. +/// +/// The validation data also serve the purpose of giving collators a means of ensuring that +/// their produced candidate and the commitments submitted to the relay-chain alongside it +/// will pass the checks done by the relay-chain when backing, and give validators +/// the same understanding when determining whether to second or attest to a candidate. +/// +/// Since the commitments of the validation function are checked by the +/// relay-chain, secondary checkers can rely on the invariant that the relay-chain +/// only includes para-blocks for which these checks have already been done. As such, +/// there is no need for the validation data used to inform validators and collators about +/// the checks the relay-chain will perform to be persisted by the availability system. +/// Nevertheless, we expose it so the backing validators can validate the outputs of a +/// candidate before voting to submit it to the relay-chain and so collators can +/// collate candidates that satisfy the criteria implied by these transient validation data. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default))] +pub struct ValidationData { + /// The persisted validation data. + pub persisted: PersistedValidationData, + /// The transient validation data. + pub transient: TransientValidationData, +} + +/// Validation data that needs to be persisted for secondary checkers. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default))] +pub struct PersistedValidationData { + /// The parent head-data. + pub parent_head: HeadData, + /// The relay-chain block number this is in the context of. + pub block_number: N, + /// The list of MQC heads for the inbound channels paired with the sender para ids. This + /// vector is sorted ascending by the para id and doesn't contain multiple entries with the same + /// sender. + pub hrmp_mqc_heads: Vec<(Id, Hash)>, +} + +impl PersistedValidationData { + /// Compute the blake2-256 hash of the persisted validation data. + pub fn hash(&self) -> Hash { + BlakeTwo256::hash_of(self) + } +} + +/// Validation data for checking outputs of the validation-function. +/// As such, they also inform the collator about how to construct the candidate. +/// +/// These are transient because they are not necessary beyond the point where the +/// candidate is backed. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default))] +pub struct TransientValidationData { + /// The maximum code size permitted, in bytes. + pub max_code_size: u32, + /// The maximum head-data size permitted, in bytes. + pub max_head_data_size: u32, + /// The balance of the parachain at the moment of validation. + pub balance: Balance, + /// Whether the parachain is allowed to upgrade its validation code.
+ /// + /// This is `Some` if so, and contains the number of the minimum relay-chain + /// height at which the upgrade will be applied, if an upgrade is signaled + /// now. + /// + /// A parachain should enact its side of the upgrade at the end of the first + /// parablock executing in the context of a relay-chain block with at least this + /// height. This may be equal to the current perceived relay-chain block height, in + /// which case the code upgrade should be applied at the end of the signaling + /// block. + pub code_upgrade_allowed: Option, +} + +/// Commitments made in a `CandidateReceipt`. Many of these are outputs of validation. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default, Hash))] +pub struct CandidateCommitments { + /// Fees paid from the chain to the relay chain validators. + pub fees: Balance, + /// Messages destined to be interpreted by the Relay chain itself. + pub upward_messages: Vec, + /// The root of a block's erasure encoding Merkle tree. + pub erasure_root: Hash, + /// New validation code. + pub new_validation_code: Option, + /// The head-data produced as a result of execution. + pub head_data: HeadData, +} + +impl CandidateCommitments { + /// Compute the blake2-256 hash of the commitments. + pub fn hash(&self) -> Hash { + BlakeTwo256::hash_of(self) + } +} + +/// A Proof-of-Validity +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug))] +pub struct PoV { + /// The block witness data. + pub block_data: BlockData, +} + +impl PoV { + /// Get the blake2-256 hash of the PoV. + #[cfg(feature = "std")] + pub fn hash(&self) -> Hash { + BlakeTwo256::hash_of(self) + } +} + +/// A bitfield concerning availability of backed candidates. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +pub struct AvailabilityBitfield(pub BitVec); + +impl From> for AvailabilityBitfield { + fn from(inner: BitVec) -> Self { + AvailabilityBitfield(inner) + } +} + +/// A bitfield signed by a particular validator about the availability of pending candidates. +pub type SignedAvailabilityBitfield = Signed; + +/// A set of signed availability bitfields. Should be sorted by validator index, ascending. +pub type SignedAvailabilityBitfields = Vec; + +/// A backed (or backable, depending on context) candidate. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct BackedCandidate { + /// The candidate referred to. + pub candidate: CommittedCandidateReceipt, + /// The validity votes themselves, expressed as signatures. + pub validity_votes: Vec, + /// The indices of the validators within the group, expressed as a bitfield. + pub validator_indices: BitVec, +} + +impl BackedCandidate { + /// Get a reference to the descriptor of the para. + pub fn descriptor(&self) -> &CandidateDescriptor { + &self.candidate.descriptor + } +} + +/// Verify the backing of the given candidate. +/// +/// Provide a lookup from the index of a validator within the group assigned to this para, +/// as opposed to the index of the validator within the overall validator set, as well as +/// the number of validators in the group. +/// +/// Also provide the signing context. +/// +/// Returns either an error, indicating that one of the signatures was invalid or that the index +/// was out-of-bounds, or the number of signatures checked. 
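+///
+/// A usage sketch (illustrative only; `group_keys` is a hypothetical
+/// `Vec<ValidatorId>` holding the assigned group's keys, ordered by index
+/// within the group):
+///
+/// ```ignore
+/// let signed_count = check_candidate_backing(
+/// 	&backed,
+/// 	&signing_context,
+/// 	group_keys.len(),
+/// 	|i| group_keys.get(i).cloned(),
+/// );
+/// // On success, every provided validity vote was checked.
+/// assert_eq!(signed_count, Ok(backed.validity_votes.len()));
+/// ```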
+pub fn check_candidate_backing + Clone + Encode>( + backed: &BackedCandidate, + signing_context: &SigningContext, + group_len: usize, + validator_lookup: impl Fn(usize) -> Option, +) -> Result { + if backed.validator_indices.len() != group_len { + return Err(()) + } + + if backed.validity_votes.len() > group_len { + return Err(()) + } + + // this is known, even in runtime, to be blake2-256. + let hash: Hash = backed.candidate.hash(); + + let mut signed = 0; + for ((val_in_group_idx, _), attestation) in backed.validator_indices.iter().enumerate() + .filter(|(_, signed)| **signed) + .zip(backed.validity_votes.iter()) + { + let validator_id = validator_lookup(val_in_group_idx).ok_or(())?; + let payload = attestation.signed_payload(hash.clone(), signing_context); + let sig = attestation.signature(); + + if sig.verify(&payload[..], &validator_id) { + signed += 1; + } else { + return Err(()) + } + } + + if signed != backed.validity_votes.len() { + return Err(()) + } + + Ok(signed) +} + +/// The unique (during session) index of a core. +#[derive(Encode, Decode, Default, PartialOrd, Ord, Eq, PartialEq, Clone, Copy)] +#[cfg_attr(feature = "std", derive(Debug))] +pub struct CoreIndex(pub u32); + +impl From for CoreIndex { + fn from(i: u32) -> CoreIndex { + CoreIndex(i) + } +} + +/// The unique (during session) index of a validator group. +#[derive(Encode, Decode, Default, Clone, Copy)] +#[cfg_attr(feature = "std", derive(Eq, Hash, PartialEq, Debug))] +pub struct GroupIndex(pub u32); + +impl From for GroupIndex { + fn from(i: u32) -> GroupIndex { + GroupIndex(i) + } +} + +/// A claim on authoring the next block for a given parathread. +#[derive(Clone, Encode, Decode, Default)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub struct ParathreadClaim(pub Id, pub CollatorId); + +/// An entry tracking a claim to ensure it does not pass the maximum number of retries. +#[derive(Clone, Encode, Decode, Default)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub struct ParathreadEntry { + /// The claim. + pub claim: ParathreadClaim, + /// Number of retries. + pub retries: u32, +} + +/// What is occupying a specific availability core. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub enum CoreOccupied { + /// A parathread. + Parathread(ParathreadEntry), + /// A parachain. + Parachain, +} + +/// This is the data we keep available for each candidate included in the relay chain. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub struct AvailableData { + /// The Proof-of-Validity of the candidate. + pub pov: PoV, + /// The persisted validation data needed for secondary checks. + pub validation_data: PersistedValidationData, +} + +/// A helper data-type for tracking validator-group rotations. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub struct GroupRotationInfo { + /// The block number where the session started. + pub session_start_block: N, + /// How often groups rotate. 0 means never. + pub group_rotation_frequency: N, + /// The current block number. + pub now: N, +} + +impl GroupRotationInfo { + /// Returns the index of the group needed to validate the core at the given index, assuming + /// the given number of cores. + /// + /// `core_index` should be less than `cores`, which is capped at u32::max().
+ pub fn group_for_core(&self, core_index: CoreIndex, cores: usize) -> GroupIndex { + if self.group_rotation_frequency == 0 { return GroupIndex(core_index.0) } + if cores == 0 { return GroupIndex(0) } + + let cores = sp_std::cmp::min(cores, u32::max_value() as usize); + let blocks_since_start = self.now.saturating_sub(self.session_start_block); + let rotations = blocks_since_start / self.group_rotation_frequency; + + let idx = (core_index.0 as usize + rotations as usize) % cores; + GroupIndex(idx as u32) + } +} + +impl GroupRotationInfo { + /// Returns the block number of the next rotation after the current block. If the current block + /// is 10 and the rotation frequency is 5, this should return 15. + /// + /// If the group rotation frequency is 0, returns 0. + pub fn next_rotation_at(&self) -> N { + if self.group_rotation_frequency.is_zero() { return Zero::zero() } + + let cycle_once = self.now + self.group_rotation_frequency; + cycle_once - ( + cycle_once.saturating_sub(self.session_start_block) % self.group_rotation_frequency + ) + } + + /// Returns the block number of the last rotation before or including the current block. If the + /// current block is 10 and the rotation frequency is 5, this should return 10. + /// + /// If the group rotation frequency is 0, returns 0. + pub fn last_rotation_at(&self) -> N { + if self.group_rotation_frequency.is_zero() { return Zero::zero() } + self.now - ( + self.now.saturating_sub(self.session_start_block) % self.group_rotation_frequency + ) + } +} + +/// Information about a core which is currently occupied. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub struct OccupiedCore { + /// The ID of the para occupying the core. + pub para_id: Id, + /// If this core is freed by availability, this is the assignment that is next up on this + /// core, if any. None if there is nothing queued for this core. + pub next_up_on_available: Option, + /// The relay-chain block number this began occupying the core at. + pub occupied_since: N, + /// The relay-chain block this will time-out at. + pub time_out_at: N, + /// If this core is freed by being timed-out, this is the assignment that is next up on this + /// core. None if there is nothing queued for this core or there is no possibility of timing + /// out. + pub next_up_on_time_out: Option, + /// A bitfield with 1 bit for each validator in the set. `1` bits mean that the corresponding + /// validator has attested to availability on-chain. A 2/3+ majority of `1` bits means that + /// this will be available. + pub availability: BitVec, + /// The group assigned to distribute availability pieces of this candidate. + pub group_responsible: GroupIndex, +} + +/// Information about a para scheduled for, or next up on, an availability core. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug, Default))] +pub struct ScheduledCore { + /// The ID of a para scheduled. + pub para_id: Id, + /// The collator required to author the block, if any. + pub collator: Option, +} + +/// The state of a particular availability core. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub enum CoreState { + /// The core is currently occupied. + #[codec(index = "0")] + Occupied(OccupiedCore), + /// The core is currently free, with a para scheduled and given the opportunity + /// to occupy. + /// + /// If a particular Collator is required to author this block, that is also present in this + /// variant.
+ #[codec(index = "1")] + Scheduled(ScheduledCore), + /// The core is currently free and there is nothing scheduled. This can be the case for parathread + /// cores when there are no parathread blocks queued. Parachain cores will never be left idle. + #[codec(index = "2")] + Free, +} + +impl CoreState { + /// If this core state has a `para_id`, return it. + pub fn para_id(&self) -> Option { + match self { + Self::Occupied(OccupiedCore { para_id, ..}) => Some(*para_id), + Self::Scheduled(ScheduledCore { para_id, .. }) => Some(*para_id), + Self::Free => None, + } + } +} + +/// An assumption being made about the state of an occupied core. +#[derive(Clone, Copy, Encode, Decode)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub enum OccupiedCoreAssumption { + /// The candidate occupying the core was made available and included to free the core. + #[codec(index = "0")] + Included, + /// The candidate occupying the core timed out and freed the core without advancing the para. + #[codec(index = "1")] + TimedOut, + /// The core was not occupied to begin with. + #[codec(index = "2")] + Free, +} + +/// An event concerning a candidate. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +pub enum CandidateEvent { + /// This candidate receipt was backed in the most recent block. + #[codec(index = "0")] + CandidateBacked(CandidateReceipt, HeadData), + /// This candidate receipt was included and became a parablock at the most recent block. + #[codec(index = "1")] + CandidateIncluded(CandidateReceipt, HeadData), + /// This candidate receipt was not made available in time and timed out. + #[codec(index = "2")] + CandidateTimedOut(CandidateReceipt, HeadData), +} + +sp_api::decl_runtime_apis! { + /// The API for querying the state of parachains on-chain. + pub trait ParachainHost { + /// Get the current validators. + fn validators() -> Vec; + + /// Returns the validator groups and rotation info localized based on the block whose state + /// this is invoked on. Note that `now` in the `GroupRotationInfo` should be the successor of + /// the number of the block. + fn validator_groups() -> (Vec>, GroupRotationInfo); + + /// Yields information on all availability cores. Cores are either free or occupied. Free + /// cores can have paras assigned to them. + fn availability_cores() -> Vec>; + + /// Yields the full validation data for the given ParaId along with an assumption that + /// should be used if the para currently occupies a core. + /// + /// Returns `None` if either the para is not registered or the assumption is `Free` + /// and the para already occupies a core. + fn full_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option>; + + /// Yields the persisted validation data for the given ParaId along with an assumption that + /// should be used if the para currently occupies a core. + /// + /// Returns `None` if either the para is not registered or the assumption is `Free` + /// and the para already occupies a core. + fn persisted_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option>; + + /// Returns the session index expected at a child of the block. + /// + /// This can be used to instantiate a `SigningContext`. + fn session_index_for_child() -> SessionIndex; + + /// Fetch the validation code used by a para, making the given `OccupiedCoreAssumption`. + /// + /// Returns `None` if either the para is not registered or the assumption is `Free` + /// and the para already occupies a core.
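+ ///
+ /// For example, a collator building on top of a candidate that is pending
+ /// availability would typically pass `OccupiedCoreAssumption::Included`,
+ /// yielding the code as it would be after that candidate is enacted.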
+ fn validation_code(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option; + + /// Get the receipt of a candidate pending availability. This returns `Some` for any paras + /// assigned to occupied cores in `availability_cores` and `None` otherwise. + fn candidate_pending_availability(para_id: Id) -> Option>; + + /// Get a vector of events concerning candidates that occurred within a block. + // NOTE: this needs to skip block initialization as events are wiped within block + // initialization. + #[skip_initialize_block] + fn candidate_events() -> Vec>; + } +} + +/// Custom validity errors used in Polkadot while validating transactions. +#[repr(u8)] +pub enum ValidityError { + /// The Ethereum signature is invalid. + InvalidEthereumSignature = 0, + /// The signer has no claim. + SignerHasNoClaim = 1, + /// No permission to execute the call. + NoPermission = 2, + /// An invalid statement was made for a claim. + InvalidStatement = 3, +} + +impl From for u8 { + fn from(err: ValidityError) -> Self { + err as u8 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn group_rotation_info_calculations() { + let info = GroupRotationInfo { + session_start_block: 10u32, + now: 15, + group_rotation_frequency: 5, + }; + + assert_eq!(info.next_rotation_at(), 20); + assert_eq!(info.last_rotation_at(), 15); + + let info = GroupRotationInfo { + session_start_block: 10u32, + now: 11, + group_rotation_frequency: 0, + }; + + assert_eq!(info.next_rotation_at(), 0); + assert_eq!(info.last_rotation_at(), 0); + } + + #[test] + fn collator_signature_payload_is_valid() { + // if this fails, collator signature verification code has to be updated. + let h = Hash::default(); + assert_eq!(h.as_ref().len(), 32); + + let _payload = collator_signature_payload( + &Hash::from([1; 32]), + &5u32.into(), + &Hash::from([2; 32]), + &Hash::from([3; 32]), + ); + } +} diff --git a/roadmap/implementors-guide/.gitignore b/roadmap/implementers-guide/.gitignore similarity index 100% rename from roadmap/implementors-guide/.gitignore rename to roadmap/implementers-guide/.gitignore diff --git a/roadmap/implementors-guide/README.md b/roadmap/implementers-guide/README.md similarity index 88% rename from roadmap/implementors-guide/README.md rename to roadmap/implementers-guide/README.md index 909e5055974cf4c8ffd84e6d059937502dd539a2..0bbc07746912307f05ac1eb4e80f154b4cae6598 100644 --- a/roadmap/implementors-guide/README.md +++ b/roadmap/implementers-guide/README.md @@ -4,6 +4,6 @@ The implementers' guide is compiled from several source files with [mdBook](http ```sh cargo install mdbook mdbook-linkcheck mdbook-graphviz -mdbook serve roadmap/implementors-guide +mdbook serve roadmap/implementers-guide open http://localhost:3000 ``` diff --git a/roadmap/implementors-guide/book.toml b/roadmap/implementers-guide/book.toml similarity index 100% rename from roadmap/implementors-guide/book.toml rename to roadmap/implementers-guide/book.toml diff --git a/roadmap/implementors-guide/src/README.md b/roadmap/implementers-guide/src/README.md similarity index 79% rename from roadmap/implementors-guide/src/README.md rename to roadmap/implementers-guide/src/README.md index 9bc4430bda151d2cebbe6b32aeab223e00d214db..64bff6cb6e2d49d5bb7767a9fd3dcf834dbbbdff 100644 --- a/roadmap/implementors-guide/src/README.md +++ b/roadmap/implementers-guide/src/README.md @@ -1,5 +1,5 @@ # Preamble -This document aims to describe the purpose, functionality, and implementation of a host for Polkadot's _parachains_. 
It is not for the implementor of a specific parachain but rather for the implementor of the Parachain Host, which provides security and advancement for constituent parachains. In practice, this is for the implementors of Polkadot. +This document aims to describe the purpose, functionality, and implementation of a host for Polkadot's _parachains_. It is not for the implementer of a specific parachain but rather for the implementer of the Parachain Host, which provides security and advancement for constituent parachains. In practice, this is for the implementers of Polkadot. There are a number of other documents describing the research in more detail. All referenced documents will be linked here and should be read alongside this document for the best understanding of the full picture. However, this is the only document which aims to describe key aspects of Polkadot's particular instantiation of much of that research down to low-level technical details and software architecture. diff --git a/roadmap/implementors-guide/src/SUMMARY.md b/roadmap/implementers-guide/src/SUMMARY.md similarity index 72% rename from roadmap/implementors-guide/src/SUMMARY.md rename to roadmap/implementers-guide/src/SUMMARY.md index b5dc3840c605eaf16f7e79c5e6ce7e0d38fae08f..dce85af47d422aea6d4a5f0500c715d2ebcd511a 100644 --- a/roadmap/implementors-guide/src/SUMMARY.md +++ b/roadmap/implementers-guide/src/SUMMARY.md @@ -5,6 +5,7 @@ - [Whence Parachains](whence-parachains.md) - [Parachains Overview](parachains-overview.md) - [Architecture Overview](architecture.md) +- [Messaging Overview](messaging.md) - [Runtime Architecture](runtime/README.md) - [Initializer Module](runtime/initializer.md) - [Configuration Module](runtime/configuration.md) @@ -14,9 +15,22 @@ - [InclusionInherent Module](runtime/inclusioninherent.md) - [Validity Module](runtime/validity.md) - [Router Module](runtime/router.md) +- [Runtime APIs](runtime-api/README.md) + - [Validators](runtime-api/validators.md) + - [Validator Groups](runtime-api/validator-groups.md) + - [Availability Cores](runtime-api/availability-cores.md) + - [Persisted Validation Data](runtime-api/persisted-validation-data.md) + - [Full Validation Data](runtime-api/full-validation-data.md) + - [Session Index](runtime-api/session-index.md) + - [Validation Code](runtime-api/validation-code.md) + - [Candidate Pending Availability](runtime-api/candidate-pending-availability.md) + - [Candidate Events](runtime-api/candidate-events.md) - [Node Architecture](node/README.md) - [Subsystems and Jobs](node/subsystems-and-jobs.md) - [Overseer](node/overseer.md) + - [Collators](node/collators/README.md) + - [Collation Generation](node/collators/collation-generation.md) + - [Collator Protocol](node/collators/collator-protocol.md) - [Backing Subsystems](node/backing/README.md) - [Candidate Selection](node/backing/candidate-selection.md) - [Candidate Backing](node/backing/candidate-backing.md) @@ -26,10 +40,9 @@ - [Availability Distribution](node/availability/availability-distribution.md) - [Bitfield Distribution](node/availability/bitfield-distribution.md) - [Bitfield Signing](node/availability/bitfield-signing.md) - - [Collators](node/collators/README.md) - - [Collation Generation](node/collators/collation-generation.md) - - [Collation Distribution](node/collators/collation-distribution.md) - [Validity](node/validity/README.md) + - [Approvals](node/validity/approvals.md) + - [Approval assignments](node/validity/assignmets.md) - [Utility Subsystems](node/utility/README.md) - [Availability 
Store](node/utility/availability-store.md) - [Candidate Validation](node/utility/candidate-validation.md) @@ -38,6 +51,7 @@ - [Misbehavior Arbitration](node/utility/misbehavior-arbitration.md) - [Peer Set Manager](node/utility/peer-set-manager.md) - [Runtime API Requests](node/utility/runtime-api.md) + - [Chain API Requests](node/utility/chain-api.md) - [Data Structures and Types](types/README.md) - [Candidate](types/candidate.md) - [Backing](types/backing.md) @@ -46,6 +60,7 @@ - [Runtime](types/runtime.md) - [Chain](types/chain.md) - [Messages](types/messages.md) + - [Network](types/network.md) [Glossary](glossary.md) [Further Reading](further-reading.md) diff --git a/roadmap/implementors-guide/src/architecture.md b/roadmap/implementers-guide/src/architecture.md similarity index 100% rename from roadmap/implementors-guide/src/architecture.md rename to roadmap/implementers-guide/src/architecture.md diff --git a/roadmap/implementors-guide/src/further-reading.md b/roadmap/implementers-guide/src/further-reading.md similarity index 100% rename from roadmap/implementors-guide/src/further-reading.md rename to roadmap/implementers-guide/src/further-reading.md diff --git a/roadmap/implementors-guide/src/glossary.md b/roadmap/implementers-guide/src/glossary.md similarity index 74% rename from roadmap/implementors-guide/src/glossary.md rename to roadmap/implementers-guide/src/glossary.md index 21cf5fc291c7767c23011cd1a8fd1d7e4dc4d7c5..63294d1d77fd69bd538938a04a505b7cd510fa7a 100644 --- a/roadmap/implementors-guide/src/glossary.md +++ b/roadmap/implementers-guide/src/glossary.md @@ -7,12 +7,15 @@ Here you can find definitions of a bunch of jargon, usually specific to the Polk - Backed Candidate: A Backable Candidate noted in a relay-chain block - Backing: A set of statements proving that a Parachain Candidate is backable. - Collator: A node who generates Proofs-of-Validity (PoV) for blocks of a specific parachain. +- DMP: (Downward Message Passing). Message passing from the relay-chain to a parachain. - Extrinsic: An element of a relay-chain block which triggers a specific entry-point of a runtime module with given arguments. - GRANDPA: (Ghost-based Recursive ANcestor Deriving Prefix Agreement). The algorithm validators use to guarantee finality of the Relay Chain. +- HRMP: (Horizontally Relay-routed Message Passing). A mechanism for message passing between parachains (hence horizontal) that leverages the relay-chain storage. Predates XCMP. - Inclusion Pipeline: The set of steps taken to carry a Parachain Candidate from authoring, to backing, to availability and full inclusion in an active fork of its parachain. - Module: A component of the Runtime logic, encapsulating storage, routines, and entry-points. - Module Entry Point: A recipient of new information presented to the Runtime. This may trigger routines. - Module Routine: A piece of code executed within a module by block initialization, closing, or upon an entry point being triggered. This may execute computation, and read or write storage. +- MQC: (Message Queue Chain). A cryptographic data structure that resembles an append-only linked list which doesn't store original values but only their hashes. The whole structure is described by a single hash, referred to as a "head". When a value is appended, its contents are hashed with the previous head, creating a hash that becomes the new head. - Node: A participant in the Polkadot network, who follows the protocols of communication and connection to other nodes.
Nodes form a peer-to-peer network topology without a central authority. - Parachain Candidate, or Candidate: A proposed block for inclusion into a parachain. - Parablock: A block in a parachain. @@ -26,8 +29,11 @@ Here you can find definitions of a bunch of jargon, usually specific to the Polk - Runtime API: A means for the node-side behavior to access structured information based on the state of a fork of the blockchain. - Secondary Checker: A validator who has been randomly selected to perform secondary approval checks on a parablock which is pending approval. - Subsystem: A long-running task which is responsible for carrying out a particular category of work. +- UMP: (Upward Message Passing). A vertical message passing mechanism from a parachain to the relay chain. - Validator: Specially-selected node in the network who is responsible for validating parachain blocks and issuing attestations about their validity. - Validation Function: A piece of Wasm code that describes the state-transition function of a parachain. +- VMP: (Vertical Message Passing). A family of mechanisms that are responsible for message exchange between the relay chain and parachains. +- XCMP: (Cross-Chain Message Passing). A type of horizontal message passing (i.e. between parachains) that allows secure message passing directly between parachains and has minimal resource requirements from the relay chain, and is thus highly scalable. Also of use is the [Substrate Glossary](https://substrate.dev/docs/en/knowledgebase/getting-started/glossary). diff --git a/roadmap/implementers-guide/src/messaging.md b/roadmap/implementers-guide/src/messaging.md new file mode 100644 index 0000000000000000000000000000000000000000..62fbe3cfea88f6daf0f528f15e8945f6df524078 --- /dev/null +++ b/roadmap/implementers-guide/src/messaging.md @@ -0,0 +1,97 @@ +# Messaging Overview + +Polkadot has a few mechanisms that are responsible for message passing. They can be generally divided +into two categories: Horizontal and Vertical. Horizontal Message Passing (HMP) refers to mechanisms +that are responsible for exchanging messages between parachains. Vertical Message Passing (VMP) is +used for communication between the relay chain and parachains. + +## Vertical Message Passing + +```dot process +digraph { + rc [shape=Mdiamond label="Relay Chain"]; + p1 [shape=box label = "Parachain"]; + + rc -> p1 [label="DMP"]; + p1 -> rc [label="UMP"]; +} +``` + +Downward Message Passing (DMP) is a mechanism for delivering messages to parachains from the relay chain. + +Each parachain has its own queue that stores all pending inbound downward messages. A parachain +doesn't have to process all messages at once, however, there are rules as to how the downward message queue +should be processed. Currently, at least one message must be consumed per candidate if the queue is not empty. +The downward message queue doesn't have a cap on its size and it is up to the relay-chain to put mechanisms +that prevent spamming in place. + +Upward Message Passing (UMP) is a mechanism responsible for delivering messages in the opposite direction: +from a parachain up to the relay chain. Upward messages can serve different purposes and can be of different +kinds. + +One kind of message is `Dispatchable`. They can be thought of as similar to extrinsics sent to a relay chain: they also +invoke exposed runtime entrypoints, they consume weight and require fees. The difference is that they originate from +a parachain. Each parachain has a queue of dispatchables to be executed.
Only so many dispatchables can be processed at a time. +The weight that processing of the dispatchables can consume is limited by a preconfigured value. Therefore, it is possible +that some dispatchables will be left for later blocks. To make the dispatching fairer, the queues are processed turn-by-turn +in a round-robin fashion. + +Upward messages are also used by a parachain to request opening and closing HRMP channels (HRMP will be described below). + +Other kinds of upward messages can be introduced in the future as well. Potential candidates are +new validation code signalling, or other requests to the relay chain. + +## Horizontal Message Passing + +```dot process +digraph { + rc [shape=Mdiamond color="gray" fontcolor="gray" label="Relay Chain"]; + + subgraph { + rank = "same" + p1 [shape=box label = "Parachain 1"]; + p2 [shape=box label = "Parachain 2"]; + } + + rc -> p1 [label="DMP" color="gray" fontcolor="gray"]; + p1 -> rc [label="UMP" color="gray" fontcolor="gray"]; + + rc -> p2 [label="DMP" color="gray" fontcolor="gray"]; + p2 -> rc [label="UMP" color="gray" fontcolor="gray"]; + + p2 -> p1 [dir=both label="XCMP"]; +} +``` + +### Cross-Chain Message Passing + +The most important member of this family is XCMP. + +> ℹ️ XCMP is currently under construction and details are subject to change. + +XCMP is a message passing mechanism between parachains that requires minimal involvement of the relay chain. +The relay chain provides the means for sender parachains to authenticate messages sent to recipient parachains. + +Semantically, communication occurs through so-called channels. A channel is unidirectional and has +two endpoints, one for the sender and one for the recipient. A channel can be opened only if both parties agree, +and can be closed unilaterally. + +Only the channel metadata is stored on the relay-chain in a very compact form: all messages and their +contents sent by the sender parachain are encoded using only one root hash. This root is referred to as the +MQC head. + +The authenticity of the messages must be proven using that root hash to the receiving party at +candidate authoring time. The proof stems from the relay parent storage that contains the root hash of the channel. +Since not all messages are required to be processed by the receiver's candidate, only the processed +messages are supplied as preimages; the rest are provided as hashes. + +Further details can be found at the official repository for the +[Cross-Consensus Message Format (XCM)](https://github.com/paritytech/xcm-format/blob/master/README.md), as well as +at the [W3F research website](https://research.web3.foundation/en/latest/polkadot/XCMP.html) and +[this blogpost](https://medium.com/web3foundation/polkadots-messaging-scheme-b1ec560908b7). + +HRMP (Horizontally Relay-routed Message Passing) is a stopgap that predates XCMP. Semantically, it mimics XCMP's interface. +The crucial difference from XCMP is that all the messages are stored in the relay-chain storage. That makes +things simple, but it also makes HRMP more demanding in terms of resources and thus more expensive. + +Once XCMP is available, we expect to retire HRMP.
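+
+As a minimal sketch of the MQC construction described above (the use of blake2-256 and the exact link encoding here are assumptions of this example, not normative), appending a message advances the head like so:
+
+```rust
+use sp_core::blake2_256;
+
+type MqcHead = [u8; 32];
+
+/// Append a message to a message queue chain and return the new head.
+/// The previous head is hashed together with the hash of the new message,
+/// so the entire history is authenticated by a single 32-byte value.
+fn append_to_mqc(prev_head: MqcHead, message: &[u8]) -> MqcHead {
+	let mut link = Vec::with_capacity(64);
+	link.extend_from_slice(&prev_head);
+	link.extend_from_slice(&blake2_256(message));
+	blake2_256(&link)
+}
+```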
diff --git a/roadmap/implementers-guide/src/node/README.md b/roadmap/implementers-guide/src/node/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f6d7e7a887f7cd505cf5fd0a83e9ef63ae78cb09 --- /dev/null +++ b/roadmap/implementers-guide/src/node/README.md @@ -0,0 +1,24 @@ +# Node Architecture + +## Design Goals + +* Modularity: Components of the system should be as self-contained as possible. Communication boundaries between components should be well-defined and mockable. This is key to creating testable, easily reviewable code. +* Minimizing side effects: Components of the system should aim to minimize side effects and to communicate with other components via message-passing. +* Operational Safety: The software will be managing signing keys where conflicting messages can lead to large amounts of value being slashed. Care should be taken to ensure that no messages are signed incorrectly or in conflict with each other. + +The architecture of the node-side behavior aims to embody the Rust principles of ownership and message-passing to create clean, isolatable code. Each resource should have a single owner, with minimal sharing where unavoidable. + +Many operations that need to be carried out involve the network, which is asynchronous. This asynchrony affects all core subsystems that rely on the network as well. The approach of hierarchical state machines is well-suited to this kind of environment. + +We introduce a hierarchy of state machines consisting of an overseer supervising subsystems, where Subsystems can contain their own internal hierarchy of jobs. This is elaborated on in the next section on Subsystems. + +## Assumptions + +The Node-side code comes with a set of assumptions that we build upon. These assumptions encompass most of the fundamental blockchain functionality. + +We assume the following constraints regarding provided basic functionality: + * The underlying **consensus** algorithm, whether it is BABE or SASSAFRAS, is implemented. + * There is a **chain synchronization** protocol which will search for and download the longest available chains at all times. + * The **state** of all blocks at the head of the chain is available. There may be **state pruning** such that the state of the last `k` blocks behind the last finalized block is available, as well as the state of all their descendants. This assumption implies that the state of all active leaves and their last `k` ancestors are all available. The underlying implementation is expected to support `k` of a few hundred blocks, but we reduce this to a very conservative `k=5` for our purposes. + * There is an underlying **networking** framework which provides **peer discovery** services which will provide us with peers and will not create "loopback" connections to our own node. The number of peers we will have is assumed to be bounded at 1000. + * There is a **transaction pool** and a **transaction propagation** mechanism which maintains a set of current transactions and distributes them to connected peers. Current transactions are those which are not outdated relative to some "best" fork of the chain, which is part of the active heads, and have not been included in the best fork.
diff --git a/roadmap/implementors-guide/src/node/availability/README.md b/roadmap/implementers-guide/src/node/availability/README.md similarity index 100% rename from roadmap/implementors-guide/src/node/availability/README.md rename to roadmap/implementers-guide/src/node/availability/README.md diff --git a/roadmap/implementors-guide/src/node/availability/availability-distribution.md b/roadmap/implementers-guide/src/node/availability/availability-distribution.md similarity index 54% rename from roadmap/implementors-guide/src/node/availability/availability-distribution.md rename to roadmap/implementers-guide/src/node/availability/availability-distribution.md index 008f3e91fbe7d7c7ef184de463087d1d9e1a64a0..de34f3b8ed9068431156c4f771be38aa2c34ccc1 100644 --- a/roadmap/implementors-guide/src/node/availability/availability-distribution.md +++ b/roadmap/implementers-guide/src/node/availability/availability-distribution.md @@ -6,36 +6,34 @@ After a candidate is backed, the availability of the PoV block must be confirmed ## Protocol -`ProtocolId`:`b"avad"` +`PeerSet`: `Validation` Input: -- NetworkBridgeUpdate(update) +- NetworkBridgeUpdateV1(update) Output: -- NetworkBridge::RegisterEventProducer(`ProtocolId`) -- NetworkBridge::SendMessage(`[PeerId]`, `ProtocolId`, `Bytes`) +- NetworkBridge::SendValidationMessage(`[PeerId]`, message) - NetworkBridge::ReportPeer(PeerId, cost_or_benefit) - AvailabilityStore::QueryPoV(candidate_hash, response_channel) - AvailabilityStore::StoreChunk(candidate_hash, chunk_index, inclusion_proof, chunk_data) ## Functionality -Register on startup an event producer with `NetworkBridge::RegisterEventProducer`. - For each relay-parent in our local view update, look at all backed candidates pending availability. Distribute via gossip all erasure chunks for all candidates that we have to peers. -We define an operation `live_candidates(relay_heads) -> Set` which returns a set of [`CommittedCandidateReceipt`s](../../types/candidate.md#committed-candidate-receipt) a given set of relay chain heads that implies a set of candidates whose availability chunks should be currently gossiped. This is defined as all candidates pending availability in any of those relay-chain heads or any of their last `K` ancestors. We assume that state is not pruned within `K` blocks of the chain-head. +We define an operation `live_candidates(relay_heads) -> Set` which returns a set of [`CommittedCandidateReceipt`s](../../types/candidate.md#committed-candidate-receipt). +This is defined as all candidates pending availability in any of those relay-chain heads or any of their last `K` ancestors in the same session. We assume that state is not pruned within `K` blocks of the chain-head. `K` is commonly small and is currently fixed to `K=3`. -We will send any erasure-chunks that correspond to candidates in `live_candidates(peer_most_recent_view_update)`. Likewise, we only accept and forward messages pertaining to a candidate in `live_candidates(current_heads)`. Each erasure chunk should be accompanied by a merkle proof that it is committed to by the erasure trie root in the candidate receipt, and this gossip system is responsible for checking such proof. +We will send any erasure-chunks that correspond to candidates in `live_candidates(peer_most_recent_view_update)`. +Likewise, we only accept and forward messages pertaining to a candidate in `live_candidates(current_heads)`.
+Each erasure chunk should be accompanied by a merkle proof that it is committed to by the erasure trie root in the candidate receipt, and this gossip system is responsible for checking such a proof. We re-attempt to send anything live to a peer upon any view update from that peer. -On our view change, for all live candidates, we will check if we have the PoV by issuing a `QueryPoV` message and waiting for the response. If the query returns `Some`, we will perform the erasure-coding and distribute all messages to peers that will accept them. +On our view change, for all live candidates, we will check if we have the PoV by issuing a `QueryAvailableData` message and waiting for the response. If the query returns `Some`, we will perform the erasure-coding and distribute all messages to peers that will accept them. If we are operating as a validator, we note our index `i` in the validator set and keep the `i`th availability chunk for any live candidate, as we receive it. We keep the chunk and its merkle proof in the [Availability Store](../utility/availability-store.md) by sending a `StoreChunk` command. This includes chunks and proofs generated as the result of a successful `QueryPoV`. -> TODO: back-and-forth is kind of ugly but drastically simplifies the pruning in the availability store, as it creates an invariant that chunks are only stored if the candidate was actually backed -> -> K=3? +The back-and-forth seems suboptimal at first glance, but drastically simplifies the pruning in the availability store, as it creates an invariant that chunks are only stored if the candidate was actually backed. diff --git a/roadmap/implementers-guide/src/node/availability/bitfield-distribution.md b/roadmap/implementers-guide/src/node/availability/bitfield-distribution.md new file mode 100644 index 0000000000000000000000000000000000000000..53bd8a1bced61f5783bfeee937786c9b4741d80e --- /dev/null +++ b/roadmap/implementers-guide/src/node/availability/bitfield-distribution.md @@ -0,0 +1,34 @@ +# Bitfield Distribution + +Validators vote on the availability of a backed candidate by issuing signed bitfields, where each bit corresponds to a single candidate. These bitfields can be used to compactly determine which backed candidates are available or not based on a 2/3+ quorum. + +## Protocol + +`PeerSet`: `Validation` + +Input: +[`BitfieldDistributionMessage`](../../types/overseer-protocol.md#bitfield-distribution-message) which are gossiped to all peers, whether they are validators or not. + +Output: + +- `NetworkBridge::SendValidationMessage([PeerId], message)` gossip a verified incoming bitfield on to interested subsystems within this validator node. +- `NetworkBridge::ReportPeer(PeerId, cost_or_benefit)` improve or penalize the reputation of peers based on the messages that are received relative to the current view. +- `ProvisionerMessage::ProvisionableData(ProvisionableData::Bitfield(relay_parent, SignedAvailabilityBitfield))` pass + on the bitfield to the other submodules via the overseer. + +## Functionality + +This is implemented as a gossip system. + +It is necessary to track peer connection, view change, and disconnection events, in order to maintain an index of which peers are interested in which relay parent bitfields. + + +Before gossiping incoming bitfields, they must be checked to be signed by one of the validators +of the validator set relevant to the current relay parent.
+Only accept bitfields relevant to our current view and only distribute bitfields to other peers when relevant to their most recent view. +Accept and distribute only one bitfield per validator. + + +When receiving a bitfield either from the network or from a `DistributeBitfield` message, forward it along to the block authorship (provisioning) subsystem for potential inclusion in a block. + +For peers connecting after a set of valid bitfield gossip messages has been received, those messages must be cached and sent upon connection of new or re-connecting peers. diff --git a/roadmap/implementors-guide/src/node/availability/bitfield-signing.md b/roadmap/implementers-guide/src/node/availability/bitfield-signing.md similarity index 72% rename from roadmap/implementors-guide/src/node/availability/bitfield-signing.md rename to roadmap/implementers-guide/src/node/availability/bitfield-signing.md index 613736901d2f4ea4100d5f6353e818f633ca7373..0ca9badd32e2f418d401307f722a07b83177be5a 100644 --- a/roadmap/implementors-guide/src/node/availability/bitfield-signing.md +++ b/roadmap/implementers-guide/src/node/availability/bitfield-signing.md @@ -4,6 +4,10 @@ Validators vote on the availability of a backed candidate by issuing signed bitf ## Protocol +Input: + +There is no dedicated input mechanism for bitfield signing. Instead, Bitfield Signing produces a bitfield representing the current state of availability on `StartWork`. + Output: - BitfieldDistribution::DistributeBitfield: distribute a locally signed bitfield @@ -11,15 +15,15 @@ Output: ## Functionality -Upon onset of a new relay-chain head with `StartWork`, launch bitfield signing job for the head. Stop the job on `StopWork`. +Upon receipt of an `ActiveLeavesUpdate`, launch bitfield signing job for each `activated` head. Stop the job for each `deactivated` head. ## Bitfield Signing Job Localized to a specific relay-parent `r` If not running as a validator, do nothing. +- Begin by waiting a fixed period of time so availability distribution has the chance to make candidates available. - Determine our validator index `i`, the set of backed candidates pending availability in `r`, and which bit of the bitfield each corresponds to. - > TODO: wait T time for availability distribution? - Start with an empty bitfield. For each bit in the bitfield, if there is a candidate pending availability, query the [Availability Store](../utility/availability-store.md) for whether we have the availability chunk for our validator index. - For all chunks we have, set the corresponding bit in the bitfield. - Sign the bitfield and dispatch a `BitfieldDistribution::DistributeBitfield` message. diff --git a/roadmap/implementors-guide/src/node/backing/README.md b/roadmap/implementers-guide/src/node/backing/README.md similarity index 100% rename from roadmap/implementors-guide/src/node/backing/README.md rename to roadmap/implementers-guide/src/node/backing/README.md diff --git a/roadmap/implementers-guide/src/node/backing/candidate-backing.md b/roadmap/implementers-guide/src/node/backing/candidate-backing.md new file mode 100644 index 0000000000000000000000000000000000000000..afea5e8ee40255ec43414b08695ba870a663378f --- /dev/null +++ b/roadmap/implementers-guide/src/node/backing/candidate-backing.md @@ -0,0 +1,139 @@ +# Candidate Backing + +The Candidate Backing subsystem ensures every parablock considered for relay block inclusion has been seconded by at least one validator, and approved by a quorum. Parablocks for which no validator will assert correctness are discarded.
If the block later proves invalid, the initial backers are slashable; this gives Polkadot a rational threat model during subsequent stages. + +Its role is to produce backable candidates for inclusion in new relay-chain blocks. It does so by issuing signed [`Statement`s][Statement] and tracking received statements signed by other validators. Once enough statements are received, they can be combined into backing for specific candidates. + +Note that though the candidate backing subsystem attempts to produce as many backable candidates as possible, it does _not_ attempt to choose a single authoritative one. The choice of which actually gets included is ultimately up to the block author, by whatever metrics it may use; those are opaque to this subsystem. + +Once a sufficient quorum has agreed that a candidate is valid, this subsystem notifies the [Provisioner][PV], which in turn engages block production mechanisms to include the parablock. + +## Protocol + +Input: [`CandidateBackingMessage`][CBM] + +Output: + +- [`CandidateValidationMessage`][CVM] +- [`RuntimeApiMessage`][RAM] +- [`CandidateSelectionMessage`][CSM] +- [`ProvisionerMessage`][PM] +- [`PoVDistributionMessage`][PDM] +- [`StatementDistributionMessage`][SDM] + +## Functionality + +The [Candidate Selection][CS] subsystem is the primary source of non-overseer messages into this subsystem. That subsystem generates appropriate [`CandidateBackingMessage`s][CBM] and passes them to this subsystem. + +This subsystem requests validation from the [Candidate Validation][CV] subsystem and generates an appropriate [`Statement`][Statement]. All `Statement`s are then passed on to the [Statement Distribution][SD] subsystem to be gossiped to peers. When [Candidate Validation][CV] decides that a candidate is invalid, and it was recommended to us to second by our own [Candidate Selection][CS] subsystem, a message is sent to the [Candidate Selection][CS] subsystem with the candidate's hash so that the collator which recommended it can be penalized. + +The subsystem should maintain a set of handles to Candidate Backing Jobs that are currently live, as well as the relay-parent to which they correspond. + +### On Overseer Signal + +* If the signal is an [`OverseerSignal`][OverseerSignal]`::ActiveLeavesUpdate`: + * spawn a Candidate Backing Job for each `activated` head, storing a bidirectional channel with the Candidate Backing Job in the set of handles. + * cease the Candidate Backing Job for each `deactivated` head, if any. +* If the signal is an [`OverseerSignal`][OverseerSignal]`::Conclude`: Forward conclude messages to all jobs, wait a small amount of time for them to join, and then exit. + +### On Receiving `CandidateBackingMessage` + +* If the message is a [`CandidateBackingMessage`][CBM]`::GetBackedCandidates`, get all backable candidates from the statement table and send them back. +* If the message is a [`CandidateBackingMessage`][CBM]`::Second`, sign and dispatch a `Seconded` statement only if we have not seconded any other candidate and have not signed a `Valid` statement for the requested candidate. Signing both a `Seconded` and `Valid` message is a double-voting misbehavior with a heavy penalty, and this could occur if another validator has seconded the same candidate and we've received their message before the internal seconding request. +* If the message is a [`CandidateBackingMessage`][CBM]`::Statement`, count the statement toward the quorum.
If the statement in the message is `Seconded` and it contains a candidate that belongs to our assignment, request the corresponding `PoV` from the `PoVDistribution` subsystem and launch validation. Issue our own `Valid` or `Invalid` statement as a result.
+
+> big TODO: "contextual execution"
+>
+> * At the moment we only allow inclusion of _new_ parachain candidates validated by _current_ validators.
+> * Allow inclusion of _old_ parachain candidates validated by _current_ validators.
+> * Allow inclusion of _old_ parachain candidates validated by _old_ validators.
+>
+> This will probably blur the lines between jobs, will probably require inter-job communication and a short-term memory of recently backable, but not backed candidates.
+
+## Candidate Backing Job
+
+The Candidate Backing Job represents the work a node does for backing candidates with respect to a particular relay-parent.
+
+The goal of a Candidate Backing Job is to produce as many backable candidates as possible. This is done via signed [`Statement`s][STMT] by validators. If a candidate receives a majority of supporting Statements from the Parachain Validators currently assigned, then that candidate is considered backable.
+
+### On Startup
+
+* Fetch current validator set, validator -> parachain assignments from [`Runtime API`][RA] subsystem using [`RuntimeApiRequest::Validators`][RAM] and [`RuntimeApiRequest::ValidatorGroups`][RAM]
+* Determine if the node controls a key in the current validator set. Call this the local key if so.
+* If the local key exists, extract the parachain head and validation function from the [`Runtime API`][RA] for the parachain the local key is assigned to by issuing a [`RuntimeApiRequest::Validators`][RAM]
+* Issue a [`RuntimeApiRequest::SigningContext`][RAM] message to get a context that will later be used upon signing.
+
+### On Receiving New Candidate Backing Message
+
+```rust
+match msg {
+  CandidateBackingMessage::GetBackedCandidates(hash, tx) => {
+    // Send back a set of backable candidates.
+  }
+  CandidateBackingMessage::Second(hash, candidate) => {
+    if candidate is unknown and in local assignment {
+      spawn_validation_work(candidate, parachain head, validation function)
+    }
+  }
+  CandidateBackingMessage::Statement(hash, statement) => {
+    // count this statement toward the votes on this candidate
+    if let Statement::Seconded(candidate) = statement {
+      if candidate.parachain_id == our_assignment {
+        spawn_validation_work(candidate, parachain head, validation function)
+      }
+    }
+  }
+}
+```
+
+Add `Seconded` statements and `Valid` statements to a quorum. If the quorum reaches a validator-group majority, send a [`ProvisionerMessage`][PM]`::ProvisionableData(ProvisionableData::BackedCandidate(BackedCandidate))` message.
+`Invalid` statements that conflict with already witnessed `Seconded` and `Valid` statements for the given candidate, statements that are double-votes, self-contradictions and so on, should result in issuing a [`ProvisionerMessage`][PM]`::MisbehaviorReport` message for each newly detected case of this kind.
+
+### Validating Candidates
+
+```rust
+fn spawn_validation_work(candidate, parachain head, validation function) {
+  asynchronously {
+    let pov = (fetch pov block).await;
+
+    let valid = (validate pov block).await;
+    if valid {
+      // make PoV available for later distribution. Send data to the availability store to keep.
+      // sign and dispatch `valid` statement to network if we have not seconded the given candidate.
+    } else {
+      // sign and dispatch `invalid` statement to network.
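+      // and report the candidate's hash to the Candidate Selection subsystem
+      // so the collator which recommended it can be penalized, as described above.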
+    }
+  }
+}
+```
+
+### Fetch PoV Block
+
+Create a `(sender, receiver)` pair.
+Dispatch a [`PoVDistributionMessage`][PDM]`::FetchPoV(relay_parent, candidate_hash, sender)` and listen on the receiver for a response.
+
+### Validate PoV Block
+
+Create a `(sender, receiver)` pair.
+Dispatch a `CandidateValidationMessage::Validate(validation function, candidate, pov, sender)` and listen on the receiver for a response.
+
+### Distribute Signed Statement
+
+Dispatch a [`StatementDistributionMessage`][SDM]`::Share(relay_parent, SignedFullStatement)`.
+
+[OverseerSignal]: ../../types/overseer-protocol.md#overseer-signal
+[Statement]: ../../types/backing.md#statement-type
+[STMT]: ../../types/backing.md#statement-type
+[CSM]: ../../types/overseer-protocol.md#candidate-selection-message
+[RAM]: ../../types/overseer-protocol.md#runtime-api-message
+[CVM]: ../../types/overseer-protocol.md#validation-request-type
+[PM]: ../../types/overseer-protocol.md#provisioner-message
+[CBM]: ../../types/overseer-protocol.md#candidate-backing-message
+[PDM]: ../../types/overseer-protocol.md#pov-distribution-message
+[SDM]: ../../types/overseer-protocol.md#statement-distribution-message
+
+[CS]: candidate-selection.md
+[CV]: ../utility/candidate-validation.md
+[SD]: statement-distribution.md
+[RA]: ../utility/runtime-api.md
+[PV]: ../utility/provisioner.md
diff --git a/roadmap/implementors-guide/src/node/backing/candidate-selection.md b/roadmap/implementers-guide/src/node/backing/candidate-selection.md
similarity index 100%
rename from roadmap/implementors-guide/src/node/backing/candidate-selection.md
rename to roadmap/implementers-guide/src/node/backing/candidate-selection.md
diff --git a/roadmap/implementors-guide/src/node/backing/pov-distribution.md b/roadmap/implementers-guide/src/node/backing/pov-distribution.md
similarity index 61%
rename from roadmap/implementors-guide/src/node/backing/pov-distribution.md
rename to roadmap/implementers-guide/src/node/backing/pov-distribution.md
index 1cede766743cc7cf5348bc3c2fa2b23e0b5d4d9a..9b32abbd560a4dac21d246ec8f44ac197b623e93 100644
--- a/roadmap/implementors-guide/src/node/backing/pov-distribution.md
+++ b/roadmap/implementers-guide/src/node/backing/pov-distribution.md
@@ -4,21 +4,20 @@ This subsystem is responsible for distributing PoV blocks. For now, unified with
## Protocol

-`ProtocolId`: `b"povd"`
+`PeerSet`: `Validation`

Input: [`PoVDistributionMessage`](../../types/overseer-protocol.md#pov-distribution-message)

Output:

-- NetworkBridge::RegisterEventProducer(`ProtocolId`)
-- NetworkBridge::SendMessage(`[PeerId]`, `ProtocolId`, `Bytes`)
+- NetworkBridge::SendMessage(`[PeerId]`, message)
- NetworkBridge::ReportPeer(PeerId, cost_or_benefit)

## Functionality

-This network protocol is responsible for distributing [`PoV`s](../../types/availability.md#proof-of-validity) by gossip. Since PoVs are heavy in practice, gossip is far from the most efficient way to distribute them. In the future, this should be replaced by a better network protocol that finds validators who have validated the block and connects to them directly. This protocol is descrbied
+This network protocol is responsible for distributing [`PoV`s](../../types/availability.md#proof-of-validity) by gossip. Since PoVs are heavy in practice, gossip is far from the most efficient way to distribute them. In the future, this should be replaced by a better network protocol that finds validators who have validated the block and connects to them directly.
This protocol is described in terms of "us" and our peers, with the understanding that this is the procedure that any honest node will run. It has the following goals:
- We never have to buffer an unbounded amount of data
@@ -26,11 +25,13 @@ This protocol is described in terms of "us" and our peers, with the understandin

As we are gossiping, we need to track which PoVs our peers are waiting for to avoid sending them data that they are not expecting. It is not reasonable to expect our peers to buffer unexpected PoVs, just as we will not buffer unexpected PoVs. So notifying our peers about what is being awaited is key. However, it is important that the notifications system is also bounded.

-For this, in order to avoid reaching into the internals of the [Statement Distribution](statement-distribution.md) Subsystem, we can rely on an expected propery of candidate backing: that each validator can only second one candidate at each chain head. So we can set a cap on the number of PoVs each peer is allowed to notify us that they are waiting for at a given relay-parent. This cap will be the number of validators at that relay-parent. And the view update mechanism of the [Network Bridge](../utility/network-bridge.md) ensures that peers are only allowed to consider a certain set of relay-parents as live. So this bounding mechanism caps the amount of data we need to store per peer at any time at `sum({ n_validators_at_head(head) | head in view_heads })`. Additionally, peers should only be allowed to notify us of PoV hashes they are waiting for in the context of relay-parents in our own local view, which means that `n_validators_at_head` is implied to be `0` for relay-parents not in our own local view.
+For this, in order to avoid reaching into the internals of the [Statement Distribution](statement-distribution.md) Subsystem, we can rely on an expected property of candidate backing: that each validator can second up to 2 candidates per chain head. This will typically be only one, because they are only supposed to issue one, but they can equivocate if they are willing to be slashed. So we can set a cap on the number of PoVs each peer is allowed to notify us that they are waiting for at a given relay-parent. This cap will be twice the number of validators at that relay-parent. In practice, this is a very lax upper bound that can be reduced much further if desired.

-View updates from peers and our own view updates are received from the network bridge. These will lag somewhat behind the `StartWork` and `StopWork` messages received from the overseer, which will influence the actual data we store. The `OurViewUpdate`s from the [`NetworkBridgeEvent`](../../types/overseer-protocol.md#network-bridge-update) must be considered canonical in terms of our peers' perception of us.
+The view update mechanism of the [Network Bridge](../utility/network-bridge.md) ensures that peers are only allowed to consider a certain set of relay-parents as live. So this bounding mechanism caps the amount of data we need to store per peer at any time at `sum({ 2 * n_validators_at_head(head) * sizeof(hash) for head in view_heads })`. Additionally, peers should only be allowed to notify us of PoV hashes they are waiting for in the context of relay-parents in our own local view, which means that `n_validators_at_head` is implied to be `0` for relay-parents not in our own local view.

-Lastly, the system needs to be bootstrapped with our own perception of which PoVs we are cognizant of but awaiting data for.
This is done by receipt of the [`PoVDistributionMessage`](../../types/overseer-protocol.md#pov-distribution-message)::ValidatorStatement variant. We can ignore anything except for `Seconded` statements.
+View updates from peers and our own view updates are received from the network bridge. These will lag somewhat behind the `ActiveLeavesUpdate` messages received from the overseer, which will influence the actual data we store. The `OurViewUpdate`s from the [`NetworkBridgeEvent`](../../types/overseer-protocol.md#network-bridge-update) must be considered canonical in terms of our peers' perception of us.
+
+Lastly, the system needs to be bootstrapped with our own perception of which PoVs we are cognizant of but awaiting data for. This is done by receipt of the [`PoVDistributionMessage`](../../types/overseer-protocol.md#pov-distribution-message)::FetchPoV variant. Proper operation of this subsystem depends on the descriptors passed faithfully representing candidates which have been seconded by other validators.

## Formal Description

@@ -45,7 +46,6 @@ struct State {
struct BlockBasedState {
    known: Map<Hash, PoV>, // should be a shared PoV in practice. these things are heavy.
-   awaited: Set<Hash>, // awaited PoVs by blake2-256 hash.
    fetching: Map<Hash, Vec<ResponseChannel<PoV>>>,
    n_validators: usize,
}

@@ -55,38 +55,25 @@ struct PeerState {
}
```

-We also assume the following network messages, which are sent and received by the [Network Bridge](../utility/network-bridge.md)
-
-```rust
-enum NetworkMessage {
-    /// Notification that we are awaiting the given PoVs (by hash) against a
-    /// specific relay-parent hash.
-    Awaiting(Hash, Vec<Hash>),
-    /// Notification of an awaited PoV, in a given relay-parent context.
-    /// (relay_parent, pov_hash, pov)
-    SendPoV(Hash, Hash, PoV),
-}
-```
+We also use the [`PoVDistributionV1Message`](../../types/network.md#pov-distribution) as our `NetworkMessage`, which is sent and received by the [Network Bridge](../utility/network-bridge.md).

Here is the logic of the state machine:

*Overseer Signals*

-- On `StartWork(relay_parent)`:
-  - Get the number of validators at that relay parent by querying the [Runtime API](../utility/runtime-api.md) for the validators and then counting them.
-  - Create a blank entry in `relay_parent_state` under `relay_parent` with correct `n_validators` set.
-- On `StopWork(relay_parent)`:
-  - Remove the entry for `relay_parent` from `relay_parent_state`.
-- On `Concluded`: conclude.
+- On `ActiveLeavesUpdate(relay_parent)`:
+  - For each relay-parent in the `activated` list:
+    - Get the number of validators at that relay parent by querying the [Runtime API](../utility/runtime-api.md) for the validators and then counting them.
+    - Create a blank entry in `relay_parent_state` under `relay_parent` with correct `n_validators` set.
+  - For each relay-parent in the `deactivated` list:
+    - Remove the entry for `relay_parent` from `relay_parent_state`.
+- On `Conclude`: conclude.

*PoV Distribution Messages*

-- On `ValidatorStatement(relay_parent, statement)`
-  - If this is not `Statement::Seconded`, ignore.
-  - If there is an entry under `relay_parent` in `relay_parent_state`, add the `pov_hash` of the seconded Candidate's [`CandidateDescriptor`](../../types/candidate.md#candidate-descriptor) to the `awaited` set of the entry.
-  - If the `pov_hash` was not previously awaited and there are `n_validators` or fewer entries in the `awaited` set, send `NetworkMessage::Awaiting(relay_parent, vec![pov_hash])` to all peers.
- On `FetchPoV(relay_parent, descriptor, response_channel)`
  - If there is no entry in `relay_parent_state` under `relay_parent`, ignore.
  - If there is a PoV under `descriptor.pov_hash` in the `known` map, send that PoV on the channel and return.
  - Otherwise, place the `response_channel` in the `fetching` map under `descriptor.pov_hash`.
+  - If the `pov_hash` had no previous entry in `fetching` and there are `2 * n_validators` or fewer entries in the `fetching` set, send `NetworkMessage::Awaiting(relay_parent, vec![pov_hash])` to all peers.
- On `DistributePoV(relay_parent, descriptor, PoV)`
  - If there is no entry in `relay_parent_state` under `relay_parent`, ignore.
  - Complete and remove any channels under `descriptor.pov_hash` in the `fetching` map.
@@ -96,26 +83,28 @@ Here is the logic of the state machine:

*Network Bridge Updates*

- On `PeerConnected(peer_id, observed_role)`
  - Make a fresh entry in the `peer_state` map for the `peer_id`.
-- On `PeerDisconnected(peer_id)
+- On `PeerDisconnected(peer_id)`
  - Remove the entry for `peer_id` from the `peer_state` map.
- On `PeerMessage(peer_id, bytes)`
  - If the bytes do not decode to a `NetworkMessage` or the `peer_id` has no entry in the `peer_state` map, report and ignore.
  - If this is `NetworkMessage::Awaiting(relay_parent, pov_hashes)`:
    - If there is no entry under `peer_state.awaited` for the `relay_parent`, report and ignore.
    - If `relay_parent` is not contained within `our_view`, report and ignore.
-    - Otherwise, if the `awaited` map combined with the `pov_hashes` would have more than `relay_parent_state[relay_parent].n_validators` entries, report and ignore. Note that we are leaning on the property of the network bridge that it sets our view based on `StartWork` messages.
+    - Otherwise, if the peer's `awaited` map combined with the `pov_hashes` would have more than `2 * relay_parent_state[relay_parent].n_validators` entries, report and ignore. Note that we are leaning on the property of the network bridge that it sets our view based on `activated` heads in `ActiveLeavesUpdate` signals.
    - For each new `pov_hash` in `pov_hashes`, if there is a `pov` under `pov_hash` in the `known` map, send the peer a `NetworkMessage::SendPoV(relay_parent, pov_hash, pov)`.
    - Otherwise, add the `pov_hash` to the `awaited` map.
  - If this is `NetworkMessage::SendPoV(relay_parent, pov_hash, pov)`:
-    - If there is no entry under `relay_parent` in `relay_parent_state` or no entry under `pov_hash` in our `awaited` map for that `relay_parent`, report and ignore.
+    - If there is no entry under `relay_parent` in `relay_parent_state` or no entry under `pov_hash` in our `fetching` map for that `relay_parent`, report and ignore.
    - If the blake2-256 hash of the pov doesn't equal `pov_hash`, report and ignore.
-    - Complete and remove any listeners in the `fetching` map under `pov_hash`.
+    - Complete and remove any listeners in the `fetching` map under `pov_hash`. However, leave an empty set of listeners in the `fetching` map to denote that this was something we once awaited. This will allow us to recognize peers who have sent us something we were expecting, but just a little late.
    - Add to `known` map.
+    - Remove the `pov_hash` from the `peer.awaited` map, if any.
    - Send `NetworkMessage::SendPoV(relay_parent, descriptor.pov_hash, PoV)` to all peers who have the `descriptor.pov_hash` in the set under `relay_parent` in the `peer.awaited` map and remove the entry from `peer.awaited`.
- On `PeerViewChange(peer_id, view)`
  - If the peer is unknown, ignore.
  - Ensure there is an entry under `relay_parent` for each `relay_parent` in `view` within the `peer.awaited` map, creating blank `awaited` lists as necessary.
  - Remove all entries under `peer.awaited` that are not within `view`.
+  - For all hashes in `view` that were not within the old view, send the peer all the keys in our `fetching` map under the block-based state for that hash - i.e. notify the peer of everything we are awaiting at that hash.
- On `OurViewChange(view)`
  - Update `our_view` to `view`
diff --git a/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/roadmap/implementers-guide/src/node/backing/statement-distribution.md
new file mode 100644
index 0000000000000000000000000000000000000000..f5258b4155e78631c5bde46541ed20abc3eedc5a
--- /dev/null
+++ b/roadmap/implementers-guide/src/node/backing/statement-distribution.md
@@ -0,0 +1,72 @@
+# Statement Distribution
+
+The Statement Distribution Subsystem is responsible for distributing statements about seconded candidates between validators.
+
+## Protocol
+
+`PeerSet`: `Validation`
+
+Input:
+
+- NetworkBridgeUpdate(update)
+
+Output:
+
+- NetworkBridge::SendMessage(`[PeerId]`, message)
+- NetworkBridge::ReportPeer(PeerId, cost_or_benefit)
+
+## Functionality
+
+Implemented as a gossip protocol. Handle updates to our view and peers' views. Neighbor packets are used to inform peers which chain heads we are interested in data for.
+
+Statement Distribution is the only backing subsystem which has any notion of peer nodes, which can be any full nodes on the network. Validators will also act as peer nodes.
+
+It is responsible for distributing signed statements that we have generated and forwarding them, and for detecting a variety of Validator misbehaviors for reporting to [Misbehavior Arbitration](../utility/misbehavior-arbitration.md). During the Backing stage of the inclusion pipeline, it's the main point of contact with peer nodes. On receiving a signed statement from a peer, assuming the peer receipt state machine is in an appropriate state, it sends the Candidate Receipt to the [Candidate Backing subsystem](candidate-backing.md) to handle the validator's statement.
+
+Track equivocating validators and stop accepting information from them. Establish a data-dependency order:
+
+- In order to receive a `Seconded` message we must have the corresponding chain head in our view.
+- In order to receive an `Invalid` or `Valid` message we must have received the corresponding `Seconded` message.
+
+And respect this data-dependency order from our peers by respecting their views. This subsystem is responsible for checking message signatures.
+
+The Statement Distribution subsystem sends statements to peer nodes.
+
+## Peer Receipt State Machine
+
+There is a very simple state machine which governs which messages we are willing to receive from peers. Not depicted in the state machine: on initial receipt of any [`SignedFullStatement`](../../types/backing.md#signed-statement-type), validate that the provided signature does in fact sign the included data. Note that each individual parablock candidate gets its own instance of this state machine; it is perfectly legal to receive a `Valid(X)` before a `Seconded(Y)`, as long as a `Seconded(X)` has been received.
+
+A: Initial State. Receive `SignedFullStatement(Statement::Second)`: extract `Statement`, forward to Candidate Backing and PoV Distribution, proceed to B. Receive any other `SignedFullStatement` variant: drop it.
+
+B: Receive any `SignedFullStatement`: check signature, forward to Candidate Backing. Receive an `ActiveLeavesUpdate` deactivating the corresponding chain head: proceed to C.
+
+C: Receive any message for this block: drop it.
+
+## Peer Knowledge Tracking
+
+The peer receipt state machine implies that for parsimony of network resources, we should model the knowledge of our peers, and help them out. For example, let's consider a case with peers A, B, and C, validators X and Y, and candidate M. A sends us a `Statement::Second(M)` signed by X. We've double-checked it, and it's valid. While we're checking it, we receive a copy of X's `Statement::Second(M)` from `B`, along with a `Statement::Valid(M)` signed by Y.
+
+Our response to A is just the `Statement::Valid(M)` signed by Y. However, we haven't heard anything about this from C. Therefore, we send it everything we have: first a copy of X's `Statement::Second`, then Y's `Statement::Valid`.
+
+This system implies a certain level of duplication of messages--we received X's `Statement::Second` from both our peers, and C may experience the same--but it minimizes the degree to which messages are simply dropped.
+
+No jobs. We follow view changes from the [`NetworkBridge`](../utility/network-bridge.md), which in turn is updated by the overseer.
+
+## Equivocations and Flood Protection
+
+An equivocation is a double-vote by a validator. The [Candidate Backing](candidate-backing.md) Subsystem is better-suited than this one to detect equivocations as it adds votes to quorum trackers.
+
+At this level, we are primarily concerned about flood-protection, and to some extent, detecting equivocations is a part of that. In particular, we are interested in detecting equivocations of `Seconded` statements. Since every other statement is dependent on `Seconded` statements, ensuring that we only ever hold a bounded number of `Seconded` statements is sufficient for flood-protection.
+
+The simple approach is to say that we only receive up to two `Seconded` statements per validator per chain head. However, the marginal cost of equivocation, conditional on having already equivocated, is close to 0, since a single double-vote offence is counted as all double-vote offences for a particular chain-head. Even if it were not, there is some number of equivocations that can be issued such that the marginal cost of issuing further ones is close to 0, as that many equivocations is already enough to be completely and totally obliterated by the slashing algorithm. We fear the validator with nothing left to lose.
+
+With that in mind, this simple approach has a caveat worth digging deeper into.
+
+First: We may be aware of two equivocated `Seconded` statements issued by a validator. A totally honest peer of ours can also be aware of one or two different `Seconded` statements issued by the same validator. And yet another peer may be aware of one or two _more_ `Seconded` statements. And so on. This interacts badly with pre-emptive sending logic. Upon sending a `Seconded` statement to a peer, we will want to pre-emptively follow up with all statements relative to that candidate. Waiting for acknowledgement introduces latency at every hop, so that is best avoided. What can happen is that upon receipt of the `Seconded` statement, the peer will discard it as it falls beyond the bound of 2 that it is allowed to store.
It cannot store anything in memory about discarded candidates as that would introduce a DoS vector. Then, the peer would receive from us all of the statements pertaining to that candidate, which, from its perspective, would be undesired - they are data-dependent on the `Seconded` statement we sent them, but they have erased all record of that from their memory. Upon receiving a potential flood of undesired statements, this 100% honest peer may choose to disconnect from us. In this way, an adversary may be able to partition the network with careful distribution of equivocated `Seconded` statements.
+
+The fix is to track, per-peer, the hashes of up to 4 candidates per validator (per relay-parent) that the peer is aware of. It is 4 because we may send them 2 and they may send us 2 different ones. We track the data that they are aware of as the union of things we have sent them and things they have sent us. If we receive a 1st or 2nd `Seconded` statement from a peer, we note it in the peer's known candidates even if we do disregard the data locally. And then, upon receipt of any data dependent on that statement, we do not reduce that peer's standing in our eyes, as the data was not undesired.
+
+There is another caveat to the fix: we don't want to allow the peer to flood us because it has set things up in a way that it knows we will drop all of its traffic.
+We also track how many statements we have received per peer, per candidate, and per chain-head. This is any statement concerning a particular candidate: `Seconded`, `Valid`, or `Invalid`. If we ever receive a statement from a peer which would push any of these counters beyond twice the number of validators at the chain-head, we begin to lower the peer's standing and eventually disconnect. This bound is a massive overestimate and could be reduced to twice the number of validators in the corresponding validator group. It is worth noting that the goal at the time of writing is to ensure a finite bound on the amount of stored data, as any equivocation results in a large slash.
diff --git a/roadmap/implementors-guide/src/node/collators/README.md b/roadmap/implementers-guide/src/node/collators/README.md
similarity index 100%
rename from roadmap/implementors-guide/src/node/collators/README.md
rename to roadmap/implementers-guide/src/node/collators/README.md
diff --git a/roadmap/implementers-guide/src/node/collators/collation-generation.md b/roadmap/implementers-guide/src/node/collators/collation-generation.md
new file mode 100644
index 0000000000000000000000000000000000000000..ab3f80273d60dce522cd764554f4e05c2c076be1
--- /dev/null
+++ b/roadmap/implementers-guide/src/node/collators/collation-generation.md
@@ -0,0 +1,51 @@
+# Collation Generation
+
+The collation generation subsystem is executed on collator nodes and produces candidates to be distributed to validators. If configured to produce collations for a para, it produces collations and then feeds them to the [Collator Protocol][CP] subsystem, which handles the networking.
+
+## Protocol
+
+Input: `CollationGenerationMessage`
+
+```rust
+enum CollationGenerationMessage {
+  Initialize(CollationGenerationConfig),
+}
+```
+
+No more than one initialization message should ever be sent to the collation generation subsystem.
+
+Output: `CollationDistributionMessage`
+
+## Functionality
+
+The process of generating a collation for a parachain is very parachain-specific. As such, the details of how to do so are beyond the scope of this description.
The subsystem should be implemented as an abstract wrapper, which is aware of this configuration:
+
+```rust
+pub struct Collation {
+  /// Hash of `CandidateCommitments` as understood by the collator.
+  pub commitments_hash: Hash,
+  pub proof_of_validity: PoV,
+}
+
+struct CollationGenerationConfig {
+  key: CollatorPair,
+  collator: Box<dyn Fn(&ValidationData) -> Box<dyn Future<Output = Collation>>>,
+  para_id: ParaId,
+}
+```
+
+The configuration should be optional, to allow for the case where the node is not run with the capability to collate.
+
+On `ActiveLeavesUpdate`:
+
+* If there is no collation generation config, ignore.
+* Otherwise, for each `activated` head in the update:
+  * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime API.
+  > TODO: figure out what to do in the case of occupied cores; see [this issue](https://github.com/paritytech/polkadot/issues/1573).
+  * Determine an occupied core assumption to make about the para. For scheduled cores, `OccupiedCoreAssumption::Free` can be used.
+  * Use the Runtime API subsystem to fetch the full validation data.
+  * Invoke the `collator`, and use its outputs to produce a `CandidateReceipt`, signed with the configuration's `key`.
+  * Dispatch a [`CollatorProtocolMessage`][CPM]`::DistributeCollation(receipt, pov)`.
+
+[CP]: collator-protocol.md
+[CPM]: ../../types/overseer-protocol.md#collatorprotocolmessage
diff --git a/roadmap/implementers-guide/src/node/collators/collator-protocol.md b/roadmap/implementers-guide/src/node/collators/collator-protocol.md
new file mode 100644
index 0000000000000000000000000000000000000000..8863c3e23d71c57272688e5e2bcf6ec1de414a71
--- /dev/null
+++ b/roadmap/implementers-guide/src/node/collators/collator-protocol.md
@@ -0,0 +1,119 @@
+# Collator Protocol
+
+The Collator Protocol implements the network protocol by which collators and validators communicate. It is used by collators to distribute collations to validators and used by validators to accept collations from collators.
+
+Collator-to-Validator networking is more difficult than Validator-to-Validator networking because the set of possible collators for any given para is unbounded, unlike the validator set. Validator-to-Validator networking protocols can easily be implemented as gossip because the data can be bounded, and validators can authenticate each other by their `PeerId`s for the purposes of instantiating and accepting connections.
+
+Since, at least at the level of the para abstraction, the collator-set for any given para is unbounded, validators need to make sure that they are receiving connections from capable and honest collators and that their bandwidth and time are not being wasted by attackers. Communicating across this trust-boundary is the most difficult part of this subsystem.
+
+Validation of candidates is a heavy task, and furthermore, the [`PoV`][PoV] itself is a large piece of data. Empirically, `PoV`s are on the order of 10MB.
+
+> TODO: note the incremental validation function Ximin proposes at https://github.com/paritytech/polkadot/issues/1348
+
+As this network protocol serves as a bridge between collators and validators, it communicates primarily with one subsystem on behalf of each. As a collator, this will receive messages from the [`CollationGeneration`][CG] subsystem. As a validator, this will communicate with the [`CandidateBacking`][CB] subsystem.
+
+## Protocol
+
+Input: [`CollatorProtocolMessage`][CPM]
+
+Output:
+  - [`RuntimeApiMessage`][RAM]
+  - [`NetworkBridgeMessage`][NBM]
+
+## Functionality
+
+This network protocol uses the `Collation` peer-set of the [`NetworkBridge`][NB].
+
+It uses the [`CollatorProtocolV1Message`](../../types/network.md#collator-protocol) as its `WireMessage`.
+
+Since this protocol functions both for validators and collators, it is easiest to go through the protocol actions for each of them separately.
+
+The diagram below depicts the connections between validators and collators:
+
+```dot process
+digraph {
+  c1 [shape=MSquare, label="Collator 1"];
+  c2 [shape=MSquare, label="Collator 2"];
+
+  v1 [shape=MSquare, label="Validator 1"];
+  v2 [shape=MSquare, label="Validator 2"];
+
+  c1 -> v1;
+  c1 -> v2;
+  c2 -> v2;
+}
+```
+
+### Collators
+
+It is assumed that collators are only collating on a single parachain. Collations are generated by the [Collation Generation][CG] subsystem. We will keep up to one local collation per relay-parent, based on `DistributeCollation` messages. If the para is not scheduled or next up on any core at the relay-parent, or the relay-parent isn't in the active-leaves set, we ignore the message as it must be invalid in that case - although this indicates a logic error elsewhere in the node.
+
+We keep track of the Para ID we are collating on as a collator. This starts as `None`, and is updated with each `CollateOn` message received. If the `ParaId` of a collation requested to be distributed does not match the one we expect, we ignore the message.
+
+As with most other subsystems, we track the active leaves set by following `ActiveLeavesUpdate` signals.
+
+For the purposes of actually distributing a collation, we need to be connected to the validators who are interested in collations on that `ParaId` at this point in time. We assume that there is a discovery API for connecting to a set of validators.
+
+> TODO: design & expose the discovery API not just for connecting to such peers but also to determine which of our current peers are validators.
+
+As seen in the [Scheduler Module][SCH] of the runtime, validator groups are fixed for an entire session and their rotations across cores are predictable. Collators will want to do these things when attempting to distribute collations at a given relay-parent:
+  * Determine which core the para collated on is assigned to.
+  * Determine the group on that core and the next group on that core.
+  * Issue a discovery request for the validators of the current group and the next group with [`NetworkBridgeMessage`][NBM]`::ConnectToValidators`.
+
+Once connected to the relevant peers for the current group assigned to the core (transitively, the para), advertise the collation to any of them which advertise the relay-parent in their view (as provided by the [Network Bridge][NB]). If any respond with a request for the full collation, provide it. Upon receiving a view update from any of these peers which includes a relay-parent for which we have a collation that they will find relevant, advertise the collation to them if we haven't already.
+
+### Validators
+
+On the validator side of the protocol, validators need to accept incoming connections from collators. They should keep some peer slots open for accepting new speculative connections from collators and should disconnect from collators who are not relevant.
+
+```dot process
+digraph G {
+  label = "Declaring, advertising, and providing collations";
+  labelloc = "t";
+  rankdir = LR;
+
+  subgraph cluster_collator {
+    rank = min;
+    label = "Collator";
+    graph[style = border, rank = min];
+
+    c1, c2 [label = ""];
+  }
+
+  subgraph cluster_validator {
+    rank = same;
+    label = "Validator";
+    graph[style = border];
+
+    v1, v2 [label = ""];
+  }
+
+  c1 -> v1 [label = "Declare and advertise"];
+
+  v1 -> c2 [label = "Request"];
+
+  c2 -> v2 [label = "Provide"];
+
+  v2 -> v2 [label = "Note Good/Bad"];
+}
+```
+
+When peers connect to us, they can `Declare` that they represent a collator with a given public key. Once they've declared that, they can begin to send advertisements of collations. The peers should not send us any advertisements for collations that are on a relay-parent outside of our view.
+
+The protocol tracks advertisements received and the source of the advertisement. The advertisement source is the `PeerId` of the peer who sent the message. We accept one advertisement per collator per source per relay-parent.
+
+As a validator, we will handle requests from other subsystems to fetch a collation on a specific `ParaId` and relay-parent. These requests are made with the [`CollatorProtocolMessage`][CPM]`::FetchCollation`. To do so, we need to first check if we have already gathered a collation on that `ParaId` and relay-parent. If not, we need to select one of the advertisements and issue a request for it. If we've already issued a request, we shouldn't issue another one until the first has returned.
+
+When acting on an advertisement, we issue a `WireMessage::RequestCollation`. If the request times out, we need to note the collator as being unreliable and reduce its priority relative to other collators. And then make another request - repeat until we get a response or the chain has moved on.
+
+As a validator, once the collation has been fetched some other subsystem will inspect and do deeper validation of the collation. The subsystem will report to this subsystem with a [`CollatorProtocolMessage`][CPM]`::ReportCollator` or `NoteGoodCollation` message. If we are connected directly to the collator, we then apply a cost or benefit to the `PeerId` associated with the collator accordingly, potentially disconnecting from or blacklisting collators reported as bad.
+
+[PoV]: ../../types/availability.md#proofofvalidity
+[CPM]: ../../types/overseer-protocol.md#collatorprotocolmessage
+[CG]: collation-generation.md
+[CB]: ../backing/candidate-backing.md
+[NB]: ../utility/network-bridge.md
+[CBM]: ../../types/overseer-protocol.md#candidatebackingmessage
+[RAM]: ../../types/overseer-protocol.md#runtimeapimessage
+[NBM]: ../../types/overseer-protocol.md#networkbridgemessage
+[SCH]: ../../runtime/scheduler.md
diff --git a/roadmap/implementors-guide/src/node/overseer.md b/roadmap/implementers-guide/src/node/overseer.md
similarity index 76%
rename from roadmap/implementors-guide/src/node/overseer.md
rename to roadmap/implementers-guide/src/node/overseer.md
index 27c7c7ebb44d5b298b8af3b11b731bb9df7a949d..0466185430aa1bf750ce1643ee8b08b17095594e 100644
--- a/roadmap/implementors-guide/src/node/overseer.md
+++ b/roadmap/implementers-guide/src/node/overseer.md
@@ -24,7 +24,7 @@ The hierarchy of subsystems:
```

-The overseer determines work to do based on block import events and block finalization events. It does this by keeping track of the set of relay-parents for which work is currently being done. This is known as the "active leaves" set.
It determines an initial set of active leaves on startup based on the data on-disk, and uses events about blockchain import to update the active leaves. Updates lead to [`OverseerSignal`](../types/overseer-protocol.md#overseer-signal)`::StartWork` and [`OverseerSignal`](../types/overseer-protocol.md#overseer-signal)`::StopWork` being sent according to new relay-parents, as well as relay-parents to stop considering. Block import events inform the overseer of leaves that no longer need to be built on, now that they have children, and inform us to begin building on those children. Block finalization events inform us when we can stop focusing on blocks that appear to have been orphaned. +The overseer determines work to do based on block import events and block finalization events. It does this by keeping track of the set of relay-parents for which work is currently being done. This is known as the "active leaves" set. It determines an initial set of active leaves on startup based on the data on-disk, and uses events about blockchain import to update the active leaves. Updates lead to [`OverseerSignal`](../types/overseer-protocol.md#overseer-signal)`::ActiveLeavesUpdate` being sent according to new relay-parents, as well as relay-parents to stop considering. Block import events inform the overseer of leaves that no longer need to be built on, now that they have children, and inform us to begin building on those children. Block finalization events inform us when we can stop focusing on blocks that appear to have been orphaned. The overseer's logic can be described with these functions: @@ -32,15 +32,14 @@ The overseer's logic can be described with these functions: * Start all subsystems * Determine all blocks of the blockchain that should be built on. This should typically be the head of the best fork of the chain we are aware of. Sometimes add recent forks as well. -* For each of these blocks, send an `OverseerSignal::StartWork` to all subsystems. +* Send an `OverseerSignal::ActiveLeavesUpdate` to all subsystems with `activated` containing each of these blocks. * Begin listening for block import and finality events ## On Block Import Event * Apply the block import event to the active leaves. A new block should lead to its addition to the active leaves set and its parent being deactivated. -* For any deactivated leaves send an `OverseerSignal::StopWork` message to all subsystems. -* For any activated leaves send an `OverseerSignal::StartWork` message to all subsystems. -* Ensure all `StartWork` messages are flushed before resuming activity as a message router. +* Send an `OverseerSignal::ActiveLeavesUpdate` message to all subsystems containing all activated and deactivated leaves. +* Ensure all `ActiveLeavesUpdate` messages are flushed before resuming activity as a message router. > TODO: in the future, we may want to avoid building on too many sibling blocks at once. the notion of a "preferred head" among many competing sibling blocks would imply changes in our "active leaves" update rules here @@ -48,7 +47,7 @@ The overseer's logic can be described with these functions: * Note the height `h` of the newly finalized block `B`. * Prune all leaves from the active leaves which have height `<= h` and are not `B`. -* Issue `OverseerSignal::StopWork` for all deactivated leaves. +* Issue `OverseerSignal::ActiveLeavesUpdate` containing all deactivated leaves. 
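+
+The active-leaves bookkeeping described above is small enough to sketch directly. `BlockInfo` and the update type below are simplified stand-ins for whatever types the implementation settles on:
+
+```rust
+use std::collections::HashMap;
+
+type Hash = [u8; 32];
+
+struct BlockInfo { hash: Hash, parent: Hash, number: u64 }
+
+#[derive(Default)]
+struct ActiveLeavesUpdate { activated: Vec<Hash>, deactivated: Vec<Hash> }
+
+#[derive(Default)]
+struct ActiveLeaves { leaves: HashMap<Hash, u64> } // leaf hash -> block number
+
+impl ActiveLeaves {
+    // On block import: the new block becomes a leaf; its parent, if it was
+    // a leaf, now has a child and is deactivated.
+    fn on_block_import(&mut self, block: &BlockInfo) -> ActiveLeavesUpdate {
+        let mut update = ActiveLeavesUpdate::default();
+        if self.leaves.remove(&block.parent).is_some() {
+            update.deactivated.push(block.parent);
+        }
+        self.leaves.insert(block.hash, block.number);
+        update.activated.push(block.hash);
+        update
+    }
+
+    // On finalization of block `finalized` at height `h`: prune all leaves
+    // of height <= h other than the finalized block itself.
+    fn on_block_finalized(&mut self, finalized: Hash, h: u64) -> ActiveLeavesUpdate {
+        let mut update = ActiveLeavesUpdate::default();
+        self.leaves.retain(|hash, number| {
+            let keep = *number > h || *hash == finalized;
+            if !keep {
+                update.deactivated.push(*hash);
+            }
+            keep
+        });
+        update
+    }
+}
+```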
## On Subsystem Failure

@@ -78,15 +77,19 @@ When a subsystem wants to communicate with another subsystem, or, more typically

First, the subsystem that spawned a job is responsible for handling the first step of the communication. The overseer is not aware of the hierarchy of tasks within any given subsystem and is only responsible for subsystem-to-subsystem communication. So the sending subsystem must pass on the message via the overseer to the receiving subsystem, in such a way that the receiving subsystem can further address the communication to one of its internal tasks, if necessary.

-This communication prevents a certain class of race conditions. When the Overseer determines that it is time for subsystems to begin working on top of a particular relay-parent, it will dispatch a `StartWork` message to all subsystems to do so, and those messages will be handled asynchronously by those subsystems. Some subsystems will receive those messsages before others, and it is important that a message sent by subsystem A after receiving `StartWork` message will arrive at subsystem B after its `StartWork` message. If subsystem A maintaned an independent channel with subsystem B to communicate, it would be possible for subsystem B to handle the side message before the `StartWork` message, but it wouldn't have any logical course of action to take with the side message - leading to it being discarded or improperly handled. Well-architectured state machines should have a single source of inputs, so that is what we do here.
+This communication prevents a certain class of race conditions. When the Overseer determines that it is time for subsystems to begin working on top of a particular relay-parent, it will dispatch an `ActiveLeavesUpdate` message to all subsystems to do so, and those messages will be handled asynchronously by those subsystems. Some subsystems will receive those messages before others, and it is important that a message sent by subsystem A after receiving the `ActiveLeavesUpdate` message will arrive at subsystem B after its `ActiveLeavesUpdate` message. If subsystem A maintained an independent channel with subsystem B to communicate, it would be possible for subsystem B to handle the side message before the `ActiveLeavesUpdate` message, but it wouldn't have any logical course of action to take with the side message - leading to it being discarded or improperly handled. Well-architected state machines should have a single source of inputs, so that is what we do here.

-One exception is reasonable to make for responses to requests. A request should be made via the overseer in order to ensure that it arrives after any relevant `StartWork` message. A subsystem issuing a request as a result of a `StartWork` message can safely receive the response via a side-channel for two reasons:
+One exception is reasonable to make for responses to requests. A request should be made via the overseer in order to ensure that it arrives after any relevant `ActiveLeavesUpdate` message. A subsystem issuing a request as a result of an `ActiveLeavesUpdate` message can safely receive the response via a side-channel for two reasons:

1. Since it's impossible for a request to be answered before it arrives, it is provable that any response to a request obeys the same ordering constraint.
-1. The request was sent as a result of handling a `StartWork` message. Then there is no possible future in which the `StartWork` message has not been handled upon the receipt of the response.
+1.
The request was sent as a result of handling an `ActiveLeavesUpdate` message. Then there is no possible future in which the `ActiveLeavesUpdate` message has not been handled upon the receipt of the response.

So as a single exception to the rule that all communication must happen via the overseer we allow the receipt of responses to requests via a side-channel, which may be established for that purpose. This simplifies any cases where the outside world desires to make a request to a subsystem, as the outside world can then establish a side-channel to receive the response on.

It's important to note that the overseer is not aware of the internals of subsystems, and this extends to the jobs that they spawn. The overseer isn't aware of the existence or definition of those jobs, and is only aware of the outer subsystems with which it interacts. This gives subsystem implementations leeway to define internal jobs as they see fit, and to wrap a more complex hierarchy of state machines than having a single layer of jobs for relay-parent-based work. Likewise, subsystems aren't required to spawn jobs. Certain types of subsystems, such as those for shared storage or networking resources, won't perform block-based work but would still benefit from being on the Overseer's message bus. These subsystems can just ignore the overseer's signals for block-based work.

Furthermore, the protocols by which subsystems communicate with each other should be well-defined irrespective of the implementation of the subsystem. In other words, their interface should be distinct from their implementation. This will prevent subsystems from accessing aspects of each other that are beyond the scope of the communication boundary.
+
+## On shutdown
+
+Send an `OverseerSignal::Conclude` message to each subsystem and wait some time for them to conclude before hard-exiting.
diff --git a/roadmap/implementors-guide/src/node/subsystems-and-jobs.md b/roadmap/implementers-guide/src/node/subsystems-and-jobs.md
similarity index 86%
rename from roadmap/implementors-guide/src/node/subsystems-and-jobs.md
rename to roadmap/implementers-guide/src/node/subsystems-and-jobs.md
index a9a65b3c43d2b95f58c574234f3f9f9866c6eb3e..8ca504f080f7cde4304dfe69c879c846b2eb7187 100644
--- a/roadmap/implementors-guide/src/node/subsystems-and-jobs.md
+++ b/roadmap/implementers-guide/src/node/subsystems-and-jobs.md
@@ -9,3 +9,5 @@ Most work that happens on the Node-side is related to building on top of a speci

Since this goal of determining when to start and conclude work relative to a specific relay-parent is common to most, if not all subsystems, it is logically the job of the Overseer to distribute those signals as opposed to each subsystem duplicating that effort, potentially being out of synchronization with each other. Subsystem A should be able to expect that subsystem B is working on the same relay-parents as it is. One of the Overseer's tasks is to provide this heartbeat, or synchronized rhythm, to the system.

The work that subsystems spawn to be done on a specific relay-parent is known as a job. Subsystems should set up and tear down jobs according to the signals received from the overseer. Subsystems may share or cache state between jobs.
+
+Subsystems must be robust to spurious exits. The outputs of the set of subsystems as a whole comprises signed messages and data committed to disk. Care must be taken to avoid issuing messages that are not substantiated.
Since subsystems need to be safe under spurious exits, it is the expected behavior that an `OverseerSignal::Conclude` can just lead to breaking the loop and exiting directly as opposed to waiting for everything to shut down gracefully.
diff --git a/roadmap/implementors-guide/src/node/utility/README.md b/roadmap/implementers-guide/src/node/utility/README.md
similarity index 100%
rename from roadmap/implementors-guide/src/node/utility/README.md
rename to roadmap/implementers-guide/src/node/utility/README.md
diff --git a/roadmap/implementers-guide/src/node/utility/availability-store.md b/roadmap/implementers-guide/src/node/utility/availability-store.md
new file mode 100644
index 0000000000000000000000000000000000000000..f8d6f1b67a3688ce0d555c002ca322516e0e9c06
--- /dev/null
+++ b/roadmap/implementers-guide/src/node/utility/availability-store.md
@@ -0,0 +1,201 @@
+# Availability Store
+
+This is a utility subsystem responsible for keeping available certain data and pruning that data.
+
+The two data types:
+
+- Full PoV blocks of candidates we have validated
+- Availability chunks of candidates that were backed and noted available on-chain.
+
+For each of these data types we have pruning rules that determine how long we need to keep that data available.
+
+PoVs hypothetically only need to be kept around until the block where the data was made fully available is finalized. However, disputes can revert finality, so we need to be a bit more conservative. We should keep the PoV until a block that finalized availability of it has been finalized for 1 day.
+
+> TODO: arbitrary, but extracting `acceptance_period` is kind of hard here...
+
+Availability chunks need to be kept available until the dispute period for the corresponding candidate has ended. We can accomplish this by using the same criterion as the above, plus a delay. This gives us a pruning condition of the block finalizing availability of the chunk being final for 1 day + 1 hour.
+
+> TODO: again, concrete acceptance-period would be nicer here, but complicates things
+
+There is also the case where a validator commits to make a PoV available, but the corresponding candidate is never backed. In this case, we keep the PoV available for 1 hour.
+
+> TODO: ideally would be an upper bound on how far back contextual execution is OK.
+
+There may be multiple competing blocks all ending the availability phase for a particular candidate. Until (and slightly beyond) finality, it will be unclear which of those is actually the canonical chain, so the pruning records for PoVs and Availability chunks should keep track of all such blocks.
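+
+These rules boil down to a handful of deadlines. A sketch, with illustrative constants (the TODOs above note that deriving them from a concrete `acceptance_period` would be preferable):
+
+```rust
+use std::time::{Duration, Instant};
+
+const KEEP_FINALIZED_POV: Duration = Duration::from_secs(24 * 60 * 60); // 1 day
+const KEEP_FINALIZED_CHUNK: Duration = Duration::from_secs(25 * 60 * 60); // 1 day + 1 hour
+const KEEP_UNBACKED_POV: Duration = Duration::from_secs(60 * 60); // 1 hour
+
+enum PovState {
+    /// Stored, but the corresponding candidate was never backed.
+    Unbacked { stored_at: Instant },
+    /// A block ending the availability phase has itself been finalized.
+    AvailabilityFinalized { finalized_at: Instant },
+}
+
+fn pov_prune_at(state: &PovState) -> Instant {
+    match state {
+        PovState::Unbacked { stored_at } => *stored_at + KEEP_UNBACKED_POV,
+        PovState::AvailabilityFinalized { finalized_at } => *finalized_at + KEEP_FINALIZED_POV,
+    }
+}
+
+fn chunk_prune_at(finalized_at: Instant) -> Instant {
+    finalized_at + KEEP_FINALIZED_CHUNK
+}
+```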
+
+## Lifetime of the PoV in the storage
+
+```dot process
+digraph {
+  label = "Block life FSM\n\n\n";
+  labelloc = "t";
+  rankdir="LR";
+
+  st [label = "Stored"; shape = circle]
+  inc [label = "Included"; shape = circle]
+  fin [label = "Finalized"; shape = circle]
+  prn [label = "Pruned"; shape = circle]
+
+  st -> inc [label = "Block\nincluded"]
+  st -> prn [label = "Stored block\ntimed out"]
+  inc -> fin [label = "Block\nfinalized"]
+  fin -> prn [label = "Block keep time\n(1 day) elapsed"]
+}
+```
+
+## Lifetime of the chunk in the storage
+
+```dot process
+digraph {
+  label = "Chunk life FSM\n\n\n";
+  labelloc = "t";
+  rankdir="LR";
+
+  chst [label = "Chunk\nStored"; shape = circle]
+  st [label = "Block\nStored"; shape = circle]
+  inc [label = "Included"; shape = circle]
+  fin [label = "Finalized"; shape = circle]
+  prn [label = "Pruned"; shape = circle]
+
+  chst -> inc [label = "Block\nincluded"]
+  st -> inc [label = "Block\nincluded"]
+  st -> prn [label = "Stored block\ntimed out"]
+  inc -> fin [label = "Block\nfinalized"]
+  fin -> prn [label = "Block keep time\n(1 day + 1 hour) elapsed"]
+}
+```
+
+## Protocol
+
+Input: [`AvailabilityStoreMessage`][ASM]
+
+Output:
+- [`RuntimeApiMessage`][RAM]
+
+## Functionality
+
+On `ActiveLeavesUpdate`:
+
+For each head in the `activated` list:
+  - Note any new candidates backed in the block. Update pruning records for any stored `PoVBlock`s.
+  - Note any newly-included candidates backed in the block. Update pruning records for any stored availability chunks.
+
+On `OverseerSignal::BlockFinalized(_)` events:
+
+- Handle all pruning based on the newly-finalized block.
+
+On `QueryPoV` message:
+
+- Return the PoV block, if any, for that candidate hash.
+
+On `QueryChunk` message:
+
+- Determine if we have the chunk indicated by the parameters and return it and its inclusion proof via the response channel if so.
+
+On `StoreChunk` message:
+
+- Store the chunk along with its inclusion proof under the candidate hash and validator index.
+
+On `StorePoV` message:
+
+- Store the block; if the validator index is provided, store the respective chunk as well.
+
+On finality event:
+
+- For the finalized block and any earlier block (if any) update pruning records of `PoV`s and chunks to keep them for the respective periods after finality.
+
+### Note any backed, included and timed-out candidates in the block by `hash`
+
+- Create a `(sender, receiver)` pair.
+- Dispatch a [`RuntimeApiMessage`][RAM]`::Request(hash, RuntimeApiRequest::CandidateEvents(sender))` and listen on the receiver for a response.
+- For every event in the response:
+  * For every `CandidateEvent::CandidateBacked` do nothing.
+  * For every `CandidateEvent::CandidateIncluded` update pruning records of any blocks that the node stored previously.
+  * For every `CandidateEvent::CandidateTimedOut` use pruning records to prune the data; delete the info from records.
+
+## Schema
+
+### PoV pruning
+
+We keep a record for every PoV we store, tracking its state and the time after which this PoV should be pruned.
+
+As the state of the `Candidate` changes, so does the `Prune At` time according to the rules defined earlier.
+
+| Record 1 | .. | Record N |
+|----------------|----|----------------|
+| CandidateHash1 | .. | CandidateHashN |
+| Prune At | .. | Prune At |
+| CandidateState | .. | CandidateState |
+
+### Chunk pruning
+
+Chunk pruning is organized in a similar schema as PoV pruning.
+
+| Record 1 | .. | Record N |
+|----------------|----|----------------|
+| CandidateHash1 | .. | CandidateHashN |
+| Prune At | .. | Prune At |
+| CandidateState | .. | CandidateState |
+
+### Included blocks caching
+
+In order to process finality events correctly we need to cache the set of parablocks included in each relay block, beginning with the last finalized block and up to the most recent heads. We have to cache this data since we are only able to query this info from the state for the `k` last blocks, where `k` is a relatively small number (for more info see `Assumptions`).
+
+These are used to update Chunk pruning and PoV pruning records upon finality:
+When another block finality notification is received:
+  - For any record older than this block:
+    - Update pruning
+    - Remove the record
+
+| Relay Block N | .. | Chain Head 1 | Chain Head 2 |
+|---------------|----|--------------|--------------|
+| CandidateN_1 Included | .. | Candidate1_1 Included | Candidate2_1 Included |
+| CandidateN_2 Included | .. | Candidate1_2 Included | Candidate2_2 Included |
+| .. | .. | .. | .. |
+| CandidateN_M Included | .. | Candidate1_K Included | Candidate2_L Included |
+
+> TODO: It's likely we will have to have a way to go from block hash to `BlockNumber` to make this work.
+
+### Blocks
+
+Blocks are simply stored as `(Hash, AvailableData)` key-value pairs.
+
+### Chunks
+
+Chunks are stored as `(Hash, Vec<ErasureChunk>)` key-value pairs.
+
+## Basic scenarios to test
+
+Basically we need to test the correctness of data flow through the state FSMs described earlier. These tests obviously assume that some mocking of time is happening.
+
+- Stored data that is never included is pruned after the timeout
+  - A block (and/or a chunk) is added to the store.
+  - We never note that the respective candidate is included.
+  - Until a defined timeout the data in question is available.
+  - After this timeout the data is no longer available.
+
+- Stored data is kept until we are certain it is finalized.
+  - A block (and/or a chunk) is added to the store.
+  - It is available.
+  - Before the inclusion timeout expires notify storage that the candidate was included.
+  - The data is still available.
+  - Wait for an absurd amount of time (longer than 1 day).
+  - Check that the data is still available.
+  - Send finality notification about the block in question.
+  - Wait for some time below finalized data timeout.
+  - The data is still available.
+  - Wait until the data should have been pruned.
+  - The data is no longer available.
+
+- Forkfulness of the relay chain is taken into account
+  - Block `B1` is added to the store.
+  - Block `B2` is added to the store.
+  - Notify the subsystem that both `B1` and `B2` were included in different leafs of relay chain.
+  - Notify the subsystem that the leaf with `B1` was finalized.
+  - Leaf with `B2` is never finalized.
+  - Leaf with `B2` is pruned and its data is no longer available.
+  - Wait until the finalized data of `B1` should have been pruned.
+  - `B1` is no longer available.
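+
+To make the finality-driven part of these scenarios concrete, here is a sketch of the included-blocks cache with illustrative types; resolving records below the finalized height onto the canonical chain depends on the hash-to-`BlockNumber` lookup flagged in the TODO above:
+
+```rust
+use std::collections::{BTreeMap, HashMap};
+
+type BlockNumber = u64;
+type Hash = [u8; 32];
+type CandidateHash = [u8; 32];
+
+#[derive(Default)]
+struct IncludedCache {
+    // Relay block number -> (relay block hash -> candidates it included).
+    by_number: BTreeMap<BlockNumber, HashMap<Hash, Vec<CandidateHash>>>,
+}
+
+impl IncludedCache {
+    fn note_included(&mut self, n: BlockNumber, relay: Hash, candidates: Vec<CandidateHash>) {
+        self.by_number.entry(n).or_default().insert(relay, candidates);
+    }
+
+    // On a finality notification for `finalized` at height `n`: drop every
+    // record at or below `n` and return the candidates included by the
+    // finalized block itself, whose pruning deadlines now start ticking.
+    fn on_finalized(&mut self, n: BlockNumber, finalized: Hash) -> Vec<CandidateHash> {
+        let live = self.by_number.split_off(&(n + 1));
+        let resolved = std::mem::replace(&mut self.by_number, live);
+        resolved
+            .get(&n)
+            .and_then(|blocks| blocks.get(&finalized))
+            .cloned()
+            .unwrap_or_default()
+    }
+}
+```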

[RAM]: ../../types/overseer-protocol.md#runtime-api-message
[ASM]: ../../types/overseer-protocol.md#availability-store-message
diff --git a/roadmap/implementers-guide/src/node/utility/candidate-validation.md b/roadmap/implementers-guide/src/node/utility/candidate-validation.md
new file mode 100644
index 0000000000000000000000000000000000000000..19108bd78e811f48bb42d714595b1e06de4ec58b
--- /dev/null
+++ b/roadmap/implementers-guide/src/node/utility/candidate-validation.md
# Candidate Validation

This subsystem is responsible for handling candidate validation requests. It is a simple request/response server.

A variety of subsystems want to know if a parachain block candidate is valid. None of them care about the detailed mechanics of how a candidate gets validated, just the results. This subsystem handles those details.

## Protocol

Input: [`CandidateValidationMessage`](../../types/overseer-protocol.md#validation-request-type)

Output: Validation result via the provided response side-channel.

## Functionality

This subsystem answers two types of requests: one which draws out validation data from the state, and another which accepts all validation data exhaustively. The goal of both request types is to validate a candidate. There are three possible outputs of validation: either the candidate is valid, the candidate is invalid, or an internal error occurred. Whatever the end result is, it will be returned on the response channel to the requestor.

Parachain candidates are validated against their validation function: a piece of Wasm code that describes the state-transition of the parachain. Validation function execution is not metered. This means that an execution which is an infinite loop or simply takes too long must be forcibly exited by some other means. For this reason, we recommend dispatching candidate validation to be done on subprocesses which can be killed if they time out.

Upon receiving a validation request, the first thing the candidate validation subsystem should do is make sure it has all the necessary parameters to the validation function. These are:
  * The Validation Function itself.
  * The [`CandidateDescriptor`](../../types/candidate.md#candidatedescriptor).
  * The [`ValidationData`](../../types/candidate.md#validationdata).
  * The [`PoV`](../../types/availability.md#proofofvalidity).

### Determining Parameters

For a [`CandidateValidationMessage`][CVM]`::ValidateFromExhaustive`, these parameters are exhaustively provided. The [`TransientValidationData`](../../types/candidate.md#transientvalidationdata) is optional, and is used to perform further checks on the outputs of validation.

For a [`CandidateValidationMessage`][CVM]`::ValidateFromChainState`, some more work needs to be done. Due to the uncertainty of Availability Cores (implemented in the [`Scheduler`](../../runtime/scheduler.md) module of the runtime), a candidate at a particular relay-parent and for a particular para may have two different valid validation-data to be executed under, depending on what is assumed to happen if the para is occupying a core at the onset of the new block. This is encoded as an `OccupiedCoreAssumption` in the runtime API.

The way to determine which assumption the candidate is meant to be executed under is simply to do an exhaustive check of both possibilities based on the state of the relay-parent. First we fetch the validation data under the assumption that the block occupying the core becomes available.
If the `validation_data_hash` of the `CandidateDescriptor` matches this validation data, we use that. Otherwise, if the `validation_data_hash` matches the validation data fetched under the `TimedOut` assumption, we use that. Otherwise, we return a `ValidationResult::Invalid` response and conclude. + +Then, we can fetch the validation code from the runtime based on which type of candidate this is. This gives us all the parameters. The descriptor and PoV come from the request itself, and the other parameters have been derived from the state. + +> TODO: This would be a great place for caching to avoid making lots of runtime requests. That would need a job, though. + +### Execution of the Parachain Wasm + +Once we have all parameters, we can spin up a background task to perform the validation in a way that doesn't hold up the entire event loop. Before invoking the validation function itself, this should first do some basic checks: + * The collator signature is valid + * The PoV provided matches the `pov_hash` field of the descriptor + +After that, we can invoke the validation function. Lastly, if available, we do some final checks on the output using the `TransientValidationData`: + * The produced head-data is no larger than the maximum allowed. + * The produced code upgrade, if any, is no larger than the maximum allowed, and a code upgrade was allowed to be signaled. + * The amount and size of produced upward messages is not too large. + +[CVM]: ../../types/overseer-protocol.md#validationrequesttype diff --git a/roadmap/implementers-guide/src/node/utility/chain-api.md b/roadmap/implementers-guide/src/node/utility/chain-api.md new file mode 100644 index 0000000000000000000000000000000000000000..6469db262ab5fa812ae7e8998f427bef2fa5c512 --- /dev/null +++ b/roadmap/implementers-guide/src/node/utility/chain-api.md @@ -0,0 +1,19 @@ +# Chain API + +The Chain API subsystem is responsible for providing a single point of access to chain state data via a set of pre-determined queries. + +## Protocol + +Input: [`ChainApiMessage`](../../types/overseer-protocol.md#chain-api-message) + +Output: None + +## Functionality + +On receipt of `ChainApiMessage`, answer the request and provide the response to the side-channel embedded within the request. + +Currently, the following requests are supported: +* Block hash to number +* Finalized block number to hash +* Last finalized block number +* Ancestors diff --git a/roadmap/implementors-guide/src/node/utility/misbehavior-arbitration.md b/roadmap/implementers-guide/src/node/utility/misbehavior-arbitration.md similarity index 100% rename from roadmap/implementors-guide/src/node/utility/misbehavior-arbitration.md rename to roadmap/implementers-guide/src/node/utility/misbehavior-arbitration.md diff --git a/roadmap/implementers-guide/src/node/utility/network-bridge.md b/roadmap/implementers-guide/src/node/utility/network-bridge.md new file mode 100644 index 0000000000000000000000000000000000000000..ef04090629f177b21e56a1e1e5bc24e5fbc82079 --- /dev/null +++ b/roadmap/implementers-guide/src/node/utility/network-bridge.md @@ -0,0 +1,123 @@ +# Network Bridge + +One of the main features of the overseer/subsystem duality is to avoid shared ownership of resources and to communicate via message-passing. However, implementing each networking subsystem as its own network protocol brings a fair share of challenges. + +The most notable challenge is coordinating and eliminating race conditions of peer connection and disconnection events. 
If we have many network protocols that peers are supposed to be connected on, it is difficult to enforce that a peer is indeed connected on all of them, or to control the order in which those protocols receive notifications that peers have connected. This becomes especially difficult when attempting to share peer state across protocols. All of the Parachain-Host's gossip protocols eliminate DoS with a data-dependency on current chain heads. However, it is inefficient and confusing to implement the logic for tracking our current chain heads as well as our peers' on each of those subsystems. Having one subsystem for tracking this shared state and distributing it to the others is an improvement in architecture and efficiency.

One other piece of shared state to track is peer reputation. When peers are found to have provided value or cost, we adjust their reputation accordingly.

So in short, this subsystem acts as a bridge between an actual network component and a subsystem's protocol. The implementation of the underlying network component is beyond the scope of this module. We make certain assumptions about the network component:
  * The network allows registering of protocols and multiple versions of each protocol.
  * The network handles version negotiation of protocols with peers and only connects the peer on the highest version of the protocol.
  * Each protocol has its own peer-set, although there may be some overlap.
  * The network provides peer-set management utilities for discovering the peer-IDs of validators and a means of dialing peers with given IDs.

The network bridge makes use of the peer-set feature, but is not generic over peer-set. Instead, it exposes two peer-sets that event producers can attach to: `Validation` and `Collation`. More information can be found in the documentation of the [`NetworkBridgeMessage`][NBM].

## Protocol

Input: [`NetworkBridgeMessage`][NBM]

Output:
  - [`AvailabilityDistributionMessage`][AvD]`::NetworkBridgeUpdateV1`
  - [`BitfieldDistributionMessage`][BitD]`::NetworkBridgeUpdateV1`
  - [`PoVDistributionMessage`][PoVD]`::NetworkBridgeUpdateV1`
  - [`StatementDistributionMessage`][StmtD]`::NetworkBridgeUpdateV1`
  - [`CollatorProtocolMessage`][CollP]`::NetworkBridgeUpdateV1`

## Functionality

The network bridge sends messages of the following type over the network:

```rust
enum ProtocolMessage<M> {
    ProtocolMessage(M),
    ViewUpdate(View),
}
```

and instantiates this type twice, once using the [`ValidationProtocolV1`][VP1] message type, and once with the [`CollationProtocolV1`][CP1] message type.

```rust
type ValidationV1Message = ProtocolMessage<ValidationProtocolV1>;
type CollationV1Message = ProtocolMessage<CollationProtocolV1>;
```

### Startup

On startup, we register two protocols with the underlying network utility: one for validation and one for collation. We register only version 1 of each of these protocols.

### Main Loop

The bulk of the work done by this subsystem is in responding to network events, signals from the overseer, and messages from other subsystems.

Each network event is associated with a particular peer-set.

### Overseer Signal: ActiveLeavesUpdate

The `activated` and `deactivated` lists determine the evolution of our local view over time. A `ProtocolMessage::ViewUpdate` is issued to each connected peer on each peer-set, and a `NetworkBridgeEvent::OurViewChange` is issued to each event handler for each protocol.

If we are connected to the same peer on both peer-sets, we will send the peer two view updates as a result.
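As a rough illustration of that fan-out, consider the sketch below; `View`, `PeerSet`, and the `send` callback are stand-ins assumed purely for illustration, not the real node API.

```rust
// Stand-in types for illustration only.
type Hash = [u8; 32];
type PeerId = u64;

#[derive(Clone)]
struct View(Vec<Hash>);

#[derive(Clone, Copy)]
enum PeerSet { Validation, Collation }

/// Fan out our new local view to every connected peer on every peer-set.
/// A peer connected on both peer-sets receives two view updates.
fn fan_out_view_update(
    connected: &[(PeerSet, PeerId)],
    new_view: &View,
    mut send: impl FnMut(PeerSet, PeerId, View),
) {
    for (peer_set, peer) in connected {
        send(*peer_set, *peer, new_view.clone());
    }
}
```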

### Network Event: Peer Connected

Issue a `NetworkBridgeEvent::PeerConnected` for each [Event Handler](#event-handlers) of the peer-set and negotiated protocol version of the peer.

### Network Event: Peer Disconnected

Issue a `NetworkBridgeEvent::PeerDisconnected` for each [Event Handler](#event-handlers) of the peer-set and negotiated protocol version of the peer.

### Network Event: ProtocolMessage

Map the message onto the corresponding [Event Handler](#event-handlers) based on the peer-set this message was received on and dispatch via overseer.

### Network Event: ViewUpdate

- Check that the new view is valid and note it as the most recent view update of the peer on this peer-set.
- Map a `NetworkBridgeEvent::PeerViewChange` onto the corresponding [Event Handler](#event-handlers) based on the peer-set this message was received on and dispatch via overseer.

### ReportPeer

- Adjust peer reputation according to the cost or benefit provided.

### SendValidationMessage

- Issue a corresponding `ProtocolMessage` to each listed peer on the validation peer-set.

### SendCollationMessage

- Issue a corresponding `ProtocolMessage` to each listed peer on the collation peer-set.

### ConnectToValidators

- Determine the DHT keys to use for each validator based on the relay-chain state and Runtime API.
- Recover the Peer IDs of the validators from the DHT. There may be more than one peer ID per validator.
- Accumulate all `(ValidatorId, PeerId)` pairs and send them on the response channel.
- Feed all Peer IDs to the peer-set manager the underlying network provides, indicating the expected peer-set.

## Event Handlers

Network bridge event handlers are the intended recipients of particular network protocol messages. These are each a variant of a message to be sent via the overseer.

### Validation V1

* `StatementDistributionV1Message -> StatementDistributionMessage::NetworkBridgeUpdateV1`
* `PoVDistributionV1Message -> PoVDistributionMessage::NetworkBridgeUpdateV1`
* `AvailabilityDistributionV1Message -> AvailabilityDistributionMessage::NetworkBridgeUpdateV1`
* `BitfieldDistributionV1Message -> BitfieldDistributionMessage::NetworkBridgeUpdateV1`

### Collation V1

* `CollatorProtocolV1Message -> CollatorProtocolMessage::NetworkBridgeUpdateV1`

[NBM]: ../../types/overseer-protocol.md#network-bridge-message
[AvD]: ../../types/overseer-protocol.md#availability-distribution-message
[BitD]: ../../types/overseer-protocol.md#bitfield-distribution-message
[PoVD]: ../../types/overseer-protocol.md#pov-distribution-message
[StmtD]: ../../types/overseer-protocol.md#statement-distribution-message
[CollP]: ../../types/overseer-protocol.md#collator-protocol-message

[VP1]: ../../types/network.md#validation-v1
[CP1]: ../../types/network.md#collation-v1
diff --git a/roadmap/implementors-guide/src/node/utility/peer-set-manager.md b/roadmap/implementers-guide/src/node/utility/peer-set-manager.md
similarity index 100%
rename from roadmap/implementors-guide/src/node/utility/peer-set-manager.md
rename to roadmap/implementers-guide/src/node/utility/peer-set-manager.md
diff --git a/roadmap/implementers-guide/src/node/utility/provisioner.md b/roadmap/implementers-guide/src/node/utility/provisioner.md
new file mode 100644
index 0000000000000000000000000000000000000000..ec417e5daa03f918d4707d8fb4f2e5432f9df1f1
--- /dev/null
+++ b/roadmap/implementers-guide/src/node/utility/provisioner.md
# Provisioner

Relay chain block authorship authority is governed by BABE and is beyond the scope of the Overseer and the rest of the subsystems. That said, ultimately the block author needs to select a set of backable parachain candidates and other consensus data, and assemble a block from them. This subsystem is responsible for providing the necessary data to all potential block authors.

A major feature of the provisioner is that this subsystem is responsible for ensuring that parachain block candidates are sufficiently available before sending them to potential block authors.

## Provisionable Data

There are several distinct types of provisionable data, but they share one property: all should eventually be included in a relay chain block.

### Backed Candidates

The block author can choose 0 or 1 backed parachain candidates per parachain; the only constraint is that each backed candidate has the appropriate relay parent. However, the choice of a backed candidate must be the block author's; the provisioner must ensure that block authors are aware of all available [`BackedCandidate`s](../../types/backing.md#backed-candidate).

### Signed Bitfields

[Signed bitfields](../../types/availability.md#signed-availability-bitfield) are attestations from a particular validator about which candidates it believes are available.

### Misbehavior Reports

Misbehavior reports are self-contained proofs of misbehavior by a validator or group of validators. For example, it is very easy to verify a double-voting misbehavior report: the report contains two votes signed by the same key, advocating different outcomes. Concretely, misbehavior reports become inherents which cause DOT to be slashed.

Note that there is no mechanism in place which forces a block author to include a misbehavior report which it doesn't like, for example if it would be slashed by such a report. The chain's defense against this is to have a relatively long slash period, such that it's likely to encounter an honest author before the slash period expires.

### Dispute Inherent

The dispute inherent is similar to a misbehavior report in that it is an attestation of misbehavior on the part of a validator or group of validators. Unlike a misbehavior report, it is not self-contained: resolution requires coordinated action by several validators. The canonical example of a dispute inherent involves an approval checker discovering that a set of validators has improperly approved an invalid parachain block: resolving this requires the entire validator set to re-validate the block, so that the minority can be slashed.

Dispute resolution is complex and is explained in substantially more detail [here](../../runtime/validity.md).

> TODO: The provisioner is responsible for selecting remote disputes to replay. Let's figure out the details.

## Protocol

Input: [`ProvisionerMessage`](../../types/overseer-protocol.md#provisioner-message). Backed candidates come from the [Candidate Backing subsystem](../backing/candidate-backing.md), signed bitfields come from the [Bitfield Distribution subsystem](../availability/bitfield-distribution.md), and misbehavior reports and disputes come from the [Misbehavior Arbitration subsystem](misbehavior-arbitration.md).

At initialization, this subsystem has no outputs. Block authors can send a `ProvisionerMessage::RequestBlockAuthorshipData`, which includes a channel over which provisionable data can be sent. All appropriate provisionable data will then be sent over this channel as it is received.

Note that block authors must re-send a `ProvisionerMessage::RequestBlockAuthorshipData` for each relay parent they are interested in receiving provisionable data for.

## Block Production

When a validator is selected by BABE to author a block, it becomes a block producer. The provisioner is the subsystem best suited to choosing which specific backed candidates and availability bitfields should be assembled into the block. To engage this functionality, a `ProvisionerMessage::RequestInherentData` is sent; the response is a set of non-conflicting candidates and the appropriate bitfields. Non-conflicting means that there are never two distinct parachain candidates included for the same parachain, and that a new parachain candidate cannot be backed until the previous one either gets declared available or expires.

### Bitfield Selection

Our goal with respect to bitfields is simple: maximize availability. However, it's not quite as simple as always including all bitfields; there are constraints which still need to be met:

- We cannot choose more than one bitfield per validator.
- Each bitfield must correspond to an occupied core.

Beyond that, a semi-arbitrary selection policy is fine. In order to meet the goal of maximizing availability, a useful heuristic is, in the event of a conflict, to pick the bitfield with the greatest number of 1 bits set.

### Candidate Selection

The goal of candidate selection is to determine which cores are free, and then, to the degree possible, pick a candidate appropriate to each free core.

To determine availability:

- Get the list of core states from the runtime API.
- For each core state:
  - If the core is `CoreState::Scheduled`, we can make an `OccupiedCoreAssumption::Free`.
  - If the core is `CoreState::Occupied`, we may be able to make an assumption:
    - If the bitfields indicate availability and there is a scheduled `next_up_on_available`, then we can make an `OccupiedCoreAssumption::Included`.
    - If the bitfields do not indicate availability, there is a scheduled `next_up_on_time_out`, and `occupied_core.time_out_at == block_number_under_production`, then we can make an `OccupiedCoreAssumption::TimedOut`.
  - If we did not make an `OccupiedCoreAssumption`, then continue on to the next core.
  - Now compute the core's `validation_data_hash`: get the `PersistedValidationData` from the runtime, given the known `ParaId` and `OccupiedCoreAssumption`.
  - Find an appropriate candidate for the core.
    - There are two constraints: `backed_candidate.candidate.descriptor.para_id == scheduled_core.para_id && candidate.candidate.descriptor.validation_data_hash == computed_validation_data_hash`.
    - In the event that more than one candidate meets the constraints, selection between the candidates is arbitrary. However, not more than one candidate can be selected per core.

The end result of this process is a vector of `BackedCandidate`s, sorted in order of their core index.

### Determining Bitfield Availability

An occupied core has a `CoreAvailability` bitfield. We also have a list of `SignedAvailabilityBitfield`s. We need to determine from these whether or not a core at a particular index has become available.

The key insight required is that `CoreAvailability` is transverse to the `SignedAvailabilityBitfield`s: if we conceptualize the list of bitfields as many rows, each bit of which is its own column, then `CoreAvailability` for a given core index is the vertical slice of bits in the set at that index.

To compute bitfield availability, then (a code sketch follows below):

- Start with a copy of `OccupiedCore.availability`
- For each bitfield in the list of `SignedAvailabilityBitfield`s:
  - Get the bitfield's `validator_index`
  - Update the availability. Conceptually, assuming bit vectors: `availability[validator_index] |= bitfield[core_idx]`
- Availability has a 2/3 threshold. Therefore: `3 * availability.count_ones() >= 2 * availability.len()`

### Notes

See also: [Scheduler Module: Availability Cores](../../runtime/scheduler.md#availability-cores).

One might ask: given `ProvisionerMessage::RequestInherentData`, what's the point of `ProvisionerMessage::RequestBlockAuthorshipData`? The answer is that the block authorship data includes more information than is present in the inherent data; disputes, for example.

## Functionality

The subsystem should maintain a set of handles to Block Authorship Provisioning Jobs that are currently live.

### On Overseer Signal

- `ActiveLeavesUpdate`:
  - For each `activated` head:
    - spawn a Block Authorship Provisioning Job with the given relay parent, storing a bidirectional channel with that job.
  - For each `deactivated` head:
    - terminate the Block Authorship Provisioning Job for the given relay parent, if any.
- `Conclude`: Forward `Conclude` to all jobs, waiting a small amount of time for them to join, and then hard-exiting.

### On `ProvisionerMessage`

Forward the message to the appropriate Block Authorship Provisioning Job, or discard it if no appropriate job is currently active.
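The bitfield-availability computation referenced above can be sketched as follows, modeling bitfields as plain `Vec<bool>`; this is an illustrative sketch, not the real implementation.

```rust
/// Decide whether an occupied core has become available, per the procedure
/// above. Bitfields are modeled as boolean slices purely for illustration.
fn core_became_available(
    occupied_core_availability: &[bool],     // `OccupiedCore.availability`, one bit per validator
    signed_bitfields: &[(usize, Vec<bool>)], // (validator_index, bitfield over cores)
    core_idx: usize,
) -> bool {
    let mut availability = occupied_core_availability.to_vec();
    for (validator_index, bitfield) in signed_bitfields {
        // availability[validator_index] |= bitfield[core_idx]
        availability[*validator_index] |= bitfield[core_idx];
    }
    let ones = availability.iter().filter(|&&b| b).count();
    // 2/3 threshold: 3 * availability.count_ones() >= 2 * availability.len()
    3 * ones >= 2 * availability.len()
}
```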

## Block Authorship Provisioning Job

Maintain the set of channels to block authors. On receiving provisionable data, send a copy over each channel.
diff --git a/roadmap/implementors-guide/src/node/utility/runtime-api.md b/roadmap/implementers-guide/src/node/utility/runtime-api.md
similarity index 87%
rename from roadmap/implementors-guide/src/node/utility/runtime-api.md
rename to roadmap/implementers-guide/src/node/utility/runtime-api.md
index 05ceb85f4d5a979fb4bc99f0666522b6987c3e36..79df9a1d2d82227860405b4265ff8595eb12cec8 100644
--- a/roadmap/implementors-guide/src/node/utility/runtime-api.md
+++ b/roadmap/implementers-guide/src/node/utility/runtime-api.md
@@ -16,4 +16,4 @@ On receipt of `RuntimeApiMessage::Request(relay_parent, request)`, answer the re
 
 ## Jobs
 
-> TODO Don't limit requests based on parent hash, but limit caching. No caching should be done for any requests on relay_parents that are not live based on `StartWork` or `StopWork` messages. Maybe with some leeway for things that have just been stopped.
+> TODO Don't limit requests based on parent hash, but limit caching. No caching should be done for any requests on relay_parents that are not active based on `ActiveLeavesUpdate` messages. Maybe with some leeway for things that have just been stopped.
diff --git a/roadmap/implementors-guide/src/node/validity/README.md b/roadmap/implementers-guide/src/node/validity/README.md
similarity index 100%
rename from roadmap/implementors-guide/src/node/validity/README.md
rename to roadmap/implementers-guide/src/node/validity/README.md
diff --git a/roadmap/implementers-guide/src/node/validity/approvals.md b/roadmap/implementers-guide/src/node/validity/approvals.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ebfb806078f8f495d5c040499690571f05710b3
--- /dev/null
+++ b/roadmap/implementers-guide/src/node/validity/approvals.md
# Approvals subsystem

The approval subsystem determines whether a relay chain block can be considered for finality. It does so by running validity checks on the candidates included in, aka declared available in, that relay chain block.

These approval validity checks differ from the backing validity checks performed before starting availability:

- In backing, adversaries can choose when to propose invalid candidates, based upon when they control the parachain's backing validators who perform the checks.

- In approvals, we randomly assign individual validators to check specific candidates without giving adversaries foreknowledge about either which honest validators get assigned to which candidates, or even how many check. Availability prevents adversaries from choosing which validators obtain their possibly invalid candidate.

As such, approval checks provide significantly more security than backing checks, so Polkadot achieves some fixed security level most efficiently when we perform more approval checks per backing check, or per relay chain block.

...

Approval has roughly two parts:

- **Assignments** determines which validators perform approval checks on which candidates. It ensures that each candidate receives enough random checkers, while reducing adversaries' odds of obtaining enough checkers, and limiting adversaries' foreknowledge. It tracks approval votes to identify when a "no show" approval check takes suspiciously long, perhaps indicating the node being under attack, and assigns more checks in this case.
It tracks relay chain equivocations to determine when adversaries possibly gained foreknowledge about assignments, and adds additional checks in this case.

- **Approval checks** listens to the assignments subsystem for outgoing assignment notices that we shall check specific candidates. It then performs these checks by first invoking the reconstruction subsystem to obtain the candidate, second invoking the candidate validity utility subsystem upon the candidate, and finally sending out an approval vote, or perhaps initiating a dispute.

These both run first as off-chain consensus protocols using messages gossiped among all validators, and second as an on-chain record of this off-chain protocol's progress after the fact. We need the on-chain protocol to provide rewards for the off-chain protocol, and having an on-chain protocol simplifies interaction with GRANDPA.

Approval requires two gossiped message types: assignment notices, created by its assignments subsystem, and approval votes, sent by our approval checks subsystem when authorized by the candidate validity utility subsystem.

...

Any validators resyncing the chain after falling behind should track approvals using only the on-chain protocol. In particular, they should avoid sending their own assignment notices, and thus save themselves considerable validation work, until they have a fully synced chain.

### Approval keys

We need two separate keys for the approval subsystem:

- **Approval assignment keys** are sr25519/schnorrkel keys used only for the assignment criteria VRFs. We implicitly sign assignment notices with approval assignment keys by including their relay chain context and additional data in the VRF's extra message, but exclude these from its VRF input.

- **Approval vote keys** would only sign off on candidate parablock validity and have no natural key-type restrictions. We could reuse the ed25519 GRANDPA keys for this purpose since these signatures control access to GRANDPA, although distant future node configurations might favor separate roles.

Approval vote keys could relatively easily be handled by some hardened signer tooling, perhaps even HSMs, assuming we select ed25519 for approval vote keys. Approval assignment keys might or might not support hardened signer tooling, but doing so sounds far more complex. In fact, assignment keys determine only VRF outputs that determine approval checker assignments, for which they can only act or not act, so they cannot equivocate, lie, etc. and represent little if any slashing risk for validator operators.

In future, we shall determine which among the several hardening techniques best benefits the network as a whole. We could provide a multi-process multi-machine architecture for validators, perhaps even reminiscent of GNUnet, or perhaps more resembling smart HSM tooling. We might instead design a system that more resembles full systems, like Cosmos' sentry nodes. In either case, approval assignments might be handled by a slightly hardened machine, but not necessarily nearly as hardened as approval votes; approval vote machines must similarly run foreign Wasm code, which increases their risk, so keeping assignments separate sounds helpful.

### Gossip

Any validator could send their assignment notices and/or approval votes too early. We gossip the approval votes because they represent a major commitment by the validator. We delay gossiping assignment notices whose delay tranche runs excessively ahead of our local clock.
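A minimal sketch of that delaying rule follows; the tranche comparison and the slack parameter are assumptions for illustration, not the actual node logic.

```rust
/// Hold back an assignment notice whose tranche runs too far ahead of our
/// local clock; forward it once the clock catches up.
fn gossip_now(notice_tranche: u32, local_clock_tranche: u32, allowed_slack: u32) -> bool {
    notice_tranche <= local_clock_tranche.saturating_add(allowed_slack)
}
```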

### Future work

We could consider additional gossip messages with which nodes claim "slow availability" and/or "slow candidate" to fine-tune the assignments "no show" system, but sufficiently long "no show" delays probably suffice.

We shall develop more practical experience with UDP once the availability system works using direct UDP connections. In this, we should discover whether reconstruction performs adequately with a complete graph or benefits from topology restrictions. At that point, an assignment notice could implicitly request pieces from a random 1/3rd, perhaps topology restricted, which saves one gossip round. If this preliminary fast reconstruction fails, then nodes request alternative pieces directly. There is an interesting design space in how this overlaps with "slow availability" claims.
diff --git a/roadmap/implementers-guide/src/node/validity/assignments.md b/roadmap/implementers-guide/src/node/validity/assignments.md
new file mode 100644
index 0000000000000000000000000000000000000000..e2a17d6e7ef788020f830d45be9af8c34b9e0410
--- /dev/null
+++ b/roadmap/implementers-guide/src/node/validity/assignments.md
# Approval assignments

Approval assignment determines on which candidate parachain blocks each validator performs approval checks. An approval session considers only one relay chain block and assigns only those candidates that relay chain block declares available.

Assignment balances several concerns:

- limits adversaries' foreknowledge about assignments,
- ensures enough checkers, and
- distributes assignments relatively equitably.

Assignees determine their own assignments to check specific candidates using two or three assignment criteria. Assignees never reveal their assignments until relevant, and gossip delays assignments sent early, which limits others' foreknowledge. Assignees learn their assignment only with the relay chain block.

All criteria require the validator evaluate a verifiable random function (VRF) using their VRF secret key. All criteria input specific data called "stories" about the session's relay chain block, and output candidates to check and a precedence called a `DelayTranche`.

We liberate availability cores when their candidate becomes available of course, but one approval assignment criterion continues associating each candidate with the core number it occupied when it became available.

Assignment operates in loosely timed rounds determined by these `DelayTranche`s, which proceed roughly 12 times faster than six-second block production, assuming half-second gossip times. If a candidate `C` needs more approval checkers by the time we reach round `t` then any validators with an assignment to `C` in delay tranche `t` send their assignment notice for `C`. We continue until all candidates have enough approval checkers assigned. We take entire tranches together if we do not yet have enough, so we expect strictly more than enough checkers. We also take later tranches if some checkers return their approval votes too slowly (see no shows below).

Assignment ensures validators check those relay chain blocks for which they have delay tranche zero, aka the highest precedence, so that adversaries always face honest checkers equal to the expected number of assignments with delay tranche zero.

Among these criteria, the BABE VRF output provides the story for two, which reduces how frequently adversaries could position their own checkers.
We have one criterion whose story consists of the candidate's block hash plus external knowledge that a relay chain equivocation exists with a conflicting candidate. It provides unforeseeable assignments when adversaries gain foreknowledge about the other two by committing an equivocation in relay chain block production.

## Announcements / Notices

We gossip assignment notices among nodes so that all validators know which validators should check each candidate, and if any candidate requires more checkers.

Assignment notices consist of a relay chain context given by a block hash, an assignment criterion, consisting of the criterion identifier and optionally a criterion-specific field, an assignee identifier, and a VRF signature by the assignee, which itself consists of a VRF pre-output and a DLEQ proof. Its VRF input consists of the criterion, usually including a criterion-specific field, and a "story" about its relay chain context block.

We never include stories inside the gossip messages containing assignment notices, but require each validator reconstruct them. We never care about assignments in the disputes process, so this does not complicate remote disputes.

In a Schnorr VRF, there is an extra signed message distinct from this input, which we set to the relay chain block hash. As a result, assignment notices are self-signing and can be "politely" gossiped without additional signatures, meaning between nodes who can compute the story from the relay chain context. In other words, if we cannot compute the story required by an assignment notice's VRF part then our self-signing property fails and we cannot verify its origin. We could fix this with either another signature layer (64 bytes) or by including the VRF input point computed from the story (32 bytes), but doing so appears unhelpful.

Any validator could send their assignment notices and/or approval votes too early. We gossip the approval votes early because they represent a major commitment by the validator. We delay gossiping the assignment notices until they agree with our local clock, however. We also impose a politeness condition that the recipient knows the relay chain context used by the assignment notice.

## Stories

We base assignment criteria upon two possible "stories" about the relay chain block `R` that included the candidate, aka declared the candidate available. All stories have an output that attempts to minimize adversarial influence, which then acts as the VRF input for an assignment criterion.

We first have a `RelayVRFStory` that outputs the randomness from another VRF output produced by the relay chain block producer when creating `R`. Among honest nodes, only this one relay chain block producer who creates `R` knew the story in advance, and even they knew nothing two epochs previously.

In BABE, we create this value by calling `schnorrkel::vrf::VRFInOut::make_bytes` with the context "A&V RC-VRF", with the `VRFInOut` coming from either the VRF that authorized block production for primary blocks, or else from the secondary block VRF for the secondary block type.

In Sassafras, we shall always use the non-anonymized recycling VRF output, never the anonymized ring VRF that authorizes block production. We do not currently know if Sassafras shall have a separate schnorrkel key, but if it reuses its ring VRF key there is an equivalent `ring_vrf::VRFInOut::make_bytes`.
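For concreteness, deriving the `RelayVRFStory` bytes from a block producer's `VRFInOut` might look roughly like the sketch below; the function name and the 32-byte output width are assumptions, though `make_bytes` and the "A&V RC-VRF" context are as described above.

```rust
use schnorrkel::vrf::VRFInOut;

/// Derive the `RelayVRFStory` randomness from the relay chain block
/// producer's VRF, using the "A&V RC-VRF" context named above.
fn relay_vrf_story(block_producer_vrf: &VRFInOut) -> [u8; 32] {
    block_producer_vrf.make_bytes::<[u8; 32]>(b"A&V RC-VRF")
}
```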

We like that `RelayVRFStory` admits relatively few choices, but an adversary who equivocates in relay chain block production could learn assignments that depend upon the `RelayVRFStory` too early, because the same relay chain VRF appears in multiple blocks.

We therefore provide a secondary `RelayEquivocationStory` that outputs the candidate's block hash, but only for candidate equivocations. We say a candidate `C` in `R` is an equivocation when there exists another relay chain block `R1` that equivocates for `R` in the sense that `R` and `R1` have the same `RelayVRFStory`, but `R` contains `C` and `R1` does not contain `C`.

We want checkers for candidate equivocations that lie outside our preferred relay chain as well, which represents a slightly different usage for the assignments module, and might require more information in the gossip messages.

## Assignment criteria

Assignment criteria compute actual assignments using stories and the validators' secret approval assignment key. Assignment criteria output a `Position` consisting of both a `ParaId` to be checked and a precedence `DelayTranche` for when the assignment becomes valid.

Assignment criteria come in three flavors: `RelayVRFModulo`, `RelayVRFDelay` and `RelayEquivocation`. Among these, both `RelayVRFModulo` and `RelayVRFDelay` run a VRF whose input is the output of a `RelayVRFStory`, while `RelayEquivocation` runs a VRF whose input is the output of a `RelayEquivocationStory`.

Among these, we have two distinct VRF output computations:

`RelayVRFModulo` runs several distinct samples whose VRF input is the `RelayVRFStory` and the sample number. It computes the VRF output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "core", reduces this number modulo the number of availability cores, and outputs the candidate just declared available by, and included by aka leaving, that availability core. We drop any samples that return no candidate because no candidate was leaving the sampled availability core in this relay chain block. We choose three samples initially, but we could make Polkadot more secure and efficient by increasing this to four or five, and reducing the backing checks accordingly. All successful `RelayVRFModulo` samples are assigned delay tranche zero.

There is no sampling process for `RelayVRFDelay` and `RelayEquivocation`. We instead run them on specific candidates and they compute a delay from their VRF output. `RelayVRFDelay` runs for all candidates included under, aka declared available by, a relay chain block, and inputs the associated VRF output via `RelayVRFStory`. `RelayEquivocation` runs only on candidate block equivocations, and inputs their block hashes via the `RelayEquivocationStory`.

`RelayVRFDelay` and `RelayEquivocation` both compute their output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "tranche", reduce the result modulo `num_delay_tranches + zeroth_delay_tranche_width`, and consolidate results 0 through `zeroth_delay_tranche_width` to be 0. In this way, they ensure the zeroth delay tranche has `zeroth_delay_tranche_width+1` times as many assignments as any other tranche.

As future work (or TODO?), we should merge assignment notices with the same delay and story using `vrf_merge`. We cannot merge those with the same delay and different stories because `RelayEquivocationStory`s could change but `RelayVRFStory` never changes.
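A rough sketch of the two reductions just described; the four-byte output width and little-endian conversion are assumptions for illustration, while the modulo and tranche-consolidation rules follow the text above.

```rust
/// Reduce a "core"-context VRF output to an availability core index, as in
/// `RelayVRFModulo` above.
fn modulo_core(vrf_output: [u8; 4], n_cores: u32) -> u32 {
    u32::from_le_bytes(vrf_output) % n_cores
}

/// Reduce a "tranche"-context VRF output to a `DelayTranche`, consolidating
/// results 0 through `zeroth_delay_tranche_width` into tranche zero, as in
/// `RelayVRFDelay` and `RelayEquivocation` above.
fn delay_tranche(vrf_output: [u8; 4], num_delay_tranches: u32, zeroth_delay_tranche_width: u32) -> u32 {
    let raw = u32::from_le_bytes(vrf_output) % (num_delay_tranches + zeroth_delay_tranche_width);
    raw.saturating_sub(zeroth_delay_tranche_width)
}
```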

## Announcer and Watcher/Tracker

We track all validators' announced approval assignments for each candidate associated to each relay chain block, which tells us which validators were assigned to which candidates.

We permit at most one assignment per candidate per story per validator, so one validator could be assigned under both the `RelayVRFDelay` and `RelayEquivocation` criteria, but not under both the `RelayVRFModulo` and `RelayVRFDelay` criteria, since those both use the same story. We permit only one approval vote per candidate per validator, which counts for any applicable criteria.

We announce our own assignments, and start checking, when their tranche's delay is reached, but only if the tracker says the assigned candidate requires more approval checkers. We never announce an assignment we believe unnecessary, because early announcements give an adversary information. All delay tranche zero assignments always get announced, which includes all `RelayVRFModulo` assignments.

In other words, if some candidate `C` needs more approval checkers by the time we reach round `t` then any validators with an assignment to `C` in delay tranche `t` send their assignment notice for `C`, and begin reconstruction and validation for `C`. If however `C` has reached enough assignments, then validators with later assignments skip announcing their assignments.

We continue until all candidates have enough approval checkers assigned. We never prioritize assignments within tranches, and count all or no assignments for a given tranche together, so we often overshoot the target number of assigned approval checkers.

### No shows

We have a "no show" timeout longer than one relay chain slot, so at least 6 seconds, during which we expect approval checks should succeed in reconstructing the candidate block, in redoing its erasure coding to check the candidate receipt, and finally in rechecking the candidate block itself.

We consider a validator a "no show" if they do not approve or dispute within this "no show" timeout from our receiving their assignment notice. We time this from our receipt of their assignment notice instead of our imagined real time for their tranche because otherwise receiving late assignment notices creates immediate "no shows" and unnecessary work.

We worry "no shows" represent a validator under denial of service attack, presumably to prevent it from reconstructing the candidate, but perhaps delaying it from gossiping a dispute too. We therefore always replace "no shows" by adding one entire extra delay tranche worth of validators, so such attacks always result in additional checkers.

As an example, imagine we need 20 checkers, but tranche zero produces only 14, and tranche one only 4; then we take all 5 from tranche two, and thus require 23 checkers for that candidate. If one checker Charlie from tranche one or two does not respond within say 8 seconds, then we add all 7 checkers from tranche three. If again one checker Cindy from tranche three does not respond within 8 seconds, then we take all 3 checkers from tranche four. We now have 33 checkers working on the candidate, so this escalated quickly.

We escalated so quickly because we worried that Charlie and Cindy might be the only honest checkers assigned to that candidate. If therefore either Charlie or Cindy finally returns an approval, then we can conclude approval, and abandon the checkers from tranche four.
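The escalation in the example above can be sketched as a simple fold over tranche sizes; the names are assumed for illustration.

```rust
/// Take whole tranches until at least `needed` checkers are assigned; because
/// tranches are never split, we usually overshoot the target.
fn total_assigned(tranche_sizes: &[u32], needed: u32) -> u32 {
    let mut total = 0;
    for &size in tranche_sizes {
        if total >= needed {
            break;
        }
        total += size;
    }
    total
}
// e.g. total_assigned(&[14, 4, 5], 20) == 23, matching the example above;
// each later no-show then pulls in the next whole tranche.
```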

We therefore require the "no show" timeout to be longer than a relay chain slot so that we can witness "no shows" on-chain. We discuss below how this helps reward validators who replace "no shows".

We avoid slashing for "no shows" per se, although being a "no show" could enter into some computation that punishes repeated poor performance, presumably replacing ImOnline, in which case we could reduce their rewards and further reward those who filled in.

As future work, we foresee expanding the "no show" scheme to anonymize the additional checkers, such as by using assignment notices with a new criterion that employs a ring VRF and then all validators providing cover by requesting a couple of erasure coded pieces, but such an anonymity scheme sounds extremely complex and lies far beyond our initial functionality.

## Assignment postponement

We expect validators could occasionally be overloaded when they randomly acquire too many assignments. All these fluctuations amortize over multiple blocks fairly well, but this slows down finality.

We therefore permit validators to delay sending their assignment notices intentionally. If nobody knows about their assignment then they avoid creating "no shows" and the workload progresses normally.

We strongly prefer if postponements come from tranches higher, aka less important, than zero, because tranche zero checks provide somewhat more security.

TODO: When? Is this optimal for the network? etc.

## On-chain verification

We should verify approval on-chain to reward approval checkers and to simplify integration with GRANDPA. We therefore require the "no show" timeout to be longer than a relay chain slot so that we can witness "no shows" on-chain, which helps with both these goals.

In principle, all validators have some "tranche" at which they're assigned to the parachain candidate, which ensures we reach enough validators eventually. As noted above, we often retract "no shows" when the slow validator eventually shows up, so witnessing their initially being a "no show" helps manage rewards.

We expect on-chain verification should work in two phases: We first record assignment notices and approval votes on-chain in a relay chain block, doing the VRF or regular signature verification again in block verification, and inserting chain-authenticated unsigned notes into the relay chain state that contain the checker, tranche, paraid, and relay block height for each assignment notice. We then later have another relay chain block that runs some `approved` intrinsic, which extracts all these notes from the state and feeds them into our approval code.

We now encounter one niche concern in the interaction between postponement and on-chain verification: Any validator with a tranche zero (or other low) assignment could delay sending an assignment notice, like because they postponed their assigned tranche (which is allowed). If they later send this assignment notice right around finality time, then they race with this `approved` intrinsic: If their announcement gets on-chain (also allowed), then yes, it delays finality. If it does not get on-chain, then we have one announcement that the off-chain consensus system says is valid, but the chain ignores for being too slow.

We need the chain to win in this case, but doing this requires imposing an annoyingly long overarching delay upon finality. We might explore limits on postponement too, but this sounds much harder.

## Parameters

We prefer doing approval checker assignments under `RelayVRFModulo` as opposed to `RelayVRFDelay` because `RelayVRFModulo` avoids giving individual checkers too many assignments and tranche zero assignments benefit security the most. We suggest assigning at least 16 checkers under `RelayVRFModulo`, although assignment levels have never been properly analysed.

Our delay criteria `RelayVRFDelay` and `RelayEquivocation` both have two primary parameters: expected checkers per tranche and the zeroth delay tranche width.

We require expected checkers per tranche to be less than three because otherwise an adversary with 1/3 stake could force all nodes into checking all blocks. We strongly recommend expected checkers per tranche to be less than two, which helps avoid both accidental and intentional explosions. We also suggest expected checkers per tranche be larger than one, which helps prevent adversaries from predicting that advancing one tranche adds only their own validators.

We improve security more with tranche zero assignments, so `RelayEquivocation` should consolidate its first several tranches into tranche zero. We describe this as the zeroth delay tranche width, which initially we set to `12` for `RelayEquivocation` and `1` for `RelayVRFDelay`.

## Why VRFs?

We do assignments with VRFs to give "enough" checkers some meaning beyond merely "expected" checkers:

We could specify a protocol that used only system randomness, which works because our strongest defense is the expected number of honest checkers who assign themselves. In this, adversaries could trivially flood their own blocks with their own checkers, so this strong defense becomes our only defense, and delay tranches become useless, so some blocks actually have zero approval checkers and possibly only one checker overall.

VRFs though require adversaries wait far longer between such attacks, which also helps against adversaries with little at stake because they compromised validators. VRFs raise user confidence that no such "drive by" attacks occurred because the delay tranche system ensures at least some minimum number of approval checkers. In this vein, VRFs permit reducing backing checks and increasing approval checks, which makes Polkadot more efficient.

## Gossip

Any validator could send their assignment notices and/or approval votes too early. We gossip the approval votes because they represent a major commitment by the validator. We retain but delay gossiping the assignment notices until they agree with our local clock.

Assignment notices being gossiped too early might create a denial of service vector. If so, we might exploit the relative time scheme that synchronises our clocks, which conceivably permits just dropping excessively early assignments.
diff --git a/roadmap/implementors-guide/src/parachains-overview.md b/roadmap/implementers-guide/src/parachains-overview.md
similarity index 80%
rename from roadmap/implementors-guide/src/parachains-overview.md
rename to roadmap/implementers-guide/src/parachains-overview.md
index 8eff666225296fdaab4e8316f4fbb49338a7b88e..b8b39eee4f0fb5a5f800467ffbe50fcf5663cb63 100644
--- a/roadmap/implementors-guide/src/parachains-overview.md
+++ b/roadmap/implementers-guide/src/parachains-overview.md
@@ -18,7 +18,7 @@ Here is a description of the Inclusion Pipeline: the path a parachain block (or
 
 1. Validators are selected and assigned to parachains by the Validator Assignment routine.
 1. A collator produces the parachain block, which is known as a parachain candidate or candidate, along with a PoV for the candidate.
-1. The collator forwards the candidate and PoV to validators assigned to the same parachain via the [Collation Distribution subsystem](node/collators/collation-distribution.md).
+1. The collator forwards the candidate and PoV to validators assigned to the same parachain via the [Collator Protocol](node/collators/collator-protocol.md).
 1. The validators assigned to a parachain at a given point in time participate in the [Candidate Backing subsystem](node/backing/candidate-backing.md) to validate candidates that were put forward for validation. Candidates which gather enough signed validity statements from validators are considered "backable". Their backing is the set of signed validity statements.
 1. A relay-chain block author, selected by BABE, can note up to one (1) backable candidate for each parachain to include in the relay-chain block alongside its backing. A backable candidate once included in the relay-chain is considered backed in that fork of the relay-chain.
 1. Once backed in the relay-chain, the parachain candidate is considered to be "pending availability". It is not considered to be included as part of the parachain until it is proven available.
@@ -56,19 +56,109 @@ Reiterating the lifecycle of a candidate:
 1. Included: Backed and considered available.
 1. Accepted: Backed, available, and undisputed
 
-> TODO Diagram: Inclusion Pipeline & Approval Subsystems interaction
+```dot process Inclusion Pipeline
+digraph {
+  subgraph cluster_vg {
+    label=<
+      Parachain Validators
+      <br/>
+      (subset of all)
+    >
+    labeljust=l
+    style=filled
+    color=lightgrey
+    node [style=filled color=white]
+
+    v1 [label="Validator 1"]
+    v2 [label="Validator 2"]
+    v3 [label="Validator 3"]
+
+    b [label="(3) Backable", shape=box]
+
+    v1 -> v2 [label="(2) Seconded"]
+    v1 -> v3 [label="(2) Seconded"]
+
+    v2 -> b [style=dashed arrowhead=none]
+    v3 -> b [style=dashed arrowhead=none]
+    v1 -> b [style=dashed arrowhead=none]
+  }
+
+  v4 [label=<
+    Validator 4 (relay chain)
+    <br/>
+    <br/>
+    (selected by BABE)
+  >]
+
+  col [label="Collator"]
+  pa [label="(5) Relay Block (Pending Availability)", shape=box]
+  pb [label="Parablock", shape=box]
+  rc [label="Relay Chain Validators"]
+
+  subgraph cluster_approval {
+    label=<
+      Secondary Checkers
+      <br/>
+      (subset of all)
+    >
+    labeljust=l
+    style=filled
+    color=lightgrey
+    node [style=filled color=white]
+
+    a5 [label="Validator 5"]
+    a6 [label="Validator 6"]
+    a7 [label="Validator 7"]
+  }
+
+  b -> v4 [label="(4) Backed"]
+  col -> v1 [label="(1) Candidate"]
+  v4 -> pa
+  pa -> pb [label="(6) a few blocks later..." arrowhead=none]
+  pb -> a5
+  pb -> a6
+  pb -> a7
+
+  a5 -> rc [label="(7) Approved"]
+  a6 -> rc [label="(7) Approved"]
+  a7 -> rc [label="(7) Approved"]
+}
+```
+
+The diagram above shows the happy path of a block from (1) Candidate to the (7) Approved state. It is also important to take note of the fact that the relay-chain is extended by BABE, which is a forkful algorithm. That means that different block authors can be chosen at the same time, and may not be building on the same block parent. Furthermore, the set of validators is not fixed, nor is the set of parachains. And even with the same set of validators and parachains, the validators' assignments to parachains are flexible. This means that the architecture proposed in the next chapters must deal with the variability and multiplicity of the network state.
 
 ```dot process
 digraph {
-  rca [label = "Relay Block A" shape=rectangle]
-  rcb [label = "Relay Block B" shape=rectangle]
-  rcc [label = "Relay Block C" shape=rectangle]
-
-  vg1 [label =<Validator Group 1<br/><br/>(Validator 4)<br/>(Validator 1) (Validator 2)<br/>(Validator 5)<br/>>]
-  vg2 [label =<Validator Group 2<br/><br/>(Validator 7)<br/>(Validator 3) (Validator 6)<br/>>]
+  rca [label="Relay Block A" shape=box]
+  rcb [label="Relay Block B" shape=box]
+  rcc [label="Relay Block C" shape=box]
+
+  vg1 [label=<
+    Validator Group 1
+    <br/>
+    <br/>
+    (Validator 4)
+    <br/>
+    (Validator 1) (Validator 2)
+    <br/>
+    (Validator 5)
+    <br/>
+  >]
+  vg2 [label=<
+    Validator Group 2
+    <br/>
+    <br/>
+    (Validator 7)
+    <br/>
+    (Validator 3) (Validator 6)
+    <br/>
+  >]
 
   rcb -> rca
   rcc -> rcb
@@ -82,23 +172,46 @@ In this example, group 1 has received block C while the others have not due to n
 
 ```dot process
 digraph {
-  rca [label = "Relay Block A" shape=rectangle]
-  rcb [label = "Relay Block B" shape=rectangle]
-  rcc [label = "Relay Block C" shape=rectangle]
-  rcc_prime [label = "Relay Block C'" shape=rectangle]
-
-  vg1 [label =<Validator Group 1<br/><br/>(Validator 4) (Validator 1)>]
-  vg2 [label =<Validator Group 2<br/><br/>(Validator 7) (Validator 6)>]
-  vg3 [label =<Validator Group 3<br/><br/>(Validator 2) (Validator 3)<br/>(Validator 5)<br/>>]
+  rca [label="Relay Block A" shape=box]
+  rcb [label="Relay Block B" shape=box]
+  rcc [label="Relay Block C" shape=box]
+  rcc_prime [label="Relay Block C'" shape=box]
+
+  vg1 [label=<
+    Validator Group 1
+    <br/>
+    <br/>
+    (Validator 4) (Validator 1)
+  >]
+  vg2 [label=<
+    Validator Group 2
+    <br/>
+    <br/>
+    (Validator 7) (Validator 6)
+  >]
+  vg3 [label=<
+    Validator Group 3
+    <br/>
+    <br/>
+    (Validator 2) (Validator 3)
+    <br/>
+    (Validator 5)
+    <br/>
+  >]
 
   rcb -> rca
   rcc -> rcb
   rcc_prime -> rcb
 
-  vg1 -> rcc        [style=dashed arrowhead=none]
+  vg1 -> rcc        [style=dashed arrowhead=none]
   vg2 -> rcc_prime  [style=dashed arrowhead=none]
   vg3 -> rcc_prime  [style=dashed arrowhead=none]
-  vg3 -> rcc        [style=dashed arrowhead=none]
+  vg3 -> rcc        [style=dashed arrowhead=none]
 }
 ```
diff --git a/roadmap/implementers-guide/src/runtime-api/README.md b/roadmap/implementers-guide/src/runtime-api/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a40290a2d0657a2cf771c6a298b79730fe47ae19
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/README.md
# Runtime APIs

Runtime APIs are the means by which the node-side code extracts information from the state of the runtime.

Every block in the relay-chain contains a *state root* which is the root hash of a state trie encapsulating all storage of runtime modules after execution of the block. This is a cryptographic commitment to a unique state. We use the terminology of accessing the *state at* a block to refer to accessing the state referred to by the state root of that block.

Although Runtime APIs are often used for simple storage access, they are actually empowered to do arbitrary computation. The implementation of the Runtime APIs lives within the Runtime as Wasm code and exposes extern functions that can be invoked with arguments and have a return value. Runtime APIs have access to a variety of host functions, which are contextual functions provided by the Wasm execution context, that allow it to carry out many different types of behaviors.

Abilities provided by host functions include:

* State Access
* Offchain-DB Access
* Submitting transactions to the transaction queue
* Optimized versions of cryptographic functions
* More

So it is clear that Runtime APIs are a versatile and powerful tool to leverage the state of the chain. In general, we will use Runtime APIs for these purposes:

* Access of a storage item
* Access of a bundle of related storage items
* Deriving a value from storage based on arguments
* Submitting misbehavior reports

More broadly, we have the goal of using Runtime APIs to write Node-side code that fulfills the requirements set by the Runtime. In particular, the constraints set forth by the [Scheduler](../runtime/scheduler.md) and [Inclusion](../runtime/inclusion.md) modules. These modules are responsible for advancing paras with a two-phase protocol where validators are first chosen to validate and back a candidate and then required to ensure availability of referenced data. In the second phase, validators are meant to attest to those para-candidates that they have their availability chunk for. As the Node-side code needs to generate the inputs into these two phases, the runtime API needs to transmit information from the runtime that is aware of the Availability Cores model instantiated by the Scheduler and Inclusion modules.

Node-side code is also responsible for detecting and reporting misbehavior performed by other validators, and the set of Runtime APIs needs to provide methods for observing live disputes and submitting reports as transactions.

The next sections will contain information on specific runtime APIs. The format is this:

```rust
/// Fetch the value of the runtime API at the block.
///
/// Definitionally, the `at` parameter cannot be any block that is not in the chain.
/// Thus the return value is unconditional. However, for in-practice implementations
+/// However, in practice, implementations may accept an `at` parameter as a hash, which
+/// may not refer to a valid block or to one which implements the runtime API. In those
+/// cases it would be best for the implementation to return an error indicating the failure mode.
+fn some_runtime_api(at: Block, arg1: Type1, arg2: Type2, ...) -> ReturnValue;
+```
+
+Certain runtime APIs concerning the state of a para require the caller to provide an `OccupiedCoreAssumption`. This indicates how the result of the runtime API should be computed if there is a candidate from the para occupying an availability core in the [Inclusion Module](../runtime/inclusion.md).
+
+The choices of assumption are whether the candidate occupying that core should be assumed to have been made available and included or timed out and discarded, along with a third option to assert that the core was not occupied. This choice affects everything from the parent head-data and the validation code to the state of message-queues. Typically, users will take the assumption that either the core was free or that the occupying candidate was included, as timeouts are expected only in adversarial circumstances and even so, only in a small minority of blocks directly following validator set rotations.
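+
+As a minimal, purely illustrative sketch (not part of the Runtime API surface), node-side code might choose among these assumptions as follows, using the `OccupiedCoreAssumption` type defined just below; `core_is_occupied` is a hypothetical flag derived from the availability-cores API:
+
+```rust
+/// Hedged sketch: pick the typical assumption for querying a para's state.
+fn choose_assumption(core_is_occupied: bool) -> OccupiedCoreAssumption {
+    // Timeouts are expected only in adversarial circumstances, so callers
+    // usually assume that an occupying candidate will be included.
+    if core_is_occupied {
+        OccupiedCoreAssumption::Included
+    } else {
+        OccupiedCoreAssumption::Free
+    }
+}
+```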
+
+```rust
+/// An assumption being made about the state of an occupied core.
+enum OccupiedCoreAssumption {
+    /// The candidate occupying the core was made available and included to free the core.
+    Included,
+    /// The candidate occupying the core timed out and freed the core without advancing the para.
+    TimedOut,
+    /// The core was not occupied to begin with.
+    Free,
+}
+```
\ No newline at end of file
diff --git a/roadmap/implementers-guide/src/runtime-api/availability-cores.md b/roadmap/implementers-guide/src/runtime-api/availability-cores.md
new file mode 100644
index 0000000000000000000000000000000000000000..561e817cca3f147c8ad7baa9b1c7d08a3380037e
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/availability-cores.md
@@ -0,0 +1,56 @@
+# Availability Cores
+
+Yields information on all availability cores. Cores are either free or occupied. Free cores can have paras assigned to them. Occupied cores don't, but they can become available part-way through a block due to bitfields and then have something scheduled on them. To allow optimistic validation of candidates, the occupied cores are accompanied by information on what is upcoming. This information can be leveraged when validators perceive that there is a high likelihood of a core becoming available based on bitfields seen, and then optimistically validate something that would become scheduled based on that, although there is no guarantee on what the block producer will actually include in the block.
+
+See also the [Scheduler Module](../runtime/scheduler.md) for a high-level description of what an availability core is and why it exists.
+
+```rust
+fn availability_cores(at: Block) -> Vec<CoreState>;
+```
+
+This is all the information that a validator needs about scheduling for the current block. It includes all information on [Scheduler](../runtime/scheduler.md) core-assignments and [Inclusion](../runtime/inclusion.md) state of blocks occupying availability cores. It includes data necessary to determine not only which paras are assigned now, but which cores are likely to become freed after processing bitfields, and exactly which bitfields would be necessary to make them so.
+
+```rust
+struct OccupiedCore {
+    /// The ID of the para occupying the core.
+    para_id: ParaId,
+    /// If this core is freed by availability, this is the assignment that is next up on this
+    /// core, if any. None if there is nothing queued for this core.
+    next_up_on_available: Option<ScheduledCore>,
+    /// The relay-chain block number this began occupying the core at.
+    occupied_since: BlockNumber,
+    /// The relay-chain block this will time-out at, if any.
+    time_out_at: BlockNumber,
+    /// If this core is freed by being timed-out, this is the assignment that is next up on this
+    /// core. None if there is nothing queued for this core or there is no possibility of timing
+    /// out.
+    next_up_on_time_out: Option<ScheduledCore>,
+    /// A bitfield with 1 bit for each validator in the set. `1` bits mean that the corresponding
+    /// validator has attested to availability on-chain. A 2/3+ majority of `1` bits means that
+    /// this will be available.
+    availability: Bitfield,
+    /// The group assigned to distribute availability pieces of this candidate.
+    group_responsible: GroupIndex,
+}
+
+struct ScheduledCore {
+    /// The ID of a para scheduled.
+    para_id: ParaId,
+    /// The collator required to author the block, if any.
+    collator: Option<CollatorId>,
+}
+
+enum CoreState {
+    /// The core is currently occupied.
+    Occupied(OccupiedCore),
+    /// The core is currently free, with a para scheduled and given the opportunity
+    /// to occupy.
+    ///
+    /// If a particular Collator is required to author this block, that is also present in this
+    /// variant.
+    Scheduled(ScheduledCore),
+    /// The core is currently free and there is nothing scheduled. This can be the case for parathread
+    /// cores when there are no parathread blocks queued. Parachain cores will never be left idle.
+    Free,
+}
+```
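+
+As an editorial aside, the 2/3+ rule in the `availability` field's documentation can be sketched as follows (illustrative only; `Bitfield` is simplified to a slice of bools):
+
+```rust
+/// Hedged sketch: a candidate pending availability is considered available
+/// once more than two-thirds of validators have set their bit.
+fn is_available(availability: &[bool]) -> bool {
+    let ones = availability.iter().filter(|&&bit| bit).count();
+    3 * ones > 2 * availability.len()
+}
+```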
diff --git a/roadmap/implementers-guide/src/runtime-api/candidate-events.md b/roadmap/implementers-guide/src/runtime-api/candidate-events.md
new file mode 100644
index 0000000000000000000000000000000000000000..3ebdcd04917bdf1686b9d2a549a1f922a3c2cc03
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/candidate-events.md
@@ -0,0 +1,16 @@
+# Candidate Events
+
+Yields a vector of events concerning candidates that occurred within the given block.
+
+```rust
+enum CandidateEvent {
+    /// This candidate receipt was backed in the most recent block.
+    CandidateBacked(CandidateReceipt, HeadData),
+    /// This candidate receipt was included and became a parablock at the most recent block.
+    CandidateIncluded(CandidateReceipt, HeadData),
+    /// This candidate receipt was not made available in time and timed out.
+    CandidateTimedOut(CandidateReceipt, HeadData),
+}
+
+fn candidate_events(at: Block) -> Vec<CandidateEvent>;
+```
diff --git a/roadmap/implementers-guide/src/runtime-api/candidate-pending-availability.md b/roadmap/implementers-guide/src/runtime-api/candidate-pending-availability.md
new file mode 100644
index 0000000000000000000000000000000000000000..9c8969f6a958b2cce360ad83c1dc1e57d91e207e
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/candidate-pending-availability.md
@@ -0,0 +1,7 @@
+# Candidate Pending Availability
+
+Get the receipt of a candidate pending availability. This returns `Some` for any paras assigned to occupied cores in `availability_cores` and `None` otherwise.
+
+```rust
+fn candidate_pending_availability(at: Block, ParaId) -> Option<CommittedCandidateReceipt>;
+```
diff --git a/roadmap/implementers-guide/src/runtime-api/full-validation-data.md b/roadmap/implementers-guide/src/runtime-api/full-validation-data.md
new file mode 100644
index 0000000000000000000000000000000000000000..884fad076e2fa37005054aaf1a1004d71787db18
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/full-validation-data.md
@@ -0,0 +1,7 @@
+# Full Validation Data
+
+Yields the full [`ValidationData`](../types/candidate.md#validationdata) at the state of a given block.
+
+```rust
+fn full_validation_data(at: Block, ParaId, OccupiedCoreAssumption) -> Option<ValidationData>;
+```
diff --git a/roadmap/implementers-guide/src/runtime-api/persisted-validation-data.md b/roadmap/implementers-guide/src/runtime-api/persisted-validation-data.md
new file mode 100644
index 0000000000000000000000000000000000000000..2fd3e55c8712c5c85193ee60b9e6c736dc0ec2b7
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/persisted-validation-data.md
@@ -0,0 +1,11 @@
+# Persisted Validation Data
+
+Yields the [`PersistedValidationData`](../types/candidate.md#persistedvalidationdata) for the given [`ParaId`](../types/candidate.md#paraid) along with an assumption that should be used if the para currently occupies a core:
+
+```rust
+/// Returns the persisted validation data for the given para and occupied core assumption.
+///
+/// Returns `None` if either the para is not registered or the assumption is `Free`
+/// and the para already occupies a core.
+fn persisted_validation_data(at: Block, ParaId, OccupiedCoreAssumption) -> Option<PersistedValidationData>;
+```
\ No newline at end of file
diff --git a/roadmap/implementers-guide/src/runtime-api/session-index.md b/roadmap/implementers-guide/src/runtime-api/session-index.md
new file mode 100644
index 0000000000000000000000000000000000000000..1baf6a167dbb2543a1b6080dbe22dac266827fd5
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/session-index.md
@@ -0,0 +1,12 @@
+# Session Index
+
+Get the session index that is expected at the child of a block.
+
+In the [`Initializer`](../runtime/initializer.md) module, session changes are buffered by one block. The session index of the child of any relay block is always predictable by that block's state.
+
+This session index can be used to derive a [`SigningContext`](../types/candidate.md#signing-context).
+
+```rust
+/// Returns the session index expected at a child of the block.
+fn session_index_for_child(at: Block) -> SessionIndex;
+```
diff --git a/roadmap/implementers-guide/src/runtime-api/validation-code.md b/roadmap/implementers-guide/src/runtime-api/validation-code.md
new file mode 100644
index 0000000000000000000000000000000000000000..908e3bfbd1450c81d83ed3a8a0729269f6fb85ef
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/validation-code.md
@@ -0,0 +1,7 @@
+# Validation Code
+
+Fetch the validation code used by a para, making the given `OccupiedCoreAssumption`.
+
+```rust
+fn validation_code(at: Block, ParaId, OccupiedCoreAssumption) -> Option<ValidationCode>;
+```
diff --git a/roadmap/implementers-guide/src/runtime-api/validator-groups.md b/roadmap/implementers-guide/src/runtime-api/validator-groups.md
new file mode 100644
index 0000000000000000000000000000000000000000..42b39f976d19977df6e882127dd759fe9596d370
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/validator-groups.md
@@ -0,0 +1,35 @@
+# Validator Groups
+
+Yields the validator groups used during the current session. The validators in the groups are referred to by their index into the validator-set.
+
+```rust
+/// A helper data-type for tracking validator-group rotations.
+struct GroupRotationInfo {
+    session_start_block: BlockNumber,
+    group_rotation_frequency: BlockNumber,
+    now: BlockNumber,
+}
+
+impl GroupRotationInfo {
+    /// Returns the index of the group needed to validate the core at the given index,
+    /// assuming the given number of cores/groups.
+    fn group_for_core(&self, core_index: CoreIndex, cores: usize) -> GroupIndex;
+
+    /// Returns the block number of the next rotation after the current block. If the current block
+    /// is 10 and the rotation frequency is 5, this should return 15.
+    ///
+    /// If the group rotation frequency is 0, returns 0.
+    fn next_rotation_at(&self) -> BlockNumber;
+
+    /// Returns the block number of the last rotation before or including the current block. If the
+    /// current block is 10 and the rotation frequency is 5, this should return 10.
+    ///
+    /// If the group rotation frequency is 0, returns 0.
+    fn last_rotation_at(&self) -> BlockNumber;
+}
+
+/// Returns the validator groups and rotation info localized based on the block whose state
+/// this is invoked on. Note that `now` in the `GroupRotationInfo` should be the successor of
+/// the number of the block.
+fn validator_groups(at: Block) -> (Vec<Vec<ValidatorIndex>>, GroupRotationInfo);
+```
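+
+As an editorial illustration, the arithmetic implied by the doc-comments above could look like the following sketch, restating the struct with concrete `u32` block numbers (this is not the runtime's implementation):
+
+```rust
+struct RotationSketch {
+    session_start_block: u32,
+    group_rotation_frequency: u32,
+    now: u32,
+}
+
+impl RotationSketch {
+    /// Next rotation strictly after the current block (e.g. now = 10, frequency = 5 -> 15).
+    fn next_rotation_at(&self) -> u32 {
+        if self.group_rotation_frequency == 0 { return 0; }
+        let since_start = self.now.saturating_sub(self.session_start_block);
+        let complete_rotations = since_start / self.group_rotation_frequency;
+        self.session_start_block + (complete_rotations + 1) * self.group_rotation_frequency
+    }
+
+    /// Last rotation before or including the current block (e.g. now = 10, frequency = 5 -> 10).
+    fn last_rotation_at(&self) -> u32 {
+        if self.group_rotation_frequency == 0 { return 0; }
+        let since_start = self.now.saturating_sub(self.session_start_block);
+        self.now - (since_start % self.group_rotation_frequency)
+    }
+}
+```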
diff --git a/roadmap/implementers-guide/src/runtime-api/validators.md b/roadmap/implementers-guide/src/runtime-api/validators.md
new file mode 100644
index 0000000000000000000000000000000000000000..b7f1d964754755c2224dc89a3503835398a581a2
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime-api/validators.md
@@ -0,0 +1,7 @@
+# Validators
+
+Yields the validator-set at the state of a given block. This validator set is always the one responsible for backing parachains in the child of the provided block.
+
+```rust
+fn validators(at: Block) -> Vec<ValidatorId>;
+```
diff --git a/roadmap/implementors-guide/src/runtime/README.md b/roadmap/implementers-guide/src/runtime/README.md
similarity index 64%
rename from roadmap/implementors-guide/src/runtime/README.md
rename to roadmap/implementers-guide/src/runtime/README.md
index 2a806d4c2dbc9e545450b41e17fe70a622e0685d..5279752e911cdd92d496aa65cd12c9a30b873ea3 100644
--- a/roadmap/implementors-guide/src/runtime/README.md
+++ b/roadmap/implementers-guide/src/runtime/README.md
@@ -23,17 +23,13 @@ We will split the logic of the runtime up into these modules:
 The [Initializer module](initializer.md) is special - it's responsible for handling the initialization logic of the other modules to ensure that the correct initialization order and related invariants are maintained. The other modules won't specify a on-initialize logic, but will instead expose a special semi-private routine that the initialization module will call. The other modules are relatively straightforward and perform the roles described above.
 
-The Parachain Host operates under a changing set of validators. Time is split up into periodic sessions, where each session brings a potentially new set of validators. Sessions are buffered by one, meaning that the validators of the upcoming session are fixed and always known. Parachain Host runtime modules need to react to changes in the validator set, as it will affect the runtime logic for processing candidate backing, availability bitfields, and misbehavior reports. The Parachain Host modules can't determine ahead-of-time exactly when session change notifications are going to happen within the block (note: this depends on module initialization order again - better to put session before parachains modules). Ideally, session changes are always handled before initialization. It is clearly a problem if we compute validator assignments to parachains during initialization and then the set of validators changes. In the best case, we can recognize that re-initialization needs to be done. In the worst case, bugs would occur.
+The Parachain Host operates under a changing set of validators. Time is split up into periodic sessions, where each session brings a potentially new set of validators. Sessions are buffered by one, meaning that the validators of the upcoming session `n+1` are determined at the end of session `n-1`, right before session `n` starts. Parachain Host runtime modules need to react to changes in the validator set, as it will affect the runtime logic for processing candidate backing, availability bitfields, and misbehavior reports. The Parachain Host modules can't determine ahead-of-time exactly when session change notifications are going to happen within the block (note: this depends on module initialization order again - better to put session before parachains modules).
 
-There are 3 main ways that we can handle this issue:
+The relay chain is intended to use BABE or SASSAFRAS, which both have the property that a session changing at a block is determined not by the number of the block but instead by the time the block is authored. In some sense, sessions change in-between blocks, not at blocks. This has the side effect that the session of a child block cannot be determined solely by the parent block's identifier. Being able to unilaterally determine the validator-set at a specific block based on its parent hash would make a lot of Node-side logic much simpler.
 
-1. Establish an invariant that session change notifications always happen after initialization. This means that when we receive a session change notification before initialization, we call the initialization routines before handling the session change.
-1. Require that session change notifications always occur before initialization. Brick the chain if session change notifications ever happen after initialization.
-1. Handle both the before and after cases.
+In order to regain the property that the validator set of a block is predictable by its parent block, we delay session changes' application to Parachains by 1 block. This means that if there is a session change at block X, that session change will be stored and applied during initialization of direct descendants of X. The principal side effect of this change is that the Parachains runtime can disagree with session or consensus modules about which session it currently is. Misbehavior reporting routines in particular will be affected by this, although not severely. The parachains runtime might believe it is the last block of the session while the system is really in the first block of the next session. In such cases, a historical validator-set membership proof will need to accompany any misbehavior report, although such proofs are typically not needed for current-session misbehavior reports.
 
-Although option 3 is the most comprehensive, it runs counter to our goal of simplicity.
Option 1 means requiring the runtime to do redundant work at all sessions and will also mean, like option 3, that designing things in such a way that initialization can be rolled back and reapplied under the new environment. That leaves option 2, although it is a "nuclear" option in a way and requires us to constrain the parachain host to only run in full runtimes with a certain order of operations. - -So the other role of the initializer module is to forward session change notifications to modules in the initialization order, throwing an unrecoverable error if the notification is received after initialization. Session change is the point at which the [Configuration Module](configuration.md) updates the configuration. Most of the other modules will handle changes in the configuration during their session change operation, so the initializer should provide both the old and new configuration to all the other +So the other role of the initializer module is to forward session change notifications to modules in the initialization order. Session change is also the point at which the [Configuration Module](configuration.md) updates the configuration. Most of the other modules will handle changes in the configuration during their session change operation, so the initializer should provide both the old and new configuration to all the other modules alongside the session change notification. This means that a session change notification should consist of the following data: ```rust @@ -53,5 +49,4 @@ struct SessionChangeNotification { } ``` -> REVIEW: other options? arguments in favor of going for options 1 or 3 instead of 2. we could do a "soft" version of 2 where we note that the chain is potentially broken due to bad initialization order > TODO Diagram: order of runtime operations (initialization, session change) diff --git a/roadmap/implementors-guide/src/runtime/configuration.md b/roadmap/implementers-guide/src/runtime/configuration.md similarity index 100% rename from roadmap/implementors-guide/src/runtime/configuration.md rename to roadmap/implementers-guide/src/runtime/configuration.md diff --git a/roadmap/implementors-guide/src/runtime/inclusion.md b/roadmap/implementers-guide/src/runtime/inclusion.md similarity index 69% rename from roadmap/implementors-guide/src/runtime/inclusion.md rename to roadmap/implementers-guide/src/runtime/inclusion.md index d201e3ed51b29a5cf379b2837894b1276ebb9083..17dbdc94cc022834a326ec4c48057ee93cc13d82 100644 --- a/roadmap/implementors-guide/src/runtime/inclusion.md +++ b/roadmap/implementers-guide/src/runtime/inclusion.md @@ -14,7 +14,7 @@ struct AvailabilityBitfield { struct CandidatePendingAvailability { core: CoreIndex, // availability core - receipt: CandidateReceipt, + descriptor: CandidateDescriptor, availability_votes: Bitfield, // one bit per validator. relay_parent_number: BlockNumber, // number of the relay-parent. backed_in_number: BlockNumber, @@ -62,26 +62,37 @@ All failed checks should lead to an unrecoverable error making the block invalid 1. check that each candidate corresponds to a scheduled core and that they are ordered in the same order the cores appear in assignments in `scheduled`. 1. check that `scheduled` is sorted ascending by `CoreIndex`, without duplicates. 1. check that there is no candidate pending availability for any scheduled `ParaId`. + 1. check that each candidate's `validation_data_hash` corresponds to a `PersistedValidationData` computed from the current state. 
+ > NOTE: With contextual execution in place, validation data will be obtained as of the state of the context block. However, only the state of the current block can be used for such a query.
 1. If the core assignment includes a specific collator, ensure the backed candidate is issued by that collator.
 1. Ensure that any code upgrade scheduled by the candidate does not happen within `config.validation_upgrade_frequency` of `Paras::last_code_upgrade(para_id, true)`, if any, comparing against the value of `Paras::FutureCodeUpgrades` for the given para ID.
 1. Check the collator's signature on the candidate data.
- 1. Transform each [`CommittedCandidateReceipt`](../types/candidate.md#committed-candidate-receipt) into the corresponding [`CandidateReceipt`](../types/candidate.md#candidate-receipt), setting the commitments aside.
 1. check the backing of the candidate using the signatures and the bitfields, comparing against the validators assigned to the groups, fetched with the `group_validators` lookup.
- 1. check that the upward messages, when combined with the existing queue size, are not exceeding `config.max_upward_queue_count` and `config.watermark_upward_queue_size` parameters.
+ 1. call `Router::check_upward_messages(para, commitments.upward_messages)` to check that the upward messages are valid.
+ 1. call `Router::check_processed_downward_messages(para, commitments.processed_downward_messages)` to check that the DMQ is properly drained.
+ 1. call `Router::check_hrmp_watermark(para, commitments.hrmp_watermark)` for each candidate to check the rules of processing the HRMP watermark.
+ 1. check that in the commitments of each candidate the horizontal messages are sorted by ascending recipient ParaId and that no two horizontal messages have the same recipient (a sketch of this check follows the routine list below).
+ 1. using `Router::verify_outbound_hrmp(sender, commitments.horizontal_messages)` ensure that each candidate sends a valid set of horizontal messages
 1. create an entry in the `PendingAvailability` map for each backed candidate with a blank `availability_votes` bitfield.
 1. create a corresponding entry in the `PendingAvailabilityCommitments` with the commitments.
 1. Return a `Vec` of all scheduled cores of the list of passed assignments that a candidate was successfully backed for, sorted ascending by CoreIndex.
* `enact_candidate(relay_parent_number: BlockNumber, CommittedCandidateReceipt)`:
 1. If the receipt contains a code upgrade, Call `Paras::schedule_code_upgrade(para_id, code, relay_parent_number + config.validationl_upgrade_delay)`.
 > TODO: Note that this is safe as long as we never enact candidates where the relay parent is across a session boundary. In that case, which we should be careful to avoid with contextual execution, the configuration might have changed and the para may de-sync from the host's understanding of it.
- 1. call `Router::queue_upward_messages` for each backed candidate, using the [`UpwardMessage`s](../types/messages.md#upward-message) from the [`CandidateCommitments`](../types/candidate.md#candidate-commitments).
+ 1. call `Router::enact_upward_messages` for each backed candidate, using the [`UpwardMessage`s](../types/messages.md#upward-message) from the [`CandidateCommitments`](../types/candidate.md#candidate-commitments).
+ 1. call `Router::queue_outbound_hrmp` with the para id of the candidate and the list of horizontal messages taken from the commitment,
+ 1. call `Router::prune_hrmp` with the para id of the candidate and the candidate's `hrmp_watermark`.
+ 1. call `Router::prune_dmq` with the para id of the candidate and the candidate's `processed_downward_messages`.
 1. Call `Paras::note_new_head` using the `HeadData` from the receipt and `relay_parent_number`.
* `collect_pending`:

  ```rust
  fn collect_pending(f: impl Fn(CoreIndex, BlockNumber) -> bool) -> Vec<CoreIndex> {
    // sweep through all paras pending availability. if the predicate returns true, when given the core index and
-    // the block number the candidate has been pending availability since, then clean up the corresponding storage for that candidate.
+    // the block number the candidate has been pending availability since, then clean up the corresponding storage for that candidate and the commitments.
    // return a vector of cleaned-up core IDs.
  }
  ```

+* `force_enact(ParaId)`: Forcibly enact the candidate with the given ID as though it had been deemed available by bitfields. This is a no-op if there is no candidate pending availability for this para-id. This should generally not be used but it is useful during execution of Runtime APIs, where the changes to the state are expected to be discarded directly after.
+* `candidate_pending_availability(ParaId) -> Option<CommittedCandidateReceipt>`: returns the `CommittedCandidateReceipt` pending availability for the para provided, if any.
+* `pending_availability(ParaId) -> Option<CandidatePendingAvailability>`: returns the metadata around the candidate pending availability for the para, if any.
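+
+As an editorial illustration of the recipient-ordering check in `process_candidates` above (names are hypothetical, with recipients simplified to integers):
+
+```rust
+/// Hedged sketch: horizontal messages must be sorted by strictly ascending
+/// recipient, which also rules out two messages sharing a recipient.
+fn check_sorted_unique_recipients(recipients: &[u32]) -> bool {
+    recipients.windows(2).all(|pair| pair[0] < pair[1])
+}
+```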
diff --git a/roadmap/implementors-guide/src/runtime/inclusioninherent.md b/roadmap/implementers-guide/src/runtime/inclusioninherent.md
similarity index 94%
rename from roadmap/implementors-guide/src/runtime/inclusioninherent.md
rename to roadmap/implementers-guide/src/runtime/inclusioninherent.md
index c9f1b2105f7efd760d1d20d6ba923ba56c586c25..d026cf180aa5217f929468cc4e1042c11be60b89 100644
--- a/roadmap/implementors-guide/src/runtime/inclusioninherent.md
+++ b/roadmap/implementers-guide/src/runtime/inclusioninherent.md
@@ -22,4 +22,5 @@ Included: Option<()>,
 1. Invoke `Scheduler::schedule(freed)`
 1. Invoke the `Inclusion::process_candidates` routine with the parameters `(backed_candidates, Scheduler::scheduled(), Scheduler::group_validators)`.
 1. Call `Scheduler::occupied` using the return value of the `Inclusion::process_candidates` call above, first sorting the list of assigned core indices.
+ 1. Call the `Router::process_upward_dispatchables` routine to execute all messages in upward dispatch queues.
 1. If all of the above succeeds, set `Included` to `Some(())`.
diff --git a/roadmap/implementers-guide/src/runtime/initializer.md b/roadmap/implementers-guide/src/runtime/initializer.md
new file mode 100644
index 0000000000000000000000000000000000000000..5fd2bc3bd60f2471f04db948c70fe4b63b185b73
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime/initializer.md
@@ -0,0 +1,41 @@
+# Initializer Module
+
+This module is responsible for initializing the other modules in a deterministic order. It also has one other purpose as described in the overview of the runtime: accepting and forwarding session change notifications.
+
+## Storage
+
+```rust
+HasInitialized: bool;
+// buffered session changes along with the block number at which they should be applied.
+//
+// typically this will be empty or one element long. ordered ascending by BlockNumber and insertion
+// order.
+BufferedSessionChanges: Vec<(BlockNumber, ValidatorSet, ValidatorSet)>;
+```
+
+## Initialization
+
+Before initializing modules, remove all changes from the `BufferedSessionChanges` with number less than or equal to the current block number, and apply the last one. The session change is applied to all modules in the same order as initialization.
+
+The other parachains modules are initialized in this order:
+
+1. Configuration
+1. Paras
+1. Scheduler
+1. Inclusion
+1. Validity
+1. Router
+
+The [Configuration Module](configuration.md) is first, since all other modules need to operate under the same configuration as each other. It would lead to inconsistency if, for example, the scheduler ran first and then the configuration was updated before the Inclusion module.
+
+Set `HasInitialized` to true.
+
+## Session Change
+
+Store the session change information in `BufferedSessionChanges` along with the block number at which it was submitted, plus one. Although the expected operational parameters of the block authorship system should prevent more than one change from being buffered at any time, it may occur. Regardless, we always need to track the block number at which the session change can be applied so as to remain flexible over session change notifications being issued before or after initialization of the current block.
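+
+A hedged sketch of this buffering rule (assuming `BlockNumber = u32` and a placeholder `SessionChange` payload; not the runtime's actual code):
+
+```rust
+struct SessionChange; // stand-in for the buffered notification data
+
+/// Drain every buffered change applicable at `now` and apply only the last one.
+fn take_applicable(
+    buffered: &mut Vec<(u32, SessionChange)>, // sorted ascending by apply-at block
+    now: u32,
+) -> Option<SessionChange> {
+    let mut last = None;
+    while buffered.first().map_or(false, |&(apply_at, _)| apply_at <= now) {
+        last = Some(buffered.remove(0).1);
+    }
+    last
+}
+```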
+
+## Finalization
+
+Finalization order is less important in this case than initialization order, so we finalize the modules in the reverse order from initialization.
+
+Set `HasInitialized` to false.
diff --git a/roadmap/implementors-guide/src/runtime/paras.md b/roadmap/implementers-guide/src/runtime/paras.md
similarity index 90%
rename from roadmap/implementors-guide/src/runtime/paras.md
rename to roadmap/implementers-guide/src/runtime/paras.md
index e80c2d102d2f1b90da20ffc04cd9b1e2e3916250..dbb169af17514a530e50cbe12bb99733c4ca394f 100644
--- a/roadmap/implementors-guide/src/runtime/paras.md
+++ b/roadmap/implementers-guide/src/runtime/paras.md
@@ -93,7 +93,8 @@ OutgoingParas: Vec<ParaId>;
 ## Session Change
 
-1. Clean up outgoing paras. This means removing the entries under `Heads`, `ValidationCode`, `FutureCodeUpgrades`, and `FutureCode`. An according entry should be added to `PastCode`, `PastCodeMeta`, and `PastCodePruning` using the outgoing `ParaId` and removed `ValidationCode` value. This is because any outdated validation code must remain available on-chain for a determined amount of blocks, and validation code outdated by de-registering the para is still subject to that invariant.
+1. Clean up outgoing paras.
+ 1. This means removing the entries under `Heads`, `ValidationCode`, `FutureCodeUpgrades`, and `FutureCode`. A corresponding entry should be added to `PastCode`, `PastCodeMeta`, and `PastCodePruning` using the outgoing `ParaId` and removed `ValidationCode` value. This is because any outdated validation code must remain available on-chain for a determined amount of blocks, and validation code outdated by de-registering the para is still subject to that invariant.
 1. Apply all incoming paras by initializing the `Heads` and `ValidationCode` using the genesis parameters.
 1. Amend the `Parachains` list to reflect changes in registered parachains.
 1. Amend the `Parathreads` set to reflect changes in registered parathreads.
@@ -112,6 +113,7 @@ OutgoingParas: Vec<ParaId>;
 * `is_parathread(ParaId) -> bool`: Returns true if the para ID references any live parathread.
 * `last_code_upgrade(id: ParaId, include_future: bool) -> Option<BlockNumber>`: The block number of the last scheduled upgrade of the requested para. Includes future upgrades if the flag is set. This is the `expected_at` number, not the `activated_at` number.
+* `persisted_validation_data(id: ParaId) -> Option<PersistedValidationData>`: Get the PersistedValidationData of the given para, assuming the context is the parent block. Returns `None` if the para is not known.
 
 ## Finalization
diff --git a/roadmap/implementers-guide/src/runtime/router.md b/roadmap/implementers-guide/src/runtime/router.md
new file mode 100644
index 0000000000000000000000000000000000000000..f16038319392af66b728ad36c1a2b0f5a4093e47
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime/router.md
@@ -0,0 +1,271 @@
+# Router Module
+
+The Router module is responsible for all messaging mechanisms supported between paras and the relay chain, specifically: UMP, DMP, HRMP and later XCMP.
+
+## Storage
+
+Storage layout:
+
+```rust,ignore
+/// Paras that are to be cleaned up at the end of the session.
+/// The entries are sorted ascending by the para id.
+OutgoingParas: Vec<ParaId>;
+/// Dispatchable objects ready to be dispatched onto the relay chain. The messages are processed in FIFO order.
+/// This is subject to `max_upward_queue_count` and
+/// `watermark_queue_size` from `HostConfiguration`.
+RelayDispatchQueues: map ParaId => Vec<RawDispatchable>;
+/// Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueues`.
+/// First item in the tuple is the count of messages and second
+/// is the total length (in bytes) of the message payloads.
+RelayDispatchQueueSize: map ParaId => (u32, u32);
+/// The ordered list of `ParaId`s that have a `RelayDispatchQueues` entry.
+NeedsDispatch: Vec<ParaId>;
+/// This is the para that will get dispatched first during the next upward dispatchable queue
+/// execution round.
+NextDispatchRoundStartWith: Option<ParaId>;
+/// The downward messages addressed for a certain para.
+DownwardMessageQueues: map ParaId => Vec<DownwardMessage>;
+```
+
+### HRMP
+
+HRMP related structs:
+
+```rust,ignore
+/// A description of a request to open an HRMP channel.
+struct HrmpOpenChannelRequest {
+    /// Indicates if this request was confirmed by the recipient.
+    confirmed: bool,
+    /// How many session boundaries ago this request was seen.
+    age: SessionIndex,
+    /// The amount that the sender supplied at the time of creation of this request.
+    sender_deposit: Balance,
+    /// The maximum number of messages that can be pending in the channel at once.
+    limit_used_places: u32,
+    /// The maximum total size of the messages that can be pending in the channel at once.
+    limit_used_bytes: u32,
+}
+
+/// The metadata of an HRMP channel.
+struct HrmpChannel {
+    /// The amount that the sender supplied as a deposit when opening this channel.
+    sender_deposit: Balance,
+    /// The amount that the recipient supplied as a deposit when accepting opening this channel.
+    recipient_deposit: Balance,
+    /// The maximum number of messages that can be pending in the channel at once.
+    limit_used_places: u32,
+    /// The maximum total size of the messages that can be pending in the channel at once.
+    limit_used_bytes: u32,
+    /// The current number of messages pending in the channel.
+    /// Invariant: should be less or equal to `limit_used_places`.
+    used_places: u32,
+    /// The total size in bytes of all message payloads in the channel.
+    /// Invariant: should be less or equal to `limit_used_bytes`.
+    used_bytes: u32,
+    /// A head of the Message Queue Chain for this channel.
+    /// Each link in this chain has the form:
+    /// `(prev_head, B, H(M))`, where
+    /// - `prev_head`: is the previous value of `mqc_head`.
+    /// - `B`: is the [relay-chain] block number in which a message was appended
+    /// - `H(M)`: is the hash of the message being appended.
+    /// This value is initialized to a special value that consists of all zeroes which indicates
+    /// that no messages were previously added.
+    mqc_head: Hash,
+}
+```
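+
+As an editorial aside, the MQC head update described above can be sketched as follows (illustrative; `hash` stands in for the runtime's 256-bit cryptographic hasher and is left unimplemented):
+
+```rust
+type Hash = [u8; 32];
+
+fn hash(_data: &[u8]) -> Hash {
+    unimplemented!("placeholder for the runtime's hasher, e.g. a BLAKE2b-style function")
+}
+
+/// Hedged sketch: the new head commits to `(prev_head, B, H(M))`.
+fn append_mqc_link(prev_head: Hash, block_number: u32, message: &[u8]) -> Hash {
+    let mut link = Vec::new();
+    link.extend_from_slice(&prev_head);                  // prev_head
+    link.extend_from_slice(&block_number.to_le_bytes()); // B
+    link.extend_from_slice(&hash(message));              // H(M)
+    hash(&link)
+}
+```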
+HRMP related storage layout:
+
+```rust,ignore
+/// The set of pending HRMP open channel requests.
+///
+/// The set is accompanied by a list for iteration.
+///
+/// Invariant:
+/// - There are no channels that exist in the list but not in the set and vice versa.
+HrmpOpenChannelRequests: map HrmpChannelId => Option<HrmpOpenChannelRequest>;
+HrmpOpenChannelRequestsList: Vec<HrmpChannelId>;
+
+/// This mapping tracks how many open channel requests are initiated by a given sender para.
+/// Invariant: the number of items in `HrmpOpenChannelRequestsList` with sender `X` should
+/// equal `HrmpOpenChannelRequestCount` for `X`.
+HrmpOpenChannelRequestCount: map ParaId => u32;
+
+/// A set of pending HRMP close channel requests that are going to be closed during the session change.
+/// Used for checking if a given channel is registered for closure.
+///
+/// The set is accompanied by a list for iteration.
+///
+/// Invariant:
+/// - There are no channels that exist in the list but not in the set and vice versa.
+HrmpCloseChannelRequests: map HrmpChannelId => Option<()>;
+HrmpCloseChannelRequestsList: Vec<HrmpChannelId>;
+
+/// The HRMP watermark associated with each para.
+HrmpWatermarks: map ParaId => Option<BlockNumber>;
+/// HRMP channel data associated with each para.
+HrmpChannels: map HrmpChannelId => Option<HrmpChannel>;
+/// The indexes that map all senders to their receivers and vice versa.
+/// Invariants:
+/// - for each ingress index entry for `P` each item `I` in the index should be present in `HrmpChannels` as `(I, P)`.
+/// - for each egress index entry for `P` each item `E` in the index should be present in `HrmpChannels` as `(P, E)`.
+/// - there should be no other dangling channels in `HrmpChannels`.
+HrmpIngressChannelsIndex: map ParaId => Vec<ParaId>;
+HrmpEgressChannelsIndex: map ParaId => Vec<ParaId>;
+/// Storage for the messages for each channel.
+/// Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`.
+HrmpChannelContents: map HrmpChannelId => Vec<InboundHrmpMessage>;
+/// Maintains a mapping that can be used to answer the question:
+/// what paras sent a message at the given block number for a given receiver.
+HrmpChannelDigests: map ParaId => Vec<(BlockNumber, Vec<ParaId>)>;
+```
+
+## Initialization
+
+No initialization routine runs for this module.
+
+## Routines
+
+Candidate Acceptance Function:
+
+* `check_upward_messages(P: ParaId, Vec<UpwardMessage>)`:
+  1. Checks that there are at most `config.max_upward_message_num_per_candidate` messages.
+  1. Checks each upward message `M` individually depending on its kind:
+     1. If the message kind is `Dispatchable`:
+        1. Verify that `RelayDispatchQueueSize` for `P` has enough capacity for the message (NOTE that this should include all upward messages of the `Dispatchable` kind processed up to this point!)
+     1. If the message kind is `HrmpInitOpenChannel(recipient)`:
+        1. Check that `P` is not `recipient`.
+        1. Check that `recipient` is a valid para.
+        1. Check that there is no existing open channel request (`P`, `recipient`) in `HrmpOpenChannelRequests`.
+        1. Check that the sum of the number of already opened HRMP channels by the `sender` (the size of the set found in `HrmpEgressChannelsIndex` for `sender`) and the number of open requests by the `sender` (the value from `HrmpOpenChannelRequestCount` for `sender`) doesn't exceed the limit of channels (`config.hrmp_max_parachain_outbound_channels` or `config.hrmp_max_parathread_outbound_channels`) minus 1.
+        1. Check that `P`'s balance is at least `config.hrmp_sender_deposit`
+     1. If the message kind is `HrmpAcceptOpenChannel(sender)`:
+        1. Check that there is an existing open channel request (`sender`, `P`) in `HrmpOpenChannelRequests`
+        1. Check that `P`'s balance is at least `config.hrmp_recipient_deposit`.
+     1. If the message kind is `HrmpCloseChannel(ch)`:
+        1. Check that `P` is either `ch.sender` or `ch.recipient`
+        1. Check that `HrmpChannels` for `ch` exists.
+        1. Check that `ch` is not in the `HrmpCloseChannelRequests` set.
+* `check_processed_downward_messages(P: ParaId, processed_downward_messages)`:
+  1. Checks that `DownwardMessageQueues` for `P` is at least `processed_downward_messages` long.
+  1. Checks that `processed_downward_messages` is at least 1 if `DownwardMessageQueues` for `P` is not empty.
+* `check_hrmp_watermark(P: ParaId, new_hrmp_watermark)` (sketched after this list):
+  1. `new_hrmp_watermark` should be strictly greater than the value of `HrmpWatermarks` for `P` (if any).
+  1. `new_hrmp_watermark` must not be greater than the context's block number.
+  1. in `HrmpChannelDigests` for `P` an entry with the block number equal to `new_hrmp_watermark` should exist.
+* `verify_outbound_hrmp(sender: ParaId, Vec<OutboundHrmpMessage>)`:
+  1. For each horizontal message `M` with the channel `C` identified by `(sender, M.recipient)` check:
+     1. the channel `C` exists
+     1. `M`'s payload size summed with the `C.used_bytes` doesn't exceed a preconfigured limit `C.limit_used_bytes`.
+     1. `C.used_places + 1` doesn't exceed a preconfigured limit `C.limit_used_places`.
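+
+A hedged sketch of the `check_hrmp_watermark` rules referenced above (assuming `BlockNumber = u32`, with `digest_blocks` holding the block numbers present in `HrmpChannelDigests` for the para):
+
+```rust
+fn check_hrmp_watermark(
+    current: Option<u32>,
+    new_watermark: u32,
+    context_block: u32,
+    digest_blocks: &[u32],
+) -> bool {
+    current.map_or(true, |w| new_watermark > w)   // strictly advances
+        && new_watermark <= context_block         // not in the future
+        && digest_blocks.contains(&new_watermark) // matches a digest entry
+}
+```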
+
+Candidate Enactment:
+
+* `queue_outbound_hrmp(sender: ParaId, Vec<OutboundHrmpMessage>)`:
+  1. For each horizontal message `HM` with the channel `C` identified by `(sender, HM.recipient)`:
+     1. Append `HM` into `HrmpChannelContents` that corresponds to `C`.
+     1. Locate or create an entry in `HrmpChannelDigests` for `HM.recipient` and append `sender` into the entry's list.
+     1. Increment `C.used_places`
+     1. Increment `C.used_bytes` by `HM`'s payload size
+     1. Append a new link to the MQC and save the new head in `C.mqc_head`. Note that the current block number as of enactment is used for the link.
+* `prune_hrmp(recipient, new_hrmp_watermark)`:
+  1. From `HrmpChannelDigests` for `recipient` remove all entries up to an entry with block number equal to `new_hrmp_watermark`.
+  1. From the removed digests construct a set of paras that sent new messages within the interval between the old and new watermarks.
+  1. For each channel `C` identified by `(sender, recipient)` for each `sender` coming from the set, prune messages up to the `new_hrmp_watermark`.
+  1. For each pruned message `M` from channel `C`:
+     1. Decrement `C.used_places`
+     1. Decrement `C.used_bytes` by `M`'s payload size.
+  1. Set `HrmpWatermarks` for `recipient` to be equal to `new_hrmp_watermark`
+* `prune_dmq(P: ParaId, processed_downward_messages)`:
+  1. Remove the first `processed_downward_messages` from the `DownwardMessageQueues` of `P`.
+* `enact_upward_messages(P: ParaId, Vec<UpwardMessage>)`:
+  1. Process all upward messages in order depending on their kinds:
+     1. If the message kind is `Dispatchable`:
+        1. Append the message to `RelayDispatchQueues` for `P`
+        1. Increment the size and the count in `RelayDispatchQueueSize` for `P`.
+        1. Ensure that `P` is present in `NeedsDispatch`.
+     1. If the message kind is `HrmpInitOpenChannel(recipient)`:
+        1. Increase `HrmpOpenChannelRequestCount` by 1 for `P`.
+        1. Append `(P, recipient)` to `HrmpOpenChannelRequestsList`.
+        1. Add a new entry to `HrmpOpenChannelRequests` for `(P, recipient)`
+           1. Set `sender_deposit` to `config.hrmp_sender_deposit`
+           1. Set `limit_used_places` to `config.hrmp_channel_max_places`
+           1. Set `limit_used_bytes` to `config.hrmp_channel_max_size`
+        1. Reserve the deposit for `P` according to `config.hrmp_sender_deposit`
+     1. If the message kind is `HrmpAcceptOpenChannel(sender)`:
+        1. Reserve the deposit for `P` according to `config.hrmp_recipient_deposit`
+        1. For the request in `HrmpOpenChannelRequests` identified by `(sender, P)`, set `confirmed` flag to `true`.
+     1. If the message kind is `HrmpCloseChannel(ch)`:
+        1. Insert a new entry `Some(())` to `HrmpCloseChannelRequests` for `ch`.
+        1. Append `ch` to `HrmpCloseChannelRequestsList`.
+
+The following routine is intended to be called at the same time as `Paras::schedule_para_cleanup` is called.
+
+`schedule_para_cleanup(ParaId)`:
+  1. Add the para into the `OutgoingParas` vector maintaining the sorted order.
+
+The following routine is meant to execute pending entries in upward dispatchable queues (a simplified sketch follows this list). This function doesn't fail, even if any of the dispatchables return an error.
+
+`process_upward_dispatchables()`:
+  1. Initialize a cumulative weight counter `T` to 0
+  1. Initialize a local in-memory dictionary `R` that maps `ParaId` to a vector of `DispatchResult`.
+  1. Iterate over items in `NeedsDispatch` cyclically, starting with `NextDispatchRoundStartWith`. If that item is `None`, start from the beginning. For each `P` encountered:
+     1. Dequeue the first dispatchable `D` from `RelayDispatchQueues` for `P`
+     1. Decrement the size of the message from `RelayDispatchQueueSize` for `P`
+     1. Decode `D` into a dispatchable. If decoding fails, append `DispatchResult::DecodeFailed` into `R` for `P`. Otherwise, if it succeeds:
+        1. If `weight_of(D) > config.dispatchable_upward_message_critical_weight` then append `DispatchResult::CriticalWeightExceeded` into `R` for `P`. Otherwise:
+           1. Execute `D` and add the actual amount of weight consumed to `T`. Add the `DispatchResult` into `R` for `P`.
+     1. If `weight_of(D) + T > config.preferred_dispatchable_upward_messages_step_weight`, set `NextDispatchRoundStartWith` to `P` and finish processing.
+        > NOTE that in practice we would need to approach the weight calculation more thoroughly, i.e. incorporate all operations that could take place in the course of handling these dispatchables.
+     1. If `RelayDispatchQueues` for `P` became empty, remove `P` from `NeedsDispatch`.
+     1. If `NeedsDispatch` became empty then finish processing and set `NextDispatchRoundStartWith` to `None`.
+  1. Then, for each `P` and the vector of `DispatchResult` in `R`:
+     1. Obtain a message by wrapping the vector into `DownwardMessage::DispatchResult`
+     1. Append the resulting message to `DownwardMessageQueues` for `P`.
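+
+A heavily simplified, hedged sketch of this round-robin loop (queues map a para to per-item weights; this is not the runtime's bookkeeping, and the resume point is here set to the next para rather than the current one):
+
+```rust
+/// Execute queued dispatchables para-by-para until the preferred weight budget
+/// is exhausted; return the para to resume from next round, if any.
+fn process_round(
+    queues: &mut Vec<(u32 /* para */, Vec<u64> /* item weights */)>,
+    preferred_step_weight: u64,
+) -> Option<u32> {
+    let mut consumed = 0u64;
+    let mut i = 0;
+    while !queues.is_empty() {
+        consumed += queues[i].1.remove(0); // "execute" the first dispatchable
+        if queues[i].1.is_empty() {
+            queues.remove(i); // analogous to removing the para from NeedsDispatch
+            if queues.is_empty() {
+                return None;
+            }
+        } else {
+            i += 1;
+        }
+        if i >= queues.len() {
+            i = 0; // cyclic iteration
+        }
+        if consumed >= preferred_step_weight {
+            return Some(queues[i].0); // analogous to NextDispatchRoundStartWith
+        }
+    }
+    None
+}
+```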
+
+## Session Change
+
+1. Drain `OutgoingParas`. For each `P` in the list:
+   1. Remove all inbound channels of `P`, i.e. `(_, P)`,
+   1. Remove all outbound channels of `P`, i.e. `(P, _)`,
+   1. Remove all `DownwardMessageQueues` of `P`.
+   1. Remove `RelayDispatchQueueSize` of `P`.
+   1. Remove `RelayDispatchQueues` of `P`.
+   1. Remove `P` if it exists in `NeedsDispatch`.
+   1. If `P` is in `NextDispatchRoundStartWith`, then reset it to `None`
+   - Note that we don't remove the open/close requests, since they will die out naturally at the end of the session.
+1. For each channel designator `D` in `HrmpOpenChannelRequestsList` we query the request `R` from `HrmpOpenChannelRequests`:
+   1. if `R.confirmed = false`:
+      1. increment `R.age` by 1.
+      1. if `R.age` reached a preconfigured time-to-live limit `config.hrmp_open_request_ttl`, then:
+         1. refund `R.sender_deposit` to the sender
+         1. decrement `HrmpOpenChannelRequestCount` for `D.sender` by 1.
+         1. remove `R`
+         1. remove `D`
+   1. if `R.confirmed = true`:
+      1. if both `D.sender` and `D.recipient` are not offboarded:
+         1. create a new channel `C` between `(D.sender, D.recipient)`.
+            1. Initialize `C.sender_deposit` with `R.sender_deposit` and `C.recipient_deposit` with the value found in the configuration `config.hrmp_recipient_deposit`.
+            1. Insert `sender` into the set `HrmpIngressChannelsIndex` for `recipient`.
+            1. Insert `recipient` into the set `HrmpEgressChannelsIndex` for `sender`.
+      1. decrement `HrmpOpenChannelRequestCount` for `D.sender` by 1.
+      1. remove `R`
+      1. remove `D`
+1. For each channel designator `D` in `HrmpCloseChannelRequestsList`:
+   1. remove the channel identified by `D`, if it exists.
+   1. remove `D` from `HrmpCloseChannelRequests`.
+   1. remove `D` from `HrmpCloseChannelRequestsList`
+
+To remove a channel `C` identified with a tuple `(sender, recipient)`:
+
+1. Return `C.sender_deposit` to the `sender`.
+1. Return `C.recipient_deposit` to the `recipient`.
+1. Remove `C` from `HrmpChannels`.
+1. Remove `C` from `HrmpChannelContents`.
+1. Remove `recipient` from the set `HrmpEgressChannelsIndex` for `sender`.
+1. Remove `sender` from the set `HrmpIngressChannelsIndex` for `recipient`.
diff --git a/roadmap/implementors-guide/src/runtime/scheduler.md b/roadmap/implementers-guide/src/runtime/scheduler.md
similarity index 76%
rename from roadmap/implementors-guide/src/runtime/scheduler.md
rename to roadmap/implementers-guide/src/runtime/scheduler.md
index 0b6a60a383ee82668f86342db7215f007fc3c399..fa78237089fa61bcd8cb89192b4bcf221739f4c9 100644
--- a/roadmap/implementors-guide/src/runtime/scheduler.md
+++ b/roadmap/implementers-guide/src/runtime/scheduler.md
@@ -14,66 +14,88 @@ It aims to achieve these tasks with these goals in mind:
 - Validator assignments should not be gameable. Malicious cartels should not be able to manipulate the scheduler to assign themselves as desired.
 - High or close to optimal throughput of parachains and parathreads. Work among validator groups should be balanced.
 
+## Availability Cores
+
 The Scheduler manages resource allocation using the concept of "Availability Cores". There will be one availability core for each parachain, and a fixed number of cores used for multiplexing parathreads. Validators will be partitioned into groups, with the same number of groups as availability cores. Validator groups will be assigned to different availability cores over time.
 
-An availability core can exist in either one of two states at the beginning or end of a block: free or occupied. A free availability core can have a parachain or parathread assigned to it for the potential to have a backed candidate included. After inclusion, the core enters the occupied state as the backed candidate is pending availability.
There is an important distinction: a core is not considered occupied until it is in charge of a block pending availability, although the implementation may treat scheduled cores the same as occupied ones for brevity. A core exits the occupied state when the candidate is no longer pending availability - either on timeout or on availability. A core starting in the occupied state can move to the free state and back to occupied all within a single block, as availability bitfields are processed before backed candidates. At the end of the block, there is a possible timeout on availability which can move the core back to the free state if occupied. +An availability core can exist in either one of two states at the beginning or end of a block: free or occupied. A free availability core can have a parachain or parathread assigned to it for the potential to have a backed candidate included. After backing, the core enters the occupied state as the backed candidate is pending availability. There is an important distinction: a core is not considered occupied until it is in charge of a block pending availability, although the implementation may treat scheduled cores the same as occupied ones for brevity. A core exits the occupied state when the candidate is no longer pending availability - either on timeout or on availability. A core starting in the occupied state can move to the free state and back to occupied all within a single block, as availability bitfields are processed before backed candidates. At the end of the block, there is a possible timeout on availability which can move the core back to the free state if occupied. + +Cores are treated as an ordered list and are typically referred to by their index in that list. -```text -Availability Core State Machine +```dot process +digraph { + label = "Availability Core State Machine\n\n\n"; + labelloc = "t"; - Assignment & - Backing -+-----------+ +-----------+ -| +--------------> | -| Free | | Occupied | -| <--------------+ | -+-----------+ Availability +-----------+ - or Timeout + { rank=same vg1 vg2 } + vg1 [label = "Free" shape=rectangle] + vg2 [label = "Occupied" shape=rectangle] + + vg1 -> vg2 [label = "Assignment & Backing" ] + vg2 -> vg1 [label = "Availability or Timeout" ] +} ``` -```text -Availability Core Transitions within Block - - +-----------+ | +-----------+ - | | | | | - | Free | | | Occupied | - | | | | | - +--/-----\--+ | +--/-----\--+ - /- -\ | /- -\ - No Backing /- \ Backing | Availability /- \ No availability - /- \ | / \ - /- -\ | /- -\ - +-----v-----+ +----v------+ | +-----v-----+ +-----v-----+ - | | | | | | | | | - | Free | | Occupied | | | Free | | Occupied | - | | | | | | | | | - +-----------+ +-----------+ | +-----|---\-+ +-----|-----+ - | | \ | - | No backing | \ Backing | (no change) - | | -\ | - | +-----v-----+ \ +-----v-----+ - | | | \ | | - | | Free -----+---> Occupied | - | | | | | - | +-----------+ +-----------+ - | Availability Timeout +```dot process +digraph { + label = "Availability Core Transitions within Block\n\n\n"; + labelloc = "t"; + splines="line"; + + subgraph cluster_left { + label = ""; + labelloc = "t"; + + fr1 [label = "Free" shape=rectangle] + fr2 [label = "Free" shape=rectangle] + occ [label = "Occupied" shape=rectangle] + + fr1 -> fr2 [label = "No Backing"] + fr1 -> occ [label = "Backing"] + + { rank=same fr2 occ } + } + + subgraph cluster_right { + label = ""; + labelloc = "t"; + + occ2 [label = "Occupied" shape=rectangle] + fr3 [label = "Free" shape=rectangle] + fr4 [label = "Free" 
shape=rectangle] + occ3 [label = "Occupied" shape=rectangle] + occ4 [label = "Occupied" shape=rectangle] + + occ2 -> fr3 [label = "Availability"] + occ2 -> occ3 [label = "No availability"] + fr3 -> fr4 [label = "No backing"] + fr3 -> occ4 [label = "Backing"] + occ3 -> occ4 [label = "(no change)"] + occ3 -> fr3 [label = "Availability Timeout"] + + { rank=same; fr3[group=g1]; occ3[group=g2] } + { rank=same; fr4[group=g1]; occ4[group=g2] } + } +} ``` +## Validator Groups + Validator group assignments do not need to change very quickly. The security benefits of fast rotation is redundant with the challenge mechanism in the [Validity module](validity.md). Because of this, we only divide validators into groups at the beginning of the session and do not shuffle membership during the session. However, we do take steps to ensure that no particular validator group has dominance over a single parachain or parathread-multiplexer for an entire session to provide better guarantees of liveness. Validator groups rotate across availability cores in a round-robin fashion, with rotation occurring at fixed intervals. The i'th group will be assigned to the `(i+k)%n`'th core at any point in time, where `k` is the number of rotations that have occurred in the session, and `n` is the number of cores. This makes upcoming rotations within the same session predictable. When a rotation occurs, validator groups are still responsible for distributing availability chunks for any previous cores that are still occupied and pending availability. In practice, rotation and availability-timeout frequencies should be set so this will only be the core they have just been rotated from. It is possible that a validator group is rotated onto a core which is currently occupied. In this case, the validator group will have nothing to do until the previously-assigned group finishes their availability work and frees the core or the availability process times out. Depending on if the core is for a parachain or parathread, a different timeout `t` from the [`HostConfiguration`](../types/runtime.md#host-configuration) will apply. Availability timeouts should only be triggered in the first `t-1` blocks after the beginning of a rotation. +## Claims + Parathreads operate on a system of claims. Collators participate in auctions to stake a claim on authoring the next block of a parathread, although the auction mechanism is beyond the scope of the scheduler. The scheduler guarantees that they'll be given at least a certain number of attempts to author a candidate that is backed. Attempts that fail during the availability phase are not counted, since ensuring availability at that stage is the responsibility of the backing validators, not of the collator. When a claim is accepted, it is placed into a queue of claims, and each claim is assigned to a particular parathread-multiplexing core in advance. Given that the current assignments of validator groups to cores are known, and the upcoming assignments are predictable, it is possible for parathread collators to know who they should be talking to now and how they should begin establishing connections with as a fallback. With this information, the Node-side can be aware of which parathreads have a good chance of being includable within the relay-chain block and can focus any additional resources on backing candidates from those parathreads. Furthermore, Node-side code is aware of which validator group will be responsible for that thread. 
If the necessary conditions are reached for core reassignment, those candidates can be backed within the same block as the core being freed.
 
 Parathread claims, when scheduled onto a free core, may not result in a block pending availability. This may be due to collator error, networking timeout, or censorship by the validator group. In this case, the claims should be retried a certain number of times to give the collator a fair shot.
 
-Cores are treated as an ordered list of cores and are typically referred to by their index in that list.
-
 ## Storage
 
 Utility structs:
@@ -131,8 +153,7 @@ Storage layout:
 ValidatorGroups: Vec<Vec<ValidatorIndex>>;
 /// A queue of upcoming claims and which core they should be mapped onto.
 ParathreadQueue: ParathreadQueue;
-/// One entry for each availability core. Entries are `None` if the core is not currently occupied. Can be
-/// temporarily `Some` if scheduled but not occupied.
+/// One entry for each availability core. Entries are `None` if the core is not currently occupied.
 /// The i'th parachain belongs to the i'th core, with the remaining cores all being
 /// parathread-multiplexers.
 AvailabilityCores: Vec<Option<CoreOccupied>>;
@@ -156,7 +177,7 @@ Actions:
 1. Set `configuration = Configuration::configuration()` (see [`HostConfiguration`](../types/runtime.md#host-configuration))
 1. Resize `AvailabilityCores` to have length `Paras::parachains().len() + configuration.parathread_cores` with all `None` entries.
 1. Compute new validator groups by shuffling using a secure randomness beacon
-   - We need a total of `N = Paras::parathreads().len() + configuration.parathread_cores` validator groups.
+   - We need a total of `N = Paras::parachains().len() + configuration.parathread_cores` validator groups.
   - The total number of validators `V` in the `SessionChangeNotification`'s `validators` may not be evenly divisible by `N`.
   - First, we obtain "shuffled validators" `SV` by shuffling the validators using the `SessionChangeNotification`'s random seed.
   - The groups are selected by partitioning `SV`. The first V % N groups will have (V / N) + 1 members, while the remaining groups will have (V / N) members each.
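+
+A hedged sketch of this partitioning (splitting shuffled validator indices `sv` into `n` groups; illustrative only):
+
+```rust
+fn partition_groups(sv: Vec<u32>, n: usize) -> Vec<Vec<u32>> {
+    let v = sv.len();
+    let mut iter = sv.into_iter();
+    (0..n)
+        .map(|g| {
+            // The first v % n groups get one extra member.
+            let size = v / n + if g < v % n { 1 } else { 0 };
+            iter.by_ref().take(size).collect()
+        })
+        .collect()
+}
+```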
For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that core, and is `None` if there isn't one. +- `next_up_on_time_out(CoreIndex) -> Option<ScheduledCore>`: Return the next thing that will be scheduled on this core assuming it is currently occupied and the candidate occupying it timed out. Returns in `ScheduledCore` format (todo: link to Runtime APIs page; linkcheck doesn't allow this right now). For parachains, this is always the ID of the parachain and no specified collator. For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that core, or if there isn't one, the claim that is currently occupying the core. Otherwise `None`. diff --git a/roadmap/implementors-guide/src/runtime/validity.md b/roadmap/implementers-guide/src/runtime/validity.md similarity index 62% rename from roadmap/implementors-guide/src/runtime/validity.md rename to roadmap/implementers-guide/src/runtime/validity.md index 11907cea779384cd92e0be9f2a28091b9ea32587..ee391dd78b745b2dd0d593839b2d0e7776dcf21d 100644 --- a/roadmap/implementors-guide/src/runtime/validity.md +++ b/roadmap/implementers-guide/src/runtime/validity.md @@ -17,8 +17,39 @@ We account for these requirements by having the validity module handle two kinds 1. Local disputes: those contesting the validity of the current fork by disputing a parablock included within it. 1. Remote disputes: a dispute that has partially or fully resolved on another fork which is transplanted to the local fork for completion and eventual slashing. +## Approval + +We begin approval checks on any candidate immediately once it becomes available. + +Assigning approval checks involves VRF secret keys held by every validator, making it primarily an off-chain process. All assignment criteria require specific data called "stories" about the relay chain block in which the candidate assigned by that criterion became available. Among these criteria, the BABE VRF output provides the story for two, and the other's story consists of the candidate's block hash plus external knowledge that a relay chain equivocation exists with a conflicting candidate. + +We liberate availability cores when their candidate becomes available of course, but one approval assignment criterion continues associating each candidate with the core number it occupied when it became available. + +Assignment proceeds in loosely timed rounds called `DelayTranche`s, roughly 12 times faster than block production, in which validators send assignment notices until all candidates have enough checkers assigned. Assignment also tracks when approval votes arrive, and assigns more checkers if some checkers run late. + +Approval checks provide more security than backing checks, so polkadot becomes more efficient when validators perform more approval checks per backing check. If validators run 4 approval checks for every backing check, and run almost one backing check per relay chain block, then validators actually check almost 5 blocks per relay chain block. + +We should therefore reward approval checkers correctly, because approval checks should actually represent our single largest workload. It follows that both assignment notices and approval votes should be tracked on-chain. + +We might track the assignments and approvals together as pairs in a simple rewards system. There are however two reasons to witness approvals on-chain by tracking assignments and approvals there: rewards and finality integration.
+ +First, an approval that arrives too slowly prompts assigning extra "no show" replacement checkers. Yet, we consider a block valid if the earlier checker completes their work, even if the extra checkers never quite finish, which complicates rewarding these extra checkers. We could support more nuanced rewards for extra checkers if assignments are placed on-chain earlier. Assignment delay tranches progress 12-ish times faster than the relay chain, but no shows could still be witnessed by the relay chain because the no show delay takes longer than a relay chain slot. + +Second, we know off-chain when the approval process completes based upon all gossiped assignment notices, not just the approving ones. We need not-yet-approved assignment notices to appear on-chain if the chain should know about the validity of recently approved blocks. Relay chain blocks become eligible for finality in GRANDPA only once all their included candidates pass approvals checks, meaning all assigned checkers either voted approve or else were declared "no show" and replaced by more assigned checkers. A purely off-chain approvals scheme complicates GRANDPA with additional objections logic. + +Integration with GRANDPA appears simplest if we witness approvals on-chain: Aside from inherents for assignment notices and approval votes, we provide an "Approved" inherent by which a relay chain block declares a past relay chain block approved. In other words, it triggers the on-chain approval counting logic in a relay chain block `R1` to rerun the assignment and approval tracker logic for some ancestor `R0`, which then declares `R0` approved. In this case, we could integrate with GRANDPA by gossiping messages that list the descendant `R1`, but then map this into the approved ancestor `R0` for GRANDPA itself. + +Approval votes could be recorded on-chain quickly because they represent a major commitment. + +Assignment notices should be recorded on-chain only when relevant. Any sent too early are retained but ignored until relevant by our off-chain assignment system. Assignments are ignored completely by the dispute system because any dispute immediately escalates into all validators checking, but disputes count existing approval votes of course. + + ## Local Disputes +There is little overlap between the approval system and the disputes system, since disputes care only that two validators disagree. We do however require that disputes count validity votes from elsewhere, both the backing votes and the approval votes. + +We could approve, and even finalize, a relay chain block which is later disputed due to claims of some parachain being invalid. + > TODO: store all included candidate and attestations on them here. accept additional backing after the fact. accept reports based on VRF. candidate included in session S should only be reported on by validator keys from session S. trigger slashing. probably only slash for session S even if the report was submitted in session S+k because it is hard to unify identity One first question is to ask why different logic for local disputes is necessary. It seems that local disputes are necessary in order to create the first escalation that leads to block producers abandoning the chain and making remote disputes possible. @@ -29,11 +60,9 @@ For each such parablock, it is guaranteed by the inclusion pipeline that the par Disputes may occur against blocks that have happened in the session prior to the current one, from the perspective of the chain.
In this case, the prior validator set is responsible for handling the dispute and must do so with their keys from the last session. This means that validator duty actually extends 1 session beyond leaving the validator set. -Validators self-select based on the BABE VRF output included by the block author in the block that the candidate became available. - -> TODO: some more details from Jeff's paper. +... -After enough validators have self-selected, the quorum will be clear and validators on the wrong side will be slashed. After concluding, the dispute will remain open for some time in order to collect further evidence of misbehaving validators, and then issue a signal in the header-chain that this fork should be abandoned along with the hash of the last ancestor before inclusion, which the chain should be reverted to, along with information about the invalid block that should be used to blacklist it from being included. +After concluding with enough validators voting, the dispute will remain open for some time in order to collect further evidence of misbehaving validators, and then issue a signal in the header-chain that this fork should be abandoned along with the hash of the last ancestor before inclusion, which the chain should be reverted to, along with information about the invalid block that should be used to blacklist it from being included. ## Remote Disputes diff --git a/roadmap/implementors-guide/src/types/README.md b/roadmap/implementers-guide/src/types/README.md similarity index 100% rename from roadmap/implementors-guide/src/types/README.md rename to roadmap/implementers-guide/src/types/README.md diff --git a/roadmap/implementers-guide/src/types/availability.md b/roadmap/implementers-guide/src/types/availability.md new file mode 100644 index 0000000000000000000000000000000000000000..0117b174e645072b3553b6bc77a2f4b3ccc63f62 --- /dev/null +++ b/roadmap/implementers-guide/src/types/availability.md @@ -0,0 +1,65 @@ +# Availability + +One of the key roles of validators is to ensure availability of all data necessary to validate +candidates for the duration of a challenge period. This is done via an erasure-coding of the data that must be kept available. + +## Signed Availability Bitfield + +A bitfield [signed](backing.md#signed-wrapper) by a particular validator about the availability of pending candidates. + + +```rust +type SignedAvailabilityBitfield = Signed<Bitfield>; + +struct Bitfields(Vec<(SignedAvailabilityBitfield)>), // bitfields sorted by validator index, ascending +``` + +### Semantics + +A `SignedAvailabilityBitfield` represents the view from a particular validator's perspective. Each bit in the bitfield corresponds to a single [availability core](../runtime-api/availability-cores.md). A `1` bit indicates that the validator believes the following statements to be true for a core: + +- the availability core is occupied +- there exists a [`CommittedCandidateReceipt`](candidate.html#committed-candidate-receipt) corresponding to that core. In other words, that para has a block in progress. +- the validator's [Availability Store](../node/utility/availability-store.md) contains a chunk of that parablock's PoV. + +In other words, it is the transpose of [`OccupiedCore::availability`](../runtime-api/availability-cores.md). + +## Proof-of-Validity + +Often referred to as PoV, this is a type-safe wrapper around bytes (`Vec<u8>`) for data that acts as a stateless-client proof of validity of a candidate when used as input to the validation function of the para.
+ +```rust +struct PoV(Vec<u8>); +``` + + +## Available Data + +This is the data we want to keep available for each [candidate](candidate.md) included in the relay chain. This is the PoV of the block, as well as the [`PersistedValidationData`](candidate.md#persistedvalidationdata). + +```rust +struct AvailableData { + /// The Proof-of-Validity of the candidate. + pov: PoV, + /// The persisted validation data used to check the candidate. + validation_data: PersistedValidationData, +} +``` + +> TODO: With XCMP, we also need to keep available the outgoing messages as a result of para-validation. + +## Erasure Chunk + +The [`AvailableData`](#availabledata) is split up into an erasure-coding as part of the availability process. Each validator gets a chunk. This describes one of those chunks, along with its proof against a Merkle root hash, which should be apparent from context; it is the `erasure_root` field of a [`CandidateDescriptor`](candidate.md#candidatedescriptor). + + +```rust +struct ErasureChunk { + /// The erasure-encoded chunk of data belonging to the candidate block. + chunk: Vec<u8>, + /// The index of this erasure-encoded chunk of data. + index: u32, + /// Proof for this chunk's branch in the Merkle tree. + proof: Vec<Vec<u8>>, +} +``` diff --git a/roadmap/implementors-guide/src/types/backing.md b/roadmap/implementers-guide/src/types/backing.md similarity index 100% rename from roadmap/implementors-guide/src/types/backing.md rename to roadmap/implementers-guide/src/types/backing.md diff --git a/roadmap/implementers-guide/src/types/candidate.md b/roadmap/implementers-guide/src/types/candidate.md new file mode 100644 index 0000000000000000000000000000000000000000..70191af973a8aa5b2eea043cea5099667ff323f7 --- /dev/null +++ b/roadmap/implementers-guide/src/types/candidate.md @@ -0,0 +1,237 @@ +# Candidate Types + +Para candidates are some of the most common types, both within the runtime and on the Node-side. +Candidates are the fundamental datatype for advancing parachains and parathreads, encapsulating the collator's signature, the context of the parablock, the commitments to the output, and a commitment to the data which proves it valid. + +In a way, this entire guide is about these candidates: how they are scheduled, constructed, backed, included, and challenged. + +This section will describe the base candidate type, its components, and variants that contain extra data. + +## Para Id + +A unique 32-bit identifier referring to a specific para (chain or thread). The relay-chain runtime guarantees that `ParaId`s are unique for the duration of any session, but recycling and reuse over a longer period of time is permitted. + +```rust +struct ParaId(u32); +``` + +## Candidate Receipt + +Much of the information in a [`FullCandidateReceipt`](#full-candidate-receipt) is duplicated from the relay-chain state. When the corresponding relay-chain state is considered widely available, the Candidate Receipt should be favored over the `FullCandidateReceipt`. + +Examples of situations where the state is readily available include within the scope of work done by subsystems working on a given relay-parent, or within the logic of the runtime importing a backed candidate. + +```rust +/// A candidate-receipt. +struct CandidateReceipt { + /// The descriptor of the candidate. + descriptor: CandidateDescriptor, + /// The hash of the encoded commitments made as a result of candidate execution. + commitments_hash: Hash, +} +``` + +## Full Candidate Receipt + +This is the full receipt type.
The `ValidationData` are technically redundant with the `inner.relay_parent`, which uniquely describes the block in the blockchain from whose state these values are derived. The [`CandidateReceipt`](#candidate-receipt) variant is often used instead for this reason. + +However, the Full Candidate Receipt type is useful as a means of avoiding the implicit dependency on availability of old blockchain state. In situations such as availability and approval, having the full description of the candidate within a self-contained struct is convenient. + +```rust +/// All data pertaining to the execution of a para candidate. +struct FullCandidateReceipt { + inner: CandidateReceipt, + validation_data: ValidationData, +} +``` + +## Committed Candidate Receipt + +This is a variant of the candidate receipt which includes the commitments of the candidate receipt alongside the descriptor. This should be favored over the [`Candidate Receipt`](#candidate-receipt) in situations where the candidate is not going to be executed but the actual data committed to is important. This is often the case in the backing phase. + +The hash of the committed candidate receipt will be the same as the corresponding [`Candidate Receipt`](#candidate-receipt), because it is computed by first hashing the encoding of the commitments to form a plain [`Candidate Receipt`](#candidate-receipt). + +```rust +/// A candidate-receipt with commitments directly included. +struct CommittedCandidateReceipt { + /// The descriptor of the candidate. + descriptor: CandidateDescriptor, + /// The commitments of the candidate receipt. + commitments: CandidateCommitments, +} +``` + +## Candidate Descriptor + +This struct is a pure description of the candidate, in a lightweight format. + +```rust +/// A unique descriptor of the candidate receipt. +struct CandidateDescriptor { + /// The ID of the para this is a candidate for. + para_id: ParaId, + /// The hash of the relay-chain block this is executed in the context of. + relay_parent: Hash, + /// The collator's sr25519 public key. + collator: CollatorId, + /// The blake2-256 hash of the persisted validation data. These are extra parameters + /// derived from relay-chain state that influence the validity of the block which + /// must also be kept available for secondary checkers. + persisted_validation_data_hash: Hash, + /// The blake2-256 hash of the pov-block. + pov_hash: Hash, + /// Signature on blake2-256 of components of this receipt: + /// The parachain index, the relay parent, the validation data hash, and the pov_hash. + signature: CollatorSignature, +} +``` + +## ValidationData + +The validation data provide information about how to validate both the inputs and outputs of a candidate. There are two types of validation data: [persisted](#persistedvalidationdata) and [transient](#transientvalidationdata). Their respective sections of the guide elaborate on their functionality in more detail. + +This information is derived from the chain state and will vary from para to para, although some of the fields may be the same for every para. + +Persisted validation data are generally derived from some relay-chain state to form inputs to the validation function, and as such need to be persisted by the availability system to avoid dependence on availability of the relay-chain state. The backing phase of the inclusion pipeline ensures that everything that is included in a valid fork of the relay-chain already adheres to the transient constraints.
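To make the persistence requirement concrete, here is a minimal sketch of the check a secondary checker could perform to tie persisted data back to a candidate. This is illustrative only: it assumes a `blake2_256` helper producing this document's `Hash` type and SCALE's `Encode` trait, neither of which is specified in this guide:

```rust
use parity_scale_codec::Encode; // SCALE encoding, as used for these types on-chain

/// Illustrative only: confirm that `PersistedValidationData` fetched from the
/// availability system matches the commitment in the candidate's descriptor.
fn persisted_data_matches(
	descriptor: &CandidateDescriptor,
	persisted: &PersistedValidationData,
) -> bool {
	// The descriptor commits to the blake2-256 hash of the encoded persisted
	// validation data, so equality here means the fetched data is the right one.
	blake2_256(&persisted.encode()) == descriptor.persisted_validation_data_hash
}
```

Because the descriptor travels with every candidate, this check needs no access to old relay-chain state, which is exactly the property the availability system requires.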
+ +The validation data also serve the purpose of giving collators a means of ensuring that their produced candidate and the commitments submitted to the relay-chain alongside it will pass the checks done by the relay-chain when backing, and of giving validators the same understanding when determining whether to second or attest to a candidate. + +Since the commitments of the validation function are checked by the relay-chain, secondary checkers can rely on the invariant that the relay-chain only includes para-blocks for which these checks have already been done. As such, there is no need for the validation data used to inform validators and collators about the checks the relay-chain will perform to be persisted by the availability system. Nevertheless, we expose it so the backing validators can validate the outputs of a candidate before voting to submit it to the relay-chain, and so collators can collate candidates that satisfy the criteria implied by these transient validation data. + +Design-wise we should maintain two properties about this data structure: + +1. The `ValidationData` should be relatively lightweight, primarily because it is constructed during inclusion for each candidate. +1. To make contextual execution possible, `ValidationData` should be constructible with access only to the latest relay-chain state for the past `k` blocks. That implies +either that the relay-chain should keep all the required data accessible, or that it can somehow be provided indirectly with a header-chain proof and a state proof from there. + +```rust +struct ValidationData { + persisted: PersistedValidationData, + transient: TransientValidationData, +} +``` + +## PersistedValidationData + +Validation data that needs to be persisted for secondary checkers. See the section on [`ValidationData`](#validationdata) for more details. + +```rust +struct PersistedValidationData { + /// The parent head-data. + parent_head: HeadData, + /// The relay-chain block number this is in the context of. This informs the collator. + block_number: BlockNumber, + /// The list of MQC heads for the inbound channels paired with the sender para ids. This + /// vector is sorted ascending by the para id and doesn't contain multiple entries with the same + /// sender. + hrmp_mqc_heads: Vec<(ParaId, Hash)>, +} +``` + +## TransientValidationData + +These validation data are derived from some relay-chain state to check outputs of the validation function. + +```rust +struct TransientValidationData { + /// The maximum code size permitted, in bytes, of a produced validation code upgrade. + /// + /// This informs a relay-chain backing check and the parachain logic. + max_code_size: u32, + /// The maximum head-data size permitted, in bytes. + /// + /// This informs a relay-chain backing check and the parachain collator. + max_head_data_size: u32, + /// The balance of the parachain at the moment of validation. + balance: Balance, + /// Whether the parachain is allowed to upgrade its validation code. + /// + /// This is `Some` if so, and contains the number of the minimum relay-chain + /// height at which the upgrade will be applied, if an upgrade is signaled + /// now. + /// + /// A parachain should enact its side of the upgrade at the end of the first + /// parablock executing in the context of a relay-chain block with at least this + /// height. This may be equal to the current perceived relay-chain block height, in + /// which case the code upgrade should be applied at the end of the signaling + /// block.
+ /// + /// This informs a relay-chain backing check and the parachain logic. + code_upgrade_allowed: Option<BlockNumber>, +} +``` + +## HeadData + +Head data is a type-safe abstraction around bytes (`Vec<u8>`) for the purposes of representing heads of parachains or parathreads. + +```rust +struct HeadData(Vec<u8>); +``` + +## Candidate Commitments + +The execution and validation of parachain or parathread candidates produces a number of values which either must be committed to blocks on the relay chain or committed to the state of the relay chain. + +```rust +/// Commitments made in a `CandidateReceipt`. Many of these are outputs of validation. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Default))] +struct CandidateCommitments { + /// Fees paid from the chain to the relay chain validators. + fees: Balance, + /// Messages directed to other paras routed via the relay chain. + horizontal_messages: Vec<OutboundHrmpMessage>, + /// Messages destined to be interpreted by the Relay chain itself. + upward_messages: Vec<UpwardMessage>, + /// The root of a block's erasure encoding Merkle tree. + erasure_root: Hash, + /// New validation code. + new_validation_code: Option<ValidationCode>, + /// The head-data produced as a result of execution. + head_data: HeadData, + /// The number of messages processed from the DMQ. + processed_downward_messages: u32, + /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + hrmp_watermark: BlockNumber, +} +``` + +## Signing Context + +This struct provides context to signatures by combining with various payloads to localize the signature to a particular session index and relay-chain hash. Having these fields included in the signature makes misbehavior attribution much simpler. + +```rust +struct SigningContext { + /// The relay-chain block hash this signature is in the context of. + parent_hash: Hash, + /// The session index this signature is in the context of. + session_index: SessionIndex, +} +``` + +## Validation Outputs + +This struct encapsulates the outputs of candidate validation. + +```rust +struct ValidationOutputs { + /// The head-data produced by validation. + head_data: HeadData, + /// The validation data, persisted. + validation_data: PersistedValidationData, + /// Messages directed to other paras routed via the relay chain. + horizontal_messages: Vec<OutboundHrmpMessage>, + /// Upwards messages to the relay chain. + upwards_messages: Vec<UpwardMessage>, + /// Fees paid to the validators of the relay-chain. + fees: Balance, + /// The new validation code submitted by the execution, if any. + new_validation_code: Option<ValidationCode>, + /// The number of messages processed from the DMQ. + processed_downward_messages: u32, + /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + hrmp_watermark: BlockNumber, +} +``` diff --git a/roadmap/implementors-guide/src/types/chain.md b/roadmap/implementers-guide/src/types/chain.md similarity index 100% rename from roadmap/implementors-guide/src/types/chain.md rename to roadmap/implementers-guide/src/types/chain.md diff --git a/roadmap/implementers-guide/src/types/messages.md b/roadmap/implementers-guide/src/types/messages.md new file mode 100644 index 0000000000000000000000000000000000000000..315923e3117c28b9d5f6b26282f32fee8f28581a --- /dev/null +++ b/roadmap/implementers-guide/src/types/messages.md @@ -0,0 +1,126 @@ +# Message types + +Types of messages that are passed between parachains and the relay chain: UMP, DMP, XCMP.
+ +There is also HRMP (Horizontally Relay-routed Message Passing) which provides the same functionality +although with smaller scalability potential. + +## HrmpChannelId + +A type that uniquely identifies an HRMP channel. An HRMP channel is established between two paras. +In text, we use the notation `(A, B)` to specify a channel between A and B. The channels are +unidirectional, meaning that `(A, B)` and `(B, A)` refer to different channels. The convention is +that we use the first tuple item for the sender and the second for the recipient. Only one channel +is allowed between two participants in one direction, i.e. there cannot be 2 different channels +identified by `(A, B)`. + +```rust,ignore +struct HrmpChannelId { + sender: ParaId, + recipient: ParaId, +} +``` + +## Upward Message + +A type of message dispatched from a parachain to the relay chain. + +```rust,ignore +enum ParachainDispatchOrigin { + /// As a simple `Origin::Signed`, using `ParaId::account_id` as its value. This is good when + /// interacting with standard modules such as `balances`. + Signed, + /// As the special `Origin::Parachain(ParaId)`. This is good when interacting with parachain- + /// aware modules which need to succinctly verify that the origin is a parachain. + Parachain, + /// As the simple, superuser `Origin::Root`. This can only be done on specially permissioned + /// parachains. + Root, +} + +/// An opaque byte buffer that encodes an entrypoint and the arguments that should be +/// provided to it upon the dispatch. +/// +/// NOTE: In order to be executable, the byte buffer must be decoded, which can potentially fail if +/// the encoding was changed. +type RawDispatchable = Vec<u8>; + +enum UpwardMessage { + /// This upward message is meant to schedule execution of a provided dispatchable. + Dispatchable { + /// The origin with which the dispatchable should be executed. + origin: ParachainDispatchOrigin, + /// The dispatchable to be executed in its raw form. + dispatchable: RawDispatchable, + }, + /// A message for initiation of opening a new HRMP channel between the origin para and the + /// given `recipient`. + /// + /// Let `origin` be the parachain that sent this upward message. In that case the channel + /// to be opened is (`origin` -> `recipient`). + HrmpInitOpenChannel(ParaId), + /// A message that is meant to confirm the HRMP open channel request initiated earlier by the + /// `HrmpInitOpenChannel` by the given `sender`. + /// + /// Let `origin` be the parachain that sent this upward message. In that case the channel + /// (`origin` -> `sender`) will be opened during the session change. + HrmpAcceptOpenChannel(ParaId), + /// A message for closing the specified existing channel `ch`. + /// + /// The channel to be closed is `(ch.sender -> ch.recipient)`. The parachain that sent this + /// upward message must be either `ch.sender` or `ch.recipient`. + HrmpCloseChannel(HrmpChannelId), +} +``` + +## Horizontal Message + +This is a message sent from a parachain to another parachain that travels through the relay chain. +This message ends up in the recipient's mailbox. The size of a horizontal message is defined by its +`data` payload. + +```rust,ignore +struct OutboundHrmpMessage { + /// The para that will get this message in its downward message queue. + pub recipient: ParaId, + /// The message payload. + pub data: Vec<u8>, +} + +struct InboundHrmpMessage { + /// The relay-chain block number at which this message was sent. + pub sent_at: BlockNumber, + /// The message payload.
pub data: Vec<u8>, +} +``` + +## Downward Message + +`DownwardMessage` is a message that goes down from the relay chain to a parachain. Such a message +could be seen as a notification; however, it is conceivable that they might be used by the relay +chain to send a request to the parachain (likely, through the `ParachainSpecific` variant). + +```rust,ignore +enum DispatchResult { + /// The dispatchable was executed; `success` indicates whether execution succeeded. + Executed { + success: bool, + }, + /// Decoding `RawDispatchable` into an executable runtime representation has failed. + DecodeFailed, + /// A dispatchable in question exceeded the maximum amount of weight allowed. + CriticalWeightExceeded, +} + +enum DownwardMessage { + /// The parachain receives a dispatch result for each sent dispatchable upward message, in the + /// order they were sent. + DispatchResult(Vec<DispatchResult>), + /// Some funds were transferred into the parachain's account. The hash is the identifier that + /// was given with the transfer. + TransferInto(AccountId, Balance, Remark), + /// An opaque message whose interpretation is up to the recipient para. This variant ought + /// to be used as a basis for special protocols between the relay chain and, typically system, + /// paras. + ParachainSpecific(Vec<u8>), +} +``` diff --git a/roadmap/implementers-guide/src/types/network.md b/roadmap/implementers-guide/src/types/network.md new file mode 100644 index 0000000000000000000000000000000000000000..75f251613f2524361380f97e99de39178c37c334 --- /dev/null +++ b/roadmap/implementers-guide/src/types/network.md @@ -0,0 +1,119 @@ +# Network Types + +These types are those that are actually sent over the network to subsystems. + +## Universal Types + +```rust +type RequestId = u64; +type ProtocolVersion = u32; +struct PeerId(...); // opaque, unique identifier of a peer. +struct View(Vec<Hash>); // Up to `N` (5?) chain heads. + +enum ObservedRole { + Full, + Light, +} +``` + +## V1 Network Subsystem Message Types + +### Availability Distribution V1 + +```rust +enum AvailabilityDistributionV1Message { + /// An erasure chunk for a given candidate hash. + Chunk(Hash, ErasureChunk), +} +``` + +### Bitfield Distribution V1 + +```rust +enum BitfieldDistributionV1Message { + /// A signed availability bitfield for a given relay-parent hash. + Bitfield(Hash, SignedAvailabilityBitfield), +} +``` + +### PoV Distribution V1 + +```rust +enum PoVDistributionV1Message { + /// Notification that we are awaiting the given PoVs (by hash) against a + /// specific relay-parent hash. + Awaiting(Hash, Vec<Hash>), + /// Notification of an awaited PoV, in a given relay-parent context. + /// (relay_parent, pov_hash, pov) + SendPoV(Hash, Hash, PoV), +} +``` + +### Statement Distribution V1 + +```rust +enum StatementDistributionV1Message { + /// A signed full statement under a given relay-parent. + Statement(Hash, SignedFullStatement) +} +``` + +### Collator Protocol V1 + +```rust +enum CollatorProtocolV1Message { + /// Declare the intent to advertise collations under a collator ID. + Declare(CollatorId), + /// Advertise a collation to a validator. Can only be sent once the peer has declared + /// that they are a collator with given ID. + AdvertiseCollation(Hash, ParaId), + /// Request the advertised collation at that relay-parent. + RequestCollation(RequestId, Hash, ParaId), + /// A requested collation. + Collation(RequestId, CandidateReceipt, PoV), +} +``` + +## V1 Wire Protocols + +### Validation V1 + +These are the messages for the protocol on the validation peer-set.
+ +```rust +enum ValidationProtocolV1 { + AvailabilityDistribution(AvailabilityDistributionV1Message), + BitfieldDistribution(BitfieldDistributionV1Message), + PoVDistribution(PoVDistributionV1Message), + StatementDistribution(StatementDistributionV1Message), +} +``` + +### Collation V1 + +These are the messages for the protocol on the collation peer-set. + +```rust +enum CollationProtocolV1 { + CollatorProtocol(CollatorProtocolV1Message), +} +``` + +## Network Bridge Event + +These updates are posted from the [Network Bridge Subsystem](../node/utility/network-bridge.md) to other subsystems based on registered listeners. + +```rust +enum NetworkBridgeEvent<M> { + /// A peer with given ID is now connected. + PeerConnected(PeerId, ObservedRole), + /// A peer with given ID is now disconnected. + PeerDisconnected(PeerId), + /// We received a message from the given peer. + PeerMessage(PeerId, M), + /// The given peer has updated its description of its view. + PeerViewChange(PeerId, View), // guaranteed to come after peer connected event. + /// We have posted the given view update to all connected peers. + OurViewChange(View), +} +``` diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md new file mode 100644 index 0000000000000000000000000000000000000000..0fdd871e274e8d7aea425214472f9370d5345388 --- /dev/null +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -0,0 +1,416 @@ +# Overseer Protocol + +This chapter contains message types sent to and from the overseer, and the underlying subsystem message types that are transmitted using these. + +## Overseer Signal + +Signals from the overseer to a subsystem to request a change in execution that has to be obeyed by the subsystem. + +```rust +enum OverseerSignal { + /// Signal about a change in active leaves. + ActiveLeavesUpdate(ActiveLeavesUpdate), + /// Conclude all operation. + Conclude, +} +``` + +All subsystems have their own message types; all of them need to be able to listen for overseer signals as well. There are currently two proposals for how to handle that with unified communication channels: + +1. Retaining the `OverseerSignal` definition above, add `enum FromOverseer<T> {Signal(OverseerSignal), Message(T)}`. +1. Add a generic variant to `OverseerSignal`: `Message(T)`. + +Either way, there will be some top-level type encapsulating messages from the overseer to each subsystem. + +## Active Leaves Update + +Indicates a change in active leaves. Activated leaves should have jobs, whereas deactivated leaves should lead to winding-down of work based on those leaves. + +```rust +struct ActiveLeavesUpdate { + activated: [Hash], // in practice, these should probably be a SmallVec + deactivated: [Hash], +} +``` + +## All Messages + +> TODO (now) + +## Availability Distribution Message + +Messages received by the availability distribution subsystem. + +This is a network protocol that receives messages of type [`AvailabilityDistributionV1Message`][AvailabilityDistributionV1NetworkMessage]. + +```rust +enum AvailabilityDistributionMessage { + /// Distribute an availability chunk to other validators. + DistributeChunk(Hash, ErasureChunk), + /// Fetch an erasure chunk from the network by candidate hash and chunk index. + FetchChunk(Hash, u32), + /// An update on network state from the network bridge. + NetworkBridgeUpdateV1(NetworkBridgeEvent<AvailabilityDistributionV1Message>), +} +``` + +## Availability Store Message + +Messages to and from the availability store.
+ +```rust +enum AvailabilityStoreMessage { + /// Query the `AvailableData` of a candidate by hash. + QueryAvailableData(Hash, ResponseChannel<Option<AvailableData>>), + /// Query whether an `AvailableData` exists within the AV Store. + QueryDataAvailability(Hash, ResponseChannel<bool>), + /// Query a specific availability chunk of the candidate's erasure-coding by validator index. + /// Returns the chunk and its inclusion proof against the candidate's erasure-root. + QueryChunk(Hash, ValidatorIndex, ResponseChannel<Option<ErasureChunk>>), + /// Store a specific chunk of the candidate's erasure-coding by validator index, with an + /// accompanying proof. + StoreChunk(Hash, ValidatorIndex, AvailabilityChunkAndProof, ResponseChannel<Result<()>>), + /// Store `AvailableData`. If `ValidatorIndex` is provided, also store this validator's + /// `AvailabilityChunkAndProof`. + StoreAvailableData(Hash, Option<ValidatorIndex>, u32, AvailableData, ResponseChannel<Result<()>>), +} +``` + +## Bitfield Distribution Message + +Messages received by the bitfield distribution subsystem. +This is a network protocol that receives messages of type [`BitfieldDistributionV1Message`][BitfieldDistributionV1NetworkMessage]. + +```rust +enum BitfieldDistributionMessage { + /// Distribute a bitfield signed by a validator to other validators. + /// The bitfield distribution subsystem will assume this is indeed correctly signed. + DistributeBitfield(relay_parent, SignedAvailabilityBitfield), + /// Receive a network bridge update. + NetworkBridgeUpdateV1(NetworkBridgeEvent<BitfieldDistributionV1Message>), +} +``` + +## Bitfield Signing Message + +Currently, the bitfield signing subsystem receives no specific messages. + +```rust +/// Non-instantiable message type +enum BitfieldSigningMessage { } +``` + +## Candidate Backing Message + +```rust +enum CandidateBackingMessage { + /// Requests a set of backable candidates that could be backed in a child of the given + /// relay-parent, referenced by its hash. + GetBackedCandidates(Hash, ResponseChannel<Vec<BackedCandidate>>), + /// Note that the Candidate Backing subsystem should second the given candidate in the context of the + /// given relay-parent (ref. by hash). This candidate must be validated using the provided PoV. + /// The PoV is expected to match the `pov_hash` in the descriptor. + Second(Hash, CandidateReceipt, PoV), + /// Note a peer validator's statement about a particular candidate. Disagreements about validity must be escalated + /// to a broader check by Misbehavior Arbitration. Agreements are simply tallied until a quorum is reached. + Statement(Statement), +} +``` + +## Candidate Selection Message + +These messages are sent to the [Candidate Selection subsystem](../node/backing/candidate-selection.md) as a means of providing feedback on its outputs. + +```rust +enum CandidateSelectionMessage { + /// We recommended a particular candidate to be seconded, but it was invalid; penalize the collator. + Invalid(CandidateReceipt), +} +``` + +## Chain API Message + +The Chain API subsystem is responsible for providing an interface to chain data. + +```rust +enum ChainApiMessage { + /// Get the block number by hash. + /// Returns `None` if a block with the given hash is not present in the db. + BlockNumber(Hash, ResponseChannel<Result<Option<BlockNumber>, Error>>), + /// Get the finalized block hash by number. + /// Returns `None` if a block with the given number is not present in the db. + /// Note: the caller must ensure the block is finalized. + FinalizedBlockHash(BlockNumber, ResponseChannel<Result<Option<Hash>, Error>>), + /// Get the last finalized block number. + /// This request always succeeds.
+ FinalizedBlockNumber(ResponseChannel<Result<BlockNumber, Error>>), + /// Request the `k` ancestor block hashes of a block with the given hash. + /// The response channel may return a `Vec` of size up to `k` + /// filled with ancestor hashes in the following order: + /// `parent`, `grandparent`, ... + Ancestors { + /// The hash of the block in question. + hash: Hash, + /// The number of ancestors to request. + k: usize, + /// The response channel. + response_channel: ResponseChannel<Result<Vec<Hash>, Error>>, + } +} +``` + +## Collator Protocol Message + +Messages received by the [Collator Protocol subsystem](../node/collators/collator-protocol.md). + +This is a network protocol that receives messages of type [`CollatorProtocolV1Message`][CollatorProtocolV1NetworkMessage]. + +```rust +enum CollatorProtocolMessage { + /// Signal to the collator protocol that it should connect to validators with the expectation + /// of collating on the given para. This is only expected to be called once, early on, if at all, + /// and only by the Collation Generation subsystem. As such, it will overwrite the value of + /// the previous signal. + /// + /// This should be sent before any `DistributeCollation` message. + CollateOn(ParaId), + /// Provide a collation to distribute to validators. + DistributeCollation(CandidateReceipt, PoV), + /// Fetch a collation under the given relay-parent for the given ParaId. + FetchCollation(Hash, ParaId, ResponseChannel<(CandidateReceipt, PoV)>), + /// Report a collator as having provided an invalid collation. This should lead to disconnect + /// and blacklist of the collator. + ReportCollator(CollatorId), + /// Note a collator as having provided a good collation. + NoteGoodCollation(CollatorId), +} +``` + +## Network Bridge Message + +Messages received by the network bridge. This subsystem is invoked by others to manipulate access +to the low-level networking code. + +```rust +/// Peer-sets handled by the network bridge. +enum PeerSet { + /// The collation peer-set is used to distribute collations from collators to validators. + Collation, + /// The validation peer-set is used to distribute information relevant to parachain + /// validation among validators. This may include nodes which are not validators, + /// as some protocols on this peer-set are expected to be gossip. + Validation, +} + +enum NetworkBridgeMessage { + /// Report a cost or benefit of a peer. Negative values are costs, positive are benefits. + ReportPeer(PeerSet, PeerId, cost_benefit: i32), + /// Send a message to one or more peers on the validation peerset. + SendValidationMessage([PeerId], ValidationProtocolV1), + /// Send a message to one or more peers on the collation peerset. + SendCollationMessage([PeerId], CollationProtocolV1), + /// Connect to peers who represent the given `ValidatorId`s at the given relay-parent. + /// + /// Also accepts a response channel by which the issuer can learn the `PeerId`s of those + /// validators. + ConnectToValidators(PeerSet, [ValidatorId], ResponseChannel<[(ValidatorId, PeerId)]>), +} +``` + +## Misbehavior Report + +```rust +enum MisbehaviorReport { + /// These validator nodes disagree on this candidate's validity, please figure it out + /// + /// Most likely, the list of statements all agree except for the final one. That's not + /// guaranteed, though; if somehow we become aware of lots of + /// statements disagreeing about the validity of a candidate before taking action, + /// this message should be dispatched with all of them, in arbitrary order.
+ /// + /// This variant is also used when our own validity checks disagree with others'. + CandidateValidityDisagreement(CandidateReceipt, Vec<SignedFullStatement>), + /// I've noticed a peer contradicting itself about a particular candidate + SelfContradiction(CandidateReceipt, SignedFullStatement, SignedFullStatement), + /// This peer has seconded more than one parachain candidate for this relay parent head + DoubleVote(CandidateReceipt, SignedFullStatement, SignedFullStatement), +} +``` + +If this subsystem chooses to second a parachain block, it dispatches a `CandidateBackingSubsystemMessage`. + +## PoV Distribution Message + +This is a network protocol that receives messages of type [`PoVDistributionV1Message`][PoVDistributionV1NetworkMessage]. + +```rust +enum PoVDistributionMessage { + /// Fetch a PoV from the network. + /// + /// This `CandidateDescriptor` should correspond to a candidate seconded under the provided + /// relay-parent hash. + FetchPoV(Hash, CandidateDescriptor, ResponseChannel<PoV>), + /// Distribute a PoV for the given relay-parent and CandidateDescriptor. + /// The PoV should correctly hash to the PoV hash mentioned in the CandidateDescriptor. + DistributePoV(Hash, CandidateDescriptor, PoV), + /// An update from the network bridge. + NetworkBridgeUpdateV1(NetworkBridgeEvent<PoVDistributionV1Message>), +} +``` + +## Provisioner Message + +```rust +/// This data becomes intrinsics or extrinsics which should be included in a future relay chain block. +enum ProvisionableData { + /// This bitfield indicates the availability of various candidate blocks. + Bitfield(Hash, SignedAvailabilityBitfield), + /// The Candidate Backing subsystem believes that this candidate is valid, pending availability. + BackedCandidate(BackedCandidate), + /// Misbehavior reports are self-contained proofs of validator misbehavior. + MisbehaviorReport(Hash, MisbehaviorReport), + /// Disputes trigger a broad dispute resolution process. + Dispute(Hash, Signature), +} + +/// This data needs to make its way from the provisioner into the InherentData. +/// +/// There, it is used to construct the InclusionInherent. +type ProvisionerInherentData = (SignedAvailabilityBitfields, Vec<BackedCandidate>); + +/// Message to the Provisioner. +/// +/// In all cases, the Hash is that of the relay parent. +enum ProvisionerMessage { + /// This message allows potential block authors to be kept updated with all new authorship data + /// as it becomes available. + RequestBlockAuthorshipData(Hash, Sender<ProvisionableData>), + /// This message allows external subsystems to request the set of bitfields and backed candidates + /// associated with a particular potential block hash. + /// + /// This is expected to be used by a proposer, to inject that information into the InherentData + /// where it can be assembled into the InclusionInherent. + RequestInherentData(Hash, oneshot::Sender<ProvisionerInherentData>), + /// This data should become part of a relay chain block. + ProvisionableData(ProvisionableData), +} +``` + +## Runtime API Message + +The Runtime API subsystem is responsible for providing an interface to the state of the chain's runtime. + +This is fueled by an auxiliary type encapsulating all request types defined in the Runtime API section of the guide. + +> TODO: link to the Runtime API section. Not possible currently because of https://github.com/Michael-F-Bryan/mdbook-linkcheck/issues/25. Once v0.7.1 is released it will work. + +```rust +enum RuntimeApiRequest { + /// Get the current validator set. + Validators(ResponseChannel<Vec<ValidatorId>>), + /// Get the validator groups and rotation info.
+ ValidatorGroups(ResponseChannel<(Vec<Vec<ValidatorIndex>>, GroupRotationInfo)>), + /// Get the session index for children of the block. This can be used to construct a signing + /// context. + SessionIndex(ResponseChannel<SessionIndex>), + /// Get the validation code for a specific para, using the given occupied core assumption. + ValidationCode(ParaId, OccupiedCoreAssumption, ResponseChannel<Option<ValidationCode>>), + /// Get the persisted validation data at the state of a given block for a specific para, + /// with the given occupied core assumption. + PersistedValidationData( + ParaId, + OccupiedCoreAssumption, + ResponseChannel<Option<PersistedValidationData>>, + ), + /// Get the full validation data for a specific para, with the given occupied core assumption. + FullValidationData( + ParaId, + OccupiedCoreAssumption, + ResponseChannel<Option<ValidationData>>, + ), + /// Get information about all availability cores. + AvailabilityCores(ResponseChannel<Vec<CoreState>>), + /// Get a committed candidate receipt for all candidates pending availability. + CandidatePendingAvailability(ParaId, ResponseChannel<Option<CommittedCandidateReceipt>>), + /// Get all events concerning candidates in the last block. + CandidateEvents(ResponseChannel<Vec<CandidateEvent>>), +} + +enum RuntimeApiMessage { + /// Make a request of the runtime API against the post-state of the given relay-parent. + Request(Hash, RuntimeApiRequest), +} +``` + +## Statement Distribution Message + +The Statement Distribution subsystem distributes signed statements and candidates from validators to other validators. It does this by distributing full statements, which embed the candidate receipt, as opposed to compact statements which don't. +It receives updates from the network bridge and signed statements to share with other validators. + +This is a network protocol that receives messages of type [`StatementDistributionV1Message`][StatementDistributionV1NetworkMessage]. + +```rust +enum StatementDistributionMessage { + /// An update from the network bridge. + NetworkBridgeUpdateV1(NetworkBridgeEvent<StatementDistributionV1Message>), + /// We have validated a candidate and want to share our judgment with our peers. + /// The hash is the relay parent. + /// + /// The statement distribution subsystem assumes that the statement should be correctly + /// signed. + Share(Hash, SignedFullStatement), +} +``` + +## Validation Request Type + +Various modules request that the [Candidate Validation subsystem](../node/utility/candidate-validation.md) validate a block with this message. It returns [`ValidationOutputs`](candidate.md#validationoutputs) for successful validation. + +```rust + +/// Result of the validation of the candidate. +enum ValidationResult { + /// Candidate is valid, and here are the outputs. In practice, this should be a shared type + /// so that validation caching can be done. + Valid(ValidationOutputs), + /// Candidate is invalid. + Invalid, +} + +/// Messages issued to the candidate validation subsystem. +/// +/// ## Validation Requests +/// +/// Validation requests made to the subsystem should return an error only on internal error. +/// Otherwise, they should return either `Ok(ValidationResult::Valid(_))` or `Ok(ValidationResult::Invalid)`. +enum CandidateValidationMessage { + /// Validate a candidate with provided parameters. This will implicitly attempt to gather the + /// `OmittedValidationData` and `ValidationCode` from the runtime API of the chain, + /// based on the `relay_parent` of the `CandidateDescriptor`. + /// + /// If there is no state available which can provide this data or the core for + /// the para is not free at the relay-parent, an error is returned.
+ ValidateFromChainState(CandidateDescriptor, PoV, ResponseChannel<Result<ValidationResult>>), + + /// Validate a candidate with provided parameters. Explicitly provide the `PersistedValidationData` + /// and `ValidationCode` so this can do full validation without needing to access the state of + /// the relay-chain. Optionally provide the `TransientValidationData` which will lead to checks + /// on the output. + ValidateFromExhaustive( + PersistedValidationData, + Option<TransientValidationData>, + ValidationCode, + CandidateDescriptor, + PoV, + ResponseChannel<Result<ValidationResult>>, + ), +} +``` + +[NBE]: ../network.md#network-bridge-event +[AvailabilityDistributionV1NetworkMessage]: network.md#availability-distribution-v1 +[BitfieldDistributionV1NetworkMessage]: network.md#bitfield-distribution-v1 +[PoVDistributionV1NetworkMessage]: network.md#pov-distribution-v1 +[StatementDistributionV1NetworkMessage]: network.md#statement-distribution-v1 +[CollatorProtocolV1NetworkMessage]: network.md#collator-protocol-v1 diff --git a/roadmap/implementors-guide/src/types/runtime.md b/roadmap/implementers-guide/src/types/runtime.md similarity index 51% rename from roadmap/implementors-guide/src/types/runtime.md rename to roadmap/implementers-guide/src/types/runtime.md index ebdbfe15c502504a0e8614c281c357aeb9d48e89..a018d2eae6216b5db0ed861c136b74bb70af0502 100644 --- a/roadmap/implementors-guide/src/types/runtime.md +++ b/roadmap/implementers-guide/src/types/runtime.md @@ -24,7 +24,7 @@ struct HostConfiguration { /// The number of retries that a parathread author has to submit their block. pub parathread_retries: u32, /// How often parachain groups should be rotated across parachains. - pub parachain_rotation_frequency: BlockNumber, + pub group_rotation_frequency: BlockNumber, /// The availability period, in blocks, for parachains. This is the amount of blocks /// after inclusion that validators have to make the block available and signal its availability to /// the chain. Must be at least 1. @@ -39,6 +39,36 @@ /// Total size of messages allowed in the parachain -> relay-chain message queue before which /// no further messages may be added to it. If it exceeds this then the queue may contain only /// a single message. - pub watermark_upward_queue_size: u32, + pub max_upward_queue_size: u32, + /// The amount of weight we wish to devote to processing the dispatchable upward messages + /// stage. + /// + /// NOTE that this is a soft limit and could be exceeded. + pub preferred_dispatchable_upward_messages_step_weight: u32, + /// Any dispatchable upward message that requests more than the critical amount is rejected + /// with `DispatchResult::CriticalWeightExceeded`. + /// + /// The parameter value is picked so that no dispatchable can make the block weight exceed + /// the total budget. I.e. the sum of `preferred_dispatchable_upward_messages_step_weight` + /// and `dispatchable_upward_message_critical_weight` must not exceed the amount of weight left + /// over after a typical worst case (e.g. no upgrades, etc.) of the weight consumed by the required phases of + /// block execution (i.e. initialization, finalization and inherents). + pub dispatchable_upward_message_critical_weight: u32, + /// The maximum number of messages that a candidate can contain. + pub max_upward_message_num_per_candidate: u32, + /// Number of sessions after which an HRMP open channel request expires. + pub hrmp_open_request_ttl: u32, + /// The deposit that the sender should provide for opening an HRMP channel.
+ pub hrmp_sender_deposit: u32, + /// The deposit that the recipient should provide for accepting opening an HRMP channel. + pub hrmp_recipient_deposit: u32, + /// The maximum number of messages allowed in an HRMP channel at once. + pub hrmp_channel_max_places: u32, + /// The maximum total size of messages in bytes allowed in an HRMP channel at once. + pub hrmp_channel_max_size: u32, + /// The maximum number of outbound HRMP channels a parachain is allowed to open. + pub hrmp_max_parachain_outbound_channels: u32, + /// The maximum number of outbound HRMP channels a parathread is allowed to open. + pub hrmp_max_parathread_outbound_channels: u32, } ``` diff --git a/roadmap/implementors-guide/src/whence-parachains.md b/roadmap/implementers-guide/src/whence-parachains.md similarity index 100% rename from roadmap/implementors-guide/src/whence-parachains.md rename to roadmap/implementers-guide/src/whence-parachains.md diff --git a/roadmap/implementors-guide/src/node/README.md b/roadmap/implementors-guide/src/node/README.md deleted file mode 100644 index 768d0138fd35bf36d24ae10bb94c6ec46c399c50..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Node Architecture - -## Design Goals - -* Modularity: Components of the system should be as self-contained as possible. Communication boundaries between components should be well-defined and mockable. This is key to creating testable, easily reviewable code. -* Minimizing side effects: Components of the system should aim to minimize side effects and to communicate with other components via message-passing. -* Operational Safety: The software will be managing signing keys where conflicting messages can lead to large amounts of value to be slashed. Care should be taken to ensure that no messages are signed incorrectly or in conflict with each other. - -The architecture of the node-side behavior aims to embody the Rust principles of ownership and message-passing to create clean, isolatable code. Each resource should have a single owner, with minimal sharing where unavoidable. - -Many operations that need to be carried out involve the network, which is asynchronous. This asynchrony affects all core subsystems that rely on the network as well. The approach of hierarchical state machines is well-suited to this kind of environment. - -We introduce a hierarchy of state machines consisting of an overseer supervising subsystems, where Subsystems can contain their own internal hierarchy of jobs. This is elaborated on in the next section on Subsystems. diff --git a/roadmap/implementors-guide/src/node/availability/bitfield-distribution.md b/roadmap/implementors-guide/src/node/availability/bitfield-distribution.md deleted file mode 100644 index 97a5c14be3dad3f010d766638149f7b8b5afadde..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/availability/bitfield-distribution.md +++ /dev/null @@ -1,21 +0,0 @@ -# Bitfield Distribution - -Validators vote on the availability of a backed candidate by issuing signed bitfields, where each bit corresponds to a single candidate. These bitfields can be used to compactly determine which backed candidates are available or not based on a 2/3+ quorum. 
- -## Protocol - -`ProtocolId`: `b"bitd"` - -Input: [`BitfieldDistributionMessage`](../../types/overseer-protocol.md#bitfield-distribution-message) -Output: - -- `NetworkBridge::RegisterEventProducer(ProtocolId)` -- `NetworkBridge::SendMessage([PeerId], ProtocolId, Bytes)` -- `NetworkBridge::ReportPeer(PeerId, cost_or_benefit)` -- `BlockAuthorshipProvisioning::Bitfield(relay_parent, SignedAvailabilityBitfield)` - -## Functionality - -This is implemented as a gossip system. Register a [network bridge](../utility/network-bridge.md) event producer on startup and track peer connection, view change, and disconnection events. Only accept bitfields relevant to our current view and only distribute bitfields to other peers when relevant to their most recent view. Check bitfield signatures in this subsystem and accept and distribute only one bitfield per validator. - -When receiving a bitfield either from the network or from a `DistributeBitfield` message, forward it along to the block authorship (provisioning) subsystem for potential inclusion in a block. diff --git a/roadmap/implementors-guide/src/node/backing/candidate-backing.md b/roadmap/implementors-guide/src/node/backing/candidate-backing.md deleted file mode 100644 index 1490bb074ccb30d891cbdf51262630293b541c0b..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/backing/candidate-backing.md +++ /dev/null @@ -1,92 +0,0 @@ -# Candidate Backing - -The Candidate Backing subsystem ensures every parablock considered for relay block inclusion has been seconded by at least one validator, and approved by a quorum. Parablocks for which no validator will assert correctness are discarded. If the block later proves invalid, the initial backers are slashable; this gives polkadot a rational threat model during subsequent stages. - -Its role is to produce backable candidates for inclusion in new relay-chain blocks. It does so by issuing signed [`Statement`s](../../types/backing.md#statement-type) and tracking received statements signed by other validators. Once enough statements are received, they can be combined into backing for specific candidates. - -Note that though the candidate backing subsystem attempts to produce as many backable candidates as possible, it does _not_ attempt to choose a single authoritative one. The choice of which actually gets included is ultimately up to the block author, by whatever metrics it may use; those are opaque to this subsystem. - -Once a sufficient quorum has agreed that a candidate is valid, this subsystem notifies the [Provisioner](../utility/provisioner.md), which in turn engages block production mechanisms to include the parablock. - -## Protocol - -The [Candidate Selection subsystem](candidate-selection.md) is the primary source of non-overseer messages into this subsystem. That subsystem generates appropriate [`CandidateBackingMessage`s](../../types/overseer-protocol.md#candidate-backing-message), and passes them to this subsystem. - -This subsystem validates the candidates and generates an appropriate [`SignedStatement`](../../types/backing.md#signed-statement-type). All `SignedStatement`s are then passed on to the [Statement Distribution subsystem](statement-distribution.md) to be gossiped to peers. All [Proofs of Validity](../../types/availability.md#proof-of-validity) should be distributed via the [PoV Distribution](pov-distribution.md) subsystem. 
When this subsystem decides that a candidate is invalid, and the candidate was recommended for seconding by our own Candidate Selection subsystem, a message is sent to the Candidate Selection subsystem with the candidate's hash so that the collator which recommended it can be penalized. - -## Functionality - -The subsystem should maintain a set of handles to Candidate Backing Jobs that are currently live, as well as the relay-parent to which they correspond. - -### On Overseer Signal - -* If the signal is an [`OverseerSignal`](../../types/overseer-protocol.md#overseer-signal)`::StartWork(relay_parent)`, spawn a Candidate Backing Job with the given relay parent, storing a bidirectional channel with the Candidate Backing Job in the set of handles. -* If the signal is an [`OverseerSignal`](../../types/overseer-protocol.md#overseer-signal)`::StopWork(relay_parent)`, cease the Candidate Backing Job under that relay parent, if any. - -### On `CandidateBackingMessage` - -* If the message corresponds to a particular relay-parent, forward the message to the Candidate Backing Job for that relay-parent, if any is live. - -> big TODO: "contextual execution" -> -> * At the moment we only allow inclusion of _new_ parachain candidates validated by _current_ validators. -> * Allow inclusion of _old_ parachain candidates validated by _current_ validators. -> * Allow inclusion of _old_ parachain candidates validated by _old_ validators. -> -> This will probably blur the lines between jobs, and will probably require inter-job communication and a short-term memory of recently backable, but not yet backed, candidates. - -## Candidate Backing Job - -The Candidate Backing Job represents the work a node does for backing candidates with respect to a particular relay-parent. - -The goal of a Candidate Backing Job is to produce as many backable candidates as possible. This is done via signed [`Statement`s](../../types/backing.md#statement-type) by validators. If a candidate receives a majority of supporting Statements from the Parachain Validators currently assigned, then that candidate is considered backable. - -### On Startup - -* Fetch the current validator set and validator -> parachain assignments from the runtime API. -* Determine if the node controls a key in the current validator set. Call this the local key if so. -* If the local key exists, extract the parachain head and validation function for the parachain the local key is assigned to. - -### On Receiving New Signed Statement - -```rust -if let Statement::Seconded(candidate) = signed.statement { - if candidate is unknown and in local assignment { - spawn_validation_work(candidate, parachain head, validation function) - } -} - -// add `Seconded` statements and `Valid` statements to a quorum. If quorum reaches validator-group -// majority, send a `BlockAuthorshipProvisioning::BackableCandidate(relay_parent, Candidate, Backing)` message. -``` - -### Spawning Validation Work - -```rust -fn spawn_validation_work(candidate, parachain head, validation function) { - asynchronously { - let pov = (fetch pov block).await - - // dispatched to sub-process (OS process) pool. - let valid = validate_candidate(candidate, validation function, parachain head, pov).await; - if valid { - // make PoV available for later distribution. Send data to the availability store to keep. - // sign and dispatch `valid` statement to network if we have not seconded the given candidate. - } else { - // sign and dispatch `invalid` statement to network.
- } - } -} -``` - -### Fetch PoV Block - -Create a `(sender, receiver)` pair. -Dispatch a `PovFetchSubsystemMessage(relay_parent, candidate_hash, sender)` and listen on the receiver for a response. - -### On Receiving `CandidateBackingMessage` - -* If the message is a `CandidateBackingMessage::RegisterBackingWatcher`, register the watcher and trigger it each time a new candidate is backable. Also trigger it once initially if there are any backable candidates at the time of receipt. -* If the message is a `CandidateBackingMessage::Second`, sign and dispatch a `Seconded` statement only if we have not seconded any other candidate and have not signed a `Valid` statement for the requested candidate. Signing both a `Seconded` and `Valid` message is a double-voting misbehavior with a heavy penalty, and this could occur if another validator has seconded the same candidate and we've received their message before the internal seconding request. - -> TODO: send statements to Statement Distribution subsystem, handle shutdown signal from candidate backing subsystem diff --git a/roadmap/implementors-guide/src/node/backing/statement-distribution.md b/roadmap/implementors-guide/src/node/backing/statement-distribution.md deleted file mode 100644 index 59e5244d0d762120a52132b4621bf66d4e048863..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/backing/statement-distribution.md +++ /dev/null @@ -1,56 +0,0 @@ -# Statement Distribution - -The Statement Distribution Subsystem is responsible for distributing statements about seconded candidates between validators. - -## Protocol - -`ProtocolId`: `b"stmd"` - -Input: - -- NetworkBridgeUpdate(update) - -Output: - -- NetworkBridge::RegisterEventProducer(`ProtocolId`) -- NetworkBridge::SendMessage(`[PeerId]`, `ProtocolId`, `Bytes`) -- NetworkBridge::ReportPeer(PeerId, cost_or_benefit) - -## Functionality - -Implemented as a gossip protocol. Register a network event producer on startup. Handle updates to our view and peers' views. Neighbor packets are used to inform peers which chain heads we are interested in data for. - -Statement Distribution is the only backing subsystem which has any notion of peer nodes, which are any full nodes on the network. Validators will also act as peer nodes. - -It is responsible for distributing signed statements that we have generated, for forwarding statements generated by others, and for detecting a variety of validator misbehaviors for reporting to [Misbehavior Arbitration](../utility/misbehavior-arbitration.md). During the Backing stage of the inclusion pipeline, it's the main point of contact with peer nodes. On receiving a signed statement from a peer, assuming the peer receipt state machine is in an appropriate state, it sends the Candidate Receipt to the [Candidate Backing subsystem](candidate-backing.md) to handle the validator's statement. - -Track equivocating validators and stop accepting information from them. Forward double-vote proofs to the double-vote reporting system. Establish a data-dependency order: - -- In order to receive a `Seconded` message we must have the corresponding chain head in our view. -- In order to receive an `Invalid` or `Valid` message we must have received the corresponding `Seconded` message. - -And respect this data-dependency order from our peers by respecting their views. This subsystem is responsible for checking message signatures. - -The Statement Distribution subsystem sends statements to peer nodes and detects double-voting by validators.
When validators conflict with each other or themselves, the Misbehavior Arbitration system is notified. - -## Peer Receipt State Machine - -There is a very simple state machine which governs which messages we are willing to receive from peers. Not depicted in the state machine: on initial receipt of any [`SignedFullStatement`](../../types/backing.md#signed-statement-type), validate that the provided signature does in fact sign the included data. Note that each individual parablock candidate gets its own instance of this state machine; it is perfectly legal to receive a `Valid(X)` before a `Seconded(Y)`, as long as a `Seconded(X)` has been received. - -A: Initial State. Receive `SignedFullStatement(Statement::Second)`: extract `Statement`, forward to Candidate Backing and PoV Distribution, proceed to B. Receive any other `SignedFullStatement` variant: drop it. - -B: Receive any `SignedFullStatement`: check signature, forward to Candidate Backing. Receive `OverseerMessage::StopWork`: proceed to C. - -C: Receive any message for this block: drop it. - -## Peer Knowledge Tracking - -The peer receipt state machine implies that for parsimony of network resources, we should model the knowledge of our peers, and help them out. For example, let's consider a case with peers A, B, and C, validators X and Y, and candidate M. A sends us a `Statement::Second(M)` signed by X. We've double-checked it, and it's valid. While we're checking it, we receive a copy of X's `Statement::Second(M)` from `B`, along with a `Statement::Valid(M)` signed by Y. - -Our response to A is just the `Statement::Valid(M)` signed by Y. However, we haven't heard anything about this from C. Therefore, we send it everything we have: first a copy of X's `Statement::Second`, then Y's `Statement::Valid`. - -This system implies a certain level of duplication of messages--we received X's `Statement::Second` from both our peers, and C may experience the same--but it minimizes the degree to which messages are simply dropped. - -There are no jobs; `StartWork` and `StopWork` pulses are used to control neighbor packets and what we are currently accepting.
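The state machine above is small enough to sketch directly. The following is a simplified rendering with hypothetical type names; the real implementation would also carry signatures and candidate data.

```rust
#[derive(Clone, Copy)]
enum PeerCandidateState {
    AwaitingSeconded, // state A
    Accepting,        // state B
    Stopped,          // state C
}

enum StatementKind { Seconded, Valid, Invalid }

impl PeerCandidateState {
    /// Returns whether the statement should be forwarded to Candidate Backing.
    fn on_statement(&mut self, kind: StatementKind) -> bool {
        match *self {
            PeerCandidateState::AwaitingSeconded => match kind {
                StatementKind::Seconded => {
                    *self = PeerCandidateState::Accepting;
                    true
                }
                // anything other than `Seconded` in state A is dropped
                _ => false,
            },
            // in state B: check the signature, then forward everything
            PeerCandidateState::Accepting => true,
            // in state C: drop all messages for this block
            PeerCandidateState::Stopped => false,
        }
    }

    /// `StopWork` moves the machine from B to C.
    fn on_stop_work(&mut self) {
        *self = PeerCandidateState::Stopped;
    }
}
```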
diff --git a/roadmap/implementors-guide/src/node/collators/collation-distribution.md b/roadmap/implementors-guide/src/node/collators/collation-distribution.md deleted file mode 100644 index 0b24ce47ca56c64866fb5568b3a0d86c74ec0624..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/collators/collation-distribution.md +++ /dev/null @@ -1,9 +0,0 @@ -# Collation Distribution - -> TODO - -## Protocol - -## Functionality - -## Jobs, if any diff --git a/roadmap/implementors-guide/src/node/collators/collation-generation.md b/roadmap/implementors-guide/src/node/collators/collation-generation.md deleted file mode 100644 index 1c828b3b0f9c49a3ad776b0fdfdf93048a09f92a..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/collators/collation-generation.md +++ /dev/null @@ -1,9 +0,0 @@ -# Collation Generation - -> TODO - -## Protocol - -## Functionality - -## Jobs, if any diff --git a/roadmap/implementors-guide/src/node/utility/availability-store.md b/roadmap/implementors-guide/src/node/utility/availability-store.md deleted file mode 100644 index 51810a06a02fb774f7f8386c183146b6615fc3ca..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/utility/availability-store.md +++ /dev/null @@ -1,52 +0,0 @@ -# Availability Store - -This is a utility subsystem responsible for keeping certain data available and for pruning that data. - -The two data types are: - -- Full PoV blocks of candidates we have validated -- Availability chunks of candidates that were backed and noted available on-chain. - -For each of these data types we have pruning rules that determine how long we need to keep that data available. - -PoVs hypothetically only need to be kept around until the block where the data was made fully available is finalized. However, disputes can revert finality, so we need to be a bit more conservative. We should keep the PoV until a block that finalized availability of it has been finalized for 1 day. - -> TODO: arbitrary, but extracting `acceptance_period` is kind of hard here... - -Availability chunks need to be kept available until the dispute period for the corresponding candidate has ended. We can accomplish this by using the same criterion as the above, plus a delay. This gives us a pruning condition of the block finalizing availability of the chunk being final for 1 day + 1 hour. - -> TODO: again, concrete acceptance-period would be nicer here, but complicates things - -There is also the case where a validator commits to making a PoV available, but the corresponding candidate is never backed. In this case, we keep the PoV available for 1 hour. - -> TODO: ideally would be an upper bound on how far back contextual execution is OK. - -There may be multiple competing blocks all ending the availability phase for a particular candidate. Until (and slightly beyond) finality, it will be unclear which of those is actually the canonical chain, so the pruning records for PoVs and Availability chunks should keep track of all such blocks. - -## Protocol - -Input: [`AvailabilityStoreMessage`](../../types/overseer-protocol.md#availability-store-message) - -## Functionality - -On `StartWork`: - -- Note any new candidates backed in the block. Update pruning records for any stored `PoVBlock`s. -- Note any newly-included candidates in the block. Update pruning records for any stored availability chunks (one possible pruning-record shape is sketched below).
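One possible shape for the pruning records mentioned above, as a sketch: each stored PoV or chunk tracks every block that ended its availability phase, and becomes prunable once one of them has been final for the keep duration (1 day for PoVs, 1 day + 1 hour for chunks, per the rules above). All names here are hypothetical.

```rust
use std::time::{Duration, Instant};

/// Hypothetical pruning record for a stored PoV or availability chunk.
struct PruningRecord {
    /// All blocks that ended the availability phase for this candidate;
    /// until finality it is unclear which of them is canonical.
    availability_blocks: Vec<[u8; 32]>,
    /// Set once any block in `availability_blocks` is finalized.
    finalized_at: Option<Instant>,
}

impl PruningRecord {
    /// `keep_for` would be 24h for PoVs and 25h for availability chunks.
    fn should_prune(&self, now: Instant, keep_for: Duration) -> bool {
        self.finalized_at
            .map_or(false, |t| now.duration_since(t) >= keep_for)
    }
}
```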
- -On block finality events: - -- > TODO: figure out how we get block finality events from overseer -- Handle all pruning based on the newly-finalized block. - -On `QueryPoV` message: - -- Return the PoV block, if any, for that candidate hash. - -On `QueryChunk` message: - -- Determine if we have the chunk indicated by the parameters and return it and its inclusion proof via the response channel if so. - -On `StoreChunk` message: - -- Store the chunk along with its inclusion proof under the candidate hash and validator index. diff --git a/roadmap/implementors-guide/src/node/utility/candidate-validation.md b/roadmap/implementors-guide/src/node/utility/candidate-validation.md deleted file mode 100644 index ffeaa7a37e5b48db931ad6f401abfd50e02f13e9..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/utility/candidate-validation.md +++ /dev/null @@ -1,23 +0,0 @@ -# Candidate Validation - -This subsystem is responsible for handling candidate validation requests. It is a simple request/response server. - -A variety of subsystems want to know if a parachain block candidate is valid. None of them care about the detailed mechanics of how a candidate gets validated, just the results. This subsystem handles those details. - -## Protocol - -Input: [`CandidateValidationMessage`](../../types/overseer-protocol.md#validation-request-type) - -Output: Validation result via the provided response side-channel. - -## Functionality - -Given the hashes of a relay parent and a parachain candidate block, and either its PoV or the information with which to retrieve the PoV from the network, spawn a short-lived async job to determine whether the candidate is valid. - -Each job follows this process: - -- Get the full candidate from the current relay chain state -- Check the candidate's proof - > TODO: that's extremely hand-wavey. What does that actually entail? -- Generate either `Statement::Valid` or `Statement::Invalid`. Note that this never generates `Statement::Seconded`; Candidate Backing is the only subsystem which upgrades valid to seconded. -- Return the statement on the provided channel. diff --git a/roadmap/implementors-guide/src/node/utility/network-bridge.md b/roadmap/implementors-guide/src/node/utility/network-bridge.md deleted file mode 100644 index 0e83bc6277d2e2035f9841cef39c5c9b6bf6ea1c..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/utility/network-bridge.md +++ /dev/null @@ -1,46 +0,0 @@ -# Network Bridge - -One of the main features of the overseer/subsystem duality is to avoid shared ownership of resources and to communicate via message-passing. However, implementing each networking subsystem as its own network protocol brings a fair share of challenges. - -The most notable challenge is coordinating and eliminating race conditions of peer connection and disconnection events. If we have many network protocols that peers are supposed to be connected on, it is difficult to enforce that a peer is indeed connected on all of them or the order in which those protocols receive notifications that peers have connected. This becomes especially difficult when attempting to share peer state across protocols. All of the Parachain-Host's gossip protocols eliminate DoS with a data-dependency on current chain heads. However, it is inefficient and confusing to implement the logic for tracking our current chain heads as well as our peers' on each of those subsystems. 
Having one subsystem for tracking this shared state and distributing it to the others is an improvement in architecture and efficiency. - -One other piece of shared state to track is peer reputation. When peers are found to have provided value or cost, we adjust their reputation accordingly. - -So in short, this subsystem acts as a bridge between an actual network component and a subsystem's protocol. - -## Protocol - -Input: [`NetworkBridgeMessage`](../../types/overseer-protocol.md#network-bridge-message) -Output: Varying, based on registered event producers. - -## Functionality - -Track a set of all Event Producers, each associated with a 4-byte protocol ID. -There are two types of network messages this sends and receives: - -- ProtocolMessage(ProtocolId, Bytes) -- ViewUpdate(View) - -`StartWork` and `StopWork` determine the computation of our local view. A `ViewUpdate` is issued to each connected peer, and a `NetworkBridgeUpdate::OurViewChange` is issued for each registered event producer. - -On `RegisterEventProducer`: - -- Add the event producer to the set of event producers. If there is a competing entry, ignore the request. - -On `ProtocolMessage` arrival: - -- If the protocol ID matches an event producer, produce a `NetworkBridgeEvent::PeerMessage(sender, bytes)` from it and dispatch that message via the overseer; otherwise, ignore the message and reduce the peer's reputation slightly. - -On `ViewUpdate` arrival: - -- Do validity checks and note the most recent view update of the peer. -- For each event producer, dispatch the result of a `NetworkBridgeEvent::PeerViewChange(view)` via the overseer. - -On `ReportPeer` message: - -- Adjust peer reputation according to the cost or benefit provided. - -On `SendMessage` message: - -- Issue a corresponding `ProtocolMessage` to each listed peer with the given protocol ID and bytes. diff --git a/roadmap/implementors-guide/src/node/utility/provisioner.md b/roadmap/implementors-guide/src/node/utility/provisioner.md deleted file mode 100644 index 33fb394f1b196a3574b4dd6a7447d6730cd1ee9c..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/node/utility/provisioner.md +++ /dev/null @@ -1,56 +0,0 @@ -# Provisioner - -Relay chain block authorship authority is governed by BABE and is beyond the scope of the Overseer and the rest of the subsystems. That said, ultimately the block author needs to select a set of backable parachain candidates and other consensus data, and assemble a block from them. This subsystem is responsible for providing the necessary data to all potential block authors. - -A major feature of the provisioner: this subsystem is responsible for ensuring that parachain block candidates are sufficiently available before sending them to potential block authors. - -## Provisionable Data - -There are several distinct types of provisionable data, but they share one property: all should eventually be included in a relay chain block. - -### Backed Candidates - -The block author can choose 0 or 1 backed parachain candidates per parachain; the only constraint is that each backed candidate has the appropriate relay parent. However, the choice of a backed candidate must be the block author's; the provisioner must ensure that block authors are aware of all available [`BackedCandidate`s](../../types/backing.md#backed-candidate). - -### Signed Bitfields - -[Signed bitfields](../../types/availability.md#signed-availability-bitfield) are attestations from a particular validator about which candidates it believes are available.
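For illustration, interpreting one such bitfield against the candidates pending availability might look like the sketch below. The types are simplified stand-ins: bit `i` refers to the `i`-th candidate pending availability at the relay parent, and `[u8; 32]` stands in for a candidate hash.

```rust
/// Candidates a validator attests as available, given its bitfield and the
/// ordered list of candidates pending availability at the relay parent.
fn attested_candidates<'a>(bitfield: &[bool], pending: &'a [[u8; 32]]) -> Vec<&'a [u8; 32]> {
    bitfield
        .iter()
        .zip(pending)
        .filter_map(|(&bit, candidate)| if bit { Some(candidate) } else { None })
        .collect()
}
```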
- -### Misbehavior Reports - -Misbehavior reports are self-contained proofs of misbehavior by a validator or group of validators. For example, it is very easy to verify a double-voting misbehavior report: the report contains two votes signed by the same key, advocating different outcomes. Concretely, misbehavior reports become inherents which cause dots to be slashed. - -Note that there is no mechanism in place which forces a block author to include a misbehavior report which it doesn't like, for example if it would be slashed by such a report. The chain's defense against this is to have a relatively long slash period, such that it's likely to encounter an honest author before the slash period expires. - -### Dispute Inherent - -The dispute inherent is similar to a misbehavior report in that it is an attestation of misbehavior on the part of a validator or group of validators. Unlike a misbehavior report, it is not self-contained: resolution requires coordinated action by several validators. The canonical example of a dispute inherent involves an approval checker discovering that a set of validators has improperly approved an invalid parachain block: resolving this requires the entire validator set to re-validate the block, so that the minority can be slashed. - -Dispute resolution is complex and is explained in substantially more detail [here](../../runtime/validity.md). - -> TODO: The provisioner is responsible for selecting remote disputes to replay. Let's figure out the details. - -## Protocol - -Input: [`ProvisionerMessage`](../../types/overseer-protocol.md#provisioner-message). Backed candidates come from the [Candidate Backing subsystem](../backing/candidate-backing.md), signed bitfields come from the [Bitfield Distribution subsystem](../availability/bitfield-distribution.md), and misbehavior reports and disputes come from the [Misbehavior Arbitration subsystem](misbehavior-arbitration.md). - -At initialization, this subsystem has no outputs. Block authors can send a `ProvisionerMessage::RequestBlockAuthorshipData`, which includes a channel over which provisionable data can be sent. All appropriate provisionable data will then be sent over this channel, as it is received. - -Note that block authors must re-send a `ProvisionerMessage::RequestBlockAuthorshipData` for each relay parent they are interested in receiving provisionable data for. - -## Functionality - -The subsystem should maintain a set of handles to Block Authorship Provisioning Jobs that are currently live. - -### On Overseer Signal - -- `StartWork`: spawn a Block Authorship Provisioning Job with the given relay parent, storing a bidirectional channel with that job. -- `StopWork`: terminate the Block Authorship Provisioning Job for the given relay parent, if any. - -### On `ProvisionerMessage` - -Forward the message to the appropriate Block Authorship Provisioning Job, or discard if no appropriate job is currently active. - -## Block Authorship Provisioning Job - -Maintain the set of channels to block authors. On receiving provisionable data, send a copy over each channel. diff --git a/roadmap/implementors-guide/src/runtime/initializer.md b/roadmap/implementors-guide/src/runtime/initializer.md deleted file mode 100644 index aba4d5f352ab0a6195ec3fa6c047bb4fb2a22d4d..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/runtime/initializer.md +++ /dev/null @@ -1,35 +0,0 @@ -# Initializer Module - -This module is responsible for initializing the other modules in a deterministic order. 
It also has one other purpose as described above: accepting and forwarding session change notifications. - -## Storage - -```rust -HasInitialized: bool -``` - -## Initialization - -The other modules are initialized in this order: - -1. Configuration -1. Paras -1. Scheduler -1. Inclusion -1. Validity -1. Router - -The [Configuration Module](configuration.md) is first, since all other modules need to operate under the same configuration as each other. It would lead to inconsistency if, for example, the scheduler ran first and then the configuration was updated before the Inclusion module was initialized. - -Set `HasInitialized` to true. - -## Session Change - -If `HasInitialized` is true, throw an unrecoverable error (panic). -Otherwise, forward the session change notification to other modules in initialization order. - -## Finalization - -Finalization order is less important in this case than initialization order, so we finalize the modules in the reverse order from initialization. - -Set `HasInitialized` to false. diff --git a/roadmap/implementors-guide/src/runtime/router.md b/roadmap/implementors-guide/src/runtime/router.md deleted file mode 100644 index 4f7adeaa048f570795a5503d8f04810c6fb7279b..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/runtime/router.md +++ /dev/null @@ -1,35 +0,0 @@ -# Router Module - -The Router module is responsible for storing and dispatching Upward and Downward messages from and to parachains respectively. It is intended to later handle the XCMP logic as well. - -For each enacted block the `queue_upward_messages` entry-point is called. - -## Storage - -Storage layout: - -```rust,ignore -/// Messages ready to be dispatched onto the relay chain. -/// This is subject to `max_upward_queue_count` and -/// `watermark_queue_size` from `HostConfiguration`. -RelayDispatchQueues: map ParaId => Vec<UpwardMessage>; -/// Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueue`. -/// First item in the tuple is the count of messages and second -/// is the total length (in bytes) of the message payloads. -RelayDispatchQueueSize: map ParaId => (u32, u32); -/// The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry. -NeedsDispatch: Vec<ParaId>; -``` - -## Initialization - -No initialization routine runs for this module. - -## Routines - -* `queue_upward_messages(ParaId, Vec<UpwardMessage>)`: - 1. Updates `NeedsDispatch`, and enqueues upward messages into `RelayDispatchQueue` and modifies the respective entry in `RelayDispatchQueueSize`. - -## Finalization - - 1. Dispatch queued upward messages from `RelayDispatchQueues` in a FIFO order applying the `config.watermark_upward_queue_size` and `config.max_upward_queue_count` limits. diff --git a/roadmap/implementors-guide/src/types/availability.md b/roadmap/implementors-guide/src/types/availability.md deleted file mode 100644 index 3362908d6b63f659a09c0885bacafc6c32b59aa1..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/types/availability.md +++ /dev/null @@ -1,23 +0,0 @@ -# Availability - -One of the key roles of validators is to ensure availability of all data necessary to validate -candidates for the duration of a challenge period. This is done via an erasure-coding of the data to keep available. - -## Signed Availability Bitfield - -A bitfield [signed](backing.md#signed-wrapper) by a particular validator about the availability of pending candidates.
- - -```rust -pub type SignedAvailabilityBitfield = Signed<AvailabilityBitfield>; - -struct Bitfields(Vec<SignedAvailabilityBitfield>); // bitfields sorted by validator index, ascending -``` - -## Proof-of-Validity - -Often referred to as PoV, this is a type-safe wrapper around bytes (`Vec<u8>`) when referring to data that acts as a stateless-client proof of validity of a candidate, when used as input to the validation function of the para. - -```rust -struct PoV(Vec<u8>); -``` diff --git a/roadmap/implementors-guide/src/types/candidate.md b/roadmap/implementors-guide/src/types/candidate.md deleted file mode 100644 index fdba6919e57ce340df8eab39be92798a376fd1ef..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/types/candidate.md +++ /dev/null @@ -1,181 +0,0 @@ -# Candidate Types - -Para candidates are some of the most common types, both within the runtime and on the Node-side. - -In a way, this entire guide is about these candidates: how they are scheduled, constructed, backed, included, and challenged. - -This section will describe the base candidate type, its components, and variants that contain extra data. - -## Candidate Receipt - -Much of the info in a [`FullCandidateReceipt`](#full-candidate-receipt) is duplicated from the relay-chain state. When the corresponding relay-chain state is considered widely available, the Candidate Receipt should be favored over the `FullCandidateReceipt`. - -Examples of situations where the state is readily available include within the scope of work done by subsystems working on a given relay-parent, or within the logic of the runtime importing a backed candidate. - -```rust -/// A candidate-receipt. -struct CandidateReceipt { - /// The descriptor of the candidate. - descriptor: CandidateDescriptor, - /// The hash of the encoded commitments made as a result of candidate execution. - commitments_hash: Hash, -} -``` - -## Full Candidate Receipt - -This is the full receipt type. The `GlobalValidationSchedule` and the `LocalValidationData` are technically redundant with the `inner.relay_parent`, which uniquely describes a block in the blockchain from whose state these values are derived. The [`CandidateReceipt`](#candidate-receipt) variant is often used instead for this reason. - -However, the Full Candidate Receipt type is useful as a means of avoiding the implicit dependency on availability of old blockchain state. In situations such as availability and approval, having the full description of the candidate within a self-contained struct is convenient. - -```rust -/// All data pertaining to the execution of a para candidate. -struct FullCandidateReceipt { - inner: CandidateReceipt, - /// The global validation schedule. - global_validation: GlobalValidationSchedule, - /// The local validation data. - local_validation: LocalValidationData, -} -``` - -## Committed Candidate Receipt - -This is a variant of the candidate receipt which includes the commitments of the candidate receipt alongside the descriptor. This should be favored over the [`Candidate Receipt`](#candidate-receipt) in situations where the candidate is not going to be executed but the actual data committed to is important. This is often the case in the backing phase. - -The hash of the committed candidate receipt will be the same as that of the corresponding [`Candidate Receipt`](#candidate-receipt), because it is computed by first hashing the encoding of the commitments to form a plain [`Candidate Receipt`](#candidate-receipt). - -```rust -/// A candidate-receipt with commitments directly included.
-struct CommittedCandidateReceipt { - /// The descriptor of the candidate. - descriptor: CandidateDescriptor, - /// The commitments of the candidate receipt. - commitments: CandidateCommitments, -} -``` - -## Candidate Descriptor - -This struct is a pure description of the candidate, in a lightweight format. - -```rust -/// A unique descriptor of the candidate receipt. -struct CandidateDescriptor { - /// The ID of the para this is a candidate for. - para_id: Id, - /// The hash of the relay-chain block this is executed in the context of. - relay_parent: Hash, - /// The collator's sr25519 public key. - collator: CollatorId, - /// Signature on blake2-256 of components of this receipt: - /// The parachain index, the relay parent, and the pov_hash. - signature: CollatorSignature, - /// The blake2-256 hash of the pov-block. - pov_hash: Hash, -} -``` - - -## GlobalValidationSchedule - -The global validation schedule comprises information describing the global environment for para execution, as derived from a particular relay-parent. These are parameters that will apply to all parablocks executed in the context of this relay-parent. - -> TODO: message queue watermarks (first downward messages, then XCMP channels) - -```rust -/// Extra data that is needed along with the other fields in a `CandidateReceipt` -/// to fully validate the candidate. -/// -/// These are global parameters that apply to all candidates in a block. -struct GlobalValidationSchedule { - /// The maximum code size permitted, in bytes. - max_code_size: u32, - /// The maximum head-data size permitted, in bytes. - max_head_data_size: u32, - /// The relay-chain block number this is in the context of. - block_number: BlockNumber, -} -``` - -## LocalValidationData - -This is the validation data needed for the execution of a candidate pertaining to a specific para and relay-chain block. - -Unlike the [`GlobalValidationSchedule`](#globalvalidationschedule), which only depends on a relay-parent, this is parameterized both by a relay-parent and a choice of one of two options: - 1. Assume that the candidate pending availability on this para at the onset of the relay-parent is included. - 1. Assume that the candidate pending availability on this para at the onset of the relay-parent is timed-out. - -This choice can also be expressed as a choice of which parent head of the para will be built on - either optimistically on the candidate pending availability or pessimistically on the one that is surely included. - -Para validation happens optimistically before the block is authored, so it is not possible to predict with 100% accuracy what will happen in the earlier phase of the [`InclusionInherent`](../runtime/inclusioninherent.md) module where new availability bitfields and availability timeouts are processed. This is what will eventually define whether a candidate can be backed within a specific relay-chain block. - -> TODO: determine if balance/fees are even needed here. - -```rust -/// Extra data that is needed along with the other fields in a `CandidateReceipt` -/// to fully validate the candidate. These fields are parachain-specific. -struct LocalValidationData { - /// The parent head-data. - parent_head: HeadData, - /// The balance of the parachain at the moment of validation. - balance: Balance, - /// The blake2-256 hash of the validation code used to execute the candidate. - validation_code_hash: Hash, - /// Whether the parachain is allowed to upgrade its validation code.
- /// - /// This is `Some` if so, and contains the number of the minimum relay-chain - /// height at which the upgrade will be applied, if an upgrade is signaled - /// now. - /// - /// A parachain should enact its side of the upgrade at the end of the first - /// parablock executing in the context of a relay-chain block with at least this - /// height. This may be equal to the current perceived relay-chain block height, in - /// which case the code upgrade should be applied at the end of the signaling - /// block. - code_upgrade_allowed: Option<BlockNumber>, -} -``` - -## HeadData - -Head data is a type-safe abstraction around bytes (`Vec<u8>`) for the purposes of representing heads of parachains or parathreads. - -```rust -struct HeadData(Vec<u8>); -``` - -## Candidate Commitments - -The execution and validation of parachain or parathread candidates produces a number of values which must either be committed to blocks on the relay chain or committed to the state of the relay chain. - -```rust -/// Commitments made in a `CandidateReceipt`. Many of these are outputs of validation. -#[derive(PartialEq, Eq, Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Debug, Default))] -struct CandidateCommitments { - /// Fees paid from the chain to the relay chain validators. - fees: Balance, - /// Messages destined to be interpreted by the Relay chain itself. - upward_messages: Vec<UpwardMessage>, - /// The root of a block's erasure encoding Merkle tree. - erasure_root: Hash, - /// New validation code. - new_validation_code: Option<ValidationCode>, - /// The head-data produced as a result of execution. - head_data: HeadData, -} -``` - -## Signing Context - -This struct provides context to signatures by combining with various payloads to localize the signature to a particular session index and relay-chain hash. Having these fields included in the signature makes misbehavior attribution much simpler. - -```rust -struct SigningContext { - /// The relay-chain block hash this signature is in the context of. - parent_hash: Hash, - /// The session index this signature is in the context of. - session_index: SessionIndex, -} -``` diff --git a/roadmap/implementors-guide/src/types/messages.md b/roadmap/implementors-guide/src/types/messages.md deleted file mode 100644 index 14218eb0330f0a6c93f320cf77b65ce6054c41a7..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/types/messages.md +++ /dev/null @@ -1,28 +0,0 @@ -# Message types - -Types of messages that are passed between parachains and the relay chain: UMP, DMP, XCMP. - -## Upward Message - -A type of message dispatched from a parachain to the relay chain. - -```rust,ignore -enum ParachainDispatchOrigin { - /// As a simple `Origin::Signed`, using `ParaId::account_id` as its value. This is good when - /// interacting with standard modules such as `balances`. - Signed, - /// As the special `Origin::Parachain(ParaId)`. This is good when interacting with parachain- - /// aware modules which need to succinctly verify that the origin is a parachain. - Parachain, - /// As the simple, superuser `Origin::Root`. This can only be done on specially permissioned - /// parachains. - Root, -} - -struct UpwardMessage { - /// The origin for the message to be sent from. - pub origin: ParachainDispatchOrigin, - /// The message data.
- pub data: Vec<u8>, -} -``` diff --git a/roadmap/implementors-guide/src/types/overseer-protocol.md b/roadmap/implementors-guide/src/types/overseer-protocol.md deleted file mode 100644 index 7b211bd14f6c864c46c56c23b6d79346a7ba45f4..0000000000000000000000000000000000000000 --- a/roadmap/implementors-guide/src/types/overseer-protocol.md +++ /dev/null @@ -1,269 +0,0 @@ -# Overseer Protocol - -This chapter contains message types sent to and from the overseer, and the underlying subsystem message types that are transmitted using these. - -## Overseer Signal - -Signals from the overseer to a subsystem to request a change in execution that has to be obeyed by the subsystem. - -```rust -enum OverseerSignal { - /// Signal to start work localized to the relay-parent hash. - StartWork(Hash), - /// Signal to stop (or phase down) work localized to the relay-parent hash. - StopWork(Hash), -} -``` - -All subsystems have their own message types; all of them need to be able to listen for overseer signals as well. There are currently two proposals for how to handle that with unified communication channels: - -1. Retaining the `OverseerSignal` definition above, add `enum FromOverseer<T> {Signal(OverseerSignal), Message(T)}`. -1. Add a generic variant to `OverseerSignal`: `Message(T)`. - -Either way, there will be some top-level type encapsulating messages from the overseer to each subsystem. - -## All Messages - -> TODO (now) - -## Availability Distribution Message - -Messages received by the availability distribution subsystem. - -```rust -enum AvailabilityDistributionMessage { - /// Distribute an availability chunk to other validators. - DistributeChunk(Hash, ErasureChunk), - /// Fetch an erasure chunk from the network by candidate hash and chunk index. - FetchChunk(Hash, u32), - /// Event from the network. - /// An update on network state from the network bridge. - NetworkBridgeUpdate(NetworkBridgeEvent), -} -``` - -## Availability Store Message - -Messages to and from the availability store. - -```rust -enum AvailabilityStoreMessage { - /// Query the PoV of a candidate by hash. - QueryPoV(Hash, ResponseChannel<PoV>), - /// Query a specific availability chunk of the candidate's erasure-coding by validator index. - /// Returns the chunk and its inclusion proof against the candidate's erasure-root. - QueryChunk(Hash, ValidatorIndex, ResponseChannel<AvailabilityChunkAndProof>), - /// Store a specific chunk of the candidate's erasure-coding by validator index, with an - /// accompanying proof. - StoreChunk(Hash, ValidatorIndex, AvailabilityChunkAndProof), -} -``` - -## Bitfield Distribution Message - -Messages received by the bitfield distribution subsystem. - -```rust -enum BitfieldDistributionMessage { - /// Distribute a bitfield signed by a validator to other validators. - /// The bitfield distribution subsystem will assume this is indeed correctly signed. - DistributeBitfield(relay_parent, SignedAvailabilityBitfield), - /// Receive a network bridge update. - NetworkBridgeUpdate(NetworkBridgeEvent), -} -``` - -## Bitfield Signing Message - -Currently, the bitfield signing subsystem receives no specific messages. - -```rust -/// Non-instantiable message type -enum BitfieldSigningMessage { } -``` - -## Candidate Backing Message - -```rust -enum CandidateBackingMessage { - /// Registers a stream listener for updates to the set of backable candidates that could be backed - /// in a child of the given relay-parent, referenced by its hash.
- RegisterBackingWatcher(Hash, TODO), - /// Note that the Candidate Backing subsystem should second the given candidate in the context of the - /// given relay-parent (ref. by hash). This candidate must be validated using the provided PoV. - Second(Hash, CandidateReceipt, PoV), - /// Note a peer validator's statement about a particular candidate. Disagreements about validity must be escalated - /// to a broader check by Misbehavior Arbitration. Agreements are simply tallied until a quorum is reached. - Statement(Statement), -} -``` - -## Candidate Selection Message - -These messages are sent to the [Candidate Selection subsystem](../node/backing/candidate-selection.md) as a means of providing feedback on its outputs. - -```rust -enum CandidateSelectionMessage { - /// We recommended a particular candidate to be seconded, but it was invalid; penalize the collator. - Invalid(CandidateReceipt), -} -``` - -## Network Bridge Message - -Messages received by the network bridge. This subsystem is invoked by others to manipulate access -to the low-level networking code. - -```rust -enum NetworkBridgeMessage { - /// Register an event producer with the network bridge. This should be done early and cannot - /// be de-registered. - RegisterEventProducer(ProtocolId, Fn(NetworkBridgeEvent) -> AllMessages), - /// Report a cost or benefit of a peer. Negative values are costs, positive are benefits. - ReportPeer(PeerId, cost_benefit: i32), - /// Send a message to one or more peers on the given protocol ID. - SendMessage([PeerId], ProtocolId, Bytes), -} -``` - -## Network Bridge Update - -These updates are posted from the [Network Bridge Subsystem](../node/utility/network-bridge.md) to other subsystems based on registered listeners. - -```rust -struct View(Vec<Hash>); // Up to `N` (5?) chain heads. - -enum NetworkBridgeEvent { - /// A peer with given ID is now connected. - PeerConnected(PeerId, ObservedRole), // role is one of Full, Light, OurGuardedAuthority, OurSentry - /// A peer with given ID is now disconnected. - PeerDisconnected(PeerId), - /// We received a message from the given peer. Protocol ID should be apparent from context. - PeerMessage(PeerId, Bytes), - /// The given peer has updated its description of its view. - PeerViewChange(PeerId, View), // guaranteed to come after peer connected event. - /// We have posted the given view update to all connected peers. - OurViewChange(View), -} -``` - -## Misbehavior Report - -```rust -enum MisbehaviorReport { - /// These validator nodes disagree on this candidate's validity, please figure it out - /// - /// Most likely, the list of statements all agree except for the final one. That's not - /// guaranteed, though; if somehow we become aware of lots of - /// statements disagreeing about the validity of a candidate before taking action, - /// this message should be dispatched with all of them, in arbitrary order. - /// - /// This variant is also used when our own validity checks disagree with others'. - CandidateValidityDisagreement(CandidateReceipt, Vec<SignedFullStatement>), - /// I've noticed a peer contradicting itself about a particular candidate - SelfContradiction(CandidateReceipt, SignedFullStatement, SignedFullStatement), - /// This peer has seconded more than one parachain candidate for this relay parent head - DoubleVote(CandidateReceipt, SignedFullStatement, SignedFullStatement), -} -``` - -If this subsystem chooses to second a parachain block, it dispatches a `CandidateBackingSubsystemMessage`.
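As a minimal sketch of how the `DoubleVote` variant above could be produced (with simplified stand-in types; the real check compares full signed statements under one relay parent):

```rust
// Simplified stand-ins for the real types.
type ValidatorId = u64;
type CandidateHash = [u8; 32];

struct SecondedStatement {
    by: ValidatorId,
    candidate: CandidateHash,
}

/// Two `Seconded` statements by the same validator for different candidates
/// under the same relay parent constitute a double vote.
fn is_double_vote(a: &SecondedStatement, b: &SecondedStatement) -> bool {
    a.by == b.by && a.candidate != b.candidate
}
```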
- -## PoV Distribution Message - -```rust -enum PoVDistributionMessage { - /// Note a statement by a validator on a relay-parent. `Seconded` statements must always - /// have been passed in before `Valid` or `Invalid` statements. - ValidatorStatement(Hash, SignedFullStatement), - /// Fetch a PoV from the network. - /// (relay_parent, PoV-hash, Response channel). - FetchPoV(Hash, CandidateDescriptor, ResponseChannel<PoV>), - /// Distribute a PoV for the given relay-parent and CandidateDescriptor. - /// The PoV should correctly hash to the PoV hash mentioned in the CandidateDescriptor - DistributePoV(Hash, CandidateDescriptor, PoV), - /// An update from the network bridge. - NetworkBridgeUpdate(NetworkBridgeEvent), -} -``` - -## Provisioner Message - -```rust -/// This data becomes intrinsics or extrinsics which should be included in a future relay chain block. -enum ProvisionableData { - /// This bitfield indicates the availability of various candidate blocks. - Bitfield(Hash, SignedAvailabilityBitfield), - /// The Candidate Backing subsystem believes that this candidate is valid, pending availability. - BackedCandidate(BackedCandidate), - /// Misbehavior reports are self-contained proofs of validator misbehavior. - MisbehaviorReport(Hash, MisbehaviorReport), - /// Disputes trigger a broad dispute resolution process. - Dispute(Hash, Signature), -} - -/// Message to the Provisioner. -/// -/// In all cases, the Hash is that of the relay parent. -enum ProvisionerMessage { - /// This message allows potential block authors to be kept updated with all new authorship data - /// as it becomes available. - RequestBlockAuthorshipData(Hash, Sender<ProvisionableData>), - /// This data should become part of a relay chain block - ProvisionableData(ProvisionableData), -} -``` - -## Runtime API Message - -The Runtime API subsystem is responsible for providing an interface to the state of the chain's runtime. - -Other subsystems query this data by sending these messages. - -```rust -enum RuntimeApiRequest { - /// Get the current validator set. - Validators(ResponseChannel<Vec<ValidatorId>>), - /// Get a signing context for bitfields and statements. - SigningContext(ResponseChannel<SigningContext>), - /// Get the validation code for a specific para, assuming execution under given block number, and - /// an optional block number representing an intermediate parablock executed in the context of - /// that block. - ValidationCode(ParaId, BlockNumber, Option<BlockNumber>, ResponseChannel<ValidationCode>), -} - -enum RuntimeApiMessage { - /// Make a request of the runtime API against the post-state of the given relay-parent. - Request(Hash, RuntimeApiRequest), -} -``` - -## Statement Distribution Message - -The Statement Distribution subsystem distributes signed statements and candidates from validators to other validators. It does this by distributing full statements, which embed the candidate receipt, as opposed to compact statements which don't. -It receives updates from the network bridge and signed statements to share with other validators. - -```rust -enum StatementDistributionMessage { - /// An update from the network bridge. - NetworkBridgeUpdate(NetworkBridgeEvent), - /// We have validated a candidate and want to share our judgment with our peers. - /// The hash is the relay parent. - /// - /// The statement distribution subsystem assumes that the statement should be correctly - /// signed.
- Share(Hash, SignedFullStatement), -} -``` - -## Validation Request Type - -Various modules request that the [Candidate Validation subsystem](../node/utility/candidate-validation.md) validate a block with this message: - -```rust -enum CandidateValidationMessage { - /// Validate a candidate with provided parameters. Returns `Err` if and only if an internal - /// error is encountered. A bad candidate will return `Ok(false)`, while a good one will - /// return `Ok(true)`. - Validate(ValidationCode, CandidateReceipt, PoV, ResponseChannel<Result<bool>>), -} -``` diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 6826900da75664a3dd90f56c9e6dde769f08dda9..35ff0f9e914255a6a1dc272d76f7013652906138 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -1,11 +1,12 @@ [package] name = "polkadot-rpc" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" [dependencies] jsonrpc-core = "14.0.3" +jsonrpc-pubsub = "14.0.3" polkadot-primitives = { path = "../primitives" } sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -23,5 +24,5 @@ sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste txpool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master" } frame-rpc-system = { package = "substrate-frame-rpc-system", git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 6bddc8ad8dafeccb39f4d8248aa21c06224d3959..4867e14f15d56ea1c0933c29d8d4016b0ab486dc 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -20,7 +20,7 @@ use std::sync::Arc; -use polkadot_primitives::{Block, BlockNumber, AccountId, Nonce, Balance, Hash}; +use polkadot_primitives::v0::{Block, BlockNumber, AccountId, Nonce, Balance, Hash}; use sp_api::ProvideRuntimeApi; use txpool_api::TransactionPool; use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as BlockChainError}; @@ -28,8 +28,9 @@ use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; use sc_client_api::light::{Fetcher, RemoteBlockchain}; use sc_consensus_babe::Epoch; -use sc_rpc::DenyUnsafe; use sp_block_builder::BlockBuilder; +pub use sc_rpc::DenyUnsafe; +pub use jsonrpc_pubsub::manager::SubscriptionManager; /// A type representing all RPC extensions. pub type RpcExtension = jsonrpc_core::IoHandler; @@ -62,6 +63,10 @@ pub struct GrandpaDeps { pub shared_voter_state: sc_finality_grandpa::SharedVoterState, /// Authority set info. pub shared_authority_set: sc_finality_grandpa::SharedAuthoritySet, + /// Receives notifications about justification events from Grandpa. + pub justification_stream: sc_finality_grandpa::GrandpaJustificationStream, + /// Subscription manager to keep track of pubsub subscribers. + pub subscriptions: jsonrpc_pubsub::manager::SubscriptionManager, } /// Full client dependencies @@ -81,16 +86,15 @@ pub struct FullDeps { } /// Instantiate all RPC extensions.
-pub fn create_full(deps: FullDeps) -> RpcExtension where +pub fn create_full(deps: FullDeps) -> RpcExtension where C: ProvideRuntimeApi, C: HeaderBackend + HeaderMetadata, C: Send + Sync + 'static, C::Api: frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, + C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, C::Api: BlockBuilder, P: TransactionPool + Sync + Send + 'static, - UE: codec::Codec + Send + Sync + 'static, SC: SelectChain + 'static, { use frame_rpc_system::{FullSystem, SystemApi}; @@ -115,6 +119,8 @@ pub fn create_full(deps: FullDeps) -> RpcExtension where let GrandpaDeps { shared_voter_state, shared_authority_set, + justification_stream, + subscriptions, } = grandpa; io.extend_with( @@ -139,22 +145,23 @@ pub fn create_full(deps: FullDeps) -> RpcExtension where GrandpaApi::to_delegate(GrandpaRpcHandler::new( shared_authority_set, shared_voter_state, + justification_stream, + subscriptions, )) ); io } /// Instantiate all RPC extensions for light node. -pub fn create_light(deps: LightDeps) -> RpcExtension +pub fn create_light(deps: LightDeps) -> RpcExtension where C: ProvideRuntimeApi, C: HeaderBackend, C: Send + Sync + 'static, C::Api: frame_rpc_system::AccountNonceApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, + C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, P: TransactionPool + Sync + Send + 'static, F: Fetcher + 'static, - UE: codec::Codec + Send + Sync + 'static, { use frame_rpc_system::{LightSystem, SystemApi}; diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index 4dba004540e0f2821f03401ac269f14c4be8c952..3d3b0e44b071fcc47c3b978ff6cef7080df75554 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "polkadot-runtime-common" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } log = { version = "0.3.9", optional = true } rustc-hex = { version = "2.0.1", default-features = false } serde = { version = "1.0.102", default-features = false } @@ -22,31 +22,32 @@ sp-session = { git = "https://github.com/paritytech/substrate", branch = "master sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -authorship = { package = "pallet-authorship", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -balances = { package = "pallet-balances", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -session = { package = "pallet-session", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-support 
= { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -staking = { package = "pallet-staking", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -system = { package = "frame-system", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -timestamp = { package = "pallet-timestamp", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -vesting = { package = "pallet-vesting", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offences = { package = "pallet-offences", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -transaction-payment = { package = "pallet-transaction-payment", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } libsecp256k1 = { version = "0.3.2", default-features = false, optional = true } +runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } [dev-dependencies] hex-literal = "0.2.1" keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } -babe = { package = "pallet-babe", git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } -randomness-collective-flip = { package = "pallet-randomness-collective-flip", git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "master" } -treasury = { package = "pallet-treasury", git = "https://github.com/paritytech/substrate", branch = "master" } -trie-db = "0.21.0" +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master" } +trie-db = "0.22.0" serde_json = "1.0.41" libsecp256k1 = "0.3.2" @@ -67,21 +68,21 @@ std = [ "sp-std/std", "sp-io/std", "frame-support/std", - "authorship/std", - "balances/std", + "pallet-authorship/std", + "pallet-balances/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", - "session/std", - "staking/std", - "system/std", - "timestamp/std", - "vesting/std", - 
"transaction-payment/std", + "pallet-session/std", + "pallet-staking/std", + "frame-system/std", + "pallet-timestamp/std", + "pallet-vesting/std", + "pallet-transaction-payment/std", ] runtime-benchmarks = [ "libsecp256k1/hmac", "frame-benchmarking", "frame-support/runtime-benchmarks", - "system/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] diff --git a/runtime/common/src/attestations.rs b/runtime/common/src/attestations.rs deleted file mode 100644 index 97080fc64ab33a6e78bead3168329a40e339fc1f..0000000000000000000000000000000000000000 --- a/runtime/common/src/attestations.rs +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! A module for tracking all attestations that fell on a given candidate receipt. -//! -//! In the future, it is planned that this module will handle dispute resolution -//! as well. - -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use frame_support::{ - decl_storage, decl_module, decl_error, ensure, - dispatch::DispatchResult, - traits::Get, - weights::DispatchClass, -}; - -use primitives::{Hash, parachain::{AttestedCandidate, AbridgedCandidateReceipt, Id as ParaId}}; -use sp_runtime::RuntimeDebug; -use sp_staking::SessionIndex; - -use inherents::{ProvideInherent, InherentData, MakeFatalError, InherentIdentifier}; -use system::ensure_none; - -/// Parachain blocks included in a recent relay-chain block. -#[derive(Encode, Decode)] -pub struct IncludedBlocks { - /// The actual relay chain block number where blocks were included. - pub actual_number: T::BlockNumber, - /// The session index at this block. - pub session: SessionIndex, - /// The randomness seed at this block. - pub random_seed: [u8; 32], - /// All parachain IDs active at this block. - pub active_parachains: Vec, - /// Hashes of the parachain candidates included at this block. - pub para_blocks: Vec, -} - -/// Attestations kept over time on a parachain block. -#[derive(Encode, Decode)] -pub struct BlockAttestations { - receipt: AbridgedCandidateReceipt, - valid: Vec, // stash account ID of voter. - invalid: Vec, // stash account ID of voter. -} - -/// Additional attestations on a parachain block, after it was included. -#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug)] -pub struct MoreAttestations; - -/// Something which processes rewards for received attestations. -pub trait RewardAttestation { - /// Reward immediate attestations on parachain blocks. The argument is an iterable of - /// validator indices of the attesting validators. - fn reward_immediate(validator_indices: impl IntoIterator); -} - -impl RewardAttestation for () { - fn reward_immediate(validator_indices: impl IntoIterator) { - // ensure side-effecting iterators do work. 
- for _ in validator_indices {} - } -} - -impl RewardAttestation for staking::Module { - fn reward_immediate(validator_indices: impl IntoIterator) { - use staking::SessionInterface; - - // The number of points to reward for a validity statement. - // https://research.web3.foundation/en/latest/polkadot/Token%20Economics/#payment-details - const STAKING_REWARD_POINTS: u32 = 20; - - let validators = T::SessionInterface::validators(); - - let validator_rewards = validator_indices.into_iter() - .filter_map(|i| validators.get(i as usize).cloned()) - .map(|v| (v, STAKING_REWARD_POINTS)); - - Self::reward_by_ids(validator_rewards); - } -} - -pub trait Trait: session::Trait { - /// How many blocks ago we're willing to accept attestations for. - type AttestationPeriod: Get; - - /// Get a list of the validators' underlying identities. - type ValidatorIdentities: Get>; - - /// Hook for rewarding validators upon attesting. - type RewardAttestation: RewardAttestation; -} - -decl_storage! { - trait Store for Module as Attestations { - /// A mapping from modular block number (n % AttestationPeriod) - /// to session index and the list of candidate hashes. - pub RecentParaBlocks: map hasher(twox_64_concat) T::BlockNumber => Option>; - - /// Attestations on a recent parachain block. - pub ParaBlockAttestations: - double_map hasher(twox_64_concat) T::BlockNumber, hasher(identity) Hash - => Option>; - - // Did we already have more attestations included in this block? - DidUpdate: bool; - } -} - -decl_error! { - pub enum Error for Module { - /// More attestations can be added only once in a block. - TooManyAttestations, - } -} - -decl_module! { - /// Parachain-attestations module. - pub struct Module for enum Call where origin: ::Origin { - type Error = Error; - - /// Provide candidate receipts for parachains, in ascending order by id. - #[weight = (0, DispatchClass::Mandatory)] - fn more_attestations(origin, _more: MoreAttestations) -> DispatchResult { - ensure_none(origin)?; - ensure!(!DidUpdate::exists(), Error::::TooManyAttestations); - DidUpdate::put(true); - - Ok(()) - } - - fn on_finalize(_n: T::BlockNumber) { - DidUpdate::kill(); - } - } -} - -impl Module { - /// Update recent candidates to contain the already-checked parachain candidates. - pub(crate) fn note_included(heads: &[AttestedCandidate], para_blocks: IncludedBlocks) { - let attestation_period = T::AttestationPeriod::get(); - let mod_num = para_blocks.actual_number % attestation_period; - - // clear old entry that was in this place. - if let Some(old_entry) = >::take(&mod_num) { - >::remove_prefix(&old_entry.actual_number); - } - - let validators = T::ValidatorIdentities::get(); - - // make new entry. - for (head, hash) in heads.iter().zip(¶_blocks.para_blocks) { - let mut valid = Vec::new(); - let invalid = Vec::new(); - - { - let attesting_indices = head.validator_indices - .iter() - .enumerate() - .filter(|(_, bit)| **bit) - .inspect(|&(auth_index, _)| { - if let Some(stash_id) = validators.get(auth_index) { - valid.push(stash_id.clone()); - } - }) - .map(|(i, _)| i as u32); - - T::RewardAttestation::reward_immediate(attesting_indices); - } - - let summary = BlockAttestations { - receipt: head.candidate().clone(), - valid, - invalid, - }; - - >::insert(¶_blocks.actual_number, hash, &summary); - } - - >::insert(&mod_num, ¶_blocks); - } -} - -/// An identifier for inherent data that provides after-the-fact attestations -/// on already included parachain blocks. 
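///
/// A minimal sketch of how a block author would supply this inherent,
/// assuming the standard `sp-inherents` API:
///
///     let mut data = InherentData::new();
///     data.put_data(MORE_ATTESTATIONS_IDENTIFIER, &MoreAttestations)
///         .expect("identifier not yet used in this InherentData");
///
/// `create_inherent` below then turns that payload into a
/// `more_attestations` call.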
-pub const MORE_ATTESTATIONS_IDENTIFIER: InherentIdentifier = *b"par-atts"; - -pub type InherentType = MoreAttestations; - -impl ProvideInherent for Module { - type Call = Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = MORE_ATTESTATIONS_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - data.get_data::(&MORE_ATTESTATIONS_IDENTIFIER) - .ok() - .and_then(|x| x.map(Call::more_attestations)) - } -} diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index 8811e04943d2c3d87d623b69d806d8dc03991b09..334d5ec4bb7b58c6c3958553c845fc182807ca3d 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -22,7 +22,7 @@ use frame_support::{ decl_event, decl_storage, decl_module, decl_error, ensure, dispatch::IsSubType, traits::{Currency, Get, VestingSchedule, EnsureOrigin}, weights::{Pays, DispatchClass} }; -use system::{ensure_signed, ensure_root, ensure_none}; +use frame_system::{ensure_signed, ensure_root, ensure_none}; use codec::{Encode, Decode}; #[cfg(feature = "std")] use serde::{self, Serialize, Deserialize, Serializer, Deserializer}; @@ -35,15 +35,15 @@ use sp_runtime::{ TransactionSource, TransactionValidityError, }, }; -use primitives::ValidityError; +use primitives::v1::ValidityError; -type CurrencyOf = <::VestingSchedule as VestingSchedule<::AccountId>>::Currency; -type BalanceOf = as Currency<::AccountId>>::Balance; +type CurrencyOf = <::VestingSchedule as VestingSchedule<::AccountId>>::Currency; +type BalanceOf = as Currency<::AccountId>>::Balance; /// Configuration trait. -pub trait Trait: system::Trait { +pub trait Trait: frame_system::Trait { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; type VestingSchedule: VestingSchedule; type Prefix: Get<&'static [u8]>; type MoveClaimOrigin: EnsureOrigin; @@ -130,9 +130,9 @@ impl sp_std::fmt::Debug for EcdsaSignature { decl_event!( pub enum Event where Balance = BalanceOf, - AccountId = ::AccountId + AccountId = ::AccountId { - /// Someone claimed some DOTs. + /// Someone claimed some DOTs. [who, ethereum_address, amount] Claimed(AccountId, EthereumAddress, Balance), } ); @@ -539,10 +539,10 @@ impl sp_runtime::traits::ValidateUnsigned for Module { /// otherwise free to place on chain. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct PrevalidateAttests(sp_std::marker::PhantomData) where - ::Call: IsSubType, T>; + ::Call: IsSubType>; impl Debug for PrevalidateAttests where - ::Call: IsSubType, T> + ::Call: IsSubType> { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { @@ -556,7 +556,7 @@ impl Debug for PrevalidateAttests where } impl PrevalidateAttests where - ::Call: IsSubType, T> + ::Call: IsSubType> { /// Create new `SignedExtension` to check runtime version. pub fn new() -> Self { @@ -565,10 +565,10 @@ impl PrevalidateAttests where } impl SignedExtension for PrevalidateAttests where - ::Call: IsSubType, T> + ::Call: IsSubType> { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "PrevalidateAttests"; @@ -642,7 +642,7 @@ mod tests { ord_parameter_types, weights::{Pays, GetDispatchInfo}, traits::ExistenceRequirement, dispatch::DispatchError::BadOrigin, }; - use balances; + use pallet_balances; use super::Call as ClaimsCall; impl_outer_origin! 
{ @@ -665,7 +665,7 @@ mod tests { pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } - impl system::Trait for Test { + impl frame_system::Trait for Test { type BaseCallFilter = (); type Origin = Origin; type Call = Call; @@ -687,9 +687,10 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type ModuleToIndex = (); - type AccountData = balances::AccountData; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = Balances; + type SystemWeightInfo = (); } parameter_types! { @@ -698,19 +699,21 @@ mod tests { pub const MinVestedTransfer: u64 = 0; } - impl balances::Trait for Test { + impl pallet_balances::Trait for Test { type Balance = u64; type Event = (); type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } - impl vesting::Trait for Test { + impl pallet_vesting::Trait for Test { type Event = (); type Currency = Balances; type BlockNumberToBalance = Identity; type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); } parameter_types!{ @@ -724,11 +727,11 @@ mod tests { type Event = (); type VestingSchedule = Vesting; type Prefix = Prefix; - type MoveClaimOrigin = system::EnsureSignedBy; + type MoveClaimOrigin = frame_system::EnsureSignedBy; } - type System = system::Module; - type Balances = balances::Module; - type Vesting = vesting::Module; + type System = frame_system::Module; + type Balances = pallet_balances::Module; + type Vesting = pallet_vesting::Module; type Claims = Module; fn alice() -> secp256k1::SecretKey { @@ -750,9 +753,9 @@ mod tests { // This function basically just builds a genesis storage key/value store according to // our desired mockup. pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = system::GenesisConfig::default().build_storage::().unwrap(); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); // We use default for brevity, but you can configure as desired if needed. - balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); + pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); GenesisConfig::{ claims: vec![ (eth(&alice()), 100, None, None), @@ -979,7 +982,7 @@ mod tests { // Make sure we can not transfer the vested balance. assert_err!( >::transfer(&69, &80, 180, ExistenceRequirement::AllowDeath), - balances::Error::::LiquidityRestrictions, + pallet_balances::Error::::LiquidityRestrictions, ); }); } @@ -1164,8 +1167,7 @@ mod tests { mod benchmarking { use super::*; use secp_utils::*; - use system::RawOrigin; - use system as frame_system; // NOTE: required for the benchmarks! 
macro + use frame_system::RawOrigin; use frame_benchmarking::{benchmarks, account}; use sp_runtime::DispatchResult; use sp_runtime::traits::ValidateUnsigned; diff --git a/runtime/common/src/crowdfund.rs b/runtime/common/src/crowdfund.rs index 15160f7fc0958c5daf7267ee740b3ec738bbaa5f..17b78cbd71d045b27e951acf0b5ebfd50a7df21f 100644 --- a/runtime/common/src/crowdfund.rs +++ b/runtime/common/src/crowdfund.rs @@ -72,23 +72,23 @@ use frame_support::{ Currency, Get, OnUnbalanced, WithdrawReason, ExistenceRequirement::AllowDeath }, }; -use system::ensure_signed; +use frame_system::ensure_signed; use sp_runtime::{ModuleId, traits::{AccountIdConversion, Hash, Saturating, Zero, CheckedAdd} }; use crate::slots; use codec::{Encode, Decode}; use sp_std::vec::Vec; -use primitives::parachain::{Id as ParaId, HeadData}; +use primitives::v1::{Id as ParaId, HeadData}; pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; #[allow(dead_code)] pub type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: slots::Trait { - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// ModuleID for the crowdfund module. An appropriate value could be ```ModuleId(*b"py/cfund")``` type ModuleId: Get; @@ -184,15 +184,22 @@ decl_storage! { decl_event! { pub enum Event where - ::AccountId, + ::AccountId, Balance = BalanceOf, { + /// Created a new crowdfunding campaign. [fund_index] Created(FundIndex), + /// Contributed to a crowd sale. [who, fund_index, amount] Contributed(AccountId, FundIndex, Balance), + /// Withdrew full balance of a contributor. [who, fund_index, amount] Withdrew(AccountId, FundIndex, Balance), + /// Fund is placed into retirement. [fund_index] Retiring(FundIndex), + /// Fund is dissolved. [fund_index] Dissolved(FundIndex), + /// The deploy data of the funded parachain is set. [fund_index] DeployDataFixed(FundIndex), + /// Onboarding process for a winning parachain fund is completed. [fund_index, parachain_id] Onboarded(FundIndex, ParaId), } } @@ -263,7 +270,7 @@ decl_module! { ensure!(first_slot < last_slot, Error::::LastSlotBeforeFirstSlot); ensure!(last_slot <= first_slot + 3.into(), Error::::LastSlotTooFarInFuture); - ensure!(end > >::block_number(), Error::::CannotEndInPast); + ensure!(end > >::block_number(), Error::::CannotEndInPast); let deposit = T::SubmissionDeposit::get(); let transfer = WithdrawReason::Transfer.into(); @@ -306,7 +313,7 @@ decl_module! { ensure!(fund.raised <= fund.cap, Error::::CapExceeded); // Make sure crowdfund has not ended - let now = >::block_number(); + let now = >::block_number(); ensure!(fund.end > now, Error::::ContributionPeriodOver); T::Currency::transfer(&who, &Self::fund_account_id(index), value, AllowDeath)?; @@ -394,7 +401,7 @@ decl_module! { ensure!(fund.parachain.is_none(), Error::::AlreadyOnboard); fund.parachain = Some(para_id); - let fund_origin = system::RawOrigin::Signed(Self::fund_account_id(index)).into(); + let fund_origin = frame_system::RawOrigin::Signed(Self::fund_account_id(index)).into(); >::fix_deploy_data( fund_origin, index, @@ -423,7 +430,7 @@ decl_module! { ensure!(T::Currency::free_balance(&account) >= fund.raised, Error::::FundsNotReturned); // This fund just ended. Withdrawal period begins. - let now = >::block_number(); + let now = >::block_number(); fund.end = now; >::insert(index, &fund); @@ -438,7 +445,7 @@ decl_module!
{ let mut fund = Self::funds(index).ok_or(Error::::InvalidFundIndex)?; ensure!(fund.parachain.is_none(), Error::::FundNotRetired); - let now = >::block_number(); + let now = >::block_number(); // `fund.end` can represent the end of a failed crowdsale or the beginning of retirement ensure!(now >= fund.end, Error::::FundNotEnded); @@ -469,7 +476,7 @@ decl_module! { let fund = Self::funds(index).ok_or(Error::::InvalidFundIndex)?; ensure!(fund.parachain.is_none(), Error::::HasActiveParachain); - let now = >::block_number(); + let now = >::block_number(); ensure!( now >= fund.end.saturating_add(T::RetirementPeriod::get()), Error::::InRetirementPeriod @@ -568,14 +575,14 @@ mod tests { }; use frame_support::traits::{Contains, ContainsLengthBound}; use sp_core::H256; - use primitives::parachain::{Info as ParaInfo, Id as ParaId, Scheduling, ValidationCode}; + use primitives::v1::{Id as ParaId, ValidationCode}; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried. use sp_runtime::{ Perbill, Permill, Percent, testing::Header, DispatchResult, traits::{BlakeTwo256, IdentityLookup}, }; - use crate::registrar::Registrar; + use crate::slots::Registrar; impl_outer_origin! { pub enum Origin for Test {} @@ -592,7 +599,7 @@ mod tests { pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } - impl system::Trait for Test { + impl frame_system::Trait for Test { type BaseCallFilter = (); type Origin = Origin; type Call = (); @@ -614,19 +621,21 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type ModuleToIndex = (); - type AccountData = balances::AccountData; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = Balances; + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl balances::Trait for Test { + impl pallet_balances::Trait for Test { type Balance = u64; type Event = (); type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { @@ -651,22 +660,24 @@ mod tests { fn min_len() -> usize { 0 } fn max_len() -> usize { 0 } } - impl treasury::Trait for Test { - type Currency = balances::Module; - type ApproveOrigin = system::EnsureRoot; - type RejectOrigin = system::EnsureRoot; + impl pallet_treasury::Trait for Test { + type Currency = pallet_balances::Module; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; type Event = (); type ProposalRejection = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type BurnDestination = (); type Tippers = Nobody; type TipCountdown = TipCountdown; type TipFindersFee = TipFindersFee; type TipReportDepositBase = TipReportDepositBase; type TipReportDepositPerByte = TipReportDepositPerByte; type ModuleId = TreasuryModuleId; + type WeightInfo = (); } thread_local! 
{ @@ -695,13 +706,9 @@ mod tests { code_size <= MAX_CODE_SIZE } - fn para_info(_id: ParaId) -> Option { - Some(ParaInfo { scheduling: Scheduling::Always }) - } - fn register_para( id: ParaId, - _info: ParaInfo, + _parachain: bool, code: ValidationCode, initial_head_data: HeadData, ) -> DispatchResult { @@ -752,20 +759,20 @@ mod tests { type ModuleId = CrowdfundModuleId; } - type System = system::Module; - type Balances = balances::Module; + type System = frame_system::Module; + type Balances = pallet_balances::Module; type Slots = slots::Module; - type Treasury = treasury::Module; + type Treasury = pallet_treasury::Module; type Crowdfund = Module; - type RandomnessCollectiveFlip = randomness_collective_flip::Module; - use balances::Error as BalancesError; + type RandomnessCollectiveFlip = pallet_randomness_collective_flip::Module; + use pallet_balances::Error as BalancesError; use slots::Error as SlotsError; // This function basically just builds a genesis storage key/value store according to // our desired mockup. fn new_test_ext() -> sp_io::TestExternalities { - let mut t = system::GenesisConfig::default().build_storage::().unwrap(); - balances::GenesisConfig::{ + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ balances: vec![(1, 1000), (2, 2000), (3, 3000), (4, 4000)], }.assimilate_storage(&mut t).unwrap(); t.into() @@ -916,7 +923,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into() )); @@ -927,7 +934,7 @@ mod tests { assert_eq!( fund.deploy_data, Some(DeployData { - code_hash: ::Hash::default(), + code_hash: ::Hash::default(), code_size: 0, initial_head_data: vec![0].into(), }), @@ -946,7 +953,7 @@ mod tests { assert_noop!(Crowdfund::fix_deploy_data( Origin::signed(2), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into()), Error::::InvalidOrigin @@ -956,7 +963,7 @@ mod tests { assert_noop!(Crowdfund::fix_deploy_data( Origin::signed(1), 1, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into()), Error::::InvalidFundIndex @@ -966,7 +973,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -974,7 +981,7 @@ mod tests { assert_noop!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![1].into()), Error::::ExistingDeployData @@ -994,7 +1001,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1040,7 +1047,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1068,7 +1075,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1111,7 +1118,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1253,7 +1260,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1282,7 +1289,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1321,14 +1328,14 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + 
::Hash::default(), 0, vec![0].into(), )); assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(2), 1, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); diff --git a/runtime/common/src/dummy.rs b/runtime/common/src/dummy.rs new file mode 100644 index 0000000000000000000000000000000000000000..73f53920651dd0ec6623ea83ca1490e2a38562cc --- /dev/null +++ b/runtime/common/src/dummy.rs @@ -0,0 +1,30 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A dummy module for holding place of modules in a runtime. + +use frame_support::{decl_module, decl_storage}; + +pub trait Trait: frame_system::Trait { } + +decl_module! { + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + } +} + +decl_storage! { + trait Store for Module, I: Instance = DefaultInstance> as Dummy { } +} diff --git a/runtime/common/src/impls.rs b/runtime/common/src/impls.rs index 543e2c739adb1cc1bf340f67b2e3170c88b740c6..111fc00e24a0ef6d6b1a74925109c0a9b65fd4e2 100644 --- a/runtime/common/src/impls.rs +++ b/runtime/common/src/impls.rs @@ -25,20 +25,20 @@ pub struct ToAuthor(sp_std::marker::PhantomData); impl OnUnbalanced> for ToAuthor where - R: balances::Trait + authorship::Trait, - ::AccountId: From, - ::AccountId: Into, - ::Event: From::AccountId, - ::Balance, - balances::DefaultInstance> + R: pallet_balances::Trait + pallet_authorship::Trait, + ::AccountId: From, + ::AccountId: Into, + ::Event: From::AccountId, + ::Balance, + pallet_balances::DefaultInstance> >, { fn on_nonzero_unbalanced(amount: NegativeImbalance) { let numeric_amount = amount.peek(); - let author = >::author(); - >::resolve_creating(&>::author(), amount); - >::deposit_event(balances::RawEvent::Deposit(author, numeric_amount)); + let author = >::author(); + >::resolve_creating(&>::author(), amount); + >::deposit_event(pallet_balances::RawEvent::Deposit(author, numeric_amount)); } } @@ -47,18 +47,18 @@ pub struct CurrencyToVoteHandler(sp_std::marker::PhantomData); impl CurrencyToVoteHandler where - R: balances::Trait, + R: pallet_balances::Trait, R::Balance: Into, { fn factor() -> u128 { - let issuance: u128 = >::total_issuance().into(); + let issuance: u128 = >::total_issuance().into(); (issuance / u64::max_value() as u128).max(1) } } impl Convert for CurrencyToVoteHandler where - R: balances::Trait, + R: pallet_balances::Trait, R::Balance: Into, { fn convert(x: u128) -> u64 { (x / Self::factor()) as u64 } @@ -66,7 +66,7 @@ where impl Convert for CurrencyToVoteHandler where - R: balances::Trait, + R: pallet_balances::Trait, R::Balance: Into, { fn convert(x: u128) -> u128 { x * Self::factor() } diff --git a/runtime/common/src/lib.rs b/runtime/common/src/lib.rs index 49a6dcced0d749a0d41a95633ebbea3152b4ff5f..50218d39d51e06554a5e3380b659ddce8715d472 100644 --- a/runtime/common/src/lib.rs +++ b/runtime/common/src/lib.rs @@ -18,38 +18,37 @@ #![cfg_attr(not(feature = 
"std"), no_std)] -pub mod attestations; pub mod claims; -pub mod parachains; pub mod slot_range; -pub mod registrar; pub mod slots; pub mod crowdfund; +pub mod purchase; pub mod impls; +pub mod paras_sudo_wrapper; -use primitives::BlockNumber; +pub mod dummy; + +use primitives::v1::{BlockNumber, ValidatorId}; use sp_runtime::{Perquintill, Perbill, FixedPointNumber, traits::Saturating}; use frame_support::{ parameter_types, traits::{Currency}, weights::{Weight, constants::WEIGHT_PER_SECOND}, }; -use transaction_payment::{TargetedFeeAdjustment, Multiplier}; +use pallet_transaction_payment::{TargetedFeeAdjustment, Multiplier}; use static_assertions::const_assert; pub use frame_support::weights::constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; #[cfg(feature = "std")] -pub use staking::StakerStatus; +pub use pallet_staking::StakerStatus; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; -pub use timestamp::Call as TimestampCall; -pub use balances::Call as BalancesCall; -pub use attestations::{Call as AttestationsCall, MORE_ATTESTATIONS_IDENTIFIER}; -pub use parachains::Call as ParachainsCall; +pub use pallet_timestamp::Call as TimestampCall; +pub use pallet_balances::Call as BalancesCall; /// Implementations of some helper traits passed into runtime modules as associated types. pub use impls::{CurrencyToVoteHandler, ToAuthor}; -pub type NegativeImbalance = as Currency<::AccountId>>::NegativeImbalance; +pub type NegativeImbalance = as Currency<::AccountId>>::NegativeImbalance; /// We assume that an on-initialize consumes 10% of the weight on average, hence a single extrinsic /// will not be allowed to consume more than `AvailableBlockRatio - 10%`. @@ -90,6 +89,35 @@ pub type SlowAdjustingFeeUpdate = TargetedFeeAdjustment< MinimumMultiplier >; +/// A placeholder since there is currently no provided session key handler for parachain validator +/// keys. 
+pub struct ParachainSessionKeyPlaceholder(sp_std::marker::PhantomData); +impl sp_runtime::BoundToRuntimeAppPublic for ParachainSessionKeyPlaceholder { + type Public = ValidatorId; +} + +impl + pallet_session::OneSessionHandler for ParachainSessionKeyPlaceholder +{ + type Key = ValidatorId; + + fn on_genesis_session<'a, I: 'a>(_validators: I) where + I: Iterator, + T::AccountId: 'a + { + + } + + fn on_new_session<'a, I: 'a>(_changed: bool, _v: I, _q: I) where + I: Iterator, + T::AccountId: 'a + { + + } + + fn on_disabled(_: usize) { } +} + #[cfg(test)] mod multiplier_tests { use super::*; @@ -116,7 +144,7 @@ mod multiplier_tests { pub const AvailableBlockRatio: Perbill = Perbill::one(); } - impl system::Trait for Runtime { + impl frame_system::Trait for Runtime { type BaseCallFilter = (); type Origin = Origin; type Index = u64; @@ -141,13 +169,14 @@ mod multiplier_tests { type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } - type System = system::Module; + type System = frame_system::Module; fn run_with_system_weight(w: Weight, assertions: F) where F: Fn() -> () { let mut t: sp_io::TestExternalities = - system::GenesisConfig::default().build_storage::().unwrap().into(); + frame_system::GenesisConfig::default().build_storage::().unwrap().into(); t.execute_with(|| { System::set_block_limits(w, 0); assertions() diff --git a/runtime/common/src/parachains.rs b/runtime/common/src/parachains.rs deleted file mode 100644 index ef67adb031f18c9c6e503d1c0dea7225e9f45769..0000000000000000000000000000000000000000 --- a/runtime/common/src/parachains.rs +++ /dev/null @@ -1,3514 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Main parachains logic. For now this is just the determination of which validators do what. 
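//!
//! Concretely (see `calculate_duty_roster` below): validators are split
//! evenly across the active parachains, and the remainder serve on the relay
//! chain. For example, with 10 validators and 3 parachains,
//!
//!     validators_per_parachain = (10 - 1) / 3 = 3
//!
//! so nine validators draw parachain duty and the tenth draws `Chain::Relay`.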
- -use sp_std::prelude::*; -use sp_std::result; -use codec::{Decode, Encode}; -use sp_runtime::{ - KeyTypeId, Perbill, RuntimeDebug, - traits::{ - Hash as HashT, BlakeTwo256, Saturating, One, Zero, Dispatchable, - AccountIdConversion, BadOrigin, Convert, SignedExtension, AppVerify, - DispatchInfoOf, - }, - transaction_validity::{TransactionValidityError, ValidTransaction, TransactionValidity}, -}; -use sp_staking::{ - SessionIndex, - offence::{ReportOffence, Offence, Kind}, -}; -use frame_support::{ - traits::KeyOwnerProofSystem, - dispatch::{IsSubType}, - weights::{DispatchClass, Weight}, -}; -use primitives::{ - Balance, - BlockNumber, - parachain::{ - Id as ParaId, Chain, DutyRoster, AttestedCandidate, CompactStatement as Statement, ParachainDispatchOrigin, - UpwardMessage, ValidatorId, ActiveParas, CollatorId, Retriable, OmittedValidationData, - CandidateReceipt, GlobalValidationSchedule, AbridgedCandidateReceipt, - LocalValidationData, Scheduling, ValidityAttestation, NEW_HEADS_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, - ValidatorSignature, SigningContext, HeadData, ValidationCode, - }, -}; -use frame_support::{ - Parameter, dispatch::DispatchResult, decl_storage, decl_module, decl_error, ensure, - traits::{Currency, Get, WithdrawReason, ExistenceRequirement, Randomness}, -}; -use sp_runtime::{ - transaction_validity::InvalidTransaction, -}; - -use inherents::{ProvideInherent, InherentData, MakeFatalError, InherentIdentifier}; - -use system::{ - ensure_none, ensure_signed, - offchain::{CreateSignedTransaction, SendSignedTransaction, Signer}, -}; -use crate::attestations::{self, IncludedBlocks}; -use crate::registrar::Registrar; - -// ranges for iteration of general block number don't work, so this -// is a utility to get around that. -struct BlockNumberRange { - low: N, - high: N, -} - -impl Iterator for BlockNumberRange { - type Item = N; - - fn next(&mut self) -> Option { - if self.low >= self.high { - return None - } - - let item = self.low.clone(); - self.low = self.low.clone().saturating_add(One::one()); - Some(item) - } -} - -// wrapper trait because an associated type of `Currency` -// doesn't work.` -pub trait ParachainCurrency { - fn free_balance(para_id: ParaId) -> Balance; - fn deduct(para_id: ParaId, amount: Balance) -> DispatchResult; -} - -impl> ParachainCurrency for T where - T::Balance: From + Into, - ParaId: AccountIdConversion, -{ - fn free_balance(para_id: ParaId) -> Balance { - let para_account = para_id.into_account(); - T::free_balance(¶_account).into() - } - - fn deduct(para_id: ParaId, amount: Balance) -> DispatchResult { - let para_account = para_id.into_account(); - - // burn the fee. - let _ = T::withdraw( - ¶_account, - amount.into(), - WithdrawReason::Fee.into(), - ExistenceRequirement::KeepAlive, - )?; - - Ok(()) - } -} - -/// Interface to the persistent (stash) identities of the current validators. -pub struct ValidatorIdentities(sp_std::marker::PhantomData); - -/// A structure used to report conflicting votes by validators. -/// -/// It is generic over two parameters: -/// `Proof` - proof of historical ownership of a key by some validator. -/// `Hash` - a type of a hash used in the runtime. -#[derive(RuntimeDebug, Encode, Decode)] -#[derive(Clone, Eq, PartialEq)] -pub struct DoubleVoteReport { - /// Identity of the double-voter. - pub identity: ValidatorId, - /// First vote of the double-vote. - pub first: (Statement, ValidatorSignature), - /// Second vote of the double-vote. 
- pub second: (Statement, ValidatorSignature), - /// Proof that the validator with `identity` id was actually a validator at `parent_hash`. - pub proof: Proof, - /// A `SigningContext` with a session and a parent hash of the moment this offence was commited. - pub signing_context: SigningContext, -} - -impl DoubleVoteReport { - fn verify>( - &self, - ) -> Result<(), DoubleVoteValidityError> { - let first = self.first.clone(); - let second = self.second.clone(); - let id = self.identity.clone(); - - T::KeyOwnerProofSystem::check_proof((PARACHAIN_KEY_TYPE_ID, id), self.proof.clone()) - .ok_or(DoubleVoteValidityError::InvalidProof)?; - - if self.proof.session() != self.signing_context.session_index { - return Err(DoubleVoteValidityError::InvalidReport); - } - - // Check signatures. - Self::verify_vote( - &first, - &self.signing_context, - &self.identity, - )?; - Self::verify_vote( - &second, - &self.signing_context, - &self.identity, - )?; - - match (&first.0, &second.0) { - // If issuing a `Candidate` message on a parachain block, neither a `Valid` or - // `Invalid` vote cannot be issued on that parachain block, as the `Candidate` - // message is an implicit validity vote. - (Statement::Candidate(candidate_hash), Statement::Valid(hash)) | - (Statement::Candidate(candidate_hash), Statement::Invalid(hash)) | - (Statement::Valid(hash), Statement::Candidate(candidate_hash)) | - (Statement::Invalid(hash), Statement::Candidate(candidate_hash)) - if *candidate_hash == *hash => {}, - // Otherwise, it is illegal to cast both a `Valid` and - // `Invalid` vote on a given parachain block. - (Statement::Valid(hash_1), Statement::Invalid(hash_2)) | - (Statement::Invalid(hash_1), Statement::Valid(hash_2)) - if *hash_1 == *hash_2 => {}, - _ => { - return Err(DoubleVoteValidityError::NotDoubleVote); - } - } - - Ok(()) - } - - fn verify_vote( - vote: &(Statement, ValidatorSignature), - signing_context: &SigningContext, - authority: &ValidatorId, - ) -> Result<(), DoubleVoteValidityError> { - let payload = localized_payload(vote.0.clone(), signing_context); - - if !vote.1.verify(&payload[..], authority) { - return Err(DoubleVoteValidityError::InvalidSignature); - } - - Ok(()) - } -} - -impl Get> for ValidatorIdentities { - fn get() -> Vec { - >::validators() - } -} - -/// A trait to get a session number the `MembershipProof` belongs to. -pub trait GetSessionNumber { - fn session(&self) -> SessionIndex; -} - -impl GetSessionNumber for sp_session::MembershipProof { - fn session(&self) -> SessionIndex { - self.session - } -} - -pub trait Trait: CreateSignedTransaction> + attestations::Trait + session::historical::Trait { - // The transaction signing authority - type AuthorityId: system::offchain::AppCrypto; - - /// The outer origin type. - type Origin: From + From>; - - /// The outer call dispatch type. - type Call: Parameter + Dispatchable::Origin> + From>; - - /// Some way of interacting with balances for fees. - type ParachainCurrency: ParachainCurrency; - - /// Polkadot in practice will always use the `BlockNumber` type. - /// Substrate isn't good at giving us ways to bound the supertrait - /// associated type, so we introduce this conversion. - type BlockNumberConversion: Convert; - - /// Something that provides randomness in the runtime. - type Randomness: Randomness; - - /// Means to determine what the current set of active parachains are. - type ActiveParachains: ActiveParas; - - /// The way that we are able to register parachains. 
- type Registrar: Registrar; - - /// Maximum code size for parachains, in bytes. Note that this is not - /// the entire storage burden of the parachain, as old code is stored for - /// `SlashPeriod` blocks. - type MaxCodeSize: Get; - - /// Max head data size. - type MaxHeadDataSize: Get; - /// The frequency at which paras can upgrade their validation function. - /// This is an integer number of relay-chain blocks that must pass between - /// code upgrades. - type ValidationUpgradeFrequency: Get; - - /// The delay before a validation function upgrade is applied. - type ValidationUpgradeDelay: Get; - - /// The period (in blocks) that slash reports are permitted against an - /// included candidate. - /// - /// After validation function upgrades, the old code is persisted on-chain - /// for this period, to ensure that candidates validated under old functions - /// can be re-checked. - type SlashPeriod: Get; - - /// Proof type. - /// - /// We need this type to bind the `KeyOwnerProofSystem::Proof` to necessary bounds. - /// As soon as https://rust-lang.github.io/rfcs/2289-associated-type-bounds.html - /// gets in this can be simplified. - type Proof: Parameter + GetSessionNumber; - - /// Compute and check proofs of historical key owners. - type KeyOwnerProofSystem: KeyOwnerProofSystem< - (KeyTypeId, ValidatorId), - Proof = Self::Proof, - IdentificationTuple = Self::IdentificationTuple, - >; - - /// An identification tuple type bound to `Parameter`. - type IdentificationTuple: Parameter; - - /// Report an offence. - type ReportOffence: ReportOffence< - Self::AccountId, - Self::IdentificationTuple, - DoubleVoteOffence, - >; - - /// A type that converts the opaque hash type to exact one. - type BlockHashConversion: Convert; -} - -/// Origin for the parachains module. -#[derive(PartialEq, Eq, Clone)] -#[cfg_attr(feature = "std", derive(Debug))] -pub enum Origin { - /// It comes from a parachain. - Parachain(ParaId), -} - -/// An offence that is filed if the validator has submitted a double vote. -#[derive(RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Clone, PartialEq, Eq))] -pub struct DoubleVoteOffence { - /// The current session index in which we report a validator. - session_index: SessionIndex, - /// The size of the validator set in current session/era. - validator_set_count: u32, - /// An offender that has submitted two conflicting votes. - offender: Offender, -} - -impl Offence for DoubleVoteOffence { - const ID: Kind = *b"para:double-vote"; - type TimeSlot = SessionIndex; - - fn offenders(&self) -> Vec { - vec![self.offender.clone()] - } - - fn session_index(&self) -> SessionIndex { - self.session_index - } - - fn validator_set_count(&self) -> u32 { - self.validator_set_count - } - - fn time_slot(&self) -> Self::TimeSlot { - self.session_index - } - - fn slash_fraction(_offenders_count: u32, _validator_set_count: u32) -> Perbill { - // Slash 100%. - Perbill::from_percent(100) - } -} - -/// Total number of individual messages allowed in the parachain -> relay-chain message queue. -const MAX_QUEUE_COUNT: usize = 100; -/// Total size of messages allowed in the parachain -> relay-chain message queue before which no -/// further messages may be added to it. If it exceeds this then the queue may contain only a -/// single message. -const WATERMARK_QUEUE_SIZE: usize = 20000; - -/// Metadata used to track previous parachain validation code that we keep in -/// the state. 
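///
/// A worked example of the `code_at` semantics below: with
/// `upgrade_times = [20, 10]` (most recent first) and `last_pruned = None`,
/// a parablock executed at relay height 5 validates against the code that was
/// replaced at block 10 (`ReplacedAt(10)`), one at height 15 against
/// `ReplacedAt(20)`, and one at height 25 against `Current`.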
-#[derive(Default, Encode, Decode)] -#[cfg_attr(test, derive(Debug, Clone, PartialEq))] -pub struct ParaPastCodeMeta { - // Block numbers where the code was replaced. These can be used as indices - // into the `PastCode` map along with the `ParaId` to fetch the code itself. - upgrade_times: Vec, - // This tracks the highest pruned code-replacement, if any. - last_pruned: Option, -} - -#[cfg_attr(test, derive(Debug, PartialEq))] -enum UseCodeAt { - // Use the current code. - Current, - // Use the code that was replaced at the given block number. - ReplacedAt(N), -} - -impl ParaPastCodeMeta { - // note a replacement has occurred at a given block number. - fn note_replacement(&mut self, at: N) { - self.upgrade_times.insert(0, at) - } - - // Yields the block number of the code that should be used for validating at - // the given block number. - // - // a return value of `None` means that there is no code we are aware of that - // should be used to validate at the given height. - fn code_at(&self, at: N) -> Option> { - // The `PastCode` map stores the code which was replaced at `t`. - let end_position = self.upgrade_times.iter().position(|&t| t < at); - if let Some(end_position) = end_position { - Some(if end_position != 0 { - // `end_position` gives us the replacement time where the code used at `at` - // was set. But that code has been replaced: `end_position - 1` yields - // that index. - UseCodeAt::ReplacedAt(self.upgrade_times[end_position - 1]) - } else { - // the most recent tracked replacement is before `at`. - // this means that the code put in place then (i.e. the current code) - // is correct for validating at `at`. - UseCodeAt::Current - }) - } else { - if self.last_pruned.as_ref().map_or(true, |&n| n < at) { - // Our `last_pruned` is before `at`, so we still have the code! - // but no code upgrade entries found before the `at` parameter. - // - // this means one of two things is true: - // 1. there are no non-pruned upgrade logs. in this case use `Current` - // 2. there are non-pruned upgrade logs all after `at`. - // in this case use the oldest upgrade log. - Some(self.upgrade_times.last() - .map(|n| UseCodeAt::ReplacedAt(*n)) - .unwrap_or(UseCodeAt::Current) - ) - } else { - // We don't have the code anymore. - None - } - } - } - - // The block at which the most recently tracked code change occurred. - fn most_recent_change(&self) -> Option { - self.upgrade_times.first().map(|x| x.clone()) - } - - // prunes all code upgrade logs occurring at or before `max`. - // note that code replaced at `x` is the code used to validate all blocks before - // `x`. Thus, `max` should be outside of the slashing window when this is invoked. - // - // returns an iterator of block numbers at which code was replaced, where the replaced - // code should be now pruned, in ascending order. - fn prune_up_to(&'_ mut self, max: N) -> impl Iterator + '_ { - match self.upgrade_times.iter().position(|&t| t <= max) { - None => { - // this is a no-op `drain` - desired because all - // logged code upgrades occurred after `max`. - self.upgrade_times.drain(self.upgrade_times.len()..).rev() - } - Some(pos) => { - self.last_pruned = Some(self.upgrade_times[pos]); - self.upgrade_times.drain(pos..).rev() - } - } - } -} - -decl_storage! { - trait Store for Module as Parachains - { - /// All authorities' keys at the moment. - pub Authorities get(fn authorities): Vec; - /// The active code of a currently-registered parachain. 
- pub Code get(fn parachain_code): map hasher(twox_64_concat) ParaId => Option; - /// Past code of parachains. The parachains themselves may not be registered anymore, - /// but we also keep their code on-chain for the same amount of time as outdated code - /// to assist with availability. - PastCodeMeta get(fn past_code_meta): map hasher(twox_64_concat) ParaId => ParaPastCodeMeta; - /// Actual past code, indicated by the parachain and the block number at which it - /// became outdated. - PastCode: map hasher(twox_64_concat) (ParaId, T::BlockNumber) => Option; - /// Past code pruning, in order of priority. - PastCodePruning get(fn past_code_pruning_tasks): Vec<(ParaId, T::BlockNumber)>; - // The block number at which the planned code change is expected for a para. - // The change will be applied after the first parablock for this ID included which executes - // in the context of a relay chain block with a number >= `expected_at`. - FutureCodeUpgrades get(fn code_upgrade_schedule): map hasher(twox_64_concat) ParaId => Option; - // The actual future code of a para. - FutureCode: map hasher(twox_64_concat) ParaId => ValidationCode; - - /// The heads of the parachains registered at present. - pub Heads get(fn parachain_head): map hasher(twox_64_concat) ParaId => Option; - /// Messages ready to be dispatched onto the relay chain. It is subject to - /// `MAX_MESSAGE_COUNT` and `WATERMARK_MESSAGE_SIZE`. - pub RelayDispatchQueue: map hasher(twox_64_concat) ParaId => Vec; - /// Size of the dispatch queues. Separated from actual data in order to avoid costly - /// decoding when checking receipt validity. First item in tuple is the count of messages - /// second if the total length (in bytes) of the message payloads. - pub RelayDispatchQueueSize: map hasher(twox_64_concat) ParaId => (u32, u32); - /// The ordered list of ParaIds that have a `RelayDispatchQueue` entry. - NeedsDispatch: Vec; - - /// `Some` if the parachain heads get updated in this block, along with the parachain IDs - /// that did update. Ordered in the same way as `registrar::Active` (i.e. by ParaId). - /// - /// `None` if not yet updated. - pub DidUpdate: Option>; - } - add_extra_genesis { - config(authorities): Vec; - build(|config| Module::::initialize_authorities(&config.authorities)) - } -} - -decl_error! { - pub enum Error for Module { - /// Parachain heads must be updated only once in the block. - TooManyHeadUpdates, - /// Too many parachain candidates. - TooManyParaCandidates, - /// Proposed heads must be ascending order by parachain ID without duplicate. - HeadsOutOfOrder, - /// Candidate is for an unregistered parachain. - UnregisteredPara, - /// Invalid collator. - InvalidCollator, - /// The message queue is full. Messages will be added when there is space. - QueueFull, - /// The message origin is invalid. - InvalidMessageOrigin, - /// No validator group for parachain. - NoValidatorGroup, - /// Not enough validity votes for candidate. - NotEnoughValidityVotes, - /// The number of attestations exceeds the number of authorities. - VotesExceedsAuthorities, - /// Attesting validator not on this chain's validation duty. - WrongValidatorAttesting, - /// Invalid signature from attester. - InvalidSignature, - /// Extra untagged validity votes along with candidate. - UntaggedVotes, - /// Wrong parent head for parachain receipt. - ParentMismatch, - /// Head data was too large. - HeadDataTooLarge, - /// New validation code was too large. - ValidationCodeTooLarge, - /// Disallowed code upgrade. 
- DisallowedCodeUpgrade, - /// Para does not have enough balance to pay fees. - CannotPayFees, - /// Unexpected relay-parent for a candidate receipt. - UnexpectedRelayParent, - } -} - -decl_module! { - /// Parachains module. - pub struct Module for enum Call where origin: ::Origin { - type Error = Error; - - fn on_initialize(now: T::BlockNumber) -> Weight { - ::DidUpdate::kill(); - - Self::do_old_code_pruning(now); - - // TODO https://github.com/paritytech/polkadot/issues/977: set correctly - 0 - } - - fn on_finalize() { - assert!(::DidUpdate::exists(), "Parachain heads must be updated once in the block"); - } - - /// Provide candidate receipts for parachains, in ascending order by id. - #[weight = (1_000_000_000, DispatchClass::Mandatory)] - pub fn set_heads(origin, heads: Vec) -> DispatchResult { - ensure_none(origin)?; - ensure!(!::exists(), Error::::TooManyHeadUpdates); - - let active_parachains = Self::active_parachains(); - - let parachain_count = active_parachains.len(); - ensure!(heads.len() <= parachain_count, Error::::TooManyParaCandidates); - - let mut proceeded = Vec::with_capacity(heads.len()); - - let schedule = Self::global_validation_schedule(); - - if !active_parachains.is_empty() { - // perform integrity checks before writing to storage. - { - let mut last_id = None; - - let mut iter = active_parachains.iter(); - for head in &heads { - let id = head.parachain_index(); - // proposed heads must be ascending order by parachain ID without duplicate. - ensure!( - last_id.as_ref().map_or(true, |x| x < &id), - Error::::HeadsOutOfOrder - ); - - // must be unknown since active parachains are always sorted. - let (_, maybe_required_collator) = iter.find(|para| para.0 == id) - .ok_or(Error::::UnregisteredPara)?; - - if let Some((required_collator, _)) = maybe_required_collator { - ensure!(required_collator == &head.candidate.collator, Error::::InvalidCollator); - } - - Self::check_upward_messages( - id, - &head.candidate.commitments.upward_messages, - MAX_QUEUE_COUNT, - WATERMARK_QUEUE_SIZE, - )?; - - let id = head.parachain_index(); - proceeded.push(id); - last_id = Some(id); - } - } - - let para_blocks = Self::check_candidates( - &schedule, - &heads, - &active_parachains, - )?; - - >::note_included(&heads, para_blocks); - - Self::update_routing( - &heads, - ); - - // note: we dispatch new messages _after_ the call to `check_candidates` - // which deducts any fees. if that were not the case, an upward message - // could be dispatched and spend money that invalidated a candidate. - Self::dispatch_upward_messages( - MAX_QUEUE_COUNT, - WATERMARK_QUEUE_SIZE, - Self::dispatch_message, - ); - } - - DidUpdate::put(proceeded); - - Ok(()) - } - - /// Provide a proof that some validator has commited a double-vote. - /// - /// The weight is 0; in order to avoid DoS a `SignedExtension` validation - /// is implemented. - #[weight = 0] - pub fn report_double_vote( - origin, - report: DoubleVoteReport< - >::Proof, - >, - ) -> DispatchResult { - let reporter = ensure_signed(origin)?; - - let validators = >::validators(); - let validator_set_count = validators.len() as u32; - - let session_index = report.proof.session(); - let DoubleVoteReport { identity, proof, .. } = report; - - // We have already checked this proof in `SignedExtension`, but we need - // this here to get the full identification of the offender. 
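// (Under the `pallet-session` historical scheme, `check_proof` resolves the
// session key to `T::IdentificationTuple`, typically the validator ID plus
// its full staking exposure, which is what the offence report below
// slashes against.)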
- let offender = T::KeyOwnerProofSystem::check_proof( - (PARACHAIN_KEY_TYPE_ID, identity), - proof, - ).ok_or("Invalid/outdated key ownership proof.")?; - - let offence = DoubleVoteOffence { - session_index, - validator_set_count, - offender, - }; - - // Checks if this is actually a double vote are - // implemented in `ValidateDoubleVoteReports::validete`. - T::ReportOffence::report_offence(vec![reporter], offence) - .map_err(|_| "Failed to report offence")?; - - Ok(()) - } - } -} - -fn majority_of(list_len: usize) -> usize { - list_len / 2 + list_len % 2 -} - -fn localized_payload( - statement: Statement, - signing_context: &SigningContext, -) -> Vec { - let mut encoded = statement.encode(); - signing_context.using_encoded(|s| encoded.extend(s)); - encoded -} - -impl Module { - /// Initialize the state of a new parachain/parathread. - pub fn initialize_para( - id: ParaId, - code: ValidationCode, - initial_head_data: HeadData, - ) { - ::insert(id, code); - ::insert(id, initial_head_data); - } - - /// Cleanup all storage related to a para. Some pieces of data may remain - /// available in the on-chain state. - pub fn cleanup_para( - id: ParaId, - ) { - let code = ::take(id); - ::remove(id); - - // clean up from all code-upgrade maps. - // we don't clean up the meta or planned-code maps as that's handled - // by the pruning process. - if let Some(_planned_future_at) = ::FutureCodeUpgrades::take(&id) { - ::FutureCode::remove(&id); - } - - if let Some(code) = code { - Self::note_past_code(id, >::block_number(), code); - } - } - - // note replacement of the code of para with given `id`, which occured in the - // context of the given relay-chain block number. provide the replaced code. - // - // `at` for para-triggered replacement is the block number of the relay-chain - // block in whose context the parablock was executed - // (i.e. number of `relay_parent` in the receipt) - fn note_past_code(id: ParaId, at: T::BlockNumber, old_code: ValidationCode) { - ::PastCodeMeta::mutate(&id, |past_meta| { - past_meta.note_replacement(at); - }); - - ::PastCode::insert(&(id, at), old_code); - - // Schedule pruning for this past-code to be removed as soon as it - // exits the slashing window. - ::PastCodePruning::mutate(|pruning| { - let insert_idx = pruning.binary_search_by_key(&at, |&(_, b)| b) - .unwrap_or_else(|idx| idx); - pruning.insert(insert_idx, (id, at)); - }) - } - - // does old code pruning. - fn do_old_code_pruning(now: T::BlockNumber) { - let slash_period = T::SlashPeriod::get(); - if now <= slash_period { return } - - // The height of any changes we no longer should keep around. - let pruning_height = now - (slash_period + One::one()); - - ::PastCodePruning::mutate(|pruning_tasks: &mut Vec<(_, T::BlockNumber)>| { - let pruning_tasks_to_do = { - // find all past code that has just exited the pruning window. - let up_to_idx = pruning_tasks.iter() - .take_while(|&(_, at)| at <= &pruning_height) - .count(); - pruning_tasks.drain(..up_to_idx) - }; - - for (para_id, _) in pruning_tasks_to_do { - let full_deactivate = ::PastCodeMeta::mutate(¶_id, |meta| { - for pruned_repl_at in meta.prune_up_to(pruning_height) { - ::PastCode::remove(&(para_id, pruned_repl_at)); - } - - meta.most_recent_change().is_none() && Self::parachain_head(¶_id).is_none() - }); - - // This parachain has been removed and now the vestigial code - // has been removed from the state. clean up meta as well. - if full_deactivate { - ::PastCodeMeta::remove(¶_id); - } - } - }); - } - - // Performs a code upgrade of a parachain. 
- fn do_code_upgrade(id: ParaId, at: T::BlockNumber, new_code: &ValidationCode) { - let old_code = Self::parachain_code(&id).unwrap_or_default(); - Code::insert(&id, new_code); - - Self::note_past_code(id, at, old_code); - } - - /// Get a `SigningContext` with a current `SessionIndex` and parent hash. - pub fn signing_context() -> SigningContext { - let session_index = >::current_index(); - let parent_hash = >::parent_hash(); - - SigningContext { - session_index, - parent_hash: T::BlockHashConversion::convert(parent_hash), - } - } - - /// Submit a double vote report. - pub fn submit_double_vote_report( - report: DoubleVoteReport, - ) -> Option<()> { - Signer::::all_accounts() - .send_signed_transaction( - move |_account| { - Call::report_double_vote(report.clone()) - } - ) - .iter() - .find_map(|(_, res)| res.ok().map(|_| ())) - } - - /// Dispatch some messages from a parachain. - fn dispatch_message( - id: ParaId, - origin: ParachainDispatchOrigin, - data: &[u8], - ) { - if let Ok(message_call) = ::Call::decode(&mut &data[..]) { - let origin: ::Origin = match origin { - ParachainDispatchOrigin::Signed => - system::RawOrigin::Signed(id.into_account()).into(), - ParachainDispatchOrigin::Parachain => - Origin::Parachain(id).into(), - ParachainDispatchOrigin::Root => - system::RawOrigin::Root.into(), - }; - let _ok = message_call.dispatch(origin).is_ok(); - // Not much to do with the result as it is. It's up to the parachain to ensure that the - // message makes sense. - } - } - - /// Ensure all is well with the upward messages. - fn check_upward_messages( - id: ParaId, - upward_messages: &[UpwardMessage], - max_queue_count: usize, - watermark_queue_size: usize, - ) -> DispatchResult { - // Either there are no more messages to add... - if !upward_messages.is_empty() { - let (count, size) = ::get(id); - ensure!( - // ...or we are appending one message onto an empty queue... - upward_messages.len() + count as usize == 1 - // ...or... - || ( - // ...the total messages in the queue ends up being no greater than the - // limit... - upward_messages.len() + count as usize <= max_queue_count - && - // ...and the total size of the payloads in the queue ends up being no - // greater than the limit. - upward_messages.iter() - .fold(size as usize, |a, x| a + x.data.len()) - <= watermark_queue_size - ), - Error::::QueueFull - ); - if !id.is_system() { - for m in upward_messages.iter() { - ensure!(m.origin != ParachainDispatchOrigin::Root, Error::::InvalidMessageOrigin); - } - } - } - Ok(()) - } - - /// Update routing information from the parachain heads. This queues upwards - /// messages to the relay chain as well. - fn update_routing( - heads: &[AttestedCandidate], - ) { - // we sort them in order to provide a fast lookup to ensure we can avoid duplicates in the - // needs_dispatch queue. - let mut ordered_needs_dispatch = NeedsDispatch::get(); - - for head in heads.iter() { - let id = head.parachain_index(); - Heads::insert(id, &head.candidate.head_data); - - // Queue up upwards messages (from parachains to relay chain). - Self::queue_upward_messages( - id, - &head.candidate.commitments.upward_messages, - &mut ordered_needs_dispatch, - ); - } - - NeedsDispatch::put(ordered_needs_dispatch); - } - - /// Place any new upward messages into our queue for later dispatch. - /// - /// `ordered_needs_dispatch` is mutated to ensure it reflects the new value of - /// `RelayDispatchQueueSize`. It is up to the caller to guarantee that it gets written into - /// storage after this call. 
-	/// Place any new upward messages into our queue for later dispatch.
-	///
-	/// `ordered_needs_dispatch` is mutated to ensure it reflects the new value of
-	/// `RelayDispatchQueueSize`. It is up to the caller to guarantee that it gets written into
-	/// storage after this call.
-	fn queue_upward_messages(
-		id: ParaId,
-		upward_messages: &[UpwardMessage],
-		ordered_needs_dispatch: &mut Vec<ParaId>,
-	) {
-		if !upward_messages.is_empty() {
-			RelayDispatchQueueSize::mutate(id, |&mut(ref mut count, ref mut len)| {
-				*count += upward_messages.len() as u32;
-				*len += upward_messages.iter()
-					.fold(0, |a, x| a + x.data.len()) as u32;
-			});
-
-			upward_messages.iter().for_each(|m| RelayDispatchQueue::append(id, m));
-
-			if let Err(i) = ordered_needs_dispatch.binary_search(&id) {
-				// same.
-				ordered_needs_dispatch.insert(i, id);
-			} else {
-				sp_runtime::print("ordered_needs_dispatch contains id?!");
-			}
-		}
-	}
-
-	/// Simple FIFO dispatcher. This must be called after parachain fees are checked,
-	/// as dispatched messages may spend parachain funds.
-	fn dispatch_upward_messages(
-		max_queue_count: usize,
-		watermark_queue_size: usize,
-		mut dispatch_message: impl FnMut(ParaId, ParachainDispatchOrigin, &[u8]),
-	) {
-		let queueds = NeedsDispatch::get();
-		let mut drained_count = 0usize;
-		let mut dispatched_count = 0usize;
-		let mut dispatched_size = 0usize;
-		for id in queueds.iter() {
-			drained_count += 1;
-
-			let (count, size) = <RelayDispatchQueueSize>::get(id);
-			let count = count as usize;
-			let size = size as usize;
-			if dispatched_count == 0 || (
-				dispatched_count + count <= max_queue_count
-					&& dispatched_size + size <= watermark_queue_size
-			) {
-				if count > 0 {
-					// still dispatching messages...
-					RelayDispatchQueueSize::remove(id);
-					let messages = RelayDispatchQueue::take(id);
-					for UpwardMessage { origin, data } in messages.into_iter() {
-						dispatch_message(*id, origin, &data);
-					}
-					dispatched_count += count;
-					dispatched_size += size;
-					if dispatched_count >= max_queue_count
-						|| dispatched_size >= watermark_queue_size
-					{
-						break
-					}
-				}
-			}
-		}
-		NeedsDispatch::put(&queueds[drained_count..]);
-	}
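A sketch of the FIFO semantics, taken from the `check_dispatch_upward_works` test below: with per-block limits of 2 messages / 3 bytes and 4-byte queues for paras 0 and 1, only para 0 is drained (the first non-empty queue is always dispatched, even oversize), and para 1 waits for a later block:

    let mut dispatched: Vec<(ParaId, ParachainDispatchOrigin, Vec<u8>)> = vec![];
    let dummy = |id, origin, data: &[u8]| dispatched.push((id, origin, data.to_vec()));
    Parachains::dispatch_upward_messages(2, 3, dummy);

    // para 0's queue was drained; para 1's single message remains queued.
    assert!(<RelayDispatchQueue>::get(ParaId::from(0)).is_empty());
    assert_eq!(<RelayDispatchQueue>::get(ParaId::from(1)).len(), 1);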
-	/// Calculate the current block's duty roster using system's random seed.
-	/// Returns the duty roster along with the random seed.
-	pub fn calculate_duty_roster() -> (DutyRoster, [u8; 32]) {
-		let parachains = Self::active_parachains();
-		let parachain_count = parachains.len();
-
-		// TODO: use decode length. substrate #2794
-		let validator_count = Self::authorities().len();
-		let validators_per_parachain =
-			if parachain_count == 0 {
-				0
-			} else {
-				(validator_count - 1) / parachain_count
-			};
-
-		let mut roles_val = (0..validator_count).map(|i| match i {
-			i if i < parachain_count * validators_per_parachain => {
-				let idx = i / validators_per_parachain;
-				Chain::Parachain(parachains[idx].0.clone())
-			}
-			_ => Chain::Relay,
-		}).collect::<Vec<_>>();
-
-		let mut seed = {
-			let phrase = b"validator_role_pairs";
-			let seed = T::Randomness::random(&phrase[..]);
-			let seed_len = seed.as_ref().len();
-			let needed_bytes = validator_count * 4;
-
-			// hash only the needed bits of the random seed.
-			// if earlier bits are influenceable, they will not factor into
-			// the seed used here.
-			let seed_off = if needed_bytes >= seed_len {
-				0
-			} else {
-				seed_len - needed_bytes
-			};
-
-			BlakeTwo256::hash(&seed.as_ref()[seed_off..])
-		};
-
-		let orig_seed = seed.clone().to_fixed_bytes();
-
-		// shuffle
-		for i in 0..(validator_count.saturating_sub(1)) {
-			// 4 bytes of entropy used per cycle, 32 bytes entropy per hash
-			let offset = (i * 4 % 32) as usize;
-
-			// number of roles remaining to select from.
-			let remaining = sp_std::cmp::max(1, (validator_count - i) as usize);
-
-			// 8 32-bit ints per 256-bit seed.
-			let val_index = u32::decode(&mut &seed[offset..offset + 4])
-				.expect("using 4 bytes for a 32-bit quantity") as usize % remaining;
-
-			if offset == 28 {
-				// into the last 4 bytes - rehash to gather new entropy
-				seed = BlakeTwo256::hash(seed.as_ref());
-			}
-
-			// exchange last item with randomly chosen first.
-			roles_val.swap(remaining - 1, val_index);
-		}
-
-		(DutyRoster { validator_duty: roles_val, }, orig_seed)
-	}
-
-	/// Get the global validation schedule for all parachains.
-	pub fn global_validation_schedule() -> GlobalValidationSchedule {
-		let now = <system::Module<T>>::block_number();
-		GlobalValidationSchedule {
-			max_code_size: T::MaxCodeSize::get(),
-			max_head_data_size: T::MaxHeadDataSize::get(),
-			block_number: T::BlockNumberConversion::convert(if now.is_zero() {
-				now
-			} else {
-				// parablocks included in this block will execute in the context
-				// of the current block's parent.
-				now - One::one()
-			}),
-		}
-	}
-
-	/// Get the local validation schedule for a particular parachain.
-	pub fn local_validation_data(id: &ParaId, perceived_height: T::BlockNumber) -> Option<LocalValidationData> {
-		if perceived_height + One::one() != <system::Module<T>>::block_number() {
-			// sanity-check - no non-direct-parent blocks allowed at the moment.
-			return None
-		}
-
-		let code_upgrade_allowed: Option<BlockNumber> = (|| {
-			match T::Registrar::para_info(*id)?.scheduling {
-				Scheduling::Always => {},
-				Scheduling::Dynamic => return None, // parathreads can't upgrade code.
-			}
-
-			// if perceived-height were not the parent of `now`, then this should
-			// not be drawn from current-runtime configuration. however the sanity-check
-			// above prevents that.
-			let min_upgrade_frequency = T::ValidationUpgradeFrequency::get();
-			let upgrade_delay = T::ValidationUpgradeDelay::get();
-
-			let no_planned = Self::code_upgrade_schedule(id)
-				.map_or(true, |expected: T::BlockNumber| expected <= perceived_height);
-
-			let can_upgrade_code = no_planned &&
-				Self::past_code_meta(id).most_recent_change()
-					.map_or(true, |at| at + min_upgrade_frequency < perceived_height);
-
-			if can_upgrade_code {
-				let applied_at = perceived_height + upgrade_delay;
-				Some(T::BlockNumberConversion::convert(applied_at))
-			} else {
-				None
-			}
-		})();
-
-		Self::parachain_head(id).map(|parent_head| LocalValidationData {
-			parent_head,
-			balance: T::ParachainCurrency::free_balance(*id),
-			code_upgrade_allowed,
-		})
-	}
-
-	/// Get the local validation data for a particular parent w.r.t. the current
-	/// block height.
-	pub fn current_local_validation_data(id: &ParaId) -> Option<LocalValidationData> {
-		let now: T::BlockNumber = <system::Module<T>>::block_number();
-		if now >= One::one() {
-			Self::local_validation_data(id, now - One::one())
-		} else {
-			None
-		}
-	}
-
-	/// Fetch the code used for verifying a parachain at a particular height.
-	pub fn parachain_code_at(id: &ParaId, at: T::BlockNumber) -> Option<ValidationCode> {
-		// note - we don't check that the parachain is currently registered
-		// as this might be a deregistered parachain whose old code should still
-		// stick around on-chain for some time.
-		Self::past_code_meta(id).code_at(at).and_then(|to_use| match to_use {
-			UseCodeAt::Current => Self::parachain_code(id),
-			UseCodeAt::ReplacedAt(replaced_at) =>
-				<Self as Store>::PastCode::get(&(*id, replaced_at)),
-		})
-	}
-
-	/// Get the currently active set of parachains.
-	pub fn active_parachains() -> Vec<(ParaId, Option<(CollatorId, Retriable)>)> {
-		T::ActiveParachains::active_paras()
-	}
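Two reference points for the attestation checking that follows. The vote threshold is `majority_of(group.len())`, e.g. `majority_of(3) == 2` and `majority_of(5) == 3`; and the signed payload is built the same way the `make_attestations` test helper below builds it (`candidate_hash` and `key` stand in for a para-block hash and a validator keypair):

    let signing_context = Parachains::signing_context();
    let payload = localized_payload(Statement::Valid(candidate_hash), &signing_context);
    let signature = key.sign(&payload[..]).into();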
-	// check the attestations on these candidates. The candidates should have been checked
-	// that each candidate's chain ID is valid.
-	fn check_candidates(
-		schedule: &GlobalValidationSchedule,
-		attested_candidates: &[AttestedCandidate],
-		active_parachains: &[(ParaId, Option<(CollatorId, Retriable)>)]
-	) -> sp_std::result::Result<IncludedBlocks<T>, sp_runtime::DispatchError>
-	{
-		// returns groups of slices that have the same chain ID.
-		// assumes the inner slice is sorted by id.
-		struct GroupedDutyIter<'a> {
-			next_idx: usize,
-			inner: &'a [(usize, ParaId)],
-		}
-
-		impl<'a> GroupedDutyIter<'a> {
-			fn new(inner: &'a [(usize, ParaId)]) -> Self {
-				GroupedDutyIter { next_idx: 0, inner }
-			}
-
-			fn group_for(&mut self, wanted_id: ParaId) -> Option<&'a [(usize, ParaId)]> {
-				while let Some((id, keys)) = self.next() {
-					if wanted_id == id {
-						return Some(keys)
-					}
-				}
-
-				None
-			}
-		}
-
-		impl<'a> Iterator for GroupedDutyIter<'a> {
-			type Item = (ParaId, &'a [(usize, ParaId)]);
-
-			fn next(&mut self) -> Option<Self::Item> {
-				if self.next_idx == self.inner.len() { return None }
-				let start_idx = self.next_idx;
-				self.next_idx += 1;
-				let start_id = self.inner[start_idx].1;
-
-				while self.inner.get(self.next_idx).map_or(false, |&(_, ref id)| id == &start_id) {
-					self.next_idx += 1;
-				}
-
-				Some((start_id, &self.inner[start_idx..self.next_idx]))
-			}
-		}
-
-		let authorities = Self::authorities();
-		let (duty_roster, random_seed) = Self::calculate_duty_roster();
-
-		// convert a duty roster, which is originally a Vec<Chain>, where each
-		// item corresponds to the same position in the session keys, into
-		// a list containing (index, parachain duty) where indices are into the session keys.
-		// this list is sorted ascending by parachain duty, just like the
-		// parachain candidates are.
-		let make_sorted_duties = |duty: &[Chain]| {
-			let mut sorted_duties = Vec::with_capacity(duty.len());
-			for (val_idx, duty) in duty.iter().enumerate() {
-				let id = match duty {
-					Chain::Relay => continue,
-					Chain::Parachain(id) => id,
-				};
-
-				let idx = sorted_duties.binary_search_by_key(&id, |&(_, ref id)| id)
-					.unwrap_or_else(|idx| idx);
-
-				sorted_duties.insert(idx, (val_idx, *id));
-			}
-
-			sorted_duties
-		};
-
-		// computes the omitted validation data for a particular parachain.
-		//
-		// pass the perceived relay chain height of the para-block. This is the block number of
-		// `abridged.relay_parent`.
-		let full_candidate = |
-			abridged: &AbridgedCandidateReceipt,
-			perceived_height: T::BlockNumber,
-		|
-			-> sp_std::result::Result<CandidateReceipt, sp_runtime::DispatchError>
-		{
-			let para_id = abridged.parachain_index;
-			let local_validation = Self::local_validation_data(&para_id, perceived_height)
-				.ok_or(Error::<T>::ParentMismatch)?;
-
-			let omitted = OmittedValidationData {
-				global_validation: schedule.clone(),
-				local_validation,
-			};
-
-			Ok(abridged.clone().complete(omitted))
-		};
-
-		let sorted_validators = make_sorted_duties(&duty_roster.validator_duty);
-
-		let relay_height_now = <system::Module<T>>::block_number();
-		let parent_hash = <system::Module<T>>::parent_hash();
-		let signing_context = Self::signing_context();
-		let localized_payload = |statement: Statement| localized_payload(statement, &signing_context);
-		let code_upgrade_delay = T::ValidationUpgradeDelay::get();
-
-		let mut validator_groups = GroupedDutyIter::new(&sorted_validators[..]);
-
-		let mut para_block_hashes = Vec::new();
-		for candidate in attested_candidates {
-			let para_id = candidate.parachain_index();
-			let validator_group = validator_groups.group_for(para_id)
-				.ok_or(Error::<T>::NoValidatorGroup)?;
-
-			// NOTE: when changing this to allow older blocks,
-			// care must be taken in the availability store pruning to ensure that
-			// data is stored correctly. A block containing a candidate C can be
-			// orphaned before a block containing C is finalized. Care must be taken
-			// not to prune the data for C simply because an orphaned block contained
-			// it.
-
-			ensure!(
-				candidate.candidate().relay_parent.as_ref() == parent_hash.as_ref(),
-				Error::<T>::UnexpectedRelayParent,
-			);
-
-			// Since we only allow execution in context of parent hash.
-			let perceived_relay_block_height = <system::Module<T>>::block_number() - One::one();
-
-			ensure!(
-				candidate.validity_votes.len() >= majority_of(validator_group.len()),
-				Error::<T>::NotEnoughValidityVotes,
-			);
-
-			ensure!(
-				candidate.validity_votes.len() <= authorities.len(),
-				Error::<T>::VotesExceedsAuthorities,
-			);
-
-			ensure!(
-				schedule.max_head_data_size as usize >= candidate.candidate().head_data.0.len(),
-				Error::<T>::HeadDataTooLarge,
-			);
-
-			let full_candidate = full_candidate(
-				candidate.candidate(),
-				perceived_relay_block_height,
-			)?;
-
-			// apply any scheduled code upgrade.
-			if let Some(expected_at) = Self::code_upgrade_schedule(&para_id) {
-				if expected_at <= perceived_relay_block_height {
-					let new_code = FutureCode::take(&para_id);
-					<Self as Store>::FutureCodeUpgrades::remove(&para_id);
-
-					Self::do_code_upgrade(para_id, perceived_relay_block_height, &new_code);
-				}
-			}
-
-			if let Some(ref new_code) = full_candidate.commitments.new_validation_code {
-				ensure!(
-					full_candidate.local_validation.code_upgrade_allowed.is_some(),
-					Error::<T>::DisallowedCodeUpgrade,
-				);
-				ensure!(
-					schedule.max_code_size >= new_code.0.len() as u32,
-					Error::<T>::ValidationCodeTooLarge,
-				);
-
-				if code_upgrade_delay.is_zero() {
-					Self::do_code_upgrade(para_id, perceived_relay_block_height, new_code);
-				} else {
-					<Self as Store>::FutureCodeUpgrades::insert(
-						&para_id,
-						&(perceived_relay_block_height + code_upgrade_delay),
-					);
-					FutureCode::insert(
-						&para_id,
-						new_code,
-					);
-				}
-			}
-
-			let fees = full_candidate.commitments.fees;
-
-			ensure!(
-				full_candidate.local_validation.balance >= full_candidate.commitments.fees,
-				Error::<T>::CannotPayFees,
-			);
-
-			T::ParachainCurrency::deduct(para_id, fees)?;
-
-			let candidate_hash = candidate.candidate().hash();
-			let mut encoded_implicit = None;
-			let mut encoded_explicit = None;
-
-			let mut expected_votes_len = 0;
-			for (vote_index, (auth_index, _)) in candidate.validator_indices
-				.iter()
-				.enumerate()
-				.filter(|(_, bit)| **bit)
-				.enumerate()
-			{
-				let validity_attestation = match candidate.validity_votes.get(vote_index) {
-					None => Err(Error::<T>::NotEnoughValidityVotes)?,
-					Some(v) => {
-						expected_votes_len = vote_index + 1;
-						v
-					}
-				};
-
-				if validator_group.iter().find(|&(idx, _)| *idx == auth_index).is_none() {
-					Err(Error::<T>::WrongValidatorAttesting)?
-				}
-
-				let (payload, sig) = match validity_attestation {
-					ValidityAttestation::Implicit(sig) => {
-						let payload = encoded_implicit.get_or_insert_with(|| localized_payload(
-							Statement::Candidate(candidate_hash),
-						));
-
-						(payload, sig)
-					}
-					ValidityAttestation::Explicit(sig) => {
-						let payload = encoded_explicit.get_or_insert_with(|| localized_payload(
-							Statement::Valid(candidate_hash),
-						));
-
-						(payload, sig)
-					}
-				};
-
-				ensure!(
-					sig.verify(&payload[..], &authorities[auth_index]),
-					Error::<T>::InvalidSignature,
-				);
-			}
-
-			ensure!(
-				candidate.validity_votes.len() == expected_votes_len,
-				Error::<T>::UntaggedVotes
-			);
-
-			para_block_hashes.push(candidate_hash);
-		}
-
-		Ok(IncludedBlocks {
-			actual_number: relay_height_now,
-			session: <session::Module<T>>::current_index(),
-			random_seed,
-			active_parachains: active_parachains.iter().map(|x| x.0).collect(),
-			para_blocks: para_block_hashes,
-		})
-	}
-
-	fn initialize_authorities(authorities: &[ValidatorId]) {
-		if !authorities.is_empty() {
-			assert!(Authorities::get().is_empty(), "Authorities are already initialized!");
-			Authorities::put(authorities);
-		}
-	}
-
-/*
-	// TODO: Consider integrating if needed. (https://github.com/paritytech/polkadot/issues/223)
-	/// Extract the parachain heads from the block.
-	pub fn parachain_heads(&self) -> &[CandidateReceipt] {
-		let x = self.inner.extrinsics.get(PARACHAINS_SET_POSITION as usize).and_then(|xt| match xt.function {
-			Call::Parachains(ParachainsCall::set_heads(ref x)) => Some(&x[..]),
-			_ => None
-		});
-
-		match x {
-			Some(x) => x,
-			None => panic!("Invalid polkadot block asserted at {:?}", self.file_line),
-		}
-	}
-*/
-}
-
-impl<T: Trait> sp_runtime::BoundToRuntimeAppPublic for Module<T> {
-	type Public = ValidatorId;
-}
-
-impl<T: Trait> session::OneSessionHandler<T::AccountId> for Module<T> {
-	type Key = ValidatorId;
-
-	fn on_genesis_session<'a, I: 'a>(validators: I)
-		where I: Iterator<Item=(&'a T::AccountId, Self::Key)>
-	{
-		Self::initialize_authorities(&validators.map(|(_, key)| key).collect::<Vec<_>>());
-	}
-
-	fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued: I)
-		where I: Iterator<Item=(&'a T::AccountId, Self::Key)>
-	{
-		if changed {
-			<Self as Store>::Authorities::put(validators.map(|(_, key)| key).collect::<Vec<_>>());
-		}
-	}
-
-	fn on_disabled(_i: usize) { }
-}
-
-pub type InherentType = Vec<AttestedCandidate>;
-
-impl<T: Trait> ProvideInherent for Module<T> {
-	type Call = Call<T>;
-	type Error = MakeFatalError<inherents::Error>;
-	const INHERENT_IDENTIFIER: InherentIdentifier = NEW_HEADS_IDENTIFIER;
-
-	fn create_inherent(data: &InherentData) -> Option<Self::Call> {
-		let data = data.get_data::<InherentType>(&NEW_HEADS_IDENTIFIER)
-			.expect("Parachain heads could not be decoded.")
-			.expect("No parachain heads found in inherent data.");
-
-		Some(Call::set_heads(data))
-	}
-}
-
-/// Ensure that the origin `o` represents a parachain.
-/// Returns `Ok` with the parachain ID that effected the extrinsic or an `Err` otherwise.
-pub fn ensure_parachain<OuterOrigin>(o: OuterOrigin) -> result::Result<ParaId, BadOrigin>
-	where OuterOrigin: Into<result::Result<Origin, OuterOrigin>>
-{
-	match o.into() {
-		Ok(Origin::Parachain(id)) => Ok(id),
-		_ => Err(BadOrigin),
-	}
-}
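A call-site sketch for `ensure_parachain`; `require_para` is illustrative only, carrying the same conversion bound the function itself requires:

    fn require_para<O: Into<result::Result<Origin, O>>>(origin: O) -> result::Result<ParaId, BadOrigin> {
        // Err(BadOrigin) for signed/root/none origins; Ok(id) for Origin::Parachain(id).
        ensure_parachain(origin)
    }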
-/// Ensure that double vote reports are only processed if valid.
-#[derive(Encode, Decode, Clone, Eq, PartialEq)]
-pub struct ValidateDoubleVoteReports<T>(sp_std::marker::PhantomData<T>);
-
-impl<T> sp_std::fmt::Debug for ValidateDoubleVoteReports<T> {
-	fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
-		write!(f, "ValidateDoubleVoteReports")
-	}
-}
-
-impl<T> ValidateDoubleVoteReports<T> {
-	/// Create a new `ValidateDoubleVoteReports` struct.
-	pub fn new() -> Self {
-		ValidateDoubleVoteReports(sp_std::marker::PhantomData)
-	}
-}
-
-/// Custom validity error used while validating double vote reports.
-#[derive(RuntimeDebug)]
-#[repr(u8)]
-pub enum DoubleVoteValidityError {
-	/// The authority being reported is not in the authority set.
-	NotAnAuthority = 0,
-
-	/// Failed to convert offender's `FullIdentificationOf`.
-	FailedToConvertId = 1,
-
-	/// The signature on one or both of the statements in the report is wrong.
-	InvalidSignature = 2,
-
-	/// The two statements in the report are not conflicting.
-	NotDoubleVote = 3,
-
-	/// Invalid report. Indicates that statement doesn't match the attestation on one of the votes.
-	InvalidReport = 4,
-
-	/// The proof provided in the report is not valid.
-	InvalidProof = 5,
-}
-
-impl<T: Trait + Send + Sync> SignedExtension for ValidateDoubleVoteReports<T> where
-	<T as system::Trait>::Call: IsSubType<Module<T>, T>
-{
-	const IDENTIFIER: &'static str = "ValidateDoubleVoteReports";
-	type AccountId = T::AccountId;
-	type Call = <T as system::Trait>::Call;
-	type AdditionalSigned = ();
-	type Pre = ();
-
-	fn additional_signed(&self)
-		-> sp_std::result::Result<Self::AdditionalSigned, TransactionValidityError>
-	{
-		Ok(())
-	}
-
-	fn validate(
-		&self,
-		_who: &Self::AccountId,
-		call: &Self::Call,
-		_info: &DispatchInfoOf<Self::Call>,
-		_len: usize,
-	) -> TransactionValidity {
-		let r = ValidTransaction::default();
-
-		if let Some(local_call) = call.is_sub_type() {
-			if let Call::report_double_vote(report) = local_call {
-				let validators = <session::Module<T>>::validators();
-
-				let expected_session = report.signing_context.session_index;
-				let session = report.proof.session();
-
-				if session != expected_session {
-					return Err(InvalidTransaction::BadProof.into());
-				}
-
-				let authorities = Module::<T>::authorities();
-				let offender_idx = match authorities.iter().position(|a| *a == report.identity) {
-					Some(idx) => idx,
-					None => return Err(InvalidTransaction::Custom(
-						DoubleVoteValidityError::NotAnAuthority as u8).into()
-					),
-				};
-
-				if T::FullIdentificationOf::convert(validators[offender_idx].clone()).is_none() {
-					return Err(InvalidTransaction::Custom(
-						DoubleVoteValidityError::FailedToConvertId as u8).into()
-					);
-				}
-
-				report
-					.verify::<T>()
-					.map_err(|e| TransactionValidityError::from(InvalidTransaction::Custom(e as u8)))?;
-			}
-		}
-
-		Ok(r)
-	}
-}
-
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use super::Call as ParachainsCall;
-	use bitvec::{bitvec, vec::BitVec};
-	use sp_io::TestExternalities;
-	use sp_core::{H256, Blake2Hasher, sr25519};
-	use sp_trie::NodeCodec;
-	use sp_runtime::{
-		impl_opaque_keys,
-		Perbill, curve::PiecewiseLinear,
-		traits::{
-			BlakeTwo256, IdentityLookup, SaturatedConversion,
-			OpaqueKeys, Extrinsic as ExtrinsicT,
-		},
-		testing::TestXt,
-	};
-	use primitives::{
-		parachain::{
-			CandidateReceipt, ValidityAttestation, ValidatorId, Info as ParaInfo,
-			Scheduling, CandidateCommitments,
-		},
-		BlockNumber,
-		Header,
-	};
-	use keyring::Sr25519Keyring;
-	use frame_support::{
-		impl_outer_origin, impl_outer_dispatch, assert_ok, assert_err, parameter_types,
-		traits::{OnInitialize, OnFinalize},
-		weights::DispatchInfo,
-	};
-	use crate::parachains;
-	use crate::registrar;
-	use crate::slots;
-	use session::{SessionHandler, SessionManager};
-	use staking::EraIndex;
-
-	// result of <NodeCodec<Blake2Hasher> as trie_db::NodeCodec>::hashed_null_node()
-	const EMPTY_TRIE_ROOT: [u8; 32] = [
-		3, 23, 10, 46, 117, 151, 183, 183, 227, 216, 76, 5, 57, 29, 19, 154,
-		98, 177, 87, 231, 135, 134, 216, 192, 130, 242, 157, 207, 76, 17, 19, 20
-	];
-
-	impl_outer_origin! {
-		pub enum Origin for Test {
-			parachains
-		}
-	}
-
-	impl_outer_dispatch!
{ - pub enum Call for Test where origin: Origin { - parachains::Parachains, - staking::Staking, - } - } - - impl_opaque_keys! { - pub struct TestSessionKeys { - pub parachain_validator: super::Module, - } - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u32 = 250; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - } - - impl system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Call = Call; - type Index = u64; - type BlockNumber = BlockNumber; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - - impl system::offchain::SendTransactionTypes for Test where - Call: From, - { - type OverarchingCall = Call; - type Extrinsic = TestXt; - } - - parameter_types! { - pub const Period: BlockNumber = 1; - pub const Offset: BlockNumber = 0; - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); - } - - /// Custom `SessionHandler` since we use `TestSessionKeys` as `Keys`. - pub struct TestSessionHandler; - impl SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [KeyTypeId] = &[PARACHAIN_KEY_TYPE_ID]; - - fn on_genesis_session(_: &[(AId, Ks)]) {} - - fn on_new_session(_: bool, _: &[(AId, Ks)], _: &[(AId, Ks)]) {} - - fn on_before_session_ending() {} - - fn on_disabled(_: usize) {} - } - - impl session::Trait for Test { - type Event = (); - type ValidatorId = u64; - type ValidatorIdOf = staking::StashOf; - type ShouldEndSession = session::PeriodicSessions; - type NextSessionRotation = session::PeriodicSessions; - type SessionManager = session::historical::NoteHistoricalRoot; - type SessionHandler = TestSessionHandler; - type Keys = TestSessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - } - - impl session::historical::Trait for Test { - type FullIdentification = staking::Exposure; - type FullIdentificationOf = staking::ExposureOf; - } - - parameter_types! { - pub const MinimumPeriod: u64 = 3; - } - impl timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; - } - - mod time { - use primitives::{Moment, BlockNumber}; - pub const MILLISECS_PER_BLOCK: Moment = 6000; - pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 1 * HOURS; - // These time units are defined in number of blocks. - const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); - const HOURS: BlockNumber = MINUTES * 60; - } - parameter_types! { - pub const EpochDuration: BlockNumber = time::EPOCH_DURATION_IN_BLOCKS; - pub const ExpectedBlockTime: u64 = time::MILLISECS_PER_BLOCK; - } - - impl babe::Trait for Test { - type EpochDuration = EpochDuration; - type ExpectedBlockTime = ExpectedBlockTime; - - // session module is the trigger - type EpochChangeTrigger = babe::ExternalTrigger; - } - - parameter_types! 
{ - pub const ExistentialDeposit: Balance = 1; - } - - impl balances::Trait for Test { - type Balance = u128; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - - pallet_staking_reward_curve::build! { - const REWARD_CURVE: PiecewiseLinear<'static> = curve!( - min_inflation: 0_025_000u64, - max_inflation: 0_100_000, - ideal_stake: 0_500_000, - falloff: 0_050_000, - max_piece_count: 40, - test_precision: 0_005_000, - ); - } - - parameter_types! { - pub const SessionsPerEra: sp_staking::SessionIndex = 3; - pub const BondingDuration: staking::EraIndex = 3; - pub const SlashDeferDuration: staking::EraIndex = 0; - pub const AttestationPeriod: BlockNumber = 100; - pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const MaxNominatorRewardedPerValidator: u32 = 64; - pub const ElectionLookahead: BlockNumber = 0; - pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; - } - - pub struct CurrencyToVoteHandler; - - impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> u128 { x } - } - - impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> u64 { x.saturated_into() } - } - - impl staking::Trait for Test { - type RewardRemainder = (); - type CurrencyToVote = CurrencyToVoteHandler; - type Event = (); - type Currency = Balances; - type Slash = (); - type Reward = (); - type SessionsPerEra = SessionsPerEra; - type BondingDuration = BondingDuration; - type SlashDeferDuration = SlashDeferDuration; - type SlashCancelOrigin = system::EnsureRoot; - type SessionInterface = Self; - type UnixTime = timestamp::Module; - type RewardCurve = RewardCurve; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type UnsignedPriority = StakingUnsignedPriority; - type MaxIterations = (); - type MinSolutionScoreBump = (); - } - - impl attestations::Trait for Test { - type AttestationPeriod = AttestationPeriod; - type ValidatorIdentities = ValidatorIdentities; - type RewardAttestation = (); - } - - parameter_types!{ - pub const LeasePeriod: BlockNumber = 10; - pub const EndingPeriod: BlockNumber = 3; - } - - impl slots::Trait for Test { - type Event = (); - type Currency = Balances; - type Parachains = registrar::Module; - type EndingPeriod = EndingPeriod; - type LeasePeriod = LeasePeriod; - type Randomness = RandomnessCollectiveFlip; - } - - parameter_types! { - pub const ParathreadDeposit: Balance = 10; - pub const QueueSize: usize = 2; - pub const MaxRetries: u32 = 3; - } - - impl registrar::Trait for Test { - type Event = (); - type Origin = Origin; - type Currency = Balances; - type ParathreadDeposit = ParathreadDeposit; - type SwapAux = slots::Module; - type QueueSize = QueueSize; - type MaxRetries = MaxRetries; - } - - parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); - } - - impl offences::Trait for Test { - type Event = (); - type IdentificationTuple = session::historical::IdentificationTuple; - type OnOffenceHandler = Staking; - type WeightSoftLimit = OffencesWeightSoftLimit; - } - - parameter_types! 
{ - pub const MaxHeadDataSize: u32 = 100; - pub const MaxCodeSize: u32 = 100; - - pub const ValidationUpgradeFrequency: BlockNumber = 10; - pub const ValidationUpgradeDelay: BlockNumber = 2; - pub const SlashPeriod: BlockNumber = 50; - } - - // This is needed for a custom `AccountId` type which is `u64` in testing here. - pub mod test_keys { - use sp_core::{crypto::KeyTypeId, sr25519}; - pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"test"); - - mod app { - use sp_application_crypto::{app_crypto, sr25519}; - use super::super::Parachains; - - app_crypto!(sr25519, super::KEY_TYPE); - - impl sp_runtime::traits::IdentifyAccount for Public { - type AccountId = u64; - - fn into_account(self) -> Self::AccountId { - Parachains::authorities().iter().position(|b| *b == self.0.clone().into()).unwrap() as u64 - } - } - } - - pub type ReporterId = app::Public; - pub struct ReporterAuthorityId; - impl system::offchain::AppCrypto for ReporterAuthorityId { - type RuntimeAppPublic = ReporterId; - type GenericSignature = sr25519::Signature; - type GenericPublic = sr25519::Public; - } - } - - impl Trait for Test { - type AuthorityId = test_keys::ReporterAuthorityId; - type Origin = Origin; - type Call = Call; - type ParachainCurrency = Balances; - type BlockNumberConversion = sp_runtime::traits::Identity; - type Randomness = RandomnessCollectiveFlip; - type ActiveParachains = registrar::Module; - type Registrar = registrar::Module; - type MaxCodeSize = MaxCodeSize; - type MaxHeadDataSize = MaxHeadDataSize; - type ValidationUpgradeFrequency = ValidationUpgradeFrequency; - type ValidationUpgradeDelay = ValidationUpgradeDelay; - type SlashPeriod = SlashPeriod; - type Proof = - >::Proof; - type IdentificationTuple = - >::IdentificationTuple; - type ReportOffence = Offences; - type BlockHashConversion = sp_runtime::traits::Identity; - type KeyOwnerProofSystem = Historical; - } - - type Extrinsic = TestXt; - - impl system::offchain::CreateSignedTransaction for Test where - Call: From, - { - fn create_transaction>( - call: Call, - _public: test_keys::ReporterId, - _account: ::AccountId, - nonce: ::Index, - ) -> Option<(Call, ::SignaturePayload)> { - Some((call, (nonce, ()))) - } - } - - impl system::offchain::SigningTypes for Test { - type Public = test_keys::ReporterId; - type Signature = sr25519::Signature; - } - - type Parachains = Module; - type Balances = balances::Module; - type System = system::Module; - type Offences = offences::Module; - type Staking = staking::Module; - type Session = session::Module; - type Timestamp = timestamp::Module; - type RandomnessCollectiveFlip = randomness_collective_flip::Module; - type Registrar = registrar::Module; - type Historical = session::historical::Module; - - fn new_test_ext(parachains: Vec<(ParaId, ValidationCode, HeadData)>) -> TestExternalities { - use staking::StakerStatus; - use babe::AuthorityId as BabeAuthorityId; - - let mut t = system::GenesisConfig::default().build_storage::().unwrap(); - - let authority_keys = [ - Sr25519Keyring::Alice, - Sr25519Keyring::Bob, - Sr25519Keyring::Charlie, - Sr25519Keyring::Dave, - Sr25519Keyring::Eve, - Sr25519Keyring::Ferdie, - Sr25519Keyring::One, - Sr25519Keyring::Two, - ]; - - // stashes are the index. 
- let session_keys: Vec<_> = authority_keys.iter().enumerate() - .map(|(i, k)| (i as u64, i as u64, TestSessionKeys { - parachain_validator: ValidatorId::from(k.public()), - })) - .collect(); - - let authorities: Vec<_> = authority_keys.iter().map(|k| ValidatorId::from(k.public())).collect(); - let babe_authorities: Vec<_> = authority_keys.iter() - .map(|k| BabeAuthorityId::from(k.public())) - .map(|k| (k, 1)) - .collect(); - - // controllers are the index + 1000 - let stakers: Vec<_> = (0..authority_keys.len()).map(|i| ( - i as u64, - i as u64 + 1000, - 10_000, - StakerStatus::::Validator, - )).collect(); - - let balances: Vec<_> = (0..authority_keys.len()).map(|i| (i as u64, 10_000_000)).collect(); - - GenesisConfig { - authorities: authorities.clone(), - }.assimilate_storage::(&mut t).unwrap(); - - registrar::GenesisConfig:: { - parachains, - _phdata: Default::default(), - }.assimilate_storage(&mut t).unwrap(); - - session::GenesisConfig:: { - keys: session_keys, - }.assimilate_storage(&mut t).unwrap(); - - babe::GenesisConfig { - authorities: babe_authorities, - }.assimilate_storage::(&mut t).unwrap(); - - balances::GenesisConfig:: { - balances, - }.assimilate_storage(&mut t).unwrap(); - - staking::GenesisConfig:: { - stakers, - validator_count: 8, - force_era: staking::Forcing::ForceNew, - minimum_validator_count: 0, - invulnerables: vec![], - .. Default::default() - }.assimilate_storage(&mut t).unwrap(); - - t.into() - } - - fn set_heads(v: Vec) -> ParachainsCall { - ParachainsCall::set_heads(v) - } - - fn report_double_vote( - report: DoubleVoteReport, - ) -> Result, TransactionValidityError> { - let inner = ParachainsCall::report_double_vote(report); - let call = Call::Parachains(inner.clone()); - - ValidateDoubleVoteReports::(sp_std::marker::PhantomData) - .validate(&0, &call, &DispatchInfo::default(), 0)?; - - Ok(inner) - } - - // creates a template candidate which pins to correct relay-chain state. - fn raw_candidate(para_id: ParaId) -> CandidateReceipt { - let mut head_data = Parachains::parachain_head(¶_id).unwrap(); - head_data.0.extend(para_id.encode()); - - CandidateReceipt { - parachain_index: para_id, - relay_parent: System::parent_hash(), - head_data, - collator: Default::default(), - signature: Default::default(), - pov_block_hash: Default::default(), - global_validation: Parachains::global_validation_schedule(), - local_validation: Parachains::current_local_validation_data(¶_id).unwrap(), - commitments: CandidateCommitments::default(), - } - } - - // makes a blank attested candidate from a `CandidateReceipt`. 
- fn make_blank_attested(candidate: CandidateReceipt) -> AttestedCandidate { - let (candidate, _) = candidate.abridge(); - - AttestedCandidate { - validity_votes: vec![], - validator_indices: BitVec::new(), - candidate, - } - } - - fn make_attestations(candidate: &mut AttestedCandidate) { - let mut vote_implicit = false; - - let (duty_roster, _) = Parachains::calculate_duty_roster(); - let candidate_hash = candidate.candidate.hash(); - - let authorities = Parachains::authorities(); - let extract_key = |public: ValidatorId| { - let mut raw_public = [0; 32]; - raw_public.copy_from_slice(public.as_ref()); - Sr25519Keyring::from_raw_public(raw_public).unwrap() - }; - - let validation_entries = duty_roster.validator_duty.iter() - .enumerate(); - - let mut validator_indices = BitVec::new(); - for (idx, &duty) in validation_entries { - if duty != Chain::Parachain(candidate.parachain_index()) { continue } - vote_implicit = !vote_implicit; - - let key = extract_key(authorities[idx].clone()); - - let statement = if vote_implicit { - Statement::Candidate(candidate_hash.clone()) - } else { - Statement::Valid(candidate_hash.clone()) - }; - - let signing_context = Parachains::signing_context(); - let payload = localized_payload(statement, &signing_context); - let signature = key.sign(&payload[..]).into(); - - candidate.validity_votes.push(if vote_implicit { - ValidityAttestation::Implicit(signature) - } else { - ValidityAttestation::Explicit(signature) - }); - - if validator_indices.len() <= idx { - validator_indices.resize(idx + 1, false); - } - validator_indices.set(idx, true); - } - candidate.validator_indices = validator_indices; - } - - fn new_candidate_with_upward_messages( - id: u32, - upward_messages: Vec<(ParachainDispatchOrigin, Vec)> - ) -> AttestedCandidate { - let mut raw_candidate = raw_candidate(id.into()); - raw_candidate.commitments.upward_messages = upward_messages.into_iter() - .map(|x| UpwardMessage { origin: x.0, data: x.1 }) - .collect(); - - make_blank_attested(raw_candidate) - } - - fn start_session(session_index: SessionIndex) { - let mut parent_hash = System::parent_hash(); - - for i in Session::current_index()..session_index { - println!("session index {}", i); - Staking::on_finalize(System::block_number()); - System::set_block_number((i + 1).into()); - Timestamp::set_timestamp(System::block_number() as primitives::Moment * 6000); - - // In order to be able to use `System::parent_hash()` in the tests - // we need to first get it via `System::finalize` and then set it - // the `System::initialize`. However, it is needed to be taken into - // consideration that finalizing will prune some data in `System` - // storage including old values `BlockHash` if that reaches above - // `BlockHashCount` capacity. 
- if System::block_number() > 1 { - let hdr = System::finalize(); - parent_hash = hdr.hash(); - } - - System::initialize( - &(i as BlockNumber + 1), - &parent_hash, - &Default::default(), - &Default::default(), - Default::default(), - ); - init_block(); - } - - assert_eq!(Session::current_index(), session_index); - } - - fn start_era(era_index: EraIndex) { - start_session((era_index * 3).into()); - assert_eq!(Staking::current_era(), Some(era_index)); - } - - fn init_block() { - println!("Initializing {}", System::block_number()); - Session::on_initialize(System::block_number()); - System::on_initialize(System::block_number()); - Registrar::on_initialize(System::block_number()); - Parachains::on_initialize(System::block_number()); - } - fn run_to_block(n: BlockNumber) { - println!("Running until block {}", n); - while System::block_number() < n { - if System::block_number() > 1 { - println!("Finalizing {}", System::block_number()); - if !DidUpdate::get().is_some() { - Parachains::set_heads(Origin::none(), vec![]).unwrap(); - } - - Parachains::on_finalize(System::block_number()); - Registrar::on_finalize(System::block_number()); - System::on_finalize(System::block_number()); - } - Staking::new_session(System::block_number() as u32); - System::set_block_number(System::block_number() + 1); - init_block(); - } - } - - fn queue_upward_messages(id: ParaId, upward_messages: &[UpwardMessage]) { - NeedsDispatch::mutate(|nd| - Parachains::queue_upward_messages(id, upward_messages, nd) - ); - } - - #[test] - fn check_dispatch_upward_works() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - (1u32.into(), vec![].into(), vec![].into()), - (2u32.into(), vec![].into(), vec![].into()), - ]; - new_test_ext(parachains.clone()).execute_with(|| { - init_block(); - queue_upward_messages(0.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![0; 4] } - ]); - queue_upward_messages(1.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![1; 4] } - ]); - let mut dispatched: Vec<(ParaId, ParachainDispatchOrigin, Vec)> = vec![]; - let dummy = |id, origin, data: &[u8]| dispatched.push((id, origin, data.to_vec())); - Parachains::dispatch_upward_messages(2, 3, dummy); - assert_eq!(dispatched, vec![ - (0.into(), ParachainDispatchOrigin::Parachain, vec![0; 4]) - ]); - assert!(::get(ParaId::from(0)).is_empty()); - assert_eq!(::get(ParaId::from(1)).len(), 1); - }); - new_test_ext(parachains.clone()).execute_with(|| { - init_block(); - queue_upward_messages(0.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![0; 2] } - ]); - queue_upward_messages(1.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![1; 2] } - ]); - queue_upward_messages(2.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![2] } - ]); - let mut dispatched: Vec<(ParaId, ParachainDispatchOrigin, Vec)> = vec![]; - let dummy = |id, origin, data: &[u8]| dispatched.push((id, origin, data.to_vec())); - Parachains::dispatch_upward_messages(2, 3, dummy); - assert_eq!(dispatched, vec![ - (0.into(), ParachainDispatchOrigin::Parachain, vec![0; 2]), - (2.into(), ParachainDispatchOrigin::Parachain, vec![2]) - ]); - assert!(::get(ParaId::from(0)).is_empty()); - assert_eq!(::get(ParaId::from(1)).len(), 1); - assert!(::get(ParaId::from(2)).is_empty()); - }); - new_test_ext(parachains.clone()).execute_with(|| { - init_block(); - queue_upward_messages(0.into(), &vec![ - 
UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![0; 2] } - ]); - queue_upward_messages(1.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![1; 2] } - ]); - queue_upward_messages(2.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![2] } - ]); - let mut dispatched: Vec<(ParaId, ParachainDispatchOrigin, Vec)> = vec![]; - let dummy = |id, origin, data: &[u8]| dispatched.push((id, origin, data.to_vec())); - Parachains::dispatch_upward_messages(2, 3, dummy); - assert_eq!(dispatched, vec![ - (0.into(), ParachainDispatchOrigin::Parachain, vec![0; 2]), - (2.into(), ParachainDispatchOrigin::Parachain, vec![2]) - ]); - assert!(::get(ParaId::from(0)).is_empty()); - assert_eq!(::get(ParaId::from(1)).len(), 1); - assert!(::get(ParaId::from(2)).is_empty()); - }); - new_test_ext(parachains.clone()).execute_with(|| { - init_block(); - queue_upward_messages(0.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![0; 2] } - ]); - queue_upward_messages(1.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![1; 2] } - ]); - queue_upward_messages(2.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![2] } - ]); - let mut dispatched: Vec<(ParaId, ParachainDispatchOrigin, Vec)> = vec![]; - let dummy = |id, origin, data: &[u8]| dispatched.push((id, origin, data.to_vec())); - Parachains::dispatch_upward_messages(2, 3, dummy); - assert_eq!(dispatched, vec![ - (0.into(), ParachainDispatchOrigin::Parachain, vec![0; 2]), - (2.into(), ParachainDispatchOrigin::Parachain, vec![2]), - ]); - assert!(::get(ParaId::from(0)).is_empty()); - assert_eq!(::get(ParaId::from(1)).len(), 1); - assert!(::get(ParaId::from(2)).is_empty()); - }); - } - - #[test] - fn check_queue_upward_messages_works() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - ]; - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - let messages = vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0] } - ]; - assert_ok!(Parachains::check_upward_messages(0.into(), &messages, 2, 3)); - - // all good. - queue_upward_messages(0.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0] }, - ]); - let messages = vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![1, 2] } - ]; - assert_ok!(Parachains::check_upward_messages(0.into(), &messages, 2, 3)); - queue_upward_messages(0.into(), &messages); - assert_eq!(::get(ParaId::from(0)), vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0] }, - UpwardMessage { origin: ParachainDispatchOrigin::Parachain, data: vec![1, 2] }, - ]); - }); - } - - #[test] - fn check_queue_full_upward_messages_fails() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - ]; - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - // oversize, but ok since it's just one and the queue is empty. - let messages = vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0; 4] }, - ]; - assert_ok!(Parachains::check_upward_messages(0.into(), &messages, 2, 3)); - - // oversize and bad since it's not just one. 
- let messages = vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0] }, - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0; 4] }, - ]; - assert_err!( - Parachains::check_upward_messages(0.into(), &messages, 2, 3), - Error::::QueueFull - ); - - // too many messages. - let messages = vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0] }, - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![1] }, - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![2] }, - ]; - assert_err!( - Parachains::check_upward_messages(0.into(), &messages, 2, 3), - Error::::QueueFull - ); - }); - } - - #[test] - fn check_queued_too_many_upward_messages_fails() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - ]; - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - // too many messages. - queue_upward_messages(0.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0] }, - ]); - let messages = vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![1] }, - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![2] }, - ]; - assert_err!( - Parachains::check_upward_messages(0.into(), &messages, 2, 3), - Error::::QueueFull - ); - }); - } - - #[test] - fn check_queued_total_oversize_upward_messages_fails() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - ]; - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - // too much data. - queue_upward_messages(0.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0, 1] }, - ]); - let messages = vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![2, 3] }, - ]; - assert_err!( - Parachains::check_upward_messages(0.into(), &messages, 2, 3), - Error::::QueueFull - ); - }); - } - - #[test] - fn check_queued_pre_jumbo_upward_messages_fails() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - ]; - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - // bad - already an oversize messages queued. - queue_upward_messages(0.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0; 4] }, - ]); - let messages = vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0] } - ]; - assert_err!( - Parachains::check_upward_messages(0.into(), &messages, 2, 3), - Error::::QueueFull - ); - }); - } - - #[test] - fn check_queued_post_jumbo_upward_messages_fails() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - ]; - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - // bad - oversized and already a message queued. - queue_upward_messages(0.into(), &vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0] }, - ]); - let messages = vec![ - UpwardMessage { origin: ParachainDispatchOrigin::Signed, data: vec![0; 4] } - ]; - assert_err!( - Parachains::check_upward_messages(0.into(), &messages, 2, 3), - Error::::QueueFull - ); - }); - } - - #[test] - fn upward_queuing_works() { - // That the list of egress queue roots is in ascending order by `ParaId`. 
- let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - (1u32.into(), vec![].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - // parachain 0 is self - let mut candidates = vec![ - new_candidate_with_upward_messages(0, vec![ - (ParachainDispatchOrigin::Signed, vec![1]), - ]), - new_candidate_with_upward_messages(1, vec![ - (ParachainDispatchOrigin::Parachain, vec![2]), - ]) - ]; - candidates.iter_mut().for_each(make_attestations); - - assert_ok!(Call::from(set_heads(candidates)).dispatch(Origin::none())); - - assert!(::get(ParaId::from(0)).is_empty()); - assert!(::get(ParaId::from(1)).is_empty()); - }); - } - - #[test] - fn active_parachains_should_work() { - let parachains = vec![ - (5u32.into(), vec![1,2,3].into(), vec![1].into()), - (100u32.into(), vec![4,5,6].into(), vec![2].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - assert_eq!(Parachains::active_parachains(), vec![(5u32.into(), None), (100u32.into(), None)]); - assert_eq!(Parachains::parachain_code(ParaId::from(5u32)), Some(vec![1, 2, 3].into())); - assert_eq!(Parachains::parachain_code(ParaId::from(100u32)), Some(vec![4, 5, 6].into())); - }); - } - - #[test] - fn register_deregister() { - let parachains = vec![ - (5u32.into(), vec![1,2,3].into(), vec![1].into()), - (100u32.into(), vec![4,5,6].into(), vec![2,].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - assert_eq!(Parachains::active_parachains(), vec![(5u32.into(), None), (100u32.into(), None)]); - - assert_eq!(Parachains::parachain_code(ParaId::from(5u32)), Some(vec![1,2,3].into())); - assert_eq!(Parachains::parachain_code(ParaId::from(100u32)), Some(vec![4,5,6].into())); - - assert_ok!(Registrar::register_para( - Origin::root(), - 99u32.into(), - ParaInfo{scheduling: Scheduling::Always}, - vec![7,8,9].into(), - vec![1, 1, 1].into(), - )); - assert_ok!(Parachains::set_heads(Origin::none(), vec![])); - - run_to_block(3); - - assert_eq!(Parachains::active_parachains(), vec![(5u32.into(), None), (99u32.into(), None), (100u32.into(), None)]); - assert_eq!(Parachains::parachain_code(&ParaId::from(99u32)), Some(vec![7,8,9].into())); - - assert_ok!(Registrar::deregister_para(Origin::root(), 5u32.into())); - assert_ok!(Parachains::set_heads(Origin::none(), vec![])); - - // parachain still active this block. another block must pass before it's inactive. 
- run_to_block(4); - - assert_eq!(Parachains::active_parachains(), vec![(99u32.into(), None), (100u32.into(), None)]); - assert_eq!(Parachains::parachain_code(&ParaId::from(5u32)), None); - }); - } - - #[test] - fn duty_roster_works() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - (1u32.into(), vec![].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - let check_roster = |duty_roster: &DutyRoster| { - assert_eq!(duty_roster.validator_duty.len(), 8); - for i in (0..2).map(ParaId::from) { - assert_eq!(duty_roster.validator_duty.iter().filter(|&&j| j == Chain::Parachain(i)).count(), 3); - } - assert_eq!(duty_roster.validator_duty.iter().filter(|&&j| j == Chain::Relay).count(), 2); - }; - - let duty_roster_0 = Parachains::calculate_duty_roster().0; - check_roster(&duty_roster_0); - - System::initialize(&1, &H256::from([1; 32]), &Default::default(), &Default::default(), Default::default()); - RandomnessCollectiveFlip::on_initialize(1); - let duty_roster_1 = Parachains::calculate_duty_roster().0; - check_roster(&duty_roster_1); - assert_ne!(duty_roster_0, duty_roster_1); - - - System::initialize(&2, &H256::from([2; 32]), &Default::default(), &Default::default(), Default::default()); - RandomnessCollectiveFlip::on_initialize(2); - let duty_roster_2 = Parachains::calculate_duty_roster().0; - check_roster(&duty_roster_2); - assert_ne!(duty_roster_0, duty_roster_2); - assert_ne!(duty_roster_1, duty_roster_2); - }); - } - - #[test] - fn unattested_candidate_is_rejected() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - (1u32.into(), vec![].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - let candidate = make_blank_attested(raw_candidate(0.into())); - assert!(Call::from(set_heads(vec![candidate])).dispatch(Origin::none()).is_err()); - }) - } - - #[test] - fn attested_candidates_accepted_in_order() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - (1u32.into(), vec![].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - assert_eq!(Parachains::active_parachains().len(), 2); - - let mut candidate_a = make_blank_attested(raw_candidate(0.into())); - let mut candidate_b = make_blank_attested(raw_candidate(1.into())); - - make_attestations(&mut candidate_a); - make_attestations(&mut candidate_b); - - assert!(Call::from(set_heads(vec![candidate_b.clone(), candidate_a.clone()])) - .dispatch(Origin::none()).is_err()); - - assert_ok!(Call::from(set_heads(vec![candidate_a.clone(), candidate_b.clone()])) - .dispatch(Origin::none())); - - assert_eq!(Heads::get(&ParaId::from(0)), Some(candidate_a.candidate.head_data)); - assert_eq!(Heads::get(&ParaId::from(1)), Some(candidate_b.candidate.head_data)); - }); - } - - #[test] - fn duplicate_vote_is_rejected() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - (1u32.into(), vec![].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - - let mut candidate = make_blank_attested(raw_candidate(0.into())); - make_attestations(&mut candidate); - - let mut double_validity = candidate.clone(); - double_validity.validity_votes.push(candidate.validity_votes[0].clone()); - double_validity.validator_indices.push(true); - - assert!(Call::from(set_heads(vec![double_validity])).dispatch(Origin::none()).is_err()); - }); - } - - #[test] - fn 
validators_not_from_group_is_rejected() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - (1u32.into(), vec![].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - run_to_block(2); - - let mut candidate = make_blank_attested(raw_candidate(0.into())); - make_attestations(&mut candidate); - - // Change the last vote index to make it not corresponding to the assigned group. - assert!(candidate.validator_indices.pop().is_some()); - candidate.validator_indices.append(&mut bitvec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]); - - assert!(Call::from(set_heads(vec![candidate])).dispatch(Origin::none()).is_err()); - }); - } - - #[test] - fn empty_trie_root_const_is_blake2_hashed_null_node() { - let hashed_null_node = as trie_db::NodeCodec>::hashed_null_node(); - assert_eq!(hashed_null_node, EMPTY_TRIE_ROOT.into()) - } - - #[test] - fn para_past_code_meta_gives_right_code() { - let mut past_code = ParaPastCodeMeta::default(); - assert_eq!(past_code.code_at(0u32), Some(UseCodeAt::Current)); - - past_code.note_replacement(10); - assert_eq!(past_code.code_at(0), Some(UseCodeAt::ReplacedAt(10))); - assert_eq!(past_code.code_at(10), Some(UseCodeAt::ReplacedAt(10))); - assert_eq!(past_code.code_at(11), Some(UseCodeAt::Current)); - - past_code.note_replacement(20); - assert_eq!(past_code.code_at(1), Some(UseCodeAt::ReplacedAt(10))); - assert_eq!(past_code.code_at(10), Some(UseCodeAt::ReplacedAt(10))); - assert_eq!(past_code.code_at(11), Some(UseCodeAt::ReplacedAt(20))); - assert_eq!(past_code.code_at(20), Some(UseCodeAt::ReplacedAt(20))); - assert_eq!(past_code.code_at(21), Some(UseCodeAt::Current)); - - past_code.last_pruned = Some(5); - assert_eq!(past_code.code_at(1), None); - assert_eq!(past_code.code_at(5), None); - assert_eq!(past_code.code_at(6), Some(UseCodeAt::ReplacedAt(10))); - } - - #[test] - fn para_past_code_pruning_works_correctly() { - let mut past_code = ParaPastCodeMeta::default(); - past_code.note_replacement(10u32); - past_code.note_replacement(20); - past_code.note_replacement(30); - - let old = past_code.clone(); - assert!(past_code.prune_up_to(9).collect::>().is_empty()); - assert_eq!(old, past_code); - - assert_eq!(past_code.prune_up_to(10).collect::>(), vec![10]); - assert_eq!(past_code, ParaPastCodeMeta { - upgrade_times: vec![30, 20], - last_pruned: Some(10), - }); - - assert_eq!(past_code.prune_up_to(21).collect::>(), vec![20]); - assert_eq!(past_code, ParaPastCodeMeta { - upgrade_times: vec![30], - last_pruned: Some(20), - }); - - past_code.note_replacement(40); - past_code.note_replacement(50); - past_code.note_replacement(60); - - assert_eq!(past_code, ParaPastCodeMeta { - upgrade_times: vec![60, 50, 40, 30], - last_pruned: Some(20), - }); - - assert_eq!(past_code.prune_up_to(60).collect::>(), vec![30, 40, 50, 60]); - assert_eq!(past_code, ParaPastCodeMeta { - upgrade_times: Vec::new(), - last_pruned: Some(60), - }); - } - - #[test] - fn para_past_code_pruning_in_initialize() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - (1u32.into(), vec![].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - let id = ParaId::from(0u32); - let at_block: BlockNumber = 10; - ::PastCode::insert(&(id, at_block), &ValidationCode(vec![1, 2, 3])); - ::PastCodePruning::put(&vec![(id, at_block)]); - - { - let mut code_meta = Parachains::past_code_meta(&id); - code_meta.note_replacement(at_block); - ::PastCodeMeta::insert(&id, &code_meta); - } - - let pruned_at: BlockNumber = at_block + 
SlashPeriod::get() + 1; - assert_eq!(::PastCode::get(&(id, at_block)), Some(vec![1, 2, 3].into())); - - run_to_block(pruned_at - 1); - assert_eq!(::PastCode::get(&(id, at_block)), Some(vec![1, 2, 3].into())); - assert_eq!(Parachains::past_code_meta(&id).most_recent_change(), Some(at_block)); - - run_to_block(pruned_at); - assert!(::PastCode::get(&(id, at_block)).is_none()); - assert!(Parachains::past_code_meta(&id).most_recent_change().is_none()); - }); - } - - #[test] - fn note_past_code_sets_up_pruning_correctly() { - let parachains = vec![ - (0u32.into(), vec![].into(), vec![].into()), - (1u32.into(), vec![].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - let id_a = ParaId::from(0u32); - let id_b = ParaId::from(1u32); - - Parachains::note_past_code(id_a, 10, vec![1, 2, 3].into()); - Parachains::note_past_code(id_b, 20, vec![4, 5, 6].into()); - - assert_eq!(Parachains::past_code_pruning_tasks(), vec![(id_a, 10), (id_b, 20)]); - assert_eq!( - Parachains::past_code_meta(&id_a), - ParaPastCodeMeta { - upgrade_times: vec![10], - last_pruned: None, - } - ); - assert_eq!( - Parachains::past_code_meta(&id_b), - ParaPastCodeMeta { - upgrade_times: vec![20], - last_pruned: None, - } - ); - }); - } - - #[test] - fn code_upgrade_applied_after_delay() { - let parachains = vec![ - (0u32.into(), vec![1, 2, 3].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - let para_id = ParaId::from(0); - let new_code = ValidationCode(vec![4, 5, 6]); - - run_to_block(2); - assert_eq!(Parachains::active_parachains().len(), 1); - assert_eq!(Parachains::parachain_code(¶_id), Some(vec![1, 2, 3].into())); - - let applied_after ={ - let raw_candidate = raw_candidate(para_id); - let applied_after = raw_candidate.local_validation.code_upgrade_allowed.unwrap(); - let mut candidate_a = make_blank_attested(raw_candidate); - - candidate_a.candidate.commitments.new_validation_code = Some(new_code.clone()); - - // this parablock is in the context of block 1. - assert_eq!(applied_after, 1 + ValidationUpgradeDelay::get()); - make_attestations(&mut candidate_a); - - assert_ok!(Call::from(set_heads(vec![candidate_a.clone()])).dispatch(Origin::none())); - - assert!(Parachains::past_code_meta(¶_id).most_recent_change().is_none()); - assert_eq!(Parachains::code_upgrade_schedule(¶_id), Some(applied_after)); - assert_eq!(::FutureCode::get(¶_id), new_code); - assert_eq!(Parachains::parachain_code(¶_id), Some(vec![1, 2, 3].into())); - - applied_after - }; - - run_to_block(applied_after); - - // the candidate is in the context of the parent of `applied_after`, - // thus does not trigger the code upgrade. - { - let raw_candidate = raw_candidate(para_id); - assert!(raw_candidate.local_validation.code_upgrade_allowed.is_none()); - let mut candidate_a = make_blank_attested(raw_candidate); - - make_attestations(&mut candidate_a); - - assert_ok!(Call::from(set_heads(vec![candidate_a.clone()])).dispatch(Origin::none())); - - assert!(Parachains::past_code_meta(¶_id).most_recent_change().is_none()); - assert_eq!(Parachains::code_upgrade_schedule(¶_id), Some(applied_after)); - assert_eq!(::FutureCode::get(¶_id), new_code); - assert_eq!(Parachains::parachain_code(¶_id), Some(vec![1, 2, 3].into())); - } - - run_to_block(applied_after + 1); - - // the candidate is in the context of `applied_after`, and triggers - // the upgrade. 
- { - let raw_candidate = raw_candidate(para_id); - assert!(raw_candidate.local_validation.code_upgrade_allowed.is_some()); - let mut candidate_a = make_blank_attested(raw_candidate); - - make_attestations(&mut candidate_a); - - assert_ok!(Call::from(set_heads(vec![candidate_a.clone()])).dispatch(Origin::none())); - - assert_eq!( - Parachains::past_code_meta(¶_id).most_recent_change(), - Some(applied_after), - ); - assert_eq!( - ::PastCode::get(&(para_id, applied_after)), - Some(vec![1, 2, 3,].into()), - ); - assert!(Parachains::code_upgrade_schedule(¶_id).is_none()); - assert!(::FutureCode::get(¶_id).0.is_empty()); - assert_eq!(Parachains::parachain_code(¶_id), Some(new_code)); - } - }); - } - - #[test] - fn code_upgrade_applied_after_delay_even_when_late() { - let parachains = vec![ - (0u32.into(), vec![1, 2, 3].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - let para_id = ParaId::from(0); - let new_code = ValidationCode(vec![4, 5, 6]); - - run_to_block(2); - assert_eq!(Parachains::active_parachains().len(), 1); - assert_eq!(Parachains::parachain_code(¶_id), Some(vec![1, 2, 3].into())); - - let applied_after ={ - let raw_candidate = raw_candidate(para_id); - let applied_after = raw_candidate.local_validation.code_upgrade_allowed.unwrap(); - let mut candidate_a = make_blank_attested(raw_candidate); - - candidate_a.candidate.commitments.new_validation_code = Some(new_code.clone()); - - // this parablock is in the context of block 1. - assert_eq!(applied_after, 1 + ValidationUpgradeDelay::get()); - make_attestations(&mut candidate_a); - - assert_ok!(Call::from(set_heads(vec![candidate_a.clone()])).dispatch(Origin::none())); - - assert!(Parachains::past_code_meta(¶_id).most_recent_change().is_none()); - assert_eq!(Parachains::code_upgrade_schedule(¶_id), Some(applied_after)); - assert_eq!(::FutureCode::get(¶_id), new_code); - assert_eq!(Parachains::parachain_code(¶_id), Some(vec![1, 2, 3].into())); - - applied_after - }; - - run_to_block(applied_after + 1 + 4); - - { - let raw_candidate = raw_candidate(para_id); - assert!(raw_candidate.local_validation.code_upgrade_allowed.is_some()); - let mut candidate_a = make_blank_attested(raw_candidate); - - make_attestations(&mut candidate_a); - - assert_ok!(Call::from(set_heads(vec![candidate_a.clone()])).dispatch(Origin::none())); - - assert_eq!( - Parachains::past_code_meta(¶_id).most_recent_change(), - Some(applied_after + 4), - ); - assert_eq!( - ::PastCode::get(&(para_id, applied_after + 4)), - Some(vec![1, 2, 3,].into()), - ); - assert!(Parachains::code_upgrade_schedule(¶_id).is_none()); - assert!(::FutureCode::get(¶_id).0.is_empty()); - assert_eq!(Parachains::parachain_code(¶_id), Some(new_code)); - } - }); - } - - #[test] - fn submit_code_change_when_not_allowed_is_err() { - let parachains = vec![ - (0u32.into(), vec![1, 2, 3].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - let para_id = ParaId::from(0); - let new_code = ValidationCode(vec![4, 5, 6]); - - run_to_block(2); - - { - let raw_candidate = raw_candidate(para_id); - let mut candidate_a = make_blank_attested(raw_candidate); - - candidate_a.candidate.commitments.new_validation_code = Some(new_code.clone()); - - make_attestations(&mut candidate_a); - - assert_ok!(Call::from(set_heads(vec![candidate_a.clone()])).dispatch(Origin::none())); - }; - - run_to_block(3); - - { - let raw_candidate = raw_candidate(para_id); - assert!(raw_candidate.local_validation.code_upgrade_allowed.is_none()); - let mut candidate_a = 
make_blank_attested(raw_candidate); - candidate_a.candidate.commitments.new_validation_code = Some(vec![1, 2, 3].into()); - - make_attestations(&mut candidate_a); - - assert_err!( - Call::from(set_heads(vec![candidate_a.clone()])).dispatch(Origin::none()), - Error::::DisallowedCodeUpgrade, - ); - } - }); - } - - #[test] - fn full_parachain_cleanup_storage() { - let parachains = vec![ - (0u32.into(), vec![1, 2, 3].into(), vec![].into()), - ]; - - new_test_ext(parachains.clone()).execute_with(|| { - let para_id = ParaId::from(0); - let new_code = ValidationCode(vec![4, 5, 6]); - - run_to_block(2); - { - let raw_candidate = raw_candidate(para_id); - let applied_after = raw_candidate.local_validation.code_upgrade_allowed.unwrap(); - let mut candidate_a = make_blank_attested(raw_candidate); - - candidate_a.candidate.commitments.new_validation_code = Some(new_code.clone()); - - // this parablock is in the context of block 1. - assert_eq!(applied_after, 1 + ValidationUpgradeDelay::get()); - make_attestations(&mut candidate_a); - - assert_ok!(Call::from(set_heads(vec![candidate_a.clone()])).dispatch(Origin::none())); - - assert!(Parachains::past_code_meta(¶_id).most_recent_change().is_none()); - assert_eq!(Parachains::code_upgrade_schedule(¶_id), Some(applied_after)); - assert_eq!(::FutureCode::get(¶_id), new_code); - assert_eq!(Parachains::parachain_code(¶_id), Some(vec![1, 2, 3].into())); - - assert!(Parachains::past_code_pruning_tasks().is_empty()); - }; - - Parachains::cleanup_para(para_id); - - // cleaning up the parachain should place the current parachain code - // into the past code buffer & schedule cleanup. - assert_eq!(Parachains::past_code_meta(¶_id).most_recent_change(), Some(2)); - assert_eq!(::PastCode::get(&(para_id, 2)), Some(vec![1, 2, 3].into())); - assert_eq!(Parachains::past_code_pruning_tasks(), vec![(para_id, 2)]); - - // any future upgrades haven't been used to validate yet, so those - // are cleaned up immediately. - assert!(Parachains::code_upgrade_schedule(¶_id).is_none()); - assert!(::FutureCode::get(¶_id).0.is_empty()); - assert!(Parachains::parachain_code(¶_id).is_none()); - - let cleaned_up_at = 2 + SlashPeriod::get() + 1; - run_to_block(cleaned_up_at); - - // now the final cleanup: last past code cleaned up, and this triggers meta cleanup. - assert_eq!(Parachains::past_code_meta(¶_id), Default::default()); - assert!(::PastCode::get(&(para_id, 2)).is_none()); - assert!(Parachains::past_code_pruning_tasks().is_empty()); - }); - } - - #[test] - fn double_vote_candidate_and_valid_works() { - let parachains = vec![ - (1u32.into(), vec![].into(), vec![].into()), - ]; - - let extract_key = |public: ValidatorId| { - let mut raw_public = [0; 32]; - raw_public.copy_from_slice(public.as_ref()); - Sr25519Keyring::from_raw_public(raw_public).unwrap() - }; - - // Test that a Candidate and Valid statements on the same candidate get slashed. 
- new_test_ext(parachains.clone()).execute_with(|| { - assert_eq!(Staking::current_era(), Some(0)); - assert_eq!(Session::current_index(), 0); - - start_era(1); - - let candidate = raw_candidate(1.into()).abridge().0; - let candidate_hash = candidate.hash(); - - let authorities = Parachains::authorities(); - let authority_index = 0; - let key = extract_key(authorities[authority_index].clone()); - - let statement_candidate = Statement::Candidate(candidate_hash.clone()); - let statement_valid = Statement::Valid(candidate_hash.clone()); - - let signing_context = Parachains::signing_context(); - let payload_1 = localized_payload(statement_candidate.clone(), &signing_context); - let payload_2 = localized_payload(statement_valid.clone(), &signing_context); - - let signature_1 = key.sign(&payload_1[..]).into(); - let signature_2 = key.sign(&payload_2[..]).into(); - - // Check that in the beginning the genesis balances are there. - for i in 0..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(1, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - - let encoded_key = key.encode(); - let proof = Historical::prove((PARACHAIN_KEY_TYPE_ID, &encoded_key[..])).unwrap(); - - let report = DoubleVoteReport { - identity: ValidatorId::from(key.public()), - first: (statement_candidate, signature_1), - second: (statement_valid, signature_2), - proof, - signing_context, - }; - - let inner = report_double_vote(report).unwrap(); - - assert_ok!(Call::from(inner).dispatch(Origin::signed(1))); - - start_era(2); - - // Check that the balance of 0-th validator is slashed 100%. - assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000); - assert_eq!(Staking::slashable_balance_of(&0), 0); - - assert_eq!( - Staking::eras_stakers(2, 0), - staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, - ); - - // Check that the balances of all other validators are left intact. - for i in 1..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(2, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - }); - } - - #[test] - fn double_vote_candidate_and_invalid_works() { - let parachains = vec![ - (1u32.into(), vec![].into(), vec![].into()), - ]; - - let extract_key = |public: ValidatorId| { - let mut raw_public = [0; 32]; - raw_public.copy_from_slice(public.as_ref()); - Sr25519Keyring::from_raw_public(raw_public).unwrap() - }; - - // Test that a Candidate and Invalid statements on the same candidate get slashed. 
- new_test_ext(parachains.clone()).execute_with(|| { - start_era(1); - - let candidate = raw_candidate(1.into()).abridge().0; - let candidate_hash = candidate.hash(); - - let authorities = Parachains::authorities(); - let authority_index = 0; - let key = extract_key(authorities[authority_index].clone()); - - let statement_candidate = Statement::Candidate(candidate_hash); - let statement_invalid = Statement::Invalid(candidate_hash.clone()); - - let signing_context = Parachains::signing_context(); - let payload_1 = localized_payload(statement_candidate.clone(), &signing_context); - let payload_2 = localized_payload(statement_invalid.clone(), &signing_context); - - let signature_1 = key.sign(&payload_1[..]).into(); - let signature_2 = key.sign(&payload_2[..]).into(); - - // Check that in the beginning the genesis balances are there. - for i in 0..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(1, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - - let encoded_key = key.encode(); - let proof = Historical::prove((PARACHAIN_KEY_TYPE_ID, &encoded_key[..])).unwrap(); - - let report = DoubleVoteReport { - identity: ValidatorId::from(key.public()), - first: (statement_candidate, signature_1), - second: (statement_invalid, signature_2), - proof, - signing_context, - }; - - assert_ok!(Call::from(report_double_vote(report).unwrap()).dispatch(Origin::signed(1))); - - start_era(2); - - // Check that the balance of 0-th validator is slashed 100%. - assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000); - assert_eq!(Staking::slashable_balance_of(&0), 0); - - assert_eq!( - Staking::eras_stakers(Staking::current_era().unwrap(), 0), - staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, - ); - - // Check that the balances of all other validators are left intact. - for i in 1..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(2, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - - }); - } - - #[test] - fn double_vote_valid_and_invalid_works() { - let parachains = vec![ - (1u32.into(), vec![].into(), vec![].into()), - ]; - - let extract_key = |public: ValidatorId| { - let mut raw_public = [0; 32]; - raw_public.copy_from_slice(public.as_ref()); - Sr25519Keyring::from_raw_public(raw_public).unwrap() - }; - - // Test that an Invalid and Valid statements on the same candidate get slashed. - new_test_ext(parachains.clone()).execute_with(|| { - start_era(1); - - let candidate = raw_candidate(1.into()).abridge().0; - let candidate_hash = candidate.hash(); - - let authorities = Parachains::authorities(); - let authority_index = 0; - let key = extract_key(authorities[authority_index].clone()); - - let statement_invalid = Statement::Invalid(candidate_hash.clone()); - let statement_valid = Statement::Valid(candidate_hash.clone()); - - let signing_context = Parachains::signing_context(); - let payload_1 = localized_payload(statement_invalid.clone(), &signing_context); - let payload_2 = localized_payload(statement_valid.clone(), &signing_context); - - let signature_1 = key.sign(&payload_1[..]).into(); - let signature_2 = key.sign(&payload_2[..]).into(); - - // Check that in the beginning the genesis balances are there. 
- for i in 0..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(1, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - - let encoded_key = key.encode(); - let proof = Historical::prove((PARACHAIN_KEY_TYPE_ID, &encoded_key[..])).unwrap(); - - let report = DoubleVoteReport { - identity: ValidatorId::from(key.public()), - first: (statement_invalid, signature_1), - second: (statement_valid, signature_2), - proof, - signing_context, - }; - - assert_ok!(Call::from(report_double_vote(report).unwrap()).dispatch(Origin::signed(1))); - - start_era(2); - - // Check that the balance of 0-th validator is slashed 100%. - assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000); - assert_eq!(Staking::slashable_balance_of(&0), 0); - - assert_eq!( - Staking::eras_stakers(2, 0), - staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, - ); - - // Check that the balances of all other validators are left intact. - for i in 1..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(2, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - }); - } - - // Check that submitting the same report twice errors. - #[test] - fn double_vote_submit_twice_works() { - let parachains = vec![ - (1u32.into(), vec![].into(), vec![].into()), - ]; - - let extract_key = |public: ValidatorId| { - let mut raw_public = [0; 32]; - raw_public.copy_from_slice(public.as_ref()); - Sr25519Keyring::from_raw_public(raw_public).unwrap() - }; - - // Test that a Candidate and Valid statements on the same candidate get slashed. - new_test_ext(parachains.clone()).execute_with(|| { - assert_eq!(Staking::current_era(), Some(0)); - assert_eq!(Session::current_index(), 0); - - start_era(1); - - let candidate = raw_candidate(1.into()).abridge().0; - let candidate_hash = candidate.hash(); - - let authorities = Parachains::authorities(); - let authority_index = 0; - let key = extract_key(authorities[authority_index].clone()); - - let statement_candidate = Statement::Candidate(candidate_hash.clone()); - let statement_valid = Statement::Valid(candidate_hash.clone()); - - let signing_context = Parachains::signing_context(); - let payload_1 = localized_payload(statement_candidate.clone(), &signing_context); - let payload_2 = localized_payload(statement_valid.clone(), &signing_context); - - let signature_1 = key.sign(&payload_1[..]).into(); - let signature_2 = key.sign(&payload_2[..]).into(); - - // Check that in the beginning the genesis balances are there. 
- for i in 0..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(1, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - - let encoded_key = key.encode(); - let proof = Historical::prove((PARACHAIN_KEY_TYPE_ID, &encoded_key[..])).unwrap(); - - let report = DoubleVoteReport { - identity: ValidatorId::from(key.public()), - first: (statement_candidate, signature_1), - second: (statement_valid, signature_2), - proof, - signing_context, - }; - - assert_ok!(Call::from(report_double_vote(report.clone()).unwrap()).dispatch(Origin::signed(1))); - - assert!(Call::from(report_double_vote(report).unwrap()).dispatch(Origin::signed(1)).is_err() - ); - - start_era(2); - - // Check that the balance of 0-th validator is slashed 100%. - assert_eq!(Balances::total_balance(&0), 10_000_000 - 10_000); - assert_eq!(Staking::slashable_balance_of(&0), 0); - - assert_eq!( - Staking::eras_stakers(2, 0), - staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, - ); - - // Check that the balances of all other validators are left intact. - for i in 1..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(2, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - }); - } - - // Check that submitting invalid reports fail. - #[test] - fn double_vote_submit_invalid_works() { - let parachains = vec![ - (1u32.into(), vec![].into(), vec![].into()), - ]; - - let extract_key = |public: ValidatorId| { - let mut raw_public = [0; 32]; - raw_public.copy_from_slice(public.as_ref()); - Sr25519Keyring::from_raw_public(raw_public).unwrap() - }; - - // Test that a Candidate and Valid statements on the same candidate get slashed. 
- new_test_ext(parachains.clone()).execute_with(|| { - assert_eq!(Staking::current_era(), Some(0)); - assert_eq!(Session::current_index(), 0); - - start_era(1); - - let candidate = raw_candidate(1.into()).abridge().0; - let candidate_hash = candidate.hash(); - - let authorities = Parachains::authorities(); - let authority_1_index = 0; - let authority_2_index = 1; - let key_1 = extract_key(authorities[authority_1_index].clone()); - let key_2 = extract_key(authorities[authority_2_index].clone()); - - let statement_candidate = Statement::Candidate(candidate_hash.clone()); - let statement_valid = Statement::Valid(candidate_hash.clone()); - - let signing_context = Parachains::signing_context(); - let payload_1 = localized_payload(statement_candidate.clone(), &signing_context); - let payload_2 = localized_payload(statement_valid.clone(), &signing_context); - - let signature_1 = key_1.sign(&payload_1[..]).into(); - let signature_2 = key_2.sign(&payload_2[..]).into(); - - let encoded_key = key_1.encode(); - let proof = Historical::prove((PARACHAIN_KEY_TYPE_ID, &encoded_key[..])).unwrap(); - - let report = DoubleVoteReport { - identity: ValidatorId::from(key_1.public()), - first: (statement_candidate, signature_1), - second: (statement_valid, signature_2), - proof, - signing_context, - }; - - assert_eq!( - report_double_vote(report.clone()), - Err(TransactionValidityError::Invalid( - InvalidTransaction::Custom(DoubleVoteValidityError::InvalidSignature as u8) - ) - ), - ); - }); - } - - #[test] - fn double_vote_proof_session_mismatch_fails() { - let parachains = vec![ - (1u32.into(), vec![].into(), vec![].into()), - ]; - - let extract_key = |public: ValidatorId| { - let mut raw_public = [0; 32]; - raw_public.copy_from_slice(public.as_ref()); - Sr25519Keyring::from_raw_public(raw_public).unwrap() - }; - - // Test that submitting a report with a session mismatch between the `parent_hash` - // and the proof itself fails. - new_test_ext(parachains.clone()).execute_with(|| { - assert_eq!(Staking::current_era(), Some(0)); - assert_eq!(Session::current_index(), 0); - - start_era(1); - - let candidate = raw_candidate(1.into()).abridge().0; - let candidate_hash = candidate.hash(); - - let authorities = Parachains::authorities(); - let authority_index = 0; - let key = extract_key(authorities[authority_index].clone()); - - let statement_candidate = Statement::Candidate(candidate_hash.clone()); - let statement_valid = Statement::Valid(candidate_hash.clone()); - let parent_hash = System::parent_hash(); - - let signing_context = SigningContext { - session_index: Session::current_index() - 1, - parent_hash, - }; - let payload_1 = localized_payload(statement_candidate.clone(), &signing_context); - let payload_2 = localized_payload(statement_valid.clone(), &signing_context); - - let signature_1 = key.sign(&payload_1[..]).into(); - let signature_2 = key.sign(&payload_2[..]).into(); - - // Check that in the beginning the genesis balances are there. - for i in 0..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(1, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - - // Get the proof from another session. 
- start_era(2); - let encoded_key = key.encode(); - let proof = Historical::prove((PARACHAIN_KEY_TYPE_ID, &encoded_key[..])).unwrap(); - - let report = DoubleVoteReport { - identity: ValidatorId::from(key.public()), - first: (statement_candidate, signature_1), - second: (statement_valid, signature_2), - proof, - signing_context, - }; - - assert!(report_double_vote(report.clone()).is_err()); - - start_era(3); - - // Check that the balances are unchanged. - for i in 0..authorities.len() { - assert_eq!(Balances::total_balance(&(i as u64)), 10_000_000); - assert_eq!(Staking::slashable_balance_of(&(i as u64)), 10_000); - - assert_eq!( - Staking::eras_stakers(1, i as u64), - staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, - ); - } - }); - } -} diff --git a/runtime/common/src/paras_sudo_wrapper.rs b/runtime/common/src/paras_sudo_wrapper.rs new file mode 100644 index 0000000000000000000000000000000000000000..d165123b422f5e8e663ff7d3868d9d165be80cd5 --- /dev/null +++ b/runtime/common/src/paras_sudo_wrapper.rs @@ -0,0 +1,63 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A simple wrapper allowing `Sudo` to call into `paras` routines. + +use frame_support::{ + decl_error, decl_module, + dispatch::DispatchResult, + weights::DispatchClass, +}; +use frame_system::ensure_root; +use runtime_parachains::paras::{ + self, + ParaGenesisArgs, +}; +use primitives::v1::Id as ParaId; + +/// The module's configuration trait. +pub trait Trait: paras::Trait { } + +decl_error! { + pub enum Error for Module { } +} + +decl_module! { + /// A sudo wrapper to call into v1 paras module. + pub struct Module for enum Call where origin: ::Origin { + type Error = Error; + + /// Schedule a para to be initialized at the start of the next session. + #[weight = (1_000, DispatchClass::Operational)] + pub fn sudo_schedule_para_initialize( + origin, + id: ParaId, + genesis: ParaGenesisArgs, + ) -> DispatchResult { + ensure_root(origin)?; + paras::Module::::schedule_para_initialize(id, genesis); + Ok(()) + } + + /// Schedule a para to be cleaned up at the start of the next session. + #[weight = (1_000, DispatchClass::Operational)] + pub fn sudo_schedule_para_cleanup(origin, id: ParaId) -> DispatchResult { + ensure_root(origin)?; + paras::Module::::schedule_para_cleanup(id); + Ok(()) + } + } +} diff --git a/runtime/common/src/purchase.rs b/runtime/common/src/purchase.rs new file mode 100644 index 0000000000000000000000000000000000000000..8818649345e5b9ceb8c6c01626dac4e49afc30ed --- /dev/null +++ b/runtime/common/src/purchase.rs @@ -0,0 +1,1014 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Module to process purchase of DOTs.
+
+use codec::{Encode, Decode};
+use sp_runtime::{Permill, RuntimeDebug, DispatchResult, DispatchError, AnySignature};
+use sp_runtime::traits::{Zero, CheckedAdd, Verify, Saturating};
+use frame_support::{decl_event, decl_storage, decl_module, decl_error, ensure};
+use frame_support::traits::{
+	EnsureOrigin, Currency, ExistenceRequirement, VestingSchedule, Get
+};
+use frame_system::ensure_signed;
+use sp_core::sr25519;
+use sp_std::prelude::*;
+
+/// Configuration trait.
+pub trait Trait: frame_system::Trait {
+	/// The overarching event type.
+	type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
+	/// Balances Pallet
+	type Currency: Currency<Self::AccountId>;
+	/// Vesting Pallet
+	type VestingSchedule: VestingSchedule<Self::AccountId, Moment=Self::BlockNumber, Currency=Self::Currency>;
+	/// The origin allowed to set account status.
+	type ValidityOrigin: EnsureOrigin<Self::Origin>;
+	/// The origin allowed to make configurations to the pallet.
+	type ConfigurationOrigin: EnsureOrigin<Self::Origin>;
+	/// The maximum statement length for the statement users to sign when creating an account.
+	type MaxStatementLength: Get<usize>;
+	/// The amount of purchased locked DOTs that we will unlock for basic actions on the chain.
+	type UnlockedProportion: Get<Permill>;
+	/// The maximum amount of locked DOTs that we will unlock.
+	type MaxUnlocked: Get<BalanceOf<Self>>;
+}
+
+type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as frame_system::Trait>::AccountId>>::Balance;
+
+/// The validity status of an account participating in the purchase process.
+#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug)]
+pub enum AccountValidity {
+	/// Account is not valid.
+	Invalid,
+	/// Account has initiated the account creation process.
+	Initiated,
+	/// Account is pending validation.
+	Pending,
+	/// Account is valid with a low contribution amount.
+	ValidLow,
+	/// Account is valid with a high contribution amount.
+	ValidHigh,
+	/// Account has completed the purchase process.
+	Completed,
+}
+
+impl Default for AccountValidity {
+	fn default() -> Self {
+		AccountValidity::Invalid
+	}
+}
+
+impl AccountValidity {
+	fn is_valid(&self) -> bool {
+		match self {
+			Self::Invalid => false,
+			Self::Initiated => false,
+			Self::Pending => false,
+			Self::ValidLow => true,
+			Self::ValidHigh => true,
+			Self::Completed => false,
+		}
+	}
+}
+
+/// All information about an account regarding the purchase of DOTs.
+#[derive(Encode, Decode, Default, Clone, Eq, PartialEq, RuntimeDebug)]
+pub struct AccountStatus<Balance> {
+	/// The current validity status of the user. Will denote if the user has passed KYC,
+	/// how much they are able to purchase, and when their purchase process has completed.
+	validity: AccountValidity,
+	/// The amount of free DOTs they have purchased.
+	free_balance: Balance,
+	/// The amount of locked DOTs they have purchased.
+	locked_balance: Balance,
+	/// Their sr25519/ed25519 signature verifying they have signed our required statement.
+	signature: Vec<u8>,
+	/// The percentage of VAT the purchaser is responsible for.
+	/// This is already factored into account balance.
+	vat: Permill,
+}
+
+decl_event!(
+	pub enum Event<T> where
+		AccountId = <T as frame_system::Trait>::AccountId,
+		Balance = BalanceOf<T>,
+		BlockNumber = <T as frame_system::Trait>::BlockNumber,
+	{
+		/// A [new] account was created. [who]
+		AccountCreated(AccountId),
+		/// Someone's account validity was updated. [who, validity]
+		ValidityUpdated(AccountId, AccountValidity),
+		/// Someone's purchase balance was updated. [who, free, locked]
+		BalanceUpdated(AccountId, Balance, Balance),
+		/// A payout was made to a purchaser. [who, free, locked]
+		PaymentComplete(AccountId, Balance, Balance),
+		/// A new payment account was set. [who]
+		PaymentAccountSet(AccountId),
+		/// A new statement was set.
+		StatementUpdated,
+		/// A new unlock block was set. [block_number]
+		UnlockBlockUpdated(BlockNumber),
+	}
+);
+
+decl_error! {
+	pub enum Error for Module<T: Trait> {
+		/// Account is not currently valid to use.
+		InvalidAccount,
+		/// Account used in the purchase already exists.
+		ExistingAccount,
+		/// Provided signature is invalid.
+		InvalidSignature,
+		/// Account has already completed the purchase process.
+		AlreadyCompleted,
+		/// An overflow occurred when doing calculations.
+		Overflow,
+		/// The statement is too long to be stored on chain.
+		InvalidStatement,
+		/// The unlock block is in the past!
+		InvalidUnlockBlock,
+		/// Vesting schedule already exists for this account.
+		VestingScheduleExists,
+	}
+}
+
+decl_storage! {
+	trait Store for Module<T: Trait> as Purchase {
+		// A map of all participants in the DOT purchase process.
+		Accounts: map hasher(blake2_128_concat) T::AccountId => AccountStatus<BalanceOf<T>>;
+		// The account that will be used to payout participants of the DOT purchase process.
+		PaymentAccount: T::AccountId;
+		// The statement purchasers will need to sign to participate.
+		Statement: Vec<u8>;
+		// The block where all locked DOTs will unlock.
+		UnlockBlock: T::BlockNumber;
+	}
+}
+
+decl_module! {
+	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
+		type Error = Error<T>;
+
+		/// The maximum statement length for the statement users to sign when creating an account.
+		const MaxStatementLength: u32 = T::MaxStatementLength::get() as u32;
+		/// The amount of purchased locked DOTs that we will unlock for basic actions on the chain.
+		const UnlockedProportion: Permill = T::UnlockedProportion::get();
+		/// The maximum amount of locked DOTs that we will unlock.
+		const MaxUnlocked: BalanceOf<T> = T::MaxUnlocked::get();
+
+		/// Deposit one of this module's events by using the default implementation.
+		fn deposit_event() = default;
+
+		/// Create a new account. Proof of existence through a valid signed message.
+		///
+		/// We check that the account does not exist at this stage.
+		///
+		/// Origin must match the `ValidityOrigin`.
+		#[weight = 200_000_000 + T::DbWeight::get().reads_writes(4, 1)]
+		fn create_account(origin,
+			who: T::AccountId,
+			signature: Vec<u8>
+		) {
+			T::ValidityOrigin::ensure_origin(origin)?;
+			// Account is already being tracked by the pallet.
+			ensure!(!Accounts::<T>::contains_key(&who), Error::<T>::ExistingAccount);
+			// Account should not have a vesting schedule.
+			ensure!(T::VestingSchedule::vesting_balance(&who).is_none(), Error::<T>::VestingScheduleExists);
+
+			// Verify the signature provided is valid for the statement.
+			Self::verify_signature(&who, &signature)?;
+
+			// Create a new pending account.
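+			// All balances start at zero and the validity at `Initiated`; the
+			// signature is kept in storage so the account's statement can be
+			// re-checked later if ever needed.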
+			let status = AccountStatus {
+				validity: AccountValidity::Initiated,
+				signature,
+				free_balance: Zero::zero(),
+				locked_balance: Zero::zero(),
+				vat: Permill::zero(),
+			};
+			Accounts::<T>::insert(&who, status);
+			Self::deposit_event(RawEvent::AccountCreated(who));
+		}
+
+		/// Update the validity status of an existing account. If set to completed, the account
+		/// will no longer be able to continue through the purchase process.
+		///
+		/// We check that the account exists at this stage, but has not completed the process.
+		///
+		/// Origin must match the `ValidityOrigin`.
+		#[weight = T::DbWeight::get().reads_writes(1, 1)]
+		fn update_validity_status(origin,
+			who: T::AccountId,
+			validity: AccountValidity
+		) {
+			T::ValidityOrigin::ensure_origin(origin)?;
+			ensure!(Accounts::<T>::contains_key(&who), Error::<T>::InvalidAccount);
+			Accounts::<T>::try_mutate(&who, |status: &mut AccountStatus<BalanceOf<T>>| -> DispatchResult {
+				ensure!(status.validity != AccountValidity::Completed, Error::<T>::AlreadyCompleted);
+				status.validity = validity;
+				Ok(())
+			})?;
+			Self::deposit_event(RawEvent::ValidityUpdated(who, validity));
+		}
+
+		/// Update the balance of a valid account.
+		///
+		/// We check that the account is valid for a balance transfer at this point.
+		///
+		/// Origin must match the `ValidityOrigin`.
+		#[weight = T::DbWeight::get().reads_writes(2, 1)]
+		fn update_balance(origin,
+			who: T::AccountId,
+			free_balance: BalanceOf<T>,
+			locked_balance: BalanceOf<T>,
+			vat: Permill,
+		) {
+			T::ValidityOrigin::ensure_origin(origin)?;
+
+			Accounts::<T>::try_mutate(&who, |status: &mut AccountStatus<BalanceOf<T>>| -> DispatchResult {
+				// Account has a valid status (not Invalid, Pending, or Completed)...
+				ensure!(status.validity.is_valid(), Error::<T>::InvalidAccount);
+
+				// The result is discarded; this only checks the sum does not overflow.
+				free_balance.checked_add(&locked_balance).ok_or(Error::<T>::Overflow)?;
+				status.free_balance = free_balance;
+				status.locked_balance = locked_balance;
+				status.vat = vat;
+				Ok(())
+			})?;
+			Self::deposit_event(RawEvent::BalanceUpdated(who, free_balance, locked_balance));
+		}
+
+		/// Pay the user and complete the purchase process.
+		///
+		/// We reverify all assumptions about the state of an account, and complete the process.
+		///
+		/// Origin must match the configured `PaymentAccount`.
+		#[weight = T::DbWeight::get().reads_writes(4, 2)]
+		fn payout(origin, who: T::AccountId) {
+			// Payments must be made directly by the `PaymentAccount`.
+			let payment_account = ensure_signed(origin)?;
+			ensure!(payment_account == PaymentAccount::<T>::get(), DispatchError::BadOrigin);
+
+			// Account should not have a vesting schedule.
+			ensure!(T::VestingSchedule::vesting_balance(&who).is_none(), Error::<T>::VestingScheduleExists);
+
+			Accounts::<T>::try_mutate(&who, |status: &mut AccountStatus<BalanceOf<T>>| -> DispatchResult {
+				// Account has a valid status (not Invalid, Pending, or Completed)...
+				ensure!(status.validity.is_valid(), Error::<T>::InvalidAccount);
+
+				// Transfer funds from the payment account into the purchasing user.
+				let total_balance = status.free_balance
+					.checked_add(&status.locked_balance)
+					.ok_or(Error::<T>::Overflow)?;
+				T::Currency::transfer(&payment_account, &who, total_balance, ExistenceRequirement::AllowDeath)?;
+
+				if !status.locked_balance.is_zero() {
+					let unlock_block = UnlockBlock::<T>::get();
+					// We allow some configurable portion of the purchased locked DOTs to be unlocked for basic usage.
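+					// As a worked example (using the test configuration below, where
+					// `UnlockedProportion` is 10% and `MaxUnlocked` is 10 units):
+					// 50 locked units unlock min(5, 10) = 5 immediately and vest the
+					// remaining 45; 150 locked units unlock min(15, 10) = 10 and vest
+					// the remaining 140, in both cases until `unlock_block`.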
+					let unlocked = (T::UnlockedProportion::get() * status.locked_balance).min(T::MaxUnlocked::get());
+					let locked = status.locked_balance.saturating_sub(unlocked);
+					// We checked that this account has no existing vesting schedule. So this function should
+					// never fail, however if it does, not much we can do about it at this point.
+					let _ = T::VestingSchedule::add_vesting_schedule(
+						// Apply vesting schedule to this user
+						&who,
+						// For this much amount
+						locked,
+						// Unlocking the full amount after one block
+						locked,
+						// When everything unlocks
+						unlock_block
+					);
+				}
+
+				// Setting the user account to `Completed` ends the purchase process for this user.
+				status.validity = AccountValidity::Completed;
+				Self::deposit_event(RawEvent::PaymentComplete(who.clone(), status.free_balance, status.locked_balance));
+				Ok(())
+			})?;
+		}
+
+		/* Configuration Operations */
+
+		/// Set the account that will be used to payout users in the DOT purchase process.
+		///
+		/// Origin must match the `ConfigurationOrigin`.
+		#[weight = T::DbWeight::get().writes(1)]
+		fn set_payment_account(origin, who: T::AccountId) {
+			T::ConfigurationOrigin::ensure_origin(origin)?;
+			// Possibly this is worse than having the caller account be the payment account?
+			PaymentAccount::<T>::set(who.clone());
+			Self::deposit_event(RawEvent::PaymentAccountSet(who));
+		}
+
+		/// Set the statement that must be signed for a user to participate in the DOT sale.
+		///
+		/// Origin must match the `ConfigurationOrigin`.
+		#[weight = T::DbWeight::get().writes(1)]
+		fn set_statement(origin, statement: Vec<u8>) {
+			T::ConfigurationOrigin::ensure_origin(origin)?;
+			ensure!(statement.len() < T::MaxStatementLength::get(), Error::<T>::InvalidStatement);
+			Statement::set(statement);
+			Self::deposit_event(RawEvent::StatementUpdated);
+		}
+
+		/// Set the block where locked DOTs will become unlocked.
+		///
+		/// Origin must match the `ConfigurationOrigin`.
+		#[weight = T::DbWeight::get().writes(1)]
+		fn set_unlock_block(origin, unlock_block: T::BlockNumber) {
+			T::ConfigurationOrigin::ensure_origin(origin)?;
+			ensure!(unlock_block > frame_system::Module::<T>::block_number(), Error::<T>::InvalidUnlockBlock);
+			UnlockBlock::<T>::set(unlock_block);
+			Self::deposit_event(RawEvent::UnlockBlockUpdated(unlock_block));
+		}
+	}
+}
+
+impl<T: Trait> Module<T> {
+	fn verify_signature(who: &T::AccountId, signature: &[u8]) -> Result<(), DispatchError> {
+		// sr25519 always expects a 64 byte signature.
+		ensure!(signature.len() == 64, Error::<T>::InvalidSignature);
+		let signature: AnySignature = sr25519::Signature::from_slice(signature).into();
+
+		// In Polkadot, the AccountId is always the same as the 32 byte public key.
+		let account_bytes: [u8; 32] = account_to_bytes(who)?;
+		let public_key = sr25519::Public::from_raw(account_bytes);
+
+		let message = Statement::get();
+
+		// Check if everything is good or not.
+		match signature.verify(message.as_slice(), &public_key) {
+			true => Ok(()),
+			false => Err(Error::<T>::InvalidSignature)?,
+		}
+	}
+}
+
+// This function converts a 32 byte AccountId to its byte-array equivalent form.
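+// It returns a `DispatchError` rather than panicking when the `AccountId` type
+// in use does not encode to exactly 32 bytes.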
+fn account_to_bytes(account: &AccountId) -> Result<[u8; 32], DispatchError> + where AccountId: Encode, +{ + let account_vec = account.encode(); + ensure!(account_vec.len() == 32, "AccountId must be 32 bytes."); + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(&account_vec); + Ok(bytes) +} + +#[cfg(test)] +mod tests { + use super::*; + + use sp_core::{H256, Pair, Public, crypto::AccountId32, ed25519}; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. + use sp_runtime::{ + Perbill, MultiSignature, + traits::{BlakeTwo256, IdentityLookup, Identity, Verify, IdentifyAccount, Dispatchable}, + testing::Header + }; + use frame_support::{ + impl_outer_origin, impl_outer_dispatch, assert_ok, assert_noop, parameter_types, + ord_parameter_types, dispatch::DispatchError::BadOrigin, + }; + use frame_support::traits::Currency; + use pallet_balances::Error as BalancesError; + + impl_outer_origin! { + pub enum Origin for Test {} + } + + impl_outer_dispatch! { + pub enum Call for Test where origin: Origin { + purchase::Purchase, + vesting::Vesting, + } + } + + type AccountId = AccountId32; + + // For testing the module, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of modules we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! { + pub const BlockHashCount: u32 = 250; + pub const MaximumBlockWeight: u32 = 4 * 1024 * 1024; + pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + } + impl frame_system::Trait for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type BlockExecutionWeight = (); + type ExtrinsicBaseWeight = (); + type MaximumExtrinsicWeight = MaximumBlockWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = Balances; + type SystemWeightInfo = (); + } + + parameter_types! { + pub const ExistentialDeposit: u64 = 1; + } + + impl pallet_balances::Trait for Test { + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + } + + parameter_types! { + pub const MinVestedTransfer: u64 = 0; + } + + impl pallet_vesting::Trait for Test { + type Event = (); + type Currency = Balances; + type BlockNumberToBalance = Identity; + type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); + } + + parameter_types! { + pub const MaxStatementLength: usize = 1_000; + pub const UnlockedProportion: Permill = Permill::from_percent(10); + pub const MaxUnlocked: u64 = 10; + } + + ord_parameter_types! 
{ + pub const ValidityOrigin: AccountId = AccountId32::from([0u8; 32]); + pub const PaymentOrigin: AccountId = AccountId32::from([1u8; 32]); + pub const ConfigurationOrigin: AccountId = AccountId32::from([2u8; 32]); + } + + impl Trait for Test { + type Event = (); + type Currency = Balances; + type VestingSchedule = Vesting; + type ValidityOrigin = frame_system::EnsureSignedBy; + type ConfigurationOrigin = frame_system::EnsureSignedBy; + type MaxStatementLength = MaxStatementLength; + type UnlockedProportion = UnlockedProportion; + type MaxUnlocked = MaxUnlocked; + } + + type System = frame_system::Module; + type Balances = pallet_balances::Module; + type Vesting = pallet_vesting::Module; + type Purchase = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. It also executes our `setup` function which sets up this pallet for use. + pub fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| setup()); + ext + } + + fn setup() { + let statement = b"Hello, World".to_vec(); + let unlock_block = 100; + Purchase::set_statement(Origin::signed(configuration_origin()), statement).unwrap(); + Purchase::set_unlock_block(Origin::signed(configuration_origin()), unlock_block).unwrap(); + Purchase::set_payment_account(Origin::signed(configuration_origin()), payment_account()).unwrap(); + Balances::make_free_balance_be(&payment_account(), 100_000); + } + + type AccountPublic = ::Signer; + + /// Helper function to generate a crypto pair from seed + fn get_from_seed(seed: &str) -> ::Public { + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() + } + + /// Helper function to generate an account ID from seed + fn get_account_id_from_seed(seed: &str) -> AccountId where + AccountPublic: From<::Public> + { + AccountPublic::from(get_from_seed::(seed)).into_account() + } + + fn alice() -> AccountId { + get_account_id_from_seed::("Alice") + } + + fn alice_ed25519() -> AccountId { + get_account_id_from_seed::("Alice") + } + + fn bob() -> AccountId { + get_account_id_from_seed::("Bob") + } + + fn alice_signature() -> [u8; 64] { + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Alice" + hex_literal::hex!("20e0faffdf4dfe939f2faa560f73b1d01cde8472e2b690b7b40606a374244c3a2e9eb9c8107c10b605138374003af8819bd4387d7c24a66ee9253c2e688ab881") + } + + fn bob_signature() -> [u8; 64] { + // echo -n "Hello, World" | subkey -s sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Bob" + hex_literal::hex!("d6d460187ecf530f3ec2d6e3ac91b9d083c8fbd8f1112d92a82e4d84df552d18d338e6da8944eba6e84afaacf8a9850f54e7b53a84530d649be2e0119c7ce889") + } + + fn alice_signature_ed25519() -> [u8; 64] { + // echo -n "Hello, World" | subkey -e sign "bottom drive obey lake curtain smoke basket hold race lonely fit walk//Alice" + hex_literal::hex!("ee3f5a6cbfc12a8f00c18b811dc921b550ddf272354cda4b9a57b1d06213fcd8509f5af18425d39a279d13622f14806c3e978e2163981f2ec1c06e9628460b0e") + } + + fn validity_origin() -> AccountId { + ValidityOrigin::get() + } + + fn configuration_origin() -> AccountId { + ConfigurationOrigin::get() + } + + fn payment_account() -> AccountId { + [42u8; 32].into() + } + + #[test] + fn set_statement_works_and_handles_basic_errors() { + new_test_ext().execute_with(|| { + let 
statement = b"Test Set Statement".to_vec(); + // Invalid origin + assert_noop!( + Purchase::set_statement(Origin::signed(alice()), statement.clone()), + BadOrigin, + ); + // Too Long + let long_statement = [0u8; 10_000].to_vec(); + assert_noop!( + Purchase::set_statement(Origin::signed(configuration_origin()), long_statement), + Error::::InvalidStatement, + ); + // Just right... + assert_ok!(Purchase::set_statement(Origin::signed(configuration_origin()), statement.clone())); + assert_eq!(Statement::get(), statement); + }); + } + + #[test] + fn set_unlock_block_works_and_handles_basic_errors() { + new_test_ext().execute_with(|| { + let unlock_block = 69; + // Invalid origin + assert_noop!( + Purchase::set_unlock_block(Origin::signed(alice()), unlock_block), + BadOrigin, + ); + // Block Number in Past + let bad_unlock_block = 50; + System::set_block_number(bad_unlock_block); + assert_noop!( + Purchase::set_unlock_block(Origin::signed(configuration_origin()), bad_unlock_block), + Error::::InvalidUnlockBlock, + ); + // Just right... + assert_ok!(Purchase::set_unlock_block(Origin::signed(configuration_origin()), unlock_block)); + assert_eq!(UnlockBlock::::get(), unlock_block); + }); + } + + #[test] + fn set_payment_account_works_and_handles_basic_errors() { + new_test_ext().execute_with(|| { + let payment_account: AccountId = [69u8; 32].into(); + // Invalid Origin + assert_noop!( + Purchase::set_payment_account(Origin::signed(alice()), payment_account.clone()), + BadOrigin, + ); + // Just right... + assert_ok!(Purchase::set_payment_account(Origin::signed(configuration_origin()), payment_account.clone())); + assert_eq!(PaymentAccount::::get(), payment_account); + }); + } + + #[test] + fn signature_verification_works() { + new_test_ext().execute_with(|| { + assert_ok!(Purchase::verify_signature(&alice(), &alice_signature())); + assert_ok!(Purchase::verify_signature(&alice_ed25519(), &alice_signature_ed25519())); + assert_ok!(Purchase::verify_signature(&bob(), &bob_signature())); + + // Mixing and matching fails + assert_noop!(Purchase::verify_signature(&alice(), &bob_signature()), Error::::InvalidSignature); + assert_noop!(Purchase::verify_signature(&bob(), &alice_signature()), Error::::InvalidSignature); + }); + } + + #[test] + fn account_creation_works() { + new_test_ext().execute_with(|| { + assert!(!Accounts::::contains_key(alice())); + assert_ok!(Purchase::create_account( + Origin::signed(validity_origin()), + alice(), + alice_signature().to_vec(), + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::Initiated, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + }); + } + + #[test] + fn account_creation_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!( + Purchase::create_account(Origin::signed(alice()), alice(), alice_signature().to_vec()), + BadOrigin, + ); + + // Wrong Account/Signature + assert_noop!( + Purchase::create_account(Origin::signed(validity_origin()), alice(), bob_signature().to_vec()), + Error::::InvalidSignature, + ); + + // Account with vesting + assert_ok!(::VestingSchedule::add_vesting_schedule( + &alice(), + 100, + 1, + 50 + )); + assert_noop!( + Purchase::create_account(Origin::signed(validity_origin()), alice(), alice_signature().to_vec()), + Error::::VestingScheduleExists, + ); + + // Duplicate Purchasing Account + assert_ok!( + Purchase::create_account(Origin::signed(validity_origin()), bob(), 
bob_signature().to_vec()) + ); + assert_noop!( + Purchase::create_account(Origin::signed(validity_origin()), bob(), bob_signature().to_vec()), + Error::::ExistingAccount, + ); + }); + } + + #[test] + fn update_validity_status_works() { + new_test_ext().execute_with(|| { + // Alice account is created. + assert_ok!(Purchase::create_account( + Origin::signed(validity_origin()), + alice(), + alice_signature().to_vec(), + )); + // She submits KYC, and we update the status to `Pending`. + assert_ok!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + alice(), + AccountValidity::Pending, + )); + // KYC comes back negative, so we mark the account invalid. + assert_ok!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + alice(), + AccountValidity::Invalid, + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::Invalid, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + // She fixes it, we mark her account valid. + assert_ok!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + alice(), + AccountValidity::ValidLow, + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::ValidLow, + free_balance: Zero::zero(), + locked_balance: Zero::zero(), + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + }); + } + + #[test] + fn update_validity_status_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!(Purchase::update_validity_status( + Origin::signed(alice()), + alice(), + AccountValidity::Pending, + ), BadOrigin); + // Inactive Account + assert_noop!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + alice(), + AccountValidity::Pending, + ), Error::::InvalidAccount); + // Already Completed + assert_ok!(Purchase::create_account( + Origin::signed(validity_origin()), + alice(), + alice_signature().to_vec(), + )); + assert_ok!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + alice(), + AccountValidity::Completed, + )); + assert_noop!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + alice(), + AccountValidity::Pending, + ), Error::::AlreadyCompleted); + }); + } + + #[test] + fn update_balance_works() { + new_test_ext().execute_with(|| { + // Alice account is created + assert_ok!(Purchase::create_account( + Origin::signed(validity_origin()), + alice(), + alice_signature().to_vec()) + ); + // And approved for basic contribution + assert_ok!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + alice(), + AccountValidity::ValidLow, + )); + // We set a balance on the user based on the payment they made. 50 locked, 50 free. + assert_ok!(Purchase::update_balance( + Origin::signed(validity_origin()), + alice(), + 50, + 50, + Permill::from_rational_approximation(77u32, 1000u32), + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::ValidLow, + free_balance: 50, + locked_balance: 50, + signature: alice_signature().to_vec(), + vat: Permill::from_parts(77000), + } + ); + // We can update the balance based on new information. 
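+			// Here the free balance is revised down from 50 to 25 and the VAT
+			// rate is cleared; the previous values are simply overwritten.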
+ assert_ok!(Purchase::update_balance( + Origin::signed(validity_origin()), + alice(), + 25, + 50, + Permill::zero(), + )); + assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::ValidLow, + free_balance: 25, + locked_balance: 50, + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + }); + } + + #[test] + fn update_balance_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!(Purchase::update_balance( + Origin::signed(alice()), + alice(), + 50, + 50, + Permill::zero(), + ), BadOrigin); + // Inactive Account + assert_noop!(Purchase::update_balance( + Origin::signed(validity_origin()), + alice(), + 50, + 50, + Permill::zero(), + ), Error::::InvalidAccount); + // Overflow + assert_noop!(Purchase::update_balance( + Origin::signed(validity_origin()), + alice(), + u64::max_value(), + u64::max_value(), + Permill::zero(), + ), Error::::InvalidAccount); + }); + } + + #[test] + fn payout_works() { + new_test_ext().execute_with(|| { + // Alice and Bob accounts are created + assert_ok!(Purchase::create_account( + Origin::signed(validity_origin()), + alice(), + alice_signature().to_vec()) + ); + assert_ok!(Purchase::create_account( + Origin::signed(validity_origin()), + bob(), + bob_signature().to_vec()) + ); + // Alice is approved for basic contribution + assert_ok!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + alice(), + AccountValidity::ValidLow, + )); + // Bob is approved for high contribution + assert_ok!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + bob(), + AccountValidity::ValidHigh, + )); + // We set a balance on the users based on the payment they made. 50 locked, 50 free. + assert_ok!(Purchase::update_balance( + Origin::signed(validity_origin()), + alice(), + 50, + 50, + Permill::zero(), + )); + assert_ok!(Purchase::update_balance( + Origin::signed(validity_origin()), + bob(), + 100, + 150, + Permill::zero(), + )); + // Now we call payout for Alice and Bob. + assert_ok!(Purchase::payout( + Origin::signed(payment_account()), + alice(), + )); + assert_ok!(Purchase::payout( + Origin::signed(payment_account()), + bob(), + )); + // Payment is made. + assert_eq!(::Currency::free_balance(&payment_account()), 99_650); + assert_eq!(::Currency::free_balance(&alice()), 100); + // 10% of the 50 units is unlocked automatically for Alice + assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); + assert_eq!(::Currency::free_balance(&bob()), 250); + // A max of 10 units is unlocked automatically for Bob + assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); + // Status is completed. 
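+			// Once an account is `Completed`, `update_validity_status` would fail with
+			// `AlreadyCompleted` and a repeated `payout` would fail with `InvalidAccount`,
+			// so the purchase process cannot run twice for the same account.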
+ assert_eq!( + Accounts::::get(alice()), + AccountStatus { + validity: AccountValidity::Completed, + free_balance: 50, + locked_balance: 50, + signature: alice_signature().to_vec(), + vat: Permill::zero(), + } + ); + assert_eq!( + Accounts::::get(bob()), + AccountStatus { + validity: AccountValidity::Completed, + free_balance: 100, + locked_balance: 150, + signature: bob_signature().to_vec(), + vat: Permill::zero(), + } + ); + // Vesting lock is removed in whole on block 101 (100 blocks after block 1) + System::set_block_number(100); + let vest_call = Call::Vesting(pallet_vesting::Call::::vest()); + assert_ok!(vest_call.clone().dispatch(Origin::signed(alice()))); + assert_ok!(vest_call.clone().dispatch(Origin::signed(bob()))); + assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); + assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); + System::set_block_number(101); + assert_ok!(vest_call.clone().dispatch(Origin::signed(alice()))); + assert_ok!(vest_call.clone().dispatch(Origin::signed(bob()))); + assert_eq!(::VestingSchedule::vesting_balance(&alice()), None); + assert_eq!(::VestingSchedule::vesting_balance(&bob()), None); + }); + } + + #[test] + fn payout_handles_basic_errors() { + new_test_ext().execute_with(|| { + // Wrong Origin + assert_noop!(Purchase::payout( + Origin::signed(alice()), + alice(), + ), BadOrigin); + // Account with Existing Vesting Schedule + assert_ok!(::VestingSchedule::add_vesting_schedule( + &bob(), 100, 1, 50, + )); + assert_noop!(Purchase::payout( + Origin::signed(payment_account()), + bob(), + ), Error::::VestingScheduleExists); + // Invalid Account (never created) + assert_noop!(Purchase::payout( + Origin::signed(payment_account()), + alice(), + ), Error::::InvalidAccount); + // Invalid Account (created, but not valid) + assert_ok!(Purchase::create_account( + Origin::signed(validity_origin()), + alice(), + alice_signature().to_vec()) + ); + assert_noop!(Purchase::payout( + Origin::signed(payment_account()), + alice(), + ), Error::::InvalidAccount); + // Not enough funds in payment account + assert_ok!(Purchase::update_validity_status( + Origin::signed(validity_origin()), + alice(), + AccountValidity::ValidHigh, + )); + assert_ok!(Purchase::update_balance( + Origin::signed(validity_origin()), + alice(), + 100_000, + 100_000, + Permill::zero(), + )); + assert_noop!(Purchase::payout( + Origin::signed(payment_account()), + alice(), + ), BalancesError::::InsufficientBalance); + }); + } +} diff --git a/runtime/common/src/registrar.rs b/runtime/common/src/registrar.rs deleted file mode 100644 index b5fbddb49eea697b612092ab0347e6e2a7b9e0f6..0000000000000000000000000000000000000000 --- a/runtime/common/src/registrar.rs +++ /dev/null @@ -1,1633 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Module to handle which parachains/parathreads (collectively referred to as "paras") are -//! 
diff --git a/runtime/common/src/registrar.rs b/runtime/common/src/registrar.rs
deleted file mode 100644
index b5fbddb49eea697b612092ab0347e6e2a7b9e0f6..0000000000000000000000000000000000000000
--- a/runtime/common/src/registrar.rs
+++ /dev/null
@@ -1,1633 +0,0 @@
-// Copyright 2017-2020 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
-//! Module to handle which parachains/parathreads (collectively referred to as "paras") are
-//! registered and which are scheduled. Doesn't manage any of the actual execution/validation logic
-//! which is left to `parachains.rs`.
-
-use sp_std::{prelude::*, result};
-#[cfg(any(feature = "std", test))]
-use sp_std::marker::PhantomData;
-use codec::{Encode, Decode};
-
-use sp_runtime::{
-	transaction_validity::{TransactionValidityError, ValidTransaction, TransactionValidity},
-	traits::{Hash as HashT, SignedExtension, DispatchInfoOf},
-};
-
-use frame_support::{
-	decl_storage, decl_module, decl_event, decl_error, ensure,
-	dispatch::{DispatchResult, IsSubType}, traits::{Get, Currency, ReservableCurrency},
-	weights::{DispatchClass, Weight},
-};
-use system::{self, ensure_root, ensure_signed};
-use primitives::parachain::{
-	Id as ParaId, CollatorId, Scheduling, LOWEST_USER_ID, SwapAux, Info as ParaInfo, ActiveParas,
-	Retriable, ValidationCode, HeadData,
-};
-use crate::parachains;
-use sp_runtime::transaction_validity::InvalidTransaction;
-
-/// Parachain registration API.
-pub trait Registrar<AccountId> {
-	/// Create a new unique parachain identity for later registration.
-	fn new_id() -> ParaId;
-
-	/// Checks whether the given initial head data size falls within the limit.
-	fn head_data_size_allowed(head_data_size: u32) -> bool;
-
-	/// Checks whether the given validation code falls within the limit.
-	fn code_size_allowed(code_size: u32) -> bool;
-
-	/// Fetches metadata for a para by ID, if any.
-	fn para_info(id: ParaId) -> Option<ParaInfo>;
-
-	/// Register a parachain with given `code` and `initial_head_data`. `id` must not yet be registered or it will
-	/// result in an error.
-	///
-	/// This does not enforce any code size or initial head data limits, as these
-	/// are governable and parameters for parachain initialization are often
-	/// determined long ahead-of-time. Not checking these values ensures that changes to limits
-	/// do not invalidate in-progress auction winners.
-	fn register_para(
-		id: ParaId,
-		info: ParaInfo,
-		code: ValidationCode,
-		initial_head_data: HeadData,
-	) -> DispatchResult;
-
-	/// Deregister a parachain with given `id`. If `id` is not currently registered, an error is returned.
-	fn deregister_para(id: ParaId) -> DispatchResult;
-}
-
-impl<T: Trait> Registrar<T::AccountId> for Module<T> {
-	fn new_id() -> ParaId {
-		<NextFreeId>::mutate(|n| { let r = *n; *n = ParaId::from(u32::from(*n) + 1); r })
-	}
-
-	fn head_data_size_allowed(head_data_size: u32) -> bool {
-		head_data_size <= <T as parachains::Trait>::MaxHeadDataSize::get()
-	}
-
-	fn code_size_allowed(code_size: u32) -> bool {
-		code_size <= <T as parachains::Trait>::MaxCodeSize::get()
-	}
-
-	fn para_info(id: ParaId) -> Option<ParaInfo> {
-		Self::paras(&id)
-	}
-
-	fn register_para(
-		id: ParaId,
-		info: ParaInfo,
-		code: ValidationCode,
-		initial_head_data: HeadData,
-	) -> DispatchResult {
-		ensure!(!Paras::contains_key(id), Error::<T>::ParaAlreadyExists);
-		if let Scheduling::Always = info.scheduling {
-			Parachains::mutate(|parachains|
-				match parachains.binary_search(&id) {
-					Ok(_) => Err(Error::<T>::ParaAlreadyExists),
-					Err(idx) => {
-						parachains.insert(idx, id);
-						Ok(())
-					}
-				}
-			)?;
-		}
-		<parachains::Module<T>>::initialize_para(id, code, initial_head_data);
-		Paras::insert(id, info);
-		Ok(())
-	}
-
-	fn deregister_para(id: ParaId) -> DispatchResult {
-		let info = Paras::take(id).ok_or(Error::<T>::InvalidChainId)?;
-		if let Scheduling::Always = info.scheduling {
-			Parachains::mutate(|parachains|
-				parachains.binary_search(&id)
-					.map(|index| parachains.remove(index))
-					.map_err(|_| Error::<T>::InvalidChainId)
-			)?;
-		}
-		<parachains::Module<T>>::cleanup_para(id);
-		Paras::remove(id);
-		Ok(())
-	}
-}
-
-type BalanceOf<T> =
-	<<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::Balance;
-
-pub trait Trait: parachains::Trait {
-	/// The overarching event type.
-	type Event: From<Event> + Into<<Self as system::Trait>::Event>;
-
-	/// The aggregated origin type must support the parachains origin. We require that we can
-	/// infallibly convert between this origin and the system origin, but in reality, they're the
-	/// same type, we just can't express that to the Rust type system without writing a `where`
-	/// clause everywhere.
-	type Origin: From<<Self as system::Trait>::Origin>
-		+ Into<result::Result<parachains::Origin, <Self as Trait>::Origin>>;
-
-	/// The system's currency for parathread payment.
-	type Currency: ReservableCurrency<Self::AccountId>;
-
-	/// The deposit to be paid to run a parathread.
-	type ParathreadDeposit: Get<BalanceOf<Self>>;
-
-	/// Handler for when two ParaIds are swapped.
-	type SwapAux: SwapAux;
-
-	/// The number of items in the parathread queue, aka the number of blocks in advance to schedule
-	/// parachain execution.
-	type QueueSize: Get<usize>;
-
-	/// The number of rotations that you will have as grace if you miss a block.
-	type MaxRetries: Get<u32>;
-}
-
-decl_storage! {
-	trait Store for Module<T: Trait> as Registrar {
-		// Vector of all parachain IDs, in ascending order.
-		Parachains: Vec<ParaId>;
-
-		/// The number of threads to schedule per block.
-		ThreadCount: u32;
-
-		/// An array of the queue of set of threads scheduled for the coming blocks; ordered by
-		/// ascending para ID. There can be no duplicates of para ID in each list item.
-		SelectedThreads: Vec<Vec<(ParaId, CollatorId)>>;
-
-		/// Parathreads/chains scheduled for execution this block. If the collator ID is set, then
-		/// a particular collator has already been chosen for the next block, and no other collator
-		/// may provide the block. In this case we allow the possibility of the combination being
-		/// retried in a later block, expressed by `Retriable`.
-		///
-		/// Ordered by ParaId.
-		Active: Vec<(ParaId, Option<(CollatorId, Retriable)>)>;
-
-		/// The next unused ParaId value. Start this high in order to keep low numbers for
-		/// system-level chains.
-		NextFreeId: ParaId = LOWEST_USER_ID;
-
-		/// Pending swap operations.
-		PendingSwap: map hasher(twox_64_concat) ParaId => Option<ParaId>;
-
-		/// Map of all registered parathreads/chains.
- Paras get(fn paras): map hasher(twox_64_concat) ParaId => Option; - - /// The current queue for parathreads that should be retried. - RetryQueue get(fn retry_queue): Vec>; - - /// Users who have paid a parathread's deposit - Debtors: map hasher(twox_64_concat) ParaId => T::AccountId; - } - add_extra_genesis { - config(parachains): Vec<(ParaId, ValidationCode, HeadData)>; - config(_phdata): PhantomData; - build(build::); - } -} - -#[cfg(feature = "std")] -fn build(config: &GenesisConfig) { - let mut p = config.parachains.clone(); - p.sort_unstable_by_key(|&(ref id, _, _)| *id); - p.dedup_by_key(|&mut (ref id, _, _)| *id); - - let only_ids: Vec = p.iter().map(|&(ref id, _, _)| id).cloned().collect(); - - Parachains::put(&only_ids); - - for (id, code, genesis) in p { - Paras::insert(id, &primitives::parachain::PARACHAIN_INFO); - // no ingress -- a chain cannot be routed to until it is live. - ::insert(&id, &code); - ::insert(&id, &genesis); - // Save initial parachains in registrar - Paras::insert(id, ParaInfo { scheduling: Scheduling::Always }) - } -} - -/// Swap the existence of two items, provided by value, within an ordered list. -/// -/// If neither item exists, or if both items exist this will do nothing. If exactly one of the -/// items exists, then it will be removed and the other inserted. -pub fn swap_ordered_existence(ids: &mut [T], one: T, other: T) { - let maybe_one_pos = ids.binary_search(&one); - let maybe_other_pos = ids.binary_search(&other); - match (maybe_one_pos, maybe_other_pos) { - (Ok(one_pos), Err(_)) => ids[one_pos] = other, - (Err(_), Ok(other_pos)) => ids[other_pos] = one, - _ => return, - }; - ids.sort(); -} - -decl_error! { - pub enum Error for Module { - /// Parachain already exists. - ParaAlreadyExists, - /// Invalid parachain ID. - InvalidChainId, - /// Invalid parathread ID. - InvalidThreadId, - /// Invalid para code size. - CodeTooLarge, - /// Invalid para head data size. - HeadDataTooLarge, - } -} - -decl_module! { - /// Parachains module. - pub struct Module for enum Call where origin: ::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Register a parachain with given code. Must be called by root. - /// Fails if given ID is already used. - /// - /// Unlike the `Registrar` trait function of the same name, this - /// checks the code and head data against size limits. - #[weight = (5_000_000_000, DispatchClass::Operational)] - pub fn register_para(origin, - #[compact] id: ParaId, - info: ParaInfo, - code: ValidationCode, - initial_head_data: HeadData, - ) -> DispatchResult { - ensure_root(origin)?; - - ensure!( - >::code_size_allowed(code.0.len() as _), - Error::::CodeTooLarge, - ); - - ensure!( - >::head_data_size_allowed( - initial_head_data.0.len() as _ - ), - Error::::HeadDataTooLarge, - ); - >:: - register_para(id, info, code, initial_head_data) - } - - /// Deregister a parachain with given id - #[weight = (10_000_000, DispatchClass::Operational)] - pub fn deregister_para(origin, #[compact] id: ParaId) -> DispatchResult { - ensure_root(origin)?; - >::deregister_para(id) - } - - /// Reset the number of parathreads that can pay to be scheduled in a single block. - /// - /// - `count`: The number of parathreads. - /// - /// Must be called from Root origin. - #[weight = 0] - fn set_thread_count(origin, count: u32) { - ensure_root(origin)?; - ThreadCount::put(count); - } - - /// Register a parathread for immediate use. - /// - /// Must be sent from a Signed origin that is able to have ParathreadDeposit reserved. 
- /// `code` and `initial_head_data` are used to initialize the parathread's state. - /// - /// Unlike `register_para`, this function does check that the maximum code size - /// and head data size are respected, as parathread registration is an atomic - /// action. - #[weight = 0] - fn register_parathread(origin, - code: ValidationCode, - initial_head_data: HeadData, - ) { - let who = ensure_signed(origin)?; - - ::Currency::reserve(&who, T::ParathreadDeposit::get())?; - - let info = ParaInfo { - scheduling: Scheduling::Dynamic, - }; - - ensure!( - >::code_size_allowed(code.0.len() as _), - Error::::CodeTooLarge, - ); - - ensure!( - >::head_data_size_allowed( - initial_head_data.0.len() as _ - ), - Error::::HeadDataTooLarge, - ); - - let id = >::new_id(); - - let _ = >:: - register_para(id, info, code, initial_head_data); - - >::insert(id, who); - - Self::deposit_event(Event::ParathreadRegistered(id)); - } - - /// Place a bid for a parathread to be progressed in the next block. - /// - /// This is a kind of special transaction that should be heavily prioritized in the - /// transaction pool according to the `value`; only `ThreadCount` of them may be presented - /// in any single block. - #[weight = 0] - fn select_parathread(origin, - #[compact] _id: ParaId, - _collator: CollatorId, - _head_hash: T::Hash, - ) { - ensure_signed(origin)?; - // Everything else is checked for in the transaction `SignedExtension`. - } - - /// Deregister a parathread and retrieve the deposit. - /// - /// Must be sent from a `Parachain` origin which is currently a parathread. - /// - /// Ensure that before calling this that any funds you want emptied from the parathread's - /// account is moved out; after this it will be impossible to retrieve them (without - /// governance intervention). - #[weight = 0] - fn deregister_parathread(origin) { - let id = parachains::ensure_parachain(::Origin::from(origin))?; - - let info = Paras::get(id).ok_or(Error::::InvalidChainId)?; - if let Scheduling::Dynamic = info.scheduling {} else { Err(Error::::InvalidThreadId)? } - - >::deregister_para(id)?; - Self::force_unschedule(|i| i == id); - - let debtor = >::take(id); - let _ = ::Currency::unreserve(&debtor, T::ParathreadDeposit::get()); - - Self::deposit_event(Event::ParathreadRegistered(id)); - } - - /// Swap a parachain with another parachain or parathread. The origin must be a `Parachain`. - /// The swap will happen only if there is already an opposite swap pending. If there is not, - /// the swap will be stored in the pending swaps map, ready for a later confirmatory swap. - /// - /// The `ParaId`s remain mapped to the same head data and code so external code can rely on - /// `ParaId` to be a long-term identifier of a notional "parachain". However, their - /// scheduling info (i.e. whether they're a parathread or parachain), auction information - /// and the auction deposit are switched. - #[weight = 0] - fn swap(origin, #[compact] other: ParaId) { - let id = parachains::ensure_parachain(::Origin::from(origin))?; - - if PendingSwap::get(other) == Some(id) { - // actually do the swap. - T::SwapAux::ensure_can_swap(id, other)?; - - // Remove intention to swap. 
- PendingSwap::remove(other); - Self::force_unschedule(|i| i == id || i == other); - Parachains::mutate(|ids| swap_ordered_existence(ids, id, other)); - Paras::mutate(id, |i| - Paras::mutate(other, |j| - sp_std::mem::swap(i, j) - ) - ); - - >::mutate(id, |i| - >::mutate(other, |j| - sp_std::mem::swap(i, j) - ) - ); - let _ = T::SwapAux::on_swap(id, other); - } else { - PendingSwap::insert(id, other); - } - } - - /// Block initializer. Clears SelectedThreads and constructs/replaces Active. - fn on_initialize() -> Weight { - let next_up = SelectedThreads::mutate(|t| { - let r = if t.len() >= T::QueueSize::get() { - // Take the first set of parathreads in queue - t.remove(0) - } else { - vec![] - }; - while t.len() < T::QueueSize::get() { - t.push(vec![]); - } - r - }); - // mutable so that we can replace with `None` if parathread appears in new schedule. - let mut retrying = Self::take_next_retry(); - if let Some(((para, _), _)) = retrying { - // this isn't really ideal: better would be if there were an earlier pass that set - // retrying to the first item in the Missed queue that isn't already scheduled, but - // this is potentially O(m*n) in terms of missed queue size and parathread pool size. - if next_up.iter().any(|x| x.0 == para) { - retrying = None - } - } - - let mut paras = Parachains::get().into_iter() - .map(|id| (id, None)) - .chain(next_up.into_iter() - .map(|(para, collator)| - (para, Some((collator, Retriable::WithRetries(0)))) - ) - ).chain(retrying.into_iter() - .map(|((para, collator), retries)| - (para, Some((collator, Retriable::WithRetries(retries + 1)))) - ) - ).collect::>(); - // for Rust's timsort algorithm, sorting a concatenation of two sorted ranges is near - // O(N). - paras.sort_by_key(|&(ref id, _)| *id); - - Active::put(paras); - - 0 - } - - fn on_finalize() { - // a block without this will panic, but let's not panic here. - if let Some(proceeded_vec) = parachains::DidUpdate::get() { - // Active is sorted and DidUpdate is a sorted subset of its elements. - // - // We just go through the contents of active and find any items that don't appear in - // DidUpdate *and* which are enabled for retry. - let mut proceeded = proceeded_vec.into_iter(); - let mut i = proceeded.next(); - for sched in Active::get().into_iter() { - match i { - // Scheduled parachain proceeded properly. Move onto next item. - Some(para) if para == sched.0 => i = proceeded.next(), - // Scheduled `sched` missed their block. - // Queue for retry if it's allowed. - _ => if let (i, Some((c, Retriable::WithRetries(n)))) = sched { - Self::retry_later((i, c), n) - }, - } - } - } - } - } -} - -decl_event!{ - pub enum Event { - /// A parathread was registered; its new ID is supplied. - ParathreadRegistered(ParaId), - - /// The parathread of the supplied ID was de-registered. - ParathreadDeregistered(ParaId), - } -} - -impl Module { - /// Ensures that the given `ParaId` corresponds to a registered parathread, and returns a descriptor if so. 
- pub fn ensure_thread_id(id: ParaId) -> Option { - Paras::get(id).and_then(|info| if let Scheduling::Dynamic = info.scheduling { - Some(info) - } else { - None - }) - } - - fn retry_later(sched: (ParaId, CollatorId), retries: u32) { - if retries < T::MaxRetries::get() { - RetryQueue::mutate(|q| { - q.resize(T::MaxRetries::get() as usize, vec![]); - q[retries as usize].push(sched); - }); - } - } - - fn take_next_retry() -> Option<((ParaId, CollatorId), u32)> { - RetryQueue::mutate(|q| { - for (i, q) in q.iter_mut().enumerate() { - if !q.is_empty() { - return Some((q.remove(0), i as u32)); - } - } - None - }) - } - - /// Forcibly remove the threads matching `m` from all current and future scheduling. - fn force_unschedule(m: impl Fn(ParaId) -> bool) { - RetryQueue::mutate(|qs| for q in qs.iter_mut() { - q.retain(|i| !m(i.0)) - }); - SelectedThreads::mutate(|qs| for q in qs.iter_mut() { - q.retain(|i| !m(i.0)) - }); - Active::mutate(|a| for i in a.iter_mut() { - if m(i.0) { - if let Some((_, ref mut r)) = i.1 { - *r = Retriable::Never; - } - } - }); - } -} - -impl ActiveParas for Module { - fn active_paras() -> Vec<(ParaId, Option<(CollatorId, Retriable)>)> { - Active::get() - } -} - -/// Ensure that parathread selections happen prioritized by fees. -#[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct LimitParathreadCommits(sp_std::marker::PhantomData) where - ::Call: IsSubType, T>; - -impl LimitParathreadCommits where - ::Call: IsSubType, T> -{ - /// Create a new `LimitParathreadCommits` struct. - pub fn new() -> Self { - LimitParathreadCommits(sp_std::marker::PhantomData) - } -} - -impl sp_std::fmt::Debug for LimitParathreadCommits where - ::Call: IsSubType, T> -{ - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "LimitParathreadCommits") - } -} - -/// Custom validity errors used in Polkadot while validating transactions. -#[repr(u8)] -pub enum ValidityError { - /// Parathread ID has already been submitted for this block. - Duplicate = 0, - /// Parathread ID does not identify a parathread. - InvalidId = 1, -} - -impl SignedExtension for LimitParathreadCommits where - ::Call: IsSubType, T> -{ - const IDENTIFIER: &'static str = "LimitParathreadCommits"; - type AccountId = T::AccountId; - type Call = ::Call; - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed(&self) - -> sp_std::result::Result - { - Ok(()) - } - - fn validate( - &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - let mut r = ValidTransaction::default(); - if let Some(local_call) = call.is_sub_type() { - if let Call::select_parathread(id, collator, hash) = local_call { - // ensure that the para ID is actually a parathread. - let e = TransactionValidityError::from(InvalidTransaction::Custom(ValidityError::InvalidId as u8)); - >::ensure_thread_id(*id).ok_or(e)?; - - // ensure that we haven't already had a full complement of selected parathreads. 
- let mut upcoming_selected_threads = SelectedThreads::get(); - if upcoming_selected_threads.is_empty() { - upcoming_selected_threads.push(vec![]); - } - let i = upcoming_selected_threads.len() - 1; - let selected_threads = &mut upcoming_selected_threads[i]; - let thread_count = ThreadCount::get() as usize; - ensure!( - selected_threads.len() < thread_count, - InvalidTransaction::ExhaustsResources, - ); - - // ensure that this is not selecting a duplicate parathread ID - let e = TransactionValidityError::from(InvalidTransaction::Custom(ValidityError::Duplicate as u8)); - let pos = selected_threads - .binary_search_by(|&(ref other_id, _)| other_id.cmp(id)) - .err() - .ok_or(e)?; - - // ensure that this is a live bid (i.e. that the thread's chain head matches) - let e = TransactionValidityError::from(InvalidTransaction::Custom(ValidityError::InvalidId as u8)); - let head = >::parachain_head(id).ok_or(e)?; - let actual = T::Hashing::hash(&head.0); - ensure!(&actual == hash, InvalidTransaction::Stale); - - // updated the selected threads. - selected_threads.insert(pos, (*id, collator.clone())); - sp_std::mem::drop(selected_threads); - SelectedThreads::put(upcoming_selected_threads); - - // provides the state-transition for this head-data-hash; this should cue the pool - // to throw out competing transactions with lesser fees. - r.provides = vec![hash.encode()]; - } - } - Ok(r) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use bitvec::vec::BitVec; - use sp_io::TestExternalities; - use sp_core::{H256, Pair}; - use sp_runtime::{ - traits::{ - BlakeTwo256, IdentityLookup, Dispatchable, - AccountIdConversion, Extrinsic as ExtrinsicT, - }, testing::{UintAuthorityId, TestXt}, KeyTypeId, Perbill, curve::PiecewiseLinear, - }; - use primitives::{ - parachain::{ - ValidatorId, Info as ParaInfo, Scheduling, LOWEST_USER_ID, AttestedCandidate, - CandidateReceipt, HeadData, ValidityAttestation, CompactStatement as Statement, Chain, - CollatorPair, CandidateCommitments, - }, - Balance, BlockNumber, Header, Signature, - }; - use frame_support::{ - traits::{KeyOwnerProofSystem, OnInitialize, OnFinalize}, - impl_outer_origin, impl_outer_dispatch, assert_ok, parameter_types, assert_noop, - weights::DispatchInfo, - }; - use keyring::Sr25519Keyring; - - use crate::parachains; - use crate::slots; - use crate::attestations; - - impl_outer_origin! { - pub enum Origin for Test { - parachains, - } - } - - impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - parachains::Parachains, - registrar::Registrar, - staking::Staking, - } - } - - pallet_staking_reward_curve::build! { - const REWARD_CURVE: PiecewiseLinear<'static> = curve!( - min_inflation: 0_025_000, - max_inflation: 0_100_000, - ideal_stake: 0_500_000, - falloff: 0_050_000, - max_piece_count: 40, - test_precision: 0_005_000, - ); - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! 
{ - pub const BlockHashCount: u32 = 250; - pub const MaximumBlockWeight: u32 = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - } - impl system::Trait for Test { - type BaseCallFilter = (); - type Origin = Origin; - type Call = Call; - type Index = u64; - type BlockNumber = BlockNumber; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = Balances; - } - - impl system::offchain::SendTransactionTypes for Test where - Call: From, - { - type OverarchingCall = Call; - type Extrinsic = TestXt; - } - - parameter_types! { - pub const ExistentialDeposit: Balance = 1; - } - - impl balances::Trait for Test { - type Balance = u128; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - - parameter_types!{ - pub const LeasePeriod: BlockNumber = 10; - pub const EndingPeriod: BlockNumber = 3; - } - - impl slots::Trait for Test { - type Event = (); - type Currency = balances::Module; - type Parachains = Registrar; - type EndingPeriod = EndingPeriod; - type LeasePeriod = LeasePeriod; - type Randomness = RandomnessCollectiveFlip; - } - - parameter_types!{ - pub const SlashDeferDuration: staking::EraIndex = 7; - pub const AttestationPeriod: BlockNumber = 100; - pub const MinimumPeriod: u64 = 3; - pub const SessionsPerEra: sp_staking::SessionIndex = 6; - pub const BondingDuration: staking::EraIndex = 28; - pub const MaxNominatorRewardedPerValidator: u32 = 64; - } - - impl attestations::Trait for Test { - type AttestationPeriod = AttestationPeriod; - type ValidatorIdentities = parachains::ValidatorIdentities; - type RewardAttestation = (); - } - - parameter_types! { - pub const Period: BlockNumber = 1; - pub const Offset: BlockNumber = 0; - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); - pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - } - - impl session::Trait for Test { - type SessionManager = (); - type Keys = UintAuthorityId; - type ShouldEndSession = session::PeriodicSessions; - type NextSessionRotation = session::PeriodicSessions; - type SessionHandler = session::TestSessionHandler; - type Event = (); - type ValidatorId = u64; - type ValidatorIdOf = (); - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - } - - parameter_types! 
{ - pub const MaxHeadDataSize: u32 = 100; - pub const MaxCodeSize: u32 = 100; - - pub const ValidationUpgradeFrequency: BlockNumber = 10; - pub const ValidationUpgradeDelay: BlockNumber = 2; - pub const SlashPeriod: BlockNumber = 50; - pub const ElectionLookahead: BlockNumber = 0; - pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; - } - - impl staking::Trait for Test { - type RewardRemainder = (); - type CurrencyToVote = (); - type Event = (); - type Currency = balances::Module; - type Slash = (); - type Reward = (); - type SessionsPerEra = SessionsPerEra; - type BondingDuration = BondingDuration; - type SlashDeferDuration = SlashDeferDuration; - type SlashCancelOrigin = system::EnsureRoot; - type SessionInterface = Self; - type UnixTime = timestamp::Module; - type RewardCurve = RewardCurve; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type UnsignedPriority = StakingUnsignedPriority; - type MaxIterations = (); - type MinSolutionScoreBump = (); - } - - impl timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; - } - - impl session::historical::Trait for Test { - type FullIdentification = staking::Exposure; - type FullIdentificationOf = staking::ExposureOf; - } - - // This is needed for a custom `AccountId` type which is `u64` in testing here. - pub mod test_keys { - use sp_core::{crypto::KeyTypeId, sr25519}; - use primitives::Signature; - - pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"test"); - - mod app { - use super::super::Parachains; - use sp_application_crypto::{app_crypto, sr25519}; - - app_crypto!(sr25519, super::KEY_TYPE); - - impl sp_runtime::traits::IdentifyAccount for Public { - type AccountId = u64; - - fn into_account(self) -> Self::AccountId { - let id = self.0.clone().into(); - Parachains::authorities().iter().position(|b| *b == id).unwrap() as u64 - } - } - } - - pub type ReporterId = app::Public; - pub struct ReporterAuthorityId; - impl system::offchain::AppCrypto for ReporterAuthorityId { - type RuntimeAppPublic = ReporterId; - type GenericSignature = sr25519::Signature; - type GenericPublic = sr25519::Public; - } - } - - impl parachains::Trait for Test { - type AuthorityId = test_keys::ReporterAuthorityId; - type Origin = Origin; - type Call = Call; - type ParachainCurrency = balances::Module; - type BlockNumberConversion = sp_runtime::traits::Identity; - type ActiveParachains = Registrar; - type Registrar = Registrar; - type Randomness = RandomnessCollectiveFlip; - type MaxCodeSize = MaxCodeSize; - type MaxHeadDataSize = MaxHeadDataSize; - type ValidationUpgradeFrequency = ValidationUpgradeFrequency; - type ValidationUpgradeDelay = ValidationUpgradeDelay; - type SlashPeriod = SlashPeriod; - type Proof = sp_session::MembershipProof; - type KeyOwnerProofSystem = session::historical::Module; - type IdentificationTuple = , - )>>::IdentificationTuple; - type ReportOffence = (); - type BlockHashConversion = sp_runtime::traits::Identity; - } - - type Extrinsic = TestXt; - - impl system::offchain::CreateSignedTransaction for Test where - Call: From, - { - fn create_transaction>( - call: Call, - _public: test_keys::ReporterId, - _account: ::AccountId, - nonce: ::Index, - ) -> Option<(Call, ::SignaturePayload)> { - Some((call, (nonce, ()))) - } - } - - impl system::offchain::SigningTypes for Test { - type Public = test_keys::ReporterId; - type Signature = Signature; - } - - 
parameter_types! { - pub const ParathreadDeposit: Balance = 10; - pub const QueueSize: usize = 2; - pub const MaxRetries: u32 = 3; - } - - impl Trait for Test { - type Event = (); - type Origin = Origin; - type Currency = balances::Module; - type ParathreadDeposit = ParathreadDeposit; - type SwapAux = slots::Module; - type QueueSize = QueueSize; - type MaxRetries = MaxRetries; - } - - type Balances = balances::Module; - type Parachains = parachains::Module; - type System = system::Module; - type Slots = slots::Module; - type Registrar = Module; - type RandomnessCollectiveFlip = randomness_collective_flip::Module; - type Session = session::Module; - type Staking = staking::Module; - - const AUTHORITY_KEYS: [Sr25519Keyring; 8] = [ - Sr25519Keyring::Alice, - Sr25519Keyring::Bob, - Sr25519Keyring::Charlie, - Sr25519Keyring::Dave, - Sr25519Keyring::Eve, - Sr25519Keyring::Ferdie, - Sr25519Keyring::One, - Sr25519Keyring::Two, - ]; - - fn new_test_ext(parachains: Vec<(ParaId, ValidationCode, HeadData)>) -> TestExternalities { - let mut t = system::GenesisConfig::default().build_storage::().unwrap(); - - let authority_keys = [ - Sr25519Keyring::Alice, - Sr25519Keyring::Bob, - Sr25519Keyring::Charlie, - Sr25519Keyring::Dave, - Sr25519Keyring::Eve, - Sr25519Keyring::Ferdie, - Sr25519Keyring::One, - Sr25519Keyring::Two, - ]; - - // stashes are the index. - let session_keys: Vec<_> = authority_keys.iter().enumerate() - .map(|(i, _k)| (i as u64, i as u64, UintAuthorityId(i as u64))) - .collect(); - - let authorities: Vec<_> = authority_keys.iter().map(|k| ValidatorId::from(k.public())).collect(); - - let balances: Vec<_> = (0..authority_keys.len()).map(|i| (i as u64, 10_000_000)).collect(); - - parachains::GenesisConfig { - authorities: authorities.clone(), - }.assimilate_storage::(&mut t).unwrap(); - - GenesisConfig:: { - parachains, - _phdata: Default::default(), - }.assimilate_storage(&mut t).unwrap(); - - session::GenesisConfig:: { - keys: session_keys, - }.assimilate_storage(&mut t).unwrap(); - - balances::GenesisConfig:: { - balances, - }.assimilate_storage(&mut t).unwrap(); - - t.into() - } - - fn init_block() { - println!("Initializing {}", System::block_number()); - System::on_initialize(System::block_number()); - Registrar::on_initialize(System::block_number()); - Parachains::on_initialize(System::block_number()); - Slots::on_initialize(System::block_number()); - } - - fn run_to_block(n: BlockNumber) { - println!("Running until block {}", n); - while System::block_number() < n { - if System::block_number() > 1 { - println!("Finalizing {}", System::block_number()); - if !parachains::DidUpdate::exists() { - println!("Null heads update"); - assert_ok!(Parachains::set_heads(system::RawOrigin::None.into(), vec![])); - } - Slots::on_finalize(System::block_number()); - Parachains::on_finalize(System::block_number()); - Registrar::on_finalize(System::block_number()); - System::on_finalize(System::block_number()); - } - System::set_block_number(System::block_number() + 1); - init_block(); - } - } - - fn schedule_thread(id: ParaId, head_data: &[u8], col: &CollatorId) { - let tx: LimitParathreadCommits = LimitParathreadCommits(Default::default()); - let hdh = BlakeTwo256::hash(head_data); - let inner_call = super::Call::select_parathread(id, col.clone(), hdh); - let call = Call::Registrar(inner_call); - let origin = 4u64; - assert!(tx.validate(&origin, &call, &Default::default(), 0).is_ok()); - assert_ok!(call.dispatch(Origin::signed(origin))); - } - - fn user_id(i: u32) -> ParaId { - LOWEST_USER_ID + i 
- } - - fn attest(id: ParaId, collator: &CollatorPair, head_data: &[u8], block_data: &[u8]) -> AttestedCandidate { - let pov_block_hash = BlakeTwo256::hash(block_data); - let relay_parent = System::parent_hash(); - let candidate = CandidateReceipt { - parachain_index: id, - relay_parent, - head_data: HeadData(head_data.to_vec()), - collator: collator.public(), - signature: pov_block_hash.using_encoded(|d| collator.sign(d)), - pov_block_hash, - global_validation: Parachains::global_validation_schedule(), - local_validation: Parachains::current_local_validation_data(&id).unwrap(), - commitments: CandidateCommitments { - fees: 0, - upward_messages: vec![], - erasure_root: [1; 32].into(), - new_validation_code: None, - }, - }; - let (candidate, _) = candidate.abridge(); - let candidate_hash = candidate.hash(); - let payload = (Statement::Valid(candidate_hash), session::Module::::current_index(), System::parent_hash()).encode(); - let roster = Parachains::calculate_duty_roster().0.validator_duty; - AttestedCandidate { - candidate, - validity_votes: AUTHORITY_KEYS.iter() - .enumerate() - .filter(|(i, _)| roster[*i] == Chain::Parachain(id)) - .map(|(_, k)| k.sign(&payload).into()) - .map(ValidityAttestation::Explicit) - .collect(), - validator_indices: roster.iter() - .map(|i| i == &Chain::Parachain(id)) - .collect::>(), - } - } - - #[test] - fn basic_setup_works() { - new_test_ext(vec![]).execute_with(|| { - assert_eq!(super::Parachains::get(), vec![]); - assert_eq!(ThreadCount::get(), 0); - assert_eq!(Active::get(), vec![]); - assert_eq!(NextFreeId::get(), LOWEST_USER_ID); - assert_eq!(PendingSwap::get(&ParaId::from(0u32)), None); - assert_eq!(Paras::get(&ParaId::from(0u32)), None); - }); - } - - #[test] - fn genesis_registration_works() { - let parachains = vec![ - (5u32.into(), vec![1,2,3].into(), vec![1].into()), - (100u32.into(), vec![4,5,6].into(), vec![2,].into()), - ]; - - new_test_ext(parachains).execute_with(|| { - // Need to trigger on_initialize - run_to_block(2); - // Genesis registration works - assert_eq!(Registrar::active_paras(), vec![(5u32.into(), None), (100u32.into(), None)]); - assert_eq!( - Registrar::paras(&ParaId::from(5u32)), - Some(ParaInfo { scheduling: Scheduling::Always }), - ); - assert_eq!( - Registrar::paras(&ParaId::from(100u32)), - Some(ParaInfo { scheduling: Scheduling::Always }), - ); - assert_eq!(Parachains::parachain_code(&ParaId::from(5u32)), Some(vec![1, 2, 3].into())); - assert_eq!(Parachains::parachain_code(&ParaId::from(100u32)), Some(vec![4, 5, 6].into())); - }); - } - - #[test] - fn swap_chain_and_thread_works() { - new_test_ext(vec![]).execute_with(|| { - assert_ok!(Registrar::set_thread_count(Origin::root(), 1)); - - // Need to trigger on_initialize - run_to_block(2); - - // Register a new parathread - assert_ok!(Registrar::register_parathread( - Origin::signed(1u64), - vec![1; 3].into(), - vec![1; 3].into(), - )); - - // Lease out a new parachain - assert_ok!(Slots::new_auction(Origin::root(), 5, 1)); - assert_ok!(Slots::bid(Origin::signed(1), 0, 1, 1, 4, 1)); - - run_to_block(9); - // Ensure that the thread is scheduled around the swap time. 
- let col = Sr25519Keyring::One.public().into(); - schedule_thread(user_id(0), &[1; 3], &col); - - run_to_block(10); - let h = BlakeTwo256::hash(&[2u8; 3]); - assert_ok!(Slots::fix_deploy_data(Origin::signed(1), 0, user_id(1), h, 3, vec![2; 3].into())); - assert_ok!(Slots::elaborate_deploy_data(Origin::signed(0), user_id(1), vec![2; 3].into())); - assert_ok!(Slots::set_offboarding(Origin::signed(user_id(1).into_account()), 1)); - - run_to_block(11); - // should be one active parachain and one active parathread. - assert_eq!(Registrar::active_paras(), vec![ - (user_id(0), Some((col.clone(), Retriable::WithRetries(0)))), - (user_id(1), None), - ]); - - // One half of the swap call does not actually trigger the swap. - assert_ok!(Registrar::swap(parachains::Origin::Parachain(user_id(0)).into(), user_id(1))); - - // Nothing changes from what was originally registered - assert_eq!(Registrar::paras(&user_id(0)), Some(ParaInfo { scheduling: Scheduling::Dynamic })); - assert_eq!(Registrar::paras(&user_id(1)), Some(ParaInfo { scheduling: Scheduling::Always })); - assert_eq!(super::Parachains::get(), vec![user_id(1)]); - assert_eq!(Slots::managed_ids(), vec![user_id(1)]); - assert_eq!(Slots::deposits(user_id(1)), vec![1; 3]); - assert_eq!(Slots::offboarding(user_id(1)), 1); - assert_eq!(Parachains::parachain_code(&user_id(0)), Some(vec![1u8; 3].into())); - assert_eq!(Parachains::parachain_head(&user_id(0)), Some(vec![1u8; 3].into())); - assert_eq!(Parachains::parachain_code(&user_id(1)), Some(vec![2u8; 3].into())); - assert_eq!(Parachains::parachain_head(&user_id(1)), Some(vec![2u8; 3].into())); - // Intention to swap is added - assert_eq!(PendingSwap::get(user_id(0)), Some(user_id(1))); - - // Intention to swap is reciprocated, swap actually happens - assert_ok!(Registrar::swap(parachains::Origin::Parachain(user_id(1)).into(), user_id(0))); - - assert_eq!(Registrar::paras(&user_id(0)), Some(ParaInfo { scheduling: Scheduling::Always })); - assert_eq!(Registrar::paras(&user_id(1)), Some(ParaInfo { scheduling: Scheduling::Dynamic })); - assert_eq!(super::Parachains::get(), vec![user_id(0)]); - assert_eq!(Slots::managed_ids(), vec![user_id(0)]); - assert_eq!(Slots::deposits(user_id(0)), vec![1; 3]); - assert_eq!(Slots::offboarding(user_id(0)), 1); - assert_eq!(Parachains::parachain_code(&user_id(0)), Some(vec![1u8; 3].into())); - assert_eq!(Parachains::parachain_head(&user_id(0)), Some(vec![1u8; 3].into())); - assert_eq!(Parachains::parachain_code(&user_id(1)), Some(vec![2u8; 3].into())); - assert_eq!(Parachains::parachain_head(&user_id(1)), Some(vec![2u8; 3].into())); - - // Intention to swap is no longer present - assert_eq!(PendingSwap::get(user_id(0)), None); - assert_eq!(PendingSwap::get(user_id(1)), None); - - run_to_block(12); - // thread should not be queued or scheduled any more, even though it would otherwise be - // being retried.. 
- assert_eq!(Registrar::active_paras(), vec![(user_id(0), None)]); - }); - } - - #[test] - fn swap_handles_funds_correctly() { - new_test_ext(vec![]).execute_with(|| { - assert_ok!(Registrar::set_thread_count(Origin::root(), 1)); - - // Need to trigger on_initialize - run_to_block(2); - - let initial_1_balance = Balances::free_balance(1); - let initial_2_balance = Balances::free_balance(2); - - // User 1 register a new parathread - assert_ok!(Registrar::register_parathread( - Origin::signed(1), - vec![1; 3].into(), - vec![1; 3].into(), - )); - - // User 2 leases out a new parachain - assert_ok!(Slots::new_auction(Origin::root(), 5, 1)); - assert_ok!(Slots::bid(Origin::signed(2), 0, 1, 1, 4, 1)); - - run_to_block(9); - - // Swap the parachain and parathread - assert_ok!(Registrar::swap(parachains::Origin::Parachain(user_id(0)).into(), user_id(1))); - assert_ok!(Registrar::swap(parachains::Origin::Parachain(user_id(1)).into(), user_id(0))); - - // Deregister the parathread that was originally a parachain - assert_ok!(Registrar::deregister_parathread(parachains::Origin::Parachain(user_id(1)).into())); - - // Go past when a parachain loses its slot - run_to_block(50); - - // Funds are correctly returned - assert_eq!(Balances::free_balance(1), initial_1_balance); - assert_eq!(Balances::free_balance(2), initial_2_balance); - }); - } - - #[test] - fn register_deregister_chains_works() { - let parachains = vec![ - (1u32.into(), vec![1; 3].into(), vec![1; 3].into()), - ]; - - new_test_ext(parachains).execute_with(|| { - // Need to trigger on_initialize - run_to_block(2); - - // Genesis registration works - assert_eq!(Registrar::active_paras(), vec![(1u32.into(), None)]); - assert_eq!( - Registrar::paras(&ParaId::from(1u32)), - Some(ParaInfo { scheduling: Scheduling::Always }) - ); - assert_eq!(Parachains::parachain_code(&ParaId::from(1u32)), Some(vec![1; 3].into())); - - // Register a new parachain - assert_ok!(Registrar::register_para( - Origin::root(), - 2u32.into(), - ParaInfo { scheduling: Scheduling::Always }, - vec![2; 3].into(), - vec![2; 3].into(), - )); - - let orig_bal = Balances::free_balance(&3u64); - // Register a new parathread - assert_ok!(Registrar::register_parathread( - Origin::signed(3u64), - vec![3; 3].into(), - vec![3; 3].into(), - )); - // deposit should be taken (reserved) - assert_eq!(Balances::free_balance(3u64) + ParathreadDeposit::get(), orig_bal); - assert_eq!(Balances::reserved_balance(3u64), ParathreadDeposit::get()); - - run_to_block(3); - - // New paras are registered - assert_eq!(Registrar::active_paras(), vec![(1u32.into(), None), (2u32.into(), None)]); - assert_eq!( - Registrar::paras(&ParaId::from(2u32)), - Some(ParaInfo { scheduling: Scheduling::Always }) - ); - assert_eq!( - Registrar::paras(&user_id(0)), - Some(ParaInfo { scheduling: Scheduling::Dynamic }) - ); - assert_eq!(Parachains::parachain_code(&ParaId::from(2u32)), Some(vec![2; 3].into())); - assert_eq!(Parachains::parachain_code(&user_id(0)), Some(vec![3; 3].into())); - - assert_ok!(Registrar::deregister_para(Origin::root(), 2u32.into())); - assert_ok!(Registrar::deregister_parathread( - parachains::Origin::Parachain(user_id(0)).into() - )); - // reserved balance should be returned. 
- assert_eq!(Balances::free_balance(3u64), orig_bal); - assert_eq!(Balances::reserved_balance(3u64), 0); - - run_to_block(4); - - assert_eq!(Registrar::active_paras(), vec![(1u32.into(), None)]); - assert_eq!(Registrar::paras(&ParaId::from(2u32)), None); - assert_eq!(Parachains::parachain_code(&ParaId::from(2u32)), None); - assert_eq!(Registrar::paras(&user_id(0)), None); - assert_eq!(Parachains::parachain_code(&user_id(0)), None); - }); - } - - #[test] - fn parathread_scheduling_works() { - new_test_ext(vec![]).execute_with(|| { - assert_ok!(Registrar::set_thread_count(Origin::root(), 1)); - - run_to_block(2); - - // Register a new parathread - assert_ok!(Registrar::register_parathread( - Origin::signed(3u64), - vec![3; 3].into(), - vec![3; 3].into(), - )); - - run_to_block(3); - - // transaction submitted to get parathread progressed. - let col = Sr25519Keyring::One.public().into(); - schedule_thread(user_id(0), &[3; 3], &col); - - run_to_block(5); - assert_eq!(Registrar::active_paras(), vec![ - (user_id(0), Some((col.clone(), Retriable::WithRetries(0)))) - ]); - assert_ok!(Parachains::set_heads(Origin::none(), vec![ - attest(user_id(0), &Sr25519Keyring::One.pair().into(), &[3; 3], &[0; 0]) - ])); - - run_to_block(6); - // at next block, it shouldn't be retried. - assert_eq!(Registrar::active_paras(), vec![]); - }); - } - - #[test] - fn removing_scheduled_parathread_works() { - new_test_ext(vec![]).execute_with(|| { - assert_ok!(Registrar::set_thread_count(Origin::root(), 1)); - - run_to_block(2); - - // Register some parathreads. - assert_ok!(Registrar::register_parathread(Origin::signed(3), vec![3; 3].into(), vec![3; 3].into())); - - run_to_block(3); - // transaction submitted to get parathread progressed. - let col = Sr25519Keyring::One.public().into(); - schedule_thread(user_id(0), &[3; 3], &col); - - // now we remove the parathread - assert_ok!(Registrar::deregister_parathread( - parachains::Origin::Parachain(user_id(0)).into() - )); - - run_to_block(5); - assert_eq!(Registrar::active_paras(), vec![]); // should not be scheduled. - - assert_ok!(Registrar::register_parathread(Origin::signed(3), vec![4; 3].into(), vec![4; 3].into())); - - run_to_block(6); - // transaction submitted to get parathread progressed. - schedule_thread(user_id(1), &[4; 3], &col); - - run_to_block(9); - // thread's slot was missed and is now being re-scheduled. - - assert_ok!(Registrar::deregister_parathread( - parachains::Origin::Parachain(user_id(1)).into() - )); - - run_to_block(10); - // thread's rescheduled slot was missed, but should not be reschedule since it was - // removed. - assert_eq!(Registrar::active_paras(), vec![]); // should not be scheduled. - }); - } - - #[test] - fn parathread_rescheduling_works() { - new_test_ext(vec![]).execute_with(|| { - assert_ok!(Registrar::set_thread_count(Origin::root(), 1)); - - run_to_block(2); - - // Register some parathreads. - assert_ok!(Registrar::register_parathread(Origin::signed(3), vec![3; 3].into(), vec![3; 3].into())); - assert_ok!(Registrar::register_parathread(Origin::signed(4), vec![4; 3].into(), vec![4; 3].into())); - assert_ok!(Registrar::register_parathread(Origin::signed(5), vec![5; 3].into(), vec![5; 3].into())); - - run_to_block(3); - - // transaction submitted to get parathread progressed. - let col = Sr25519Keyring::One.public().into(); - schedule_thread(user_id(0), &[3; 3], &col); - - // 4x: the initial time it was scheduled, plus 3 retries. 
- for n in 5..9 { - run_to_block(n); - assert_eq!(Registrar::active_paras(), vec![ - (user_id(0), Some((col.clone(), Retriable::WithRetries((n - 5) as u32)))) - ]); - } - - // missed too many times. dropped. - run_to_block(9); - assert_eq!(Registrar::active_paras(), vec![]); - - // schedule and miss all 3 and check that they go through the queueing system ok. - assert_ok!(Registrar::set_thread_count(Origin::root(), 2)); - schedule_thread(user_id(0), &[3; 3], &col); - schedule_thread(user_id(1), &[4; 3], &col); - - run_to_block(10); - schedule_thread(user_id(2), &[5; 3], &col); - - // 0 and 1 scheduled as normal. - run_to_block(11); - assert_eq!(Registrar::active_paras(), vec![ - (user_id(0), Some((col.clone(), Retriable::WithRetries(0)))), - (user_id(1), Some((col.clone(), Retriable::WithRetries(0)))) - ]); - - // 2 scheduled, 0 retried - run_to_block(12); - assert_eq!(Registrar::active_paras(), vec![ - (user_id(0), Some((col.clone(), Retriable::WithRetries(1)))), - (user_id(2), Some((col.clone(), Retriable::WithRetries(0)))), - ]); - - // 1 retried - run_to_block(13); - assert_eq!(Registrar::active_paras(), vec![ - (user_id(1), Some((col.clone(), Retriable::WithRetries(1)))) - ]); - - // 2 retried - run_to_block(14); - assert_eq!(Registrar::active_paras(), vec![ - (user_id(2), Some((col.clone(), Retriable::WithRetries(1)))) - ]); - - run_to_block(15); - assert_eq!(Registrar::active_paras(), vec![ - (user_id(0), Some((col.clone(), Retriable::WithRetries(2)))) - ]); - - run_to_block(16); - assert_eq!(Registrar::active_paras(), vec![ - (user_id(1), Some((col.clone(), Retriable::WithRetries(2)))) - ]); - - run_to_block(17); - assert_eq!(Registrar::active_paras(), vec![ - (user_id(2), Some((col.clone(), Retriable::WithRetries(2)))) - ]); - }); - } - - #[test] - fn parathread_auction_handles_basic_errors() { - new_test_ext(vec![]).execute_with(|| { - run_to_block(2); - let o = Origin::signed(0); - assert_ok!(Registrar::register_parathread(o, vec![7, 8, 9].into(), vec![1, 1, 1].into())); - - run_to_block(3); - assert_eq!( - Registrar::paras(&user_id(0)), - Some(ParaInfo { scheduling: Scheduling::Dynamic }) - ); - - let good_para_id = user_id(0); - let bad_para_id = user_id(1); - let bad_head_hash = ::Hashing::hash(&vec![1, 2, 1]); - let good_head_hash = ::Hashing::hash(&vec![1, 1, 1]); - let info = &DispatchInfo::default(); - - // Allow for threads - assert_ok!(Registrar::set_thread_count(Origin::root(), 10)); - - // Bad parathread id - let col = CollatorId::default(); - let inner = super::Call::select_parathread(bad_para_id, col.clone(), good_head_hash); - let call = Call::Registrar(inner); - assert!( - LimitParathreadCommits::(std::marker::PhantomData) - .validate(&0, &call, info, 0).is_err() - ); - - // Bad head data - let inner = super::Call::select_parathread(good_para_id, col.clone(), bad_head_hash); - let call = Call::Registrar(inner); - assert!( - LimitParathreadCommits::(std::marker::PhantomData) - .validate(&0, &call, info, 0).is_err() - ); - - // No duplicates - let inner = super::Call::select_parathread(good_para_id, col.clone(), good_head_hash); - let call = Call::Registrar(inner); - assert!( - LimitParathreadCommits::(std::marker::PhantomData) - .validate(&0, &call, info, 0).is_ok() - ); - assert!( - LimitParathreadCommits::(std::marker::PhantomData) - .validate(&0, &call, info, 0).is_err() - ); - }); - } - - #[test] - fn parathread_auction_works() { - new_test_ext(vec![]).execute_with(|| { - run_to_block(2); - // Register 5 parathreads - for x in 0..5 { - let o = Origin::signed(x 
as u64); - assert_ok!(Registrar::register_parathread(o, vec![x; 3].into(), vec![x; 3].into())); - } - - run_to_block(3); - - for x in 0..5 { - assert_eq!( - Registrar::paras(&user_id(x)), - Some(ParaInfo { scheduling: Scheduling::Dynamic }) - ); - } - - // Only 3 slots available... who will win?? - assert_ok!(Registrar::set_thread_count(Origin::root(), 3)); - - // Everyone wants a thread - for x in 0..5 { - let para_id = user_id(x as u32); - let collator_id = CollatorId::default(); - let head_hash = ::Hashing::hash(&vec![x; 3]); - let inner = super::Call::select_parathread(para_id, collator_id, head_hash); - let call = Call::Registrar(inner); - let info = &DispatchInfo::default(); - - // First 3 transactions win a slot - if x < 3 { - assert!( - LimitParathreadCommits::(std::marker::PhantomData) - .validate(&0, &call, info, 0) - .is_ok() - ); - } else { - // All others lose - assert_noop!( - LimitParathreadCommits::(std::marker::PhantomData) - .validate(&0, &call, info, 0), - InvalidTransaction::ExhaustsResources, - ); - } - } - - // 3 Threads are selected - assert_eq!( - SelectedThreads::get()[1], - vec![ - (user_id(0), CollatorId::default()), - (user_id(1), CollatorId::default()), - (user_id(2), CollatorId::default()), - ] - ); - - // Assuming Queue Size is 2 - assert_eq!(::QueueSize::get(), 2); - - // 2 blocks later - run_to_block(5); - // Threads left queue - assert_eq!(SelectedThreads::get()[0], vec![]); - // Threads are active - assert_eq!( - Registrar::active_paras(), - vec![ - (user_id(0), Some((CollatorId::default(), Retriable::WithRetries(0)))), - (user_id(1), Some((CollatorId::default(), Retriable::WithRetries(0)))), - (user_id(2), Some((CollatorId::default(), Retriable::WithRetries(0)))), - ] - ); - }); - } - - #[test] - fn register_does_not_enforce_limits_when_registering() { - new_test_ext(vec![]).execute_with(|| { - let bad_code_size = ::MaxCodeSize::get() + 1; - let bad_head_size = ::MaxHeadDataSize::get() + 1; - - let code = vec![1u8; bad_code_size as _].into(); - let head_data = vec![2u8; bad_head_size as _].into(); - - assert!(!>::code_size_allowed(bad_code_size)); - assert!(!>::head_data_size_allowed(bad_head_size)); - - let id = >::new_id(); - assert_ok!(>::register_para( - id, - ParaInfo { scheduling: Scheduling::Always }, - code, - head_data, - )); - }); - } -} diff --git a/runtime/common/src/slots.rs b/runtime/common/src/slots.rs index b3140278f9e55c035165938a6cd4493a88a0efb8..60bf8f81745b8bb0ed81d3283c7ec9318900310b 100644 --- a/runtime/common/src/slots.rs +++ b/runtime/common/src/slots.rs @@ -28,19 +28,18 @@ use frame_support::{ traits::{Currency, ReservableCurrency, WithdrawReason, ExistenceRequirement, Get, Randomness}, weights::{DispatchClass, Weight}, }; -use primitives::parachain::{ - SwapAux, PARACHAIN_INFO, Id as ParaId, ValidationCode, HeadData, +use primitives::v1::{ + Id as ParaId, ValidationCode, HeadData, }; -use system::{ensure_signed, ensure_root}; -use crate::registrar::{Registrar, swap_ordered_existence}; +use frame_system::{ensure_signed, ensure_root}; use crate::slot_range::{SlotRange, SLOT_RANGE_COUNT}; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// The module's configuration trait. -pub trait Trait: system::Trait { +pub trait Trait: frame_system::Trait { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The currency type used for bidding. 
 	type Currency: ReservableCurrency<Self::AccountId>;
@@ -58,6 +57,50 @@ pub trait Trait: system::Trait {
 	type Randomness: Randomness<Self::Hash>;
 }
 
+/// Parachain registration API.
+pub trait Registrar<AccountId> {
+	/// Create a new unique parachain identity for later registration.
+	fn new_id() -> ParaId;
+
+	/// Checks whether the given initial head data size falls within the limit.
+	fn head_data_size_allowed(head_data_size: u32) -> bool;
+
+	/// Checks whether the given validation code falls within the limit.
+	fn code_size_allowed(code_size: u32) -> bool;
+
+	/// Register a parachain with given `code` and `initial_head_data`. `id` must not yet be registered or it will
+	/// result in an error.
+	///
+	/// This does not enforce any code size or initial head data limits, as these
+	/// are governable and parameters for parachain initialization are often
+	/// determined long ahead-of-time. Not checking these values ensures that changes to limits
+	/// do not invalidate in-progress auction winners.
+	fn register_para(
+		id: ParaId,
+		_parachain: bool,
+		code: ValidationCode,
+		initial_head_data: HeadData,
+	) -> DispatchResult;
+
+	/// Deregister a parachain with given `id`. If `id` is not currently registered, an error is returned.
+	fn deregister_para(id: ParaId) -> DispatchResult;
+}
+
+/// Auxiliary for when there's an attempt to swap two parachains/parathreads.
+pub trait SwapAux {
+	/// Result describing whether it is possible to swap two parachains. Doesn't mutate state.
+	fn ensure_can_swap(one: ParaId, other: ParaId) -> Result<(), &'static str>;
+
+	/// Updates any needed state/references to enact a logical swap of two parachains. Identity,
+	/// code and `head_data` remain equivalent for all parachains/threads, however other properties
+	/// such as leases, deposits held and thread/chain nature are swapped.
+	///
+	/// May only be called on a state that `ensure_can_swap` has previously returned `Ok` for: if this is
+	/// not the case, the result is undefined. May only return an error if `ensure_can_swap` also returns
+	/// an error.
+	fn on_swap(one: ParaId, other: ParaId) -> Result<(), &'static str>;
+}
+
 /// A sub-bidder identifier. Used to distinguish between different logical bidders coming from the
 /// same account ID.
 pub type SubId = u32;
@@ -118,14 +161,14 @@ pub enum IncomingParachain {
 	Deploy { code: ValidationCode, initial_head_data: HeadData },
 }
 
-type LeasePeriodOf<T> = <T as system::Trait>::BlockNumber;
+type LeasePeriodOf<T> = <T as frame_system::Trait>::BlockNumber;
 // Winning data type. This encodes the top bidders of each range together with their bid.
 type WinningData<T> =
-	[Option<(Bidder<<T as system::Trait>::AccountId>, BalanceOf<T>)>; SLOT_RANGE_COUNT];
+	[Option<(Bidder<<T as frame_system::Trait>::AccountId>, BalanceOf<T>)>; SLOT_RANGE_COUNT];
 // Winners data type. This encodes each of the final winners of a parachain auction, the parachain
 // index assigned to them, their winning bid and the range that they won.
 type WinnersData<T> =
-	Vec<(Option<NewBidder<<T as system::Trait>::AccountId>>, ParaId, BalanceOf<T>, SlotRange)>;
+	Vec<(Option<NewBidder<<T as frame_system::Trait>::AccountId>>, ParaId, BalanceOf<T>, SlotRange)>;
 
 // This module's storage items.
 decl_storage! {
@@ -187,6 +230,21 @@ decl_storage! {
 	}
 }
 
+/// Swap the existence of two items, provided by value, within an ordered list.
+///
+/// If neither item exists, or if both items exist this will do nothing. If exactly one of the
+/// items exists, then it will be removed and the other inserted.
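+///
+/// A hypothetical illustration (values chosen for this sketch, not part of the
+/// original doc comment):
+///
+/// ```ignore
+/// let mut ids = vec![1u32, 2, 3];
+/// swap_ordered_existence(&mut ids, 2, 9); // only `2` exists, so it is replaced by `9`
+/// assert_eq!(ids, vec![1, 3, 9]);         // and the list is re-sorted
+/// swap_ordered_existence(&mut ids, 4, 5); // neither exists: nothing happens
+/// assert_eq!(ids, vec![1, 3, 9]);
+/// ```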
+fn swap_ordered_existence<T: PartialOrd + Ord + Copy>(ids: &mut [T], one: T, other: T) {
+	let maybe_one_pos = ids.binary_search(&one);
+	let maybe_other_pos = ids.binary_search(&other);
+	match (maybe_one_pos, maybe_other_pos) {
+		(Ok(one_pos), Err(_)) => ids[one_pos] = other,
+		(Err(_), Ok(other_pos)) => ids[other_pos] = one,
+		_ => return,
+	};
+	ids.sort();
+}
+
 impl<T: Trait> SwapAux for Module<T> {
 	fn ensure_can_swap(one: ParaId, other: ParaId) -> Result<(), &'static str> {
 		if <Onboarding<T>>::contains_key(one) || <Onboarding<T>>::contains_key(other) {
@@ -204,28 +262,31 @@ impl<T: Trait> SwapAux for Module<T> {
 decl_event!(
 	pub enum Event<T> where
-		AccountId = <T as system::Trait>::AccountId,
-		BlockNumber = <T as system::Trait>::BlockNumber,
+		AccountId = <T as frame_system::Trait>::AccountId,
+		BlockNumber = <T as frame_system::Trait>::BlockNumber,
 		LeasePeriod = LeasePeriodOf<T>,
 		ParaId = ParaId,
 		Balance = BalanceOf<T>,
 	{
-		/// A new lease period is beginning.
+		/// A new [lease_period] is beginning.
 		NewLeasePeriod(LeasePeriod),
 		/// An auction started. Provides its index and the block number where it will begin to
 		/// close and the first lease period of the quadruplet that is auctioned.
+		/// [auction_index, lease_period, ending]
 		AuctionStarted(AuctionIndex, LeasePeriod, BlockNumber),
-		/// An auction ended. All funds become unreserved.
+		/// An auction ended. All funds become unreserved. [auction_index]
 		AuctionClosed(AuctionIndex),
 		/// Someone won the right to deploy a parachain. Balance amount is deducted for deposit.
+		/// [bidder, range, parachain_id, amount]
 		WonDeploy(NewBidder<AccountId>, SlotRange, ParaId, Balance),
 		/// An existing parachain won the right to continue.
 		/// First balance is the extra amount reserved. Second is the total amount reserved.
+		/// [parachain_id, range, extra_reserved, total_amount]
 		WonRenewal(ParaId, SlotRange, Balance, Balance),
 		/// Funds were reserved for a winning bid. First balance is the extra amount reserved.
-		/// Second is the total.
+		/// Second is the total. [bidder, extra_reserved, total_amount]
 		Reserved(AccountId, Balance, Balance),
-		/// Funds were unreserved since bidder is no longer active.
+		/// Funds were unreserved since bidder is no longer active. [bidder, amount]
 		Unreserved(AccountId, Balance),
 	}
 );
@@ -322,7 +383,7 @@ decl_module! {
 			let n = <AuctionCounter>::mutate(|n| { *n += 1; *n });
 
 			// Set the information.
-			let ending = <system::Module<T>>::block_number() + duration;
+			let ending = <frame_system::Module<T>>::block_number() + duration;
 			<AuctionInfo<T>>::put((lease_period_index, ending));
 
 			Self::deposit_event(RawEvent::AuctionStarted(n, lease_period_index, ending))
@@ -459,7 +520,7 @@
 				.ok_or(Error::<T>::ParaNotOnboarding)?;
 			if let IncomingParachain::Fixed{code_hash, code_size, initial_head_data} = details {
 				ensure!(code.0.len() as u32 == code_size, Error::<T>::InvalidCode);
-				ensure!(<T as system::Trait>::Hashing::hash(&code.0) == code_hash, Error::<T>::InvalidCode);
+				ensure!(<T as frame_system::Trait>::Hashing::hash(&code.0) == code_hash, Error::<T>::InvalidCode);
 
 				if starts > Self::lease_period_index() {
 					// Hasn't yet begun. Replace the on-boarding entry with the new information.
@@ -470,7 +531,7 @@
 					// parachain for its immediate start.
 					<Onboarding<T>>::remove(&para_id);
 					let _ = T::Parachains::
-						register_para(para_id, PARACHAIN_INFO, code, initial_head_data);
+						register_para(para_id, true, code, initial_head_data);
 				}
 
 				Ok(())
@@ -507,7 +568,7 @@ impl<T: Trait> Module<T> {
 
 	/// Returns the current lease period.
 	fn lease_period_index() -> LeasePeriodOf<T> {
-		(<system::Module<T>>::block_number() / T::LeasePeriod::get()).into()
+		(<frame_system::Module<T>>::block_number() / T::LeasePeriod::get()).into()
 	}
 
 	/// Some when the auction's end is known (with the end block number). None if it is unknown.
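The `lease_period_index` arithmetic above is plain integer division. A minimal standalone sketch (the constant is hypothetical, mirroring the 10-block `LeasePeriod` used elsewhere in these tests):

	const LEASE_PERIOD: u32 = 10;

	fn lease_period_index(block_number: u32) -> u32 {
		// A new lease period starts every LEASE_PERIOD blocks:
		// blocks 0..9 are period 0, 10..19 are period 1, and so on.
		block_number / LEASE_PERIOD
	}

	// lease_period_index(9) == 0, lease_period_index(10) == 1, lease_period_index(25) == 2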
@@ -719,7 +780,7 @@ impl Module { // The chain's deployment data is set; go ahead and register it, and remove the // now-redundant on-boarding entry. let _ = T::Parachains:: - register_para(para_id.clone(), PARACHAIN_INFO, code, initial_head_data); + register_para(para_id.clone(), true, code, initial_head_data); // ^^ not much we can do if it fails for some reason. >::remove(para_id) } @@ -751,7 +812,7 @@ impl Module { // Range as an array index. let range_index = range as u8 as usize; // The offset into the auction ending set. - let offset = Self::is_ending(>::block_number()).unwrap_or_default(); + let offset = Self::is_ending(>::block_number()).unwrap_or_default(); // The current winning ranges. let mut current_winning = >::get(offset) .or_else(|| offset.checked_sub(&One::one()).and_then(>::get)) @@ -889,9 +950,8 @@ mod tests { impl_outer_origin, parameter_types, assert_ok, assert_noop, traits::{OnInitialize, OnFinalize} }; - use balances; - use primitives::{BlockNumber, Header}; - use primitives::parachain::{Id as ParaId, Info as ParaInfo, Scheduling}; + use pallet_balances; + use primitives::v1::{BlockNumber, Header, Id as ParaId}; impl_outer_origin! { pub enum Origin for Test {} @@ -908,7 +968,7 @@ mod tests { pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } - impl system::Trait for Test { + impl frame_system::Trait for Test { type BaseCallFilter = (); type Origin = Origin; type Call = (); @@ -930,21 +990,23 @@ mod tests { type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type ModuleToIndex = (); - type AccountData = balances::AccountData; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = Balances; + type SystemWeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl balances::Trait for Test { + impl pallet_balances::Trait for Test { type Balance = u64; type Event = (); type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } thread_local! { @@ -973,13 +1035,9 @@ mod tests { code_size <= MAX_CODE_SIZE } - fn para_info(_id: ParaId) -> Option { - Some(ParaInfo { scheduling: Scheduling::Always }) - } - fn register_para( id: ParaId, - _info: ParaInfo, + _parachain: bool, code: ValidationCode, initial_head_data: HeadData, ) -> DispatchResult { @@ -1024,16 +1082,16 @@ mod tests { type Randomness = RandomnessCollectiveFlip; } - type System = system::Module; - type Balances = balances::Module; + type System = frame_system::Module; + type Balances = pallet_balances::Module; type Slots = Module; - type RandomnessCollectiveFlip = randomness_collective_flip::Module; + type RandomnessCollectiveFlip = pallet_randomness_collective_flip::Module; // This function basically just builds a genesis storage key/value store according to // our desired mock up. 
fn new_test_ext() -> sp_io::TestExternalities { - let mut t = system::GenesisConfig::default().build_storage::().unwrap(); - balances::GenesisConfig::{ + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], }.assimilate_storage(&mut t).unwrap(); t.into() diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index ee608f37dbb1bfe41f67c8eb61f3d430b2c28907..8b56dc17fa18ccda575d75e47575c905a7063f94 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -1,19 +1,19 @@ [package] name = "kusama-runtime" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } log = { version = "0.3.9", optional = true } rustc-hex = { version = "2.0.1", default-features = false } serde = { version = "1.0.102", default-features = false } serde_derive = { version = "1.0.102", optional = true } static_assertions = "1.1.0" -smallvec = "1.4.0" +smallvec = "1.4.1" authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -26,49 +26,49 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -version = { package = "sp-version", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -authority-discovery = { package = "pallet-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -authorship = { package = "pallet-authorship", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -babe = { package = "pallet-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -balances = { package = "pallet-balances", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -transaction-payment = { package = "pallet-transaction-payment", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -transaction-payment-rpc-runtime-api = { package = "pallet-transaction-payment-rpc-runtime-api", git = "https://github.com/paritytech/substrate", branch = "master", 
default-features = false } -collective = { package = "pallet-collective", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -democracy = { package = "pallet-democracy", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -elections-phragmen = { package = "pallet-elections-phragmen", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -executive = { package = "frame-executive", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -finality-tracker = { package = "pallet-finality-tracker", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -grandpa = { package = "pallet-grandpa", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -identity = { package = "pallet-identity", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -im-online = { package = "pallet-im-online", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -indices = { package = "pallet-indices", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -membership = { package = "pallet-membership", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -multisig = { package = "pallet-multisig", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -nicks = { package = "pallet-nicks", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offences = { package = "pallet-offences", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -proxy = { package = "pallet-proxy", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -randomness-collective-flip = { package = "pallet-randomness-collective-flip", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -recovery = { package = "pallet-recovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -scheduler = { package = "pallet-scheduler", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -session = { package = "pallet-session", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -society = { package = "pallet-society", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "master", 
default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-elections-phragmen = { package = "pallet-elections-phragmen", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-finality-tracker = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-recovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-society = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -staking = { package = "pallet-staking", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", git = "https://github.com/paritytech/substrate", branch = "master" } -system = { package = "frame-system", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -system_rpc_runtime_api = { package = "frame-system-rpc-runtime-api", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -timestamp = { package = "pallet-timestamp", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -treasury = { package = "pallet-treasury", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -utility = { package = "pallet-utility", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -vesting = { package = "pallet-vesting", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = 
{git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } pallet-session-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } hex-literal = { version = "0.2.1", optional = true } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } @@ -91,7 +91,7 @@ no_std = [] only-staking = [] std = [ "authority-discovery-primitives/std", - "authority-discovery/std", + "pallet-authority-discovery/std", "bitvec/std", "primitives/std", "rustc-hex/std", @@ -105,45 +105,45 @@ std = [ "sp-std/std", "sp-io/std", "frame-support/std", - "authorship/std", - "balances/std", - "transaction-payment/std", - "transaction-payment-rpc-runtime-api/std", - "collective/std", - "elections-phragmen/std", - "democracy/std", - "executive/std", - "finality-tracker/std", - "grandpa/std", - "identity/std", - "im-online/std", - "indices/std", - "membership/std", - "multisig/std", - "nicks/std", - "offences/std", - "proxy/std", - "recovery/std", + "pallet-authorship/std", + "pallet-balances/std", + "pallet-transaction-payment/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-collective/std", + "pallet-elections-phragmen/std", + "pallet-democracy/std", + "frame-executive/std", + "pallet-finality-tracker/std", + "pallet-grandpa/std", + "pallet-identity/std", + "pallet-im-online/std", + "pallet-indices/std", + "pallet-membership/std", + "pallet-multisig/std", + "pallet-nicks/std", + "pallet-offences/std", + "pallet-proxy/std", + "pallet-recovery/std", "sp-runtime/std", "sp-staking/std", - "scheduler/std", - "session/std", - "society/std", - "staking/std", - "system/std", - "system_rpc_runtime_api/std", - "timestamp/std", - "treasury/std", - "version/std", - "utility/std", - "vesting/std", + "pallet-scheduler/std", + "pallet-session/std", + "pallet-society/std", + "pallet-staking/std", + "frame-system/std", + "frame-system-rpc-runtime-api/std", + "pallet-timestamp/std", + "pallet-treasury/std", + "sp-version/std", + "pallet-utility/std", + "pallet-vesting/std", "serde_derive", "serde/std", "log", - "babe/std", + "pallet-babe/std", "babe-primitives/std", "sp-session/std", - "randomness-collective-flip/std", + "pallet-randomness-collective-flip/std", "runtime-common/std", ] runtime-benchmarks = [ @@ -151,22 +151,28 @@ 
runtime-benchmarks = [
 	"frame-benchmarking",
 	"frame-support/runtime-benchmarks",
 	"frame-system-benchmarking",
-	"system/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
-	"balances/runtime-benchmarks",
-	"collective/runtime-benchmarks",
-	"democracy/runtime-benchmarks",
-	"elections-phragmen/runtime-benchmarks",
-	"identity/runtime-benchmarks",
-	"im-online/runtime-benchmarks",
-	"scheduler/runtime-benchmarks",
-	"society/runtime-benchmarks",
-	"staking/runtime-benchmarks",
-	"timestamp/runtime-benchmarks",
-	"treasury/runtime-benchmarks",
-	"utility/runtime-benchmarks",
-	"vesting/runtime-benchmarks",
+	"pallet-balances/runtime-benchmarks",
+	"pallet-collective/runtime-benchmarks",
+	"pallet-democracy/runtime-benchmarks",
+	"pallet-elections-phragmen/runtime-benchmarks",
+	"pallet-identity/runtime-benchmarks",
+	"pallet-im-online/runtime-benchmarks",
+	"pallet-scheduler/runtime-benchmarks",
+	"pallet-society/runtime-benchmarks",
+	"pallet-staking/runtime-benchmarks",
+	"pallet-timestamp/runtime-benchmarks",
+	"pallet-treasury/runtime-benchmarks",
+	"pallet-utility/runtime-benchmarks",
+	"pallet-vesting/runtime-benchmarks",
 	"pallet-offences-benchmarking",
 	"pallet-session-benchmarking",
 	"hex-literal",
 ]
+# When enabled, the runtime api will not be built.
+#
+# This is required by Cumulus to access certain types of the
+# runtime without clashing with the runtime api exported functions
+# in WASM.
+disable-runtime-api = []
diff --git a/runtime/kusama/build.rs b/runtime/kusama/build.rs
index 56051bd627f6c37b55324eaa425df106bbdaa2cb..af219a29319898d2f6180ef13bbe5263cd114727 100644
--- a/runtime/kusama/build.rs
+++ b/runtime/kusama/build.rs
@@ -19,7 +19,7 @@ use wasm_builder_runner::WasmBuilder;
 fn main() {
 	WasmBuilder::new()
 		.with_current_project()
-		.with_wasm_builder_from_crates("1.0.11")
+		.with_wasm_builder_from_crates("2.0.0")
 		.import_memory()
 		.export_heap_base()
 		.build()
diff --git a/runtime/kusama/src/constants.rs b/runtime/kusama/src/constants.rs
index e06325d1bb95eca4ecbbfd6736fc161a2e603601..5d81cf0bb853881906ce71473ddea03f11ec2c12 100644
--- a/runtime/kusama/src/constants.rs
+++ b/runtime/kusama/src/constants.rs
@@ -16,7 +16,7 @@
 
 /// Money matters.
 pub mod currency {
-	use primitives::Balance;
+	use primitives::v0::Balance;
 
 	pub const DOTS: Balance = 1_000_000_000_000;
 	pub const DOLLARS: Balance = DOTS / 6;
@@ -30,7 +30,7 @@ pub mod currency {
 
 /// Time and blocks.
 pub mod time {
-	use primitives::{Moment, BlockNumber};
+	use primitives::v0::{Moment, BlockNumber};
 	// Kusama & mainnet
 	pub const MILLISECS_PER_BLOCK: Moment = 6000;
 	// Testnet
@@ -55,7 +55,7 @@ pub mod time {
 /// Fee-related.
 pub mod fee {
 	pub use sp_runtime::Perbill;
-	use primitives::Balance;
+	use primitives::v0::Balance;
 	use runtime_common::ExtrinsicBaseWeight;
 	use frame_support::weights::{
 		WeightToFeePolynomial, WeightToFeeCoefficient, WeightToFeeCoefficients,
@@ -69,7 +69,7 @@ pub mod fee {
 	/// node's balance type.
 	///
 	/// This should typically create a mapping between the following ranges:
-	///   - [0, system::MaximumBlockWeight]
+	///   - [0, frame_system::MaximumBlockWeight]
 	///   - [Balance::min, Balance::max]
 	///
 	/// Yet, it can be used for any other sort of change to weight-fee. Some examples being:
diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs
index 9b00eda2b2dfe497b19806a22c58bc8757299f67..41ecdba4dd5e03fe7ad17a0370499a9beba2c142 100644
--- a/runtime/kusama/src/lib.rs
+++ b/runtime/kusama/src/lib.rs
@@ -23,16 +23,16 @@
 use sp_std::prelude::*;
 use sp_core::u32_trait::{_1, _2, _3, _4, _5};
 use codec::{Encode, Decode};
-use primitives::{
+use primitives::v1::{
 	AccountId, AccountIndex, Balance, BlockNumber, Hash, Nonce, Signature, Moment,
-	parachain::{self, ActiveParas, AbridgedCandidateReceipt, SigningContext},
 };
+use primitives::v0 as p_v0;
 use runtime_common::{
-	attestations, claims, parachains, registrar, slots, SlowAdjustingFeeUpdate,
+	dummy, claims, SlowAdjustingFeeUpdate,
 	impls::{CurrencyToVoteHandler, ToAuthor},
 	NegativeImbalance, BlockHashCount, MaximumBlockWeight, AvailableBlockRatio,
 	MaximumBlockLength, BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight,
-	MaximumExtrinsicWeight,
+	MaximumExtrinsicWeight, ParachainSessionKeyPlaceholder,
 };
 use sp_runtime::{
 	create_runtime_str, generic, impl_opaque_keys, ModuleId,
@@ -46,10 +46,10 @@ use sp_runtime::{
 };
 #[cfg(feature = "runtime-benchmarks")]
 use sp_runtime::RuntimeString;
-use version::RuntimeVersion;
-use grandpa::{AuthorityId as GrandpaId, fg_primitives};
+use sp_version::RuntimeVersion;
+use pallet_grandpa::{AuthorityId as GrandpaId, fg_primitives};
 #[cfg(any(feature = "std", test))]
-use version::NativeVersion;
+use sp_version::NativeVersion;
 use sp_core::OpaqueMetadata;
 use sp_staking::SessionIndex;
 use frame_support::{
@@ -57,26 +57,27 @@ use frame_support::{
 	traits::{KeyOwnerProofSystem, SplitTwoWays, Randomness, LockIdentifier, Filter, InstanceFilter},
 	weights::Weight,
 };
-use system::{EnsureRoot, EnsureOneOf};
-use im_online::sr25519::AuthorityId as ImOnlineId;
+use frame_system::{EnsureRoot, EnsureOneOf};
+use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
 use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId;
-use transaction_payment_rpc_runtime_api::RuntimeDispatchInfo;
-use session::{historical as session_historical};
+use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo;
+use pallet_session::{historical as session_historical};
 use static_assertions::const_assert;
 
 #[cfg(feature = "std")]
-pub use staking::StakerStatus;
+pub use pallet_staking::StakerStatus;
 #[cfg(any(feature = "std", test))]
 pub use sp_runtime::BuildStorage;
-pub use timestamp::Call as TimestampCall;
-pub use balances::Call as BalancesCall;
-pub use attestations::{Call as AttestationsCall, MORE_ATTESTATIONS_IDENTIFIER};
-pub use parachains::Call as ParachainsCall;
+pub use pallet_timestamp::Call as TimestampCall;
+pub use pallet_balances::Call as BalancesCall;
 
 /// Constant values used within the runtime.
 pub mod constants;
 use constants::{time::*, currency::*, fee::*};
 
+// Weights used in the runtime.
+mod weights;
+
 // Make the WASM binary available.
 #[cfg(feature = "std")]
 include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
@@ -86,10 +87,13 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("kusama"),
 	impl_name: create_runtime_str!("parity-kusama"),
 	authoring_version: 2,
-	spec_version: 2012,
+	spec_version: 2023,
 	impl_version: 0,
+	#[cfg(not(feature = "disable-runtime-api"))]
 	apis: RUNTIME_API_VERSIONS,
-	transaction_version: 1,
+	#[cfg(feature = "disable-runtime-api")]
+	apis: sp_version::create_apis_vec![[]],
+	transaction_version: 3,
 };
 
 /// Native version.
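(Aside: the `WeightToFeePolynomial` comment in the constants.rs hunk above describes mapping `[0, frame_system::MaximumBlockWeight]` onto `[Balance::min, Balance::max]`. Below is a minimal sketch of the shape such an implementation takes, using only the types imported in that hunk; the `IllustrativeWeightToFee` name, the `u128` stand-in balance type, and the single linear coefficient are assumptions for illustration and are not Kusama's actual fee curve.)

```rust
use frame_support::weights::{
	WeightToFeePolynomial, WeightToFeeCoefficient, WeightToFeeCoefficients,
};
use smallvec::smallvec;
use sp_runtime::Perbill;

// Stand-in for the runtime's balance type.
type Balance = u128;

pub struct IllustrativeWeightToFee;

impl WeightToFeePolynomial for IllustrativeWeightToFee {
	type Balance = Balance;

	fn polynomial() -> WeightToFeeCoefficients<Self::Balance> {
		// A single degree-1 term, i.e. fee = 1 * weight. A real runtime would
		// pick the coefficient so that the base extrinsic weight maps to a
		// sensible fraction of the smallest useful fee.
		smallvec![WeightToFeeCoefficient {
			degree: 1,
			negative: false,
			coeff_frac: Perbill::zero(),
			coeff_integer: 1,
		}]
	}
}
```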
@@ -104,22 +108,22 @@ pub fn native_version() -> NativeVersion { /// Avoid processing transactions from slots and parachain registrar. pub struct BaseFilter; impl Filter for BaseFilter { - fn filter(call: &Call) -> bool { - !matches!(call, Call::Slots(_) | Call::Registrar(_)) + fn filter(_: &Call) -> bool { + true } } type MoreThanHalfCouncil = EnsureOneOf< AccountId, EnsureRoot, - collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> >; parameter_types! { pub const Version: RuntimeVersion = VERSION; } -impl system::Trait for Runtime { +impl frame_system::Trait for Runtime { type BaseCallFilter = BaseFilter; type Origin = Origin; type Call = Call; @@ -141,16 +145,20 @@ impl system::Trait for Runtime { type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type ModuleToIndex = ModuleToIndex; - type AccountData = balances::AccountData; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = weights::frame_system::WeightInfo; } -impl scheduler::Trait for Runtime { +impl pallet_scheduler::Trait for Runtime { type Event = Event; type Origin = Origin; + type PalletsOrigin = OriginCaller; type Call = Call; type MaximumWeight = MaximumBlockWeight; + type ScheduleOrigin = EnsureRoot; + type WeightInfo = (); } parameter_types! { @@ -158,23 +166,39 @@ parameter_types! { pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl babe::Trait for Runtime { +impl pallet_babe::Trait for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; // session module is the trigger - type EpochChangeTrigger = babe::ExternalTrigger; + type EpochChangeTrigger = pallet_babe::ExternalTrigger; + + type KeyOwnerProofSystem = Historical; + + type KeyOwnerProof = >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; + + type HandleEquivocation = + pallet_babe::EquivocationHandler; } parameter_types! { pub const IndexDeposit: Balance = 1 * DOLLARS; } -impl indices::Trait for Runtime { +impl pallet_indices::Trait for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; type Event = Event; + type WeightInfo = (); } parameter_types! { @@ -189,19 +213,20 @@ pub type DealWithFees = SplitTwoWays< _1, ToAuthor, // 1 part (20%) goes to the block author. >; -impl balances::Trait for Runtime { +impl pallet_balances::Trait for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = weights::pallet_balances::WeightInfo; } parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; } -impl transaction_payment::Trait for Runtime { +impl pallet_transaction_payment::Trait for Runtime { type Currency = Balances; type OnTransactionPayment = DealWithFees; type TransactionByteFee = TransactionByteFee; @@ -212,10 +237,11 @@ impl transaction_payment::Trait for Runtime { parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl timestamp::Trait for Runtime { +impl pallet_timestamp::Trait for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; + type WeightInfo = weights::pallet_timestamp::WeightInfo; } parameter_types! { @@ -223,8 +249,8 @@ parameter_types! 
{ } // TODO: substrate#2986 implement this properly -impl authorship::Trait for Runtime { - type FindAuthor = session::FindAccountFromAuthorIndex; +impl pallet_authorship::Trait for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); type EventHandler = (Staking, ImOnline); @@ -240,7 +266,7 @@ impl_opaque_keys! { pub grandpa: Grandpa, pub babe: Babe, pub im_online: ImOnline, - pub parachain_validator: Parachains, + pub parachain_validator: ParachainSessionKeyPlaceholder, pub authority_discovery: AuthorityDiscovery, } } @@ -249,21 +275,22 @@ parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl session::Trait for Runtime { +impl pallet_session::Trait for Runtime { type Event = Event; type ValidatorId = AccountId; - type ValidatorIdOf = staking::StashOf; + type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; - type SessionManager = session::historical::NoteHistoricalRoot; + type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); } -impl session::historical::Trait for Runtime { - type FullIdentification = staking::Exposure; - type FullIdentificationOf = staking::ExposureOf; +impl pallet_session::historical::Trait for Runtime { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } // TODO #6469: This shouldn't be static, but a lazily cached value, not built unless needed, and @@ -286,9 +313,9 @@ parameter_types! { // Six sessions in an era (6 hours). pub const SessionsPerEra: SessionIndex = 6; // 28 eras for unbonding (7 days). - pub const BondingDuration: staking::EraIndex = 28; - // 28 eras in which slashes can be cancelled (7 days). - pub const SlashDeferDuration: staking::EraIndex = 28; + pub const BondingDuration: pallet_staking::EraIndex = 28; + // 27 eras in which slashes can be cancelled (slightly less than 7 days). + pub const SlashDeferDuration: pallet_staking::EraIndex = 27; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 64; // quarter of the last session will be for election. @@ -300,10 +327,10 @@ parameter_types! { type SlashCancelOrigin = EnsureOneOf< AccountId, EnsureRoot, - collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective> >; -impl staking::Trait for Runtime { +impl pallet_staking::Trait for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVoteHandler; @@ -325,6 +352,7 @@ impl staking::Trait for Runtime { type UnsignedPriority = StakingUnsignedPriority; type MaxIterations = MaxIterations; type MinSolutionScoreBump = MinSolutionScoreBump; + type WeightInfo = (); } parameter_types! { @@ -340,7 +368,7 @@ parameter_types! { pub const MaxVotes: u32 = 100; } -impl democracy::Trait for Runtime { +impl pallet_democracy::Trait for Runtime { type Proposal = Call; type Event = Event; type Currency = Balances; @@ -349,29 +377,31 @@ impl democracy::Trait for Runtime { type VotingPeriod = VotingPeriod; type MinimumDeposit = MinimumDeposit; /// A straight majority of the council can decide what their next motion is. 
- type ExternalOrigin = collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; + type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; /// A majority can have the next scheduled referendum be a straight majority-carries vote. - type ExternalMajorityOrigin = collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; + type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; /// A unanimous council can have the next scheduled referendum be a straight default-carries /// (NTB) vote. - type ExternalDefaultOrigin = collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; + type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote /// be tabled immediately and with a shorter voting/enactment period. - type FastTrackOrigin = collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; - type InstantOrigin = collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; + type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; + type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; type InstantAllowed = InstantAllowed; type FastTrackVotingPeriod = FastTrackVotingPeriod; // To cancel a proposal which has been passed, 2/3 of the council must agree to it. - type CancellationOrigin = collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; + type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; // Any single technical committee member may veto a coming council proposal, however they can // only do it once and it lasts only for the cooloff period. - type VetoOrigin = collective::EnsureMember; + type VetoOrigin = pallet_collective::EnsureMember; type CooloffPeriod = CooloffPeriod; type PreimageByteDeposit = PreimageByteDeposit; type Slash = Treasury; type Scheduler = Scheduler; + type PalletsOrigin = OriginCaller; type MaxVotes = MaxVotes; - type OperationalPreimageOrigin = collective::EnsureMember; + type OperationalPreimageOrigin = pallet_collective::EnsureMember; + type WeightInfo = weights::pallet_democracy::WeightInfo; } parameter_types! { @@ -379,13 +409,14 @@ parameter_types! { pub const CouncilMaxProposals: u32 = 100; } -type CouncilCollective = collective::Instance1; -impl collective::Trait for Runtime { +type CouncilCollective = pallet_collective::Instance1; +impl pallet_collective::Trait for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; type MotionDuration = CouncilMotionDuration; type MaxProposals = CouncilMaxProposals; + type WeightInfo = (); } parameter_types! { @@ -393,14 +424,14 @@ parameter_types! { pub const VotingBond: Balance = 5 * CENTS; /// Daily council elections. pub const TermDuration: BlockNumber = 24 * HOURS; - pub const DesiredMembers: u32 = 17; - pub const DesiredRunnersUp: u32 = 7; + pub const DesiredMembers: u32 = 19; + pub const DesiredRunnersUp: u32 = 19; pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect"; } // Make sure that there are no more than MAX_MEMBERS members elected via phragmen. 
-const_assert!(DesiredMembers::get() <= collective::MAX_MEMBERS); +const_assert!(DesiredMembers::get() <= pallet_collective::MAX_MEMBERS); -impl elections_phragmen::Trait for Runtime { +impl pallet_elections_phragmen::Trait for Runtime { type Event = Event; type Currency = Balances; type ChangeMembers = Council; @@ -415,6 +446,7 @@ impl elections_phragmen::Trait for Runtime { type DesiredRunnersUp = DesiredRunnersUp; type TermDuration = TermDuration; type ModuleId = ElectionsPhragmenModuleId; + type WeightInfo = (); } parameter_types! { @@ -422,16 +454,17 @@ parameter_types! { pub const TechnicalMaxProposals: u32 = 100; } -type TechnicalCollective = collective::Instance2; -impl collective::Trait for Runtime { +type TechnicalCollective = pallet_collective::Instance2; +impl pallet_collective::Trait for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; type MotionDuration = TechnicalMotionDuration; type MaxProposals = TechnicalMaxProposals; + type WeightInfo = (); } -impl membership::Trait for Runtime { +impl pallet_membership::Trait for Runtime { type Event = Event; type AddOrigin = MoreThanHalfCouncil; type RemoveOrigin = MoreThanHalfCouncil; @@ -446,7 +479,7 @@ parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); pub const ProposalBondMinimum: Balance = 20 * DOLLARS; pub const SpendPeriod: BlockNumber = 6 * DAYS; - pub const Burn: Permill = Permill::from_percent(0); + pub const Burn: Permill = Permill::from_perthousand(2); pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); pub const TipCountdown: BlockNumber = 1 * DAYS; @@ -458,10 +491,10 @@ parameter_types! { type ApproveOrigin = EnsureOneOf< AccountId, EnsureRoot, - collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> >; -impl treasury::Trait for Runtime { +impl pallet_treasury::Trait for Runtime { type Currency = Balances; type ApproveOrigin = ApproveOrigin; type RejectOrigin = MoreThanHalfCouncil; @@ -476,21 +509,24 @@ impl treasury::Trait for Runtime { type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type BurnDestination = Society; type ModuleId = TreasuryModuleId; + type WeightInfo = (); } parameter_types! { pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); } -impl offences::Trait for Runtime { +impl pallet_offences::Trait for Runtime { type Event = Event; - type IdentificationTuple = session::historical::IdentificationTuple; + type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); } -impl authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Trait for Runtime {} parameter_types! { pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _; @@ -501,15 +537,16 @@ parameter_types! 
{ pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); } -impl im_online::Trait for Runtime { +impl pallet_im_online::Trait for Runtime { type AuthorityId = ImOnlineId; type Event = Event; type ReportUnresponsiveness = Offences; type SessionDuration = SessionDuration; type UnsignedPriority = ImOnlineUnsignedPriority; + type WeightInfo = (); } -impl grandpa::Trait for Runtime { +impl pallet_grandpa::Trait for Runtime { type Event = Event; type Call = Call; @@ -523,76 +560,30 @@ impl grandpa::Trait for Runtime { GrandpaId, )>>::IdentificationTuple; - type HandleEquivocation = grandpa::EquivocationHandler< - Self::KeyOwnerIdentification, - primitives::fisherman::FishermanAppCrypto, - Runtime, - Offences, - >; + type HandleEquivocation = pallet_grandpa::EquivocationHandler; } parameter_types! { - pub WindowSize: BlockNumber = finality_tracker::DEFAULT_WINDOW_SIZE.into(); - pub ReportLatency: BlockNumber = finality_tracker::DEFAULT_REPORT_LATENCY.into(); + pub WindowSize: BlockNumber = pallet_finality_tracker::DEFAULT_WINDOW_SIZE.into(); + pub ReportLatency: BlockNumber = pallet_finality_tracker::DEFAULT_REPORT_LATENCY.into(); } -impl finality_tracker::Trait for Runtime { +impl pallet_finality_tracker::Trait for Runtime { type OnFinalizationStalled = (); type WindowSize = WindowSize; type ReportLatency = ReportLatency; } -parameter_types! { - pub const AttestationPeriod: BlockNumber = 50; -} - -impl attestations::Trait for Runtime { - type AttestationPeriod = AttestationPeriod; - type ValidatorIdentities = parachains::ValidatorIdentities; - type RewardAttestation = Staking; -} - -parameter_types! { - pub const MaxCodeSize: u32 = 10 * 1024 * 1024; // 10 MB - pub const MaxHeadDataSize: u32 = 20 * 1024; // 20 KB - pub const ValidationUpgradeFrequency: BlockNumber = 2 * DAYS; - pub const ValidationUpgradeDelay: BlockNumber = 8 * HOURS; - pub const SlashPeriod: BlockNumber = 7 * DAYS; -} - -impl parachains::Trait for Runtime { - type AuthorityId = primitives::fisherman::FishermanAppCrypto; - type Origin = Origin; - type Call = Call; - type ParachainCurrency = Balances; - type BlockNumberConversion = sp_runtime::traits::Identity; - type Randomness = RandomnessCollectiveFlip; - type ActiveParachains = Registrar; - type Registrar = Registrar; - type MaxCodeSize = MaxCodeSize; - type MaxHeadDataSize = MaxHeadDataSize; - - type ValidationUpgradeFrequency = ValidationUpgradeFrequency; - type ValidationUpgradeDelay = ValidationUpgradeDelay; - type SlashPeriod = SlashPeriod; - - type Proof = sp_session::MembershipProof; - type KeyOwnerProofSystem = session::historical::Module; - type IdentificationTuple = )>>::IdentificationTuple; - type ReportOffence = Offences; - type BlockHashConversion = sp_runtime::traits::Identity; -} - /// Submits transaction with the node's public and signature type. Adheres to the signed extension /// format of the chain. -impl system::offchain::CreateSignedTransaction for Runtime where +impl frame_system::offchain::CreateSignedTransaction for Runtime where Call: From, { - fn create_transaction>( + fn create_transaction>( call: Call, public: ::Signer, account: AccountId, - nonce: ::Index, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { // take the biggest period possible. 
let period = BlockHashCount::get() @@ -607,16 +598,13 @@ impl system::offchain::CreateSignedTransaction for Runtime .saturating_sub(1); let tip = 0; let extra: SignedExtra = ( - system::CheckSpecVersion::::new(), - system::CheckTxVersion::::new(), - system::CheckGenesis::::new(), - system::CheckMortality::::from(generic::Era::mortal(period, current_block)), - system::CheckNonce::::from(nonce), - system::CheckWeight::::new(), - transaction_payment::ChargeTransactionPayment::::from(tip), - registrar::LimitParathreadCommits::::new(), - parachains::ValidateDoubleVoteReports::::new(), - grandpa::ValidateEquivocationReport::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckMortality::::from(generic::Era::mortal(period, current_block)), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), ); let raw_payload = SignedPayload::new(call, extra).map_err(|e| { debug::warn!("Unable to create signed payload: {:?}", e); @@ -629,48 +617,18 @@ impl system::offchain::CreateSignedTransaction for Runtime } } -impl system::offchain::SigningTypes for Runtime { +impl frame_system::offchain::SigningTypes for Runtime { type Public = ::Signer; type Signature = Signature; } -impl system::offchain::SendTransactionTypes for Runtime where +impl frame_system::offchain::SendTransactionTypes for Runtime where Call: From, { type OverarchingCall = Call; type Extrinsic = UncheckedExtrinsic; } -parameter_types! { - pub const ParathreadDeposit: Balance = 5 * DOLLARS; - pub const QueueSize: usize = 2; - pub const MaxRetries: u32 = 3; -} - -impl registrar::Trait for Runtime { - type Event = Event; - type Origin = Origin; - type Currency = Balances; - type ParathreadDeposit = ParathreadDeposit; - type SwapAux = Slots; - type QueueSize = QueueSize; - type MaxRetries = MaxRetries; -} - -parameter_types! { - pub const LeasePeriod: BlockNumber = 100_000; - pub const EndingPeriod: BlockNumber = 1000; -} - -impl slots::Trait for Runtime { - type Event = Event; - type Currency = Balances; - type Parachains = Registrar; - type LeasePeriod = LeasePeriod; - type EndingPeriod = EndingPeriod; - type Randomness = RandomnessCollectiveFlip; -} - parameter_types! { pub Prefix: &'static [u8] = b"Pay KSMs to the Kusama account:"; } @@ -679,7 +637,7 @@ impl claims::Trait for Runtime { type Event = Event; type VestingSchedule = Vesting; type Prefix = Prefix; - type MoveClaimOrigin = collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type MoveClaimOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; } parameter_types! { @@ -692,7 +650,7 @@ parameter_types! { pub const MaxRegistrars: u32 = 20; } -impl identity::Trait for Runtime { +impl pallet_identity::Trait for Runtime { type Event = Event; type Currency = Balances; type Slashed = Treasury; @@ -704,11 +662,13 @@ impl identity::Trait for Runtime { type MaxRegistrars = MaxRegistrars; type RegistrarOrigin = MoreThanHalfCouncil; type ForceOrigin = MoreThanHalfCouncil; + type WeightInfo = (); } -impl utility::Trait for Runtime { +impl pallet_utility::Trait for Runtime { type Event = Event; type Call = Call; + type WeightInfo = weights::pallet_utility::WeightInfo; } parameter_types! { @@ -719,13 +679,14 @@ parameter_types! 
{ pub const MaxSignatories: u16 = 100; } -impl multisig::Trait for Runtime { +impl pallet_multisig::Trait for Runtime { type Event = Event; type Call = Call; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; + type WeightInfo = (); } parameter_types! { @@ -735,7 +696,7 @@ parameter_types! { pub const RecoveryDeposit: Balance = 5 * DOLLARS; } -impl recovery::Trait for Runtime { +impl pallet_recovery::Trait for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -756,7 +717,7 @@ parameter_types! { pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); } -impl society::Trait for Runtime { +impl pallet_society::Trait for Runtime { type Event = Event; type Currency = Balances; type Randomness = RandomnessCollectiveFlip; @@ -767,8 +728,8 @@ impl society::Trait for Runtime { type MembershipChanged = (); type RotationPeriod = RotationPeriod; type MaxLockDuration = MaxLockDuration; - type FounderSetOrigin = collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; - type SuspensionJudgementOrigin = society::EnsureFounder; + type FounderSetOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type SuspensionJudgementOrigin = pallet_society::EnsureFounder; type ChallengePeriod = ChallengePeriod; type ModuleId = SocietyModuleId; } @@ -777,11 +738,12 @@ parameter_types! { pub const MinVestedTransfer: Balance = 100 * DOLLARS; } -impl vesting::Trait for Runtime { +impl pallet_vesting::Trait for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); } parameter_types! { @@ -790,8 +752,13 @@ parameter_types! { // Additional storage item size of 33 bytes. pub const ProxyDepositFactor: Balance = deposit(0, 33); pub const MaxProxies: u16 = 32; + pub const AnnouncementDepositBase: Balance = deposit(1, 8); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; } +impl dummy::Trait for Runtime { } + /// The type used to represent the kinds of proxying allowed. #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug)] pub enum ProxyType { @@ -799,6 +766,7 @@ pub enum ProxyType { NonTransfer, Governance, Staking, + IdentityJudgement, } impl Default for ProxyType { fn default() -> Self { Self::Any } } impl InstanceFilter for ProxyType { @@ -809,9 +777,9 @@ impl InstanceFilter for ProxyType { Call::System(..) | Call::Babe(..) | Call::Timestamp(..) | - Call::Indices(indices::Call::claim(..)) | - Call::Indices(indices::Call::free(..)) | - Call::Indices(indices::Call::freeze(..)) | + Call::Indices(pallet_indices::Call::claim(..)) | + Call::Indices(pallet_indices::Call::free(..)) | + Call::Indices(pallet_indices::Call::freeze(..)) | // Specifically omitting Indices `transfer`, `force_transfer` // Specifically omitting the entire Balances pallet Call::Authorship(..) | @@ -829,22 +797,22 @@ impl InstanceFilter for ProxyType { Call::TechnicalMembership(..) | Call::Treasury(..) | Call::Claims(..) | - Call::Parachains(..) | - Call::Attestations(..) | - Call::Slots(..) | - Call::Registrar(..) | + Call::DummyParachains(..) | + Call::DummyAttestations(..) | + Call::DummySlots(..) | + Call::DummyRegistrar(..) | Call::Utility(..) | Call::Identity(..) | Call::Society(..) 
| - Call::Recovery(recovery::Call::as_recovered(..)) | - Call::Recovery(recovery::Call::vouch_recovery(..)) | - Call::Recovery(recovery::Call::claim_recovery(..)) | - Call::Recovery(recovery::Call::close_recovery(..)) | - Call::Recovery(recovery::Call::remove_recovery(..)) | - Call::Recovery(recovery::Call::cancel_recovered(..)) | + Call::Recovery(pallet_recovery::Call::as_recovered(..)) | + Call::Recovery(pallet_recovery::Call::vouch_recovery(..)) | + Call::Recovery(pallet_recovery::Call::claim_recovery(..)) | + Call::Recovery(pallet_recovery::Call::close_recovery(..)) | + Call::Recovery(pallet_recovery::Call::remove_recovery(..)) | + Call::Recovery(pallet_recovery::Call::cancel_recovered(..)) | // Specifically omitting Recovery `create_recovery`, `initiate_recovery` - Call::Vesting(vesting::Call::vest(..)) | - Call::Vesting(vesting::Call::vest_other(..)) | + Call::Vesting(pallet_vesting::Call::vest(..)) | + Call::Vesting(pallet_vesting::Call::vest_other(..)) | // Specifically omitting Vesting `vested_transfer`, and `force_vested_transfer` Call::Scheduler(..) | Call::Proxy(..) | @@ -852,14 +820,15 @@ impl InstanceFilter for ProxyType { ), ProxyType::Governance => matches!(c, Call::Democracy(..) | Call::Council(..) | Call::TechnicalCommittee(..) - | Call::ElectionsPhragmen(..) | Call::Treasury(..) - | Call::Utility(utility::Call::batch(..)) - | Call::Utility(utility::Call::as_limited_sub(..)) + | Call::ElectionsPhragmen(..) | Call::Treasury(..) | Call::Utility(..) ), ProxyType::Staking => matches!(c, - Call::Staking(..) | Call::Utility(utility::Call::batch(..)) - | Call::Utility(utility::Call::as_limited_sub(..)) + Call::Staking(..) | Call::Utility(..) ), + ProxyType::IdentityJudgement => matches!(c, + Call::Identity(pallet_identity::Call::provide_judgement(..)) + | Call::Utility(pallet_utility::Call::batch(..)) + ) } } fn is_superset(&self, o: &Self) -> bool { @@ -873,7 +842,7 @@ impl InstanceFilter for ProxyType { } } -impl proxy::Trait for Runtime { +impl pallet_proxy::Trait for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -881,78 +850,93 @@ impl proxy::Trait for Runtime { type ProxyDepositBase = ProxyDepositBase; type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = MaxProxies; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; +} + +pub struct CustomOnRuntimeUpgrade; +impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + if pallet_scheduler::Module::::migrate_v1_to_t2() { + ::MaximumBlockWeight::get() + } else { + ::DbWeight::get().reads(1) + 500_000_000 + } + } } construct_runtime! { pub enum Runtime where Block = Block, - NodeBlock = primitives::Block, + NodeBlock = primitives::v1::Block, UncheckedExtrinsic = UncheckedExtrinsic { // Basic stuff; balances is uncallable initially. - System: system::{Module, Call, Storage, Config, Event}, - RandomnessCollectiveFlip: randomness_collective_flip::{Module, Storage}, + System: frame_system::{Module, Call, Storage, Config, Event}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Storage}, // Must be before session. 
- Babe: babe::{Module, Call, Storage, Config, Inherent(Timestamp)}, + Babe: pallet_babe::{Module, Call, Storage, Config, Inherent, ValidateUnsigned}, - Timestamp: timestamp::{Module, Call, Storage, Inherent}, - Indices: indices::{Module, Call, Storage, Config, Event}, - Balances: balances::{Module, Call, Storage, Config, Event}, - TransactionPayment: transaction_payment::{Module, Storage}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + Indices: pallet_indices::{Module, Call, Storage, Config, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Module, Storage}, // Consensus support. - Authorship: authorship::{Module, Call, Storage}, - Staking: staking::{Module, Call, Storage, Config, Event, ValidateUnsigned}, - Offences: offences::{Module, Call, Storage, Event}, + Authorship: pallet_authorship::{Module, Call, Storage}, + Staking: pallet_staking::{Module, Call, Storage, Config, Event, ValidateUnsigned}, + Offences: pallet_offences::{Module, Call, Storage, Event}, Historical: session_historical::{Module}, - Session: session::{Module, Call, Storage, Event, Config}, - FinalityTracker: finality_tracker::{Module, Call, Storage, Inherent}, - Grandpa: grandpa::{Module, Call, Storage, Config, Event}, - ImOnline: im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, - AuthorityDiscovery: authority_discovery::{Module, Call, Config}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + FinalityTracker: pallet_finality_tracker::{Module, Call, Storage, Inherent}, + Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event, ValidateUnsigned}, + ImOnline: pallet_im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, // Governance stuff; uncallable initially. - Democracy: democracy::{Module, Call, Storage, Config, Event}, - Council: collective::::{Module, Call, Storage, Origin, Event, Config}, - TechnicalCommittee: collective::::{Module, Call, Storage, Origin, Event, Config}, - ElectionsPhragmen: elections_phragmen::{Module, Call, Storage, Event, Config}, - TechnicalMembership: membership::::{Module, Call, Storage, Event, Config}, - Treasury: treasury::{Module, Call, Storage, Event}, + Democracy: pallet_democracy::{Module, Call, Storage, Config, Event}, + Council: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, + TechnicalCommittee: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, + ElectionsPhragmen: pallet_elections_phragmen::{Module, Call, Storage, Event, Config}, + TechnicalMembership: pallet_membership::::{Module, Call, Storage, Event, Config}, + Treasury: pallet_treasury::{Module, Call, Storage, Event}, // Claims. Usable initially. Claims: claims::{Module, Call, Storage, Event, Config, ValidateUnsigned}, - // Parachains stuff; slots are disabled (no auctions initially). The rest are safe as they - // have no public dispatchables. - Parachains: parachains::{Module, Call, Storage, Config, Inherent, Origin}, - Attestations: attestations::{Module, Call, Storage}, - Slots: slots::{Module, Call, Storage, Event}, - Registrar: registrar::{Module, Call, Storage, Event, Config}, + // Old parachains stuff. All dummies to avoid messing up the transaction indices. + DummyParachains: dummy::::{Module, Call}, + DummyAttestations: dummy::::{Module, Call}, + DummySlots: dummy::::{Module, Call}, + DummyRegistrar: dummy::::{Module, Call}, // Utility module. 
- Utility: utility::{Module, Call, Event}, + Utility: pallet_utility::{Module, Call, Event}, // Less simple identity module. - Identity: identity::{Module, Call, Storage, Event}, + Identity: pallet_identity::{Module, Call, Storage, Event}, // Society module. - Society: society::{Module, Call, Storage, Event}, + Society: pallet_society::{Module, Call, Storage, Event}, // Social recovery module. - Recovery: recovery::{Module, Call, Storage, Event}, + Recovery: pallet_recovery::{Module, Call, Storage, Event}, // Vesting. Usable initially, but removed once all vesting is finished. - Vesting: vesting::{Module, Call, Storage, Event, Config}, + Vesting: pallet_vesting::{Module, Call, Storage, Event, Config}, // System scheduler. - Scheduler: scheduler::{Module, Call, Storage, Event}, + Scheduler: pallet_scheduler::{Module, Call, Storage, Event}, // Proxy module. Late addition. - Proxy: proxy::{Module, Call, Storage, Event}, + Proxy: pallet_proxy::{Module, Call, Storage, Event}, // Multisig module. Late addition. - Multisig: multisig::{Module, Call, Storage, Event}, + Multisig: pallet_multisig::{Module, Call, Storage, Event}, } } @@ -968,26 +952,31 @@ pub type SignedBlock = generic::SignedBlock; pub type BlockId = generic::BlockId; /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( - system::CheckSpecVersion, - system::CheckTxVersion, - system::CheckGenesis, - system::CheckMortality, - system::CheckNonce, - system::CheckWeight, - transaction_payment::ChargeTransactionPayment, - registrar::LimitParathreadCommits, - parachains::ValidateDoubleVoteReports, - grandpa::ValidateEquivocationReport, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Extrinsic type that has already been checked. pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. -pub type Executive = executive::Executive, Runtime, AllModules>; +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllModules, + CustomOnRuntimeUpgrade +>; /// The payload being signed in the transactions. pub type SignedPayload = generic::SignedPayload; +#[cfg(not(feature = "disable-runtime-api"))] sp_api::impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { @@ -1049,42 +1038,55 @@ sp_api::impl_runtime_apis! { } } - impl parachain::ParachainHost for Runtime { - fn validators() -> Vec { - Parachains::authorities() + // Dummy implementation to continue supporting old parachains runtime temporarily. + impl p_v0::ParachainHost for Runtime { + fn validators() -> Vec { + // this is a compile-time check of size equality. note that we don't invoke + // the function and nothing here is unsafe. + let _ = core::mem::transmute::; + + // Yes, these aren't actually the parachain session keys. + // It doesn't matter, but we shouldn't return a zero-sized vector here. 
 
+#[cfg(not(feature = "disable-runtime-api"))]
 sp_api::impl_runtime_apis! {
 	impl sp_api::Core<Block> for Runtime {
 		fn version() -> RuntimeVersion {
@@ -1049,42 +1038,55 @@ sp_api::impl_runtime_apis! {
 		}
 	}

-	impl parachain::ParachainHost<Block> for Runtime {
-		fn validators() -> Vec<parachain::ValidatorId> {
-			Parachains::authorities()
+	// Dummy implementation to continue supporting old parachains runtime temporarily.
+	impl p_v0::ParachainHost<Block> for Runtime {
+		fn validators() -> Vec<p_v0::ValidatorId> {
+			// this is a compile-time check of size equality. note that we don't invoke
+			// the function and nothing here is unsafe.
+			let _ = core::mem::transmute::<AccountId, p_v0::ValidatorId>;
+
+			// Yes, these aren't actually the parachain session keys.
+			// It doesn't matter, but we shouldn't return a zero-sized vector here.
+			// As there are no parachains
+			Session::validators()
+				.into_iter()
+				.map(|k| k.using_encoded(|s| Decode::decode(&mut &s[..]))
+					.expect("correct size and raw-bytes; qed"))
+				.collect()
 		}
-		fn duty_roster() -> parachain::DutyRoster {
-			Parachains::calculate_duty_roster().0
+		fn duty_roster() -> p_v0::DutyRoster {
+			let v = Session::validators();
+			p_v0::DutyRoster { validator_duty: (0..v.len()).map(|_| p_v0::Chain::Relay).collect() }
 		}
-		fn active_parachains() -> Vec<(parachain::Id, Option<(parachain::CollatorId, parachain::Retriable)>)> {
-			Registrar::active_paras()
+		fn active_parachains() -> Vec<(p_v0::Id, Option<(p_v0::CollatorId, p_v0::Retriable)>)> {
+			Vec::new()
 		}
-		fn global_validation_schedule() -> parachain::GlobalValidationSchedule {
-			Parachains::global_validation_schedule()
+		fn global_validation_data() -> p_v0::GlobalValidationData {
+			p_v0::GlobalValidationData {
+				max_code_size: 1,
+				max_head_data_size: 1,
+				block_number: System::block_number().saturating_sub(1),
+			}
 		}
-		fn local_validation_data(id: parachain::Id) -> Option<parachain::LocalValidationData> {
-			Parachains::current_local_validation_data(&id)
+		fn local_validation_data(_id: p_v0::Id) -> Option<p_v0::LocalValidationData> {
+			None
 		}
-		fn parachain_code(id: parachain::Id) -> Option<parachain::ValidationCode> {
-			Parachains::parachain_code(&id)
+		fn parachain_code(_id: p_v0::Id) -> Option<p_v0::ValidationCode> {
+			None
 		}
-		fn get_heads(extrinsics: Vec<<Block as BlockT>::Extrinsic>)
-			-> Option<Vec<AbridgedCandidateReceipt>>
+		fn get_heads(_extrinsics: Vec<<Block as BlockT>::Extrinsic>)
+			-> Option<Vec<p_v0::AbridgedCandidateReceipt>>
 		{
-			extrinsics
-				.into_iter()
-				.find_map(|ex| match UncheckedExtrinsic::decode(&mut ex.encode().as_slice()) {
-					Ok(ex) => match ex.function {
-						Call::Parachains(ParachainsCall::set_heads(heads)) => {
-							Some(heads.into_iter().map(|c| c.candidate).collect())
-						}
-						_ => None,
-					}
-					Err(_) => None,
-				})
+			None
 		}
-		fn signing_context() -> SigningContext {
-			Parachains::signing_context()
+		fn signing_context() -> p_v0::SigningContext {
+			p_v0::SigningContext {
+				parent_hash: System::parent_hash(),
+				session_index: Session::current_index(),
+			}
+		}
+		fn downward_messages(_id: p_v0::Id) -> Vec<p_v0::DownwardMessage> {
+			Vec::new()
 		}
 	}
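The `transmute` line above is a compile-time size check: merely naming `core::mem::transmute` with two type parameters forces the compiler to verify that the types have the same size, without calling anything and without an `unsafe` block. In isolation:

```rust
fn main() {
	// Compiles: u32 and [u8; 4] are both 4 bytes, and the function is never invoked,
	// so this costs nothing at runtime.
	let _ = core::mem::transmute::<u32, [u8; 4]>;

	// Uncommenting this line fails with E0512 (types differ in size: 4 vs 8 bytes):
	// let _ = core::mem::transmute::<u32, u64>;
}
```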
@@ -1093,7 +1095,7 @@ sp_api::impl_runtime_apis! {
 			Grandpa::grandpa_authorities()
 		}

-		fn submit_report_equivocation_extrinsic(
+		fn submit_report_equivocation_unsigned_extrinsic(
 			equivocation_proof: fg_primitives::EquivocationProof<
 				<Block as BlockT>::Hash,
 				sp_runtime::traits::NumberFor<Block>,
@@ -1102,7 +1104,7 @@ sp_api::impl_runtime_apis! {
 			>,
 			key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof,
 		) -> Option<()> {
 			let key_owner_proof = key_owner_proof.decode()?;

-			Grandpa::submit_report_equivocation_extrinsic(
+			Grandpa::submit_unsigned_equivocation_report(
 				equivocation_proof,
 				key_owner_proof,
 			)
@@ -1140,6 +1142,29 @@ sp_api::impl_runtime_apis! {
 		fn current_epoch_start() -> babe_primitives::SlotNumber {
 			Babe::current_epoch_start()
 		}
+
+		fn generate_key_ownership_proof(
+			_slot_number: babe_primitives::SlotNumber,
+			authority_id: babe_primitives::AuthorityId,
+		) -> Option<babe_primitives::OpaqueKeyOwnershipProof> {
+			use codec::Encode;
+
+			Historical::prove((babe_primitives::KEY_TYPE, authority_id))
+				.map(|p| p.encode())
+				.map(babe_primitives::OpaqueKeyOwnershipProof::new)
+		}
+
+		fn submit_report_equivocation_unsigned_extrinsic(
+			equivocation_proof: babe_primitives::EquivocationProof<<Block as BlockT>::Header>,
+			key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof,
+		) -> Option<()> {
+			let key_owner_proof = key_owner_proof.decode()?;
+
+			Babe::submit_unsigned_equivocation_report(
+				equivocation_proof,
+				key_owner_proof,
+			)
+		}
 	}

 	impl authority_discovery_primitives::AuthorityDiscoveryApi<Block> for Runtime {
@@ -1160,18 +1185,17 @@ sp_api::impl_runtime_apis! {
 		}
 	}

-	impl system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
+	impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
 		fn account_nonce(account: AccountId) -> Nonce {
 			System::account_nonce(account)
 		}
 	}

-	impl transaction_payment_rpc_runtime_api::TransactionPaymentApi<
+	impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<
 		Block,
 		Balance,
-		UncheckedExtrinsic,
 	> for Runtime {
-		fn query_info(uxt: UncheckedExtrinsic, len: u32) -> RuntimeDispatchInfo<Balance> {
+		fn query_info(uxt: <Block as BlockT>::Extrinsic, len: u32) -> RuntimeDispatchInfo<Balance> {
 			TransactionPayment::query_info(uxt, len)
 		}
 	}

@@ -1179,14 +1203,9 @@ sp_api::impl_runtime_apis! {
 	#[cfg(feature = "runtime-benchmarks")]
 	impl frame_benchmarking::Benchmark<Block> for Runtime {
 		fn dispatch_benchmark(
-			pallet: Vec<u8>,
-			benchmark: Vec<u8>,
-			lowest_range_values: Vec<u32>,
-			highest_range_values: Vec<u32>,
-			steps: Vec<u32>,
-			repeat: u32,
+			config: frame_benchmarking::BenchmarkConfig
 		) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, RuntimeString> {
-			use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark};
+			use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};
 			// Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues.
 			// To get around that, we separated the Session benchmarks into its own crate, which is why
 			// we need these two lines below.
@@ -1198,44 +1217,41 @@ sp_api::impl_runtime_apis! {
 			impl pallet_offences_benchmarking::Trait for Runtime {}
 			impl frame_system_benchmarking::Trait for Runtime {}

-			let whitelist: Vec<Vec<u8>> = vec![
+			let whitelist: Vec<TrackedStorageKey> = vec![
 				// Block Number
-				// frame_system::Number::<Runtime>::hashed_key().to_vec(),
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
 				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(),
+				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
 				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
 				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
 				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec(),
-				// Caller 0 Account
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
 				// Treasury Account
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec().into(),
 			];

 			let mut batches = Vec::<BenchmarkBatch>::new();
-			let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist);
+			let params = (&config, &whitelist);
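Each whitelisted key is the concatenation of two xxHash-128 digests: twox128 of the pallet prefix followed by twox128 of the storage item name. For instance, the "Block Number" entry above can be re-derived like this (sketch; the `hex` crate is assumed for display):

```rust
use sp_core::hashing::twox_128;

fn main() {
	// twox128("System") ++ twox128("Number") yields the whitelisted "Block Number" key.
	let mut key = twox_128(b"System").to_vec();
	key.extend_from_slice(&twox_128(b"Number"));
	assert_eq!(
		hex::encode(&key),
		"26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac",
	);
}
```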
 			// Polkadot
-			add_benchmark!(params, batches, b"claims", Claims);
+			add_benchmark!(params, batches, claims, Claims);
 			// Substrate
-			add_benchmark!(params, batches, b"balances", Balances);
-			add_benchmark!(params, batches, b"collective", Council);
-			add_benchmark!(params, batches, b"democracy", Democracy);
-			add_benchmark!(params, batches, b"elections-phragmen", ElectionsPhragmen);
-			add_benchmark!(params, batches, b"identity", Identity);
-			add_benchmark!(params, batches, b"im-online", ImOnline);
-			add_benchmark!(params, batches, b"offences", OffencesBench::<Runtime>);
-			add_benchmark!(params, batches, b"scheduler", Scheduler);
-			add_benchmark!(params, batches, b"session", SessionBench::<Runtime>);
-			add_benchmark!(params, batches, b"staking", Staking);
-			add_benchmark!(params, batches, b"system", SystemBench::<Runtime>);
-			add_benchmark!(params, batches, b"timestamp", Timestamp);
-			add_benchmark!(params, batches, b"treasury", Treasury);
-			add_benchmark!(params, batches, b"utility", Utility);
-			add_benchmark!(params, batches, b"vesting", Vesting);
+			add_benchmark!(params, batches, pallet_balances, Balances);
+			add_benchmark!(params, batches, pallet_collective, Council);
+			add_benchmark!(params, batches, pallet_democracy, Democracy);
+			add_benchmark!(params, batches, pallet_elections_phragmen, ElectionsPhragmen);
+			add_benchmark!(params, batches, pallet_identity, Identity);
+			add_benchmark!(params, batches, pallet_im_online, ImOnline);
+			add_benchmark!(params, batches, pallet_offences, OffencesBench::<Runtime>);
+			add_benchmark!(params, batches, pallet_scheduler, Scheduler);
+			add_benchmark!(params, batches, pallet_session, SessionBench::<Runtime>);
+			add_benchmark!(params, batches, pallet_staking, Staking);
+			add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
+			add_benchmark!(params, batches, pallet_timestamp, Timestamp);
+			add_benchmark!(params, batches, pallet_treasury, Treasury);
+			add_benchmark!(params, batches, pallet_utility, Utility);
+			add_benchmark!(params, batches, pallet_vesting, Vesting);

 			if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
 			Ok(batches)
diff --git a/runtime/kusama/src/weights/frame_system.rs b/runtime/kusama/src/weights/frame_system.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9522fa75203906ab3c7264154a4b33835375843c
--- /dev/null
+++ b/runtime/kusama/src/weights/frame_system.rs
@@ -0,0 +1,58 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5
+
+#![allow(unused_parens)]
+
+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
+
+pub struct WeightInfo;
+impl frame_system::WeightInfo for WeightInfo {
+	// WARNING! Some components were not used: ["b"]
+	fn remark() -> Weight {
+		(1305000 as Weight)
+	}
+	fn set_heap_pages() -> Weight {
+		(2023000 as Weight)
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	// WARNING! Some components were not used: ["d"]
+	fn set_changes_trie_config() -> Weight {
+		(10026000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn set_storage(i: u32, ) -> Weight {
+		(0 as Weight)
+			.saturating_add((656000 as Weight).saturating_mul(i as Weight))
+			.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
+	}
+	fn kill_storage(i: u32, ) -> Weight {
+		(4327000 as Weight)
+			.saturating_add((478000 as Weight).saturating_mul(i as Weight))
+			.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
+	}
+	fn kill_prefix(p: u32, ) -> Weight {
+		(8349000 as Weight)
+			.saturating_add((838000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
+	}
+	fn suicide() -> Weight {
+		(29247000 as Weight)
+	}
+}
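To read these generated functions: the first term is measured execution time in weight units, and database traffic is added on top via `DbWeight`. A sketch of what `kill_storage(i)` above works out to, assuming the stock Substrate 2.0 `RocksDbWeight` constants (25 µs per read, 100 µs per write, one weight unit per picosecond); the constants are an assumption, not something this file pins down:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

// Same formula as the generated kill_storage(i) above.
fn kill_storage_weight(i: u32) -> Weight {
	(4_327_000 as Weight)
		.saturating_add((478_000 as Weight).saturating_mul(i as Weight))
		.saturating_add(RocksDbWeight::get().writes(i as Weight))
}

fn main() {
	// 4_327_000 + 10 * 478_000 + 10 * 100_000_000 = 1_009_107_000:
	// deleting 10 keys is dominated by the 10 database writes.
	assert_eq!(kill_storage_weight(10), 1_009_107_000);
}
```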
diff --git a/runtime/kusama/src/weights/mod.rs b/runtime/kusama/src/weights/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a54417c2d0de4b280952f03db51edd61c73ac29d
--- /dev/null
+++ b/runtime/kusama/src/weights/mod.rs
@@ -0,0 +1,24 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+/// A collection of weight modules used for pallets in the runtime.
+
+pub mod frame_system;
+pub mod pallet_balances;
+pub mod pallet_democracy;
+pub mod pallet_timestamp;
+pub mod pallet_utility;
+pub mod pallet_proxy;
diff --git a/runtime/kusama/src/weights/pallet_balances.rs b/runtime/kusama/src/weights/pallet_balances.rs
new file mode 100644
index 0000000000000000000000000000000000000000..53431ba48f2f4d878476122b30daaf481ac03487
--- /dev/null
+++ b/runtime/kusama/src/weights/pallet_balances.rs
@@ -0,0 +1,47 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+/// Weights for the Balances Pallet
+
+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
+pub struct WeightInfo;
+impl pallet_balances::WeightInfo for WeightInfo {
+	fn transfer() -> Weight {
+		(65949000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn transfer_keep_alive() -> Weight {
+		(46665000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn set_balance_creating() -> Weight {
+		(27086000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn set_balance_killing() -> Weight {
+		(33424000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn force_transfer() -> Weight {
+		(65343000 as Weight)
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+}
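These generated structs satisfy each pallet's `WeightInfo` trait, and the runtime selects them with `type WeightInfo = weights::pallet_balances::WeightInfo;` in the pallet's `Trait` impl. A self-contained sketch of that pattern, with illustrative names standing in for the actual FRAME traits:

```rust
// Sketch of the WeightInfo pattern: the pallet defines the trait, `()` keeps
// conservative placeholder weights, and the runtime points at a benchmarked
// implementation once one has been generated.
pub type Weight = u64;

pub trait WeightInfo {
	fn transfer() -> Weight;
}

// Pre-benchmark fallback.
impl WeightInfo for () {
	fn transfer() -> Weight {
		1_000_000_000
	}
}

// Benchmark-derived weights, mirroring the generated file above.
pub struct KusamaWeights;
impl WeightInfo for KusamaWeights {
	fn transfer() -> Weight {
		65_949_000
	}
}

fn main() {
	// The measured weight undercuts the conservative placeholder.
	assert!(KusamaWeights::transfer() < <() as WeightInfo>::transfer());
}
```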
diff --git a/runtime/kusama/src/weights/pallet_democracy.rs b/runtime/kusama/src/weights/pallet_democracy.rs
new file mode 100644
index 0000000000000000000000000000000000000000..676281309c3fac02e24e52dc74ba710fc33a4802
--- /dev/null
+++ b/runtime/kusama/src/weights/pallet_democracy.rs
@@ -0,0 +1,156 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Weights for the Democracy Pallet
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5
+
+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
+
+pub struct WeightInfo;
+impl pallet_democracy::WeightInfo for WeightInfo {
+	fn propose() -> Weight {
+		(49113000 as Weight)
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(3 as Weight))
+	}
+	fn second(s: u32, ) -> Weight {
+		(42067000 as Weight)
+			.saturating_add((220000 as Weight).saturating_mul(s as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn vote_new(r: u32, ) -> Weight {
+		(54159000 as Weight)
+			.saturating_add((252000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(3 as Weight))
+			.saturating_add(DbWeight::get().writes(3 as Weight))
+	}
+	fn vote_existing(r: u32, ) -> Weight {
+		(54145000 as Weight)
+			.saturating_add((262000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(3 as Weight))
+			.saturating_add(DbWeight::get().writes(3 as Weight))
+	}
+	fn emergency_cancel() -> Weight {
+		(31071000 as Weight)
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn external_propose(v: u32, ) -> Weight {
+		(14282000 as Weight)
+			.saturating_add((109000 as Weight).saturating_mul(v as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn external_propose_majority() -> Weight {
+		(3478000 as Weight)
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn external_propose_default() -> Weight {
+		(3442000 as Weight)
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn fast_track() -> Weight {
+		(30820000 as Weight)
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(3 as Weight))
+	}
+	fn veto_external(v: u32, ) -> Weight {
+		(30971000 as Weight)
+			.saturating_add((184000 as Weight).saturating_mul(v as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn cancel_referendum() -> Weight {
+		(20431000 as Weight)
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn cancel_queued(r: u32, ) -> Weight {
+		(42438000 as Weight)
+			.saturating_add((3284000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn on_initialize_base(r: u32, ) -> Weight {
+		(70826000 as Weight)
+			.saturating_add((10716000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(6 as Weight))
+			.saturating_add(DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(DbWeight::get().writes(5 as Weight))
+	}
+	fn delegate(r: u32, ) -> Weight {
+		(72046000 as Weight)
+			.saturating_add((7837000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(4 as Weight))
+			.saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(DbWeight::get().writes(4 as Weight))
+			.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight)))
+	}
+	fn undelegate(r: u32, ) -> Weight {
+		(41028000 as Weight)
+			.saturating_add((7810000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+			.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight)))
+	}
+	fn clear_public_proposals() -> Weight {
+		(3643000 as Weight)
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn note_preimage(b: u32, ) -> Weight {
+		(46629000 as Weight)
+			.saturating_add((4000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn note_imminent_preimage(b: u32, ) -> Weight {
+		(31147000 as Weight)
+			.saturating_add((3000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn reap_preimage(b: u32, ) -> Weight {
+		(42848000 as Weight)
+			.saturating_add((3000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn unlock_remove(r: u32, ) -> Weight {
+		(45333000 as Weight)
+			.saturating_add((171000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(3 as Weight))
+			.saturating_add(DbWeight::get().writes(3 as Weight))
+	}
+	fn unlock_set(r: u32, ) -> Weight {
+		(44424000 as Weight)
+			.saturating_add((291000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(3 as Weight))
+			.saturating_add(DbWeight::get().writes(3 as Weight))
+	}
+	fn remove_vote(r: u32, ) -> Weight {
+		(28250000 as Weight)
+			.saturating_add((283000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn remove_other_vote(r: u32, ) -> Weight {
+		(28250000 as Weight)
+			.saturating_add((283000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+}
diff --git a/runtime/kusama/src/weights/pallet_proxy.rs b/runtime/kusama/src/weights/pallet_proxy.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5d8655e6c3b0fa31d618b6b112e75c44eaf64f23
--- /dev/null
+++ b/runtime/kusama/src/weights/pallet_proxy.rs
@@ -0,0 +1,86 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5
+
+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
+
+pub struct WeightInfo;
+impl pallet_proxy::WeightInfo for WeightInfo {
+	fn proxy(p: u32, ) -> Weight {
+		(26127000 as Weight)
+			.saturating_add((214000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+	}
+	fn proxy_announced(a: u32, p: u32, ) -> Weight {
+		(55405000 as Weight)
+			.saturating_add((774000 as Weight).saturating_mul(a as Weight))
+			.saturating_add((209000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(3 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn remove_announcement(a: u32, p: u32, ) -> Weight {
+		(35879000 as Weight)
+			.saturating_add((783000 as Weight).saturating_mul(a as Weight))
+			.saturating_add((20000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn reject_announcement(a: u32, p: u32, ) -> Weight {
+		(36097000 as Weight)
+			.saturating_add((780000 as Weight).saturating_mul(a as Weight))
+			.saturating_add((12000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn announce(a: u32, p: u32, ) -> Weight {
+		(53769000 as Weight)
+			.saturating_add((675000 as Weight).saturating_mul(a as Weight))
+			.saturating_add((214000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(3 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn add_proxy(p: u32, ) -> Weight {
+		(36082000 as Weight)
+			.saturating_add((234000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn remove_proxy(p: u32, ) -> Weight {
+		(32885000 as Weight)
+			.saturating_add((267000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn remove_proxies(p: u32, ) -> Weight {
+		(31735000 as Weight)
+			.saturating_add((215000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn anonymous(p: u32, ) -> Weight {
+		(50907000 as Weight)
+			.saturating_add((61000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn kill_anonymous(p: u32, ) -> Weight {
+		(33926000 as Weight)
+			.saturating_add((208000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+}
diff --git a/runtime/kusama/src/weights/pallet_timestamp.rs b/runtime/kusama/src/weights/pallet_timestamp.rs
new file mode 100644
index 0000000000000000000000000000000000000000..cfd5f192d35298b512ee75e4d26acf11355ce3ba
--- /dev/null
+++ b/runtime/kusama/src/weights/pallet_timestamp.rs
@@ -0,0 +1,34 @@
+// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5
+
+#![allow(unused_parens)]
+
+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
+
+pub struct WeightInfo;
+impl pallet_timestamp::WeightInfo for WeightInfo {
+	// WARNING! Some components were not used: ["t"]
+	fn set() -> Weight {
+		(9133000 as Weight)
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	// WARNING! Some components were not used: ["t"]
+	fn on_finalize() -> Weight {
+		(5915000 as Weight)
+	}
+}
diff --git a/runtime/kusama/src/weights/pallet_utility.rs b/runtime/kusama/src/weights/pallet_utility.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c9ae0d7d2333b19bec65e4f5c1556df65b21e086
--- /dev/null
+++ b/runtime/kusama/src/weights/pallet_utility.rs
@@ -0,0 +1,35 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5
+
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
+
+pub struct WeightInfo;
+impl pallet_utility::WeightInfo for WeightInfo {
+	fn batch(c: u32, ) -> Weight {
+		(16461000 as Weight)
+			.saturating_add((1982000 as Weight).saturating_mul(c as Weight))
+	}
+	// WARNING! Some components were not used: ["u"]
+	fn as_derivative() -> Weight {
+		(4086000 as Weight)
+	}
+}
diff --git a/runtime/parachains/Cargo.toml b/runtime/parachains/Cargo.toml
index ab4ca5a783097d74c75db0127e37cfdd8c3e9278..a2aef4d65cb1cd83bcd1593523cb560319bc7c49 100644
--- a/runtime/parachains/Cargo.toml
+++ b/runtime/parachains/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2018"
 [dependencies]
 bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] }
-codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
 log = { version = "0.3.9", optional = true }
 rustc-hex = { version = "2.0.1", default-features = false }
 serde = { version = "1.0.102", default-features = false }
@@ -21,15 +21,15 @@ sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }

-authorship = { package = "pallet-authorship", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-balances = { package = "pallet-balances", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-session = { package = "pallet-session", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-staking = { package = "pallet-staking", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-system = { package = "frame-system", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-timestamp = { package = "pallet-timestamp", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-vesting = { package = "pallet-vesting", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-offences = { package = "pallet-offences", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
 primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false }
@@ -42,13 +42,15 @@ rand_chacha = { version = "0.2.2", default-features = false }
 hex-literal = "0.2.1"
 keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" }
-babe = { package = "pallet-babe", git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
-randomness-collective-flip = { package = "pallet-randomness-collective-flip", git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" }
 pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "master" }
-treasury = { package = "pallet-treasury", git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master" }
 serde_json = "1.0.41"
 libsecp256k1 = "0.3.2"
+sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+

 [features]
 default = ["std"]
@@ -67,20 +69,20 @@ std = [
 	"sp-std/std",
 	"sp-io/std",
 	"frame-support/std",
-	"authorship/std",
-	"balances/std",
+	"pallet-authorship/std",
+	"pallet-balances/std",
 	"sp-runtime/std",
 	"sp-session/std",
 	"sp-staking/std",
-	"session/std",
-	"staking/std",
-	"system/std",
-	"timestamp/std",
-	"vesting/std",
+	"pallet-session/std",
+	"pallet-staking/std",
+	"frame-system/std",
+	"pallet-timestamp/std",
+	"pallet-vesting/std",
 ]
 runtime-benchmarks = [
 	"libsecp256k1/hmac",
 	"frame-benchmarking",
 	"frame-support/runtime-benchmarks",
-	"system/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
 ]
diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs
index 01d379fbeebfcd1dfc2acd97ca85d834cbeeb886..9b51404fc06bf69108f30c74dad1f79dce08d124 100644
--- a/runtime/parachains/src/configuration.rs
+++ b/runtime/parachains/src/configuration.rs
@@ -19,16 +19,14 @@
 //! Configuration can change only at session boundaries and is buffered until then.

 use sp_std::prelude::*;
-use primitives::{
-	parachain::{ValidatorId},
-};
+use primitives::v1::ValidatorId;
 use frame_support::{
 	decl_storage, decl_module, decl_error,
 	dispatch::DispatchResult,
 	weights::{DispatchClass, Weight},
 };
 use codec::{Encode, Decode};
-use system::ensure_root;
+use frame_system::ensure_root;

 /// All configuration of the runtime with respect to parachains and parathreads.
 #[derive(Clone, Encode, Decode, PartialEq, Default)]
@@ -50,7 +48,7 @@ pub struct HostConfiguration<BlockNumber> {
 	/// The number of retries that a parathread author has to submit their block.
 	pub parathread_retries: u32,
 	/// How often parachain groups should be rotated across parachains. Must be non-zero.
-	pub parachain_rotation_frequency: BlockNumber,
+	pub group_rotation_frequency: BlockNumber,
 	/// The availability period, in blocks, for parachains. This is the amount of blocks
 	/// after inclusion that validators have to make the block available and signal its availability to
 	/// the chain. Must be at least 1.
@@ -62,7 +60,7 @@ pub struct HostConfiguration<BlockNumber> {
 	pub scheduling_lookahead: u32,
 }

-pub trait Trait: system::Trait { }
+pub trait Trait: frame_system::Trait { }

 decl_storage! {
 	trait Store for Module<T: Trait> as Configuration {
@@ -79,7 +77,7 @@ decl_error! {
 decl_module! {
 	/// The parachains configuration module.
-	pub struct Module<T: Trait> for enum Call where origin: <T as system::Trait>::Origin {
+	pub struct Module<T: Trait> for enum Call where origin: <T as frame_system::Trait>::Origin {
 		type Error = Error<T>;

 		/// Set the validation upgrade frequency.
@@ -155,10 +153,10 @@ decl_module! {
 		/// Set the parachain validator-group rotation frequency
 		#[weight = (1_000, DispatchClass::Operational)]
-		pub fn set_parachain_rotation_frequency(origin, new: T::BlockNumber) -> DispatchResult {
+		pub fn set_group_rotation_frequency(origin, new: T::BlockNumber) -> DispatchResult {
 			ensure_root(origin)?;
 			Self::update_config_member(|config| {
-				sp_std::mem::replace(&mut config.parachain_rotation_frequency, new) != new
+				sp_std::mem::replace(&mut config.group_rotation_frequency, new) != new
 			});
 			Ok(())
 		}
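The `mem::replace(&mut config.field, new) != new` idiom used by these setters updates the staged configuration and simultaneously reports whether the value actually changed, so a no-op set does not mark the pending config dirty. In isolation:

```rust
fn main() {
	let mut rotation_frequency = 10u32;

	// `replace` writes the new value and returns the old one, so comparing the
	// result against `new` is true exactly when the stored value changed.
	let changed = core::mem::replace(&mut rotation_frequency, 20) != 20;
	assert!(changed);
	assert_eq!(rotation_frequency, 20);

	// Setting the same value again reports "unchanged".
	let changed = core::mem::replace(&mut rotation_frequency, 20) != 20;
	assert!(!changed);
}
```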
@@ -266,7 +264,7 @@ mod tests {
 			max_head_data_size: 1_000,
 			parathread_cores: 2,
 			parathread_retries: 5,
-			parachain_rotation_frequency: 20,
+			group_rotation_frequency: 20,
 			chain_availability_period: 10,
 			thread_availability_period: 8,
 			scheduling_lookahead: 3,
@@ -295,8 +293,8 @@ mod tests {
 			Configuration::set_parathread_retries(
 				Origin::root(), new_config.parathread_retries,
 			).unwrap();
-			Configuration::set_parachain_rotation_frequency(
-				Origin::root(), new_config.parachain_rotation_frequency,
+			Configuration::set_group_rotation_frequency(
+				Origin::root(), new_config.group_rotation_frequency,
 			).unwrap();
 			Configuration::set_chain_availability_period(
 				Origin::root(), new_config.chain_availability_period,
diff --git a/runtime/parachains/src/inclusion.rs b/runtime/parachains/src/inclusion.rs
index c31c4a62e6e27686a6638246e75893b3755d7480..a22ca47389b5429712fff18f5a92d989ce025e34 100644
--- a/runtime/parachains/src/inclusion.rs
+++ b/runtime/parachains/src/inclusion.rs
@@ -21,24 +21,22 @@
 //! to included.

 use sp_std::prelude::*;
-use primitives::{
-	parachain::{
-		ValidatorId, AbridgedCandidateReceipt, ValidatorIndex, Id as ParaId,
-		AvailabilityBitfield as AvailabilityBitfield, SignedAvailabilityBitfields, SigningContext,
-		BackedCandidate,
-	},
+use primitives::v1::{
+	ValidatorId, CandidateCommitments, CandidateDescriptor, ValidatorIndex, Id as ParaId,
+	AvailabilityBitfield as AvailabilityBitfield, SignedAvailabilityBitfields, SigningContext,
+	BackedCandidate, CoreIndex, GroupIndex, CommittedCandidateReceipt,
+	CandidateReceipt, HeadData,
 };
 use frame_support::{
-	decl_storage, decl_module, decl_error, ensure, dispatch::DispatchResult, IterableStorageMap,
-	weights::Weight,
-	traits::Get,
+	decl_storage, decl_module, decl_error, decl_event, ensure, debug,
+	dispatch::DispatchResult, IterableStorageMap, weights::Weight, traits::Get,
 };
 use codec::{Encode, Decode};
 use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
 use sp_staking::SessionIndex;
 use sp_runtime::{DispatchError, traits::{One, Saturating}};
-use crate::{configuration, paras, scheduler::{CoreIndex, GroupIndex, CoreAssignment}};
+use crate::{configuration, paras, scheduler::CoreAssignment};
 /// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding
 /// for any backed candidates referred to by a `1` bit available.
@@ -53,13 +51,15 @@ pub struct AvailabilityBitfieldRecord<N> {
 }

 /// A backed candidate pending availability.
+// TODO: split this type and change this to hold a plain `CandidateReceipt`.
+// https://github.com/paritytech/polkadot/issues/1357
 #[derive(Encode, Decode, PartialEq)]
 #[cfg_attr(test, derive(Debug))]
 pub struct CandidatePendingAvailability<H, N> {
 	/// The availability core this is assigned to.
 	core: CoreIndex,
-	/// The candidate receipt itself.
-	receipt: AbridgedCandidateReceipt<H>,
+	/// The candidate descriptor.
+	descriptor: CandidateDescriptor<H>,
 	/// The received availability votes. One bit per validator.
 	availability_votes: BitVec<BitOrderLsb0, u8>,
 	/// The block number of the relay-parent of the receipt.
@@ -68,7 +68,28 @@ pub struct CandidatePendingAvailability<H, N> {
 	backed_in_number: N,
 }

-pub trait Trait: system::Trait + paras::Trait + configuration::Trait { }
+impl<H, N> CandidatePendingAvailability<H, N> {
+	/// Get the availability votes on the candidate.
+	pub(crate) fn availability_votes(&self) -> &BitVec<BitOrderLsb0, u8> {
+		&self.availability_votes
+	}
+
+	/// Get the relay-chain block number this was backed in.
+	pub(crate) fn backed_in_number(&self) -> &N {
+		&self.backed_in_number
+	}
+
+	/// Get the core index.
+	pub(crate) fn core_occupied(&self)-> CoreIndex {
+		self.core.clone()
+	}
+}
+
+pub trait Trait:
+	frame_system::Trait + paras::Trait + configuration::Trait
+{
+	type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
+}

 decl_storage! {
 	trait Store for Module<T: Trait> as ParaInclusion {
@@ -80,11 +101,15 @@ decl_storage! {
 		PendingAvailability: map hasher(twox_64_concat) ParaId
 			=> Option<CandidatePendingAvailability<T::Hash, T::BlockNumber>>;

+		/// The commitments of candidates pending availability, by ParaId.
+		PendingAvailabilityCommitments: map hasher(twox_64_concat) ParaId
+			=> Option<CandidateCommitments>;
+
 		/// The current validators, by their parachain session keys.
 		Validators get(fn validators) config(validators): Vec<ValidatorId>;

 		/// The current session index.
-		CurrentSessionIndex: SessionIndex;
+		CurrentSessionIndex get(fn session_index): SessionIndex;
 	}
 }

@@ -120,15 +145,32 @@ decl_error! {
 		InvalidBacking,
 		/// Collator did not sign PoV.
 		NotCollatorSigned,
+		/// The validation data hash does not match expected.
+		ValidationDataHashMismatch,
 		/// Internal error only returned when compiled with debug assertions.
 		InternalError,
 	}
 }

+decl_event! {
+	pub enum Event<T> where <T as frame_system::Trait>::Hash {
+		/// A candidate was backed. [candidate, head_data]
+		CandidateBacked(CandidateReceipt<Hash>, HeadData),
+		/// A candidate was included. [candidate, head_data]
+		CandidateIncluded(CandidateReceipt<Hash>, HeadData),
+		/// A candidate timed out. [candidate, head_data]
+		CandidateTimedOut(CandidateReceipt<Hash>, HeadData),
+	}
+}
+
 decl_module! {
 	/// The parachain-candidate inclusion module.
-	pub struct Module<T: Trait> for enum Call where origin: <T as system::Trait>::Origin {
+	pub struct Module<T: Trait>
+		for enum Call where origin: <T as frame_system::Trait>::Origin
+	{
 		type Error = Error<T>;
+
+		fn deposit_event() = default;
 	}
 }
@@ -146,6 +188,7 @@ impl<T: Trait> Module<T> {
 	) {
 		// unlike most drain methods, drained elements are not cleared on `Drop` of the iterator
 		// and require consumption.
+		for _ in <PendingAvailabilityCommitments>::drain() { }
 		for _ in <PendingAvailability<T>>::drain() { }
 		for _ in <AvailabilityBitfields<T>>::drain() { }

@@ -186,11 +229,11 @@ impl<T: Trait> Module<T> {
 		let mut last_index = None;

 		let signing_context = SigningContext {
-			parent_hash: <system::Module<T>>::parent_hash(),
+			parent_hash: <frame_system::Module<T>>::parent_hash(),
 			session_index,
 		};

-		for signed_bitfield in &signed_bitfields.0 {
+		for signed_bitfield in &signed_bitfields {
 			ensure!(
 				signed_bitfield.payload().0.len() == n_bits,
 				Error::<T>::WrongBitfieldSize,
@@ -213,25 +256,28 @@ impl<T: Trait> Module<T> {

 			let validator_public = &validators[signed_bitfield.validator_index() as usize];

-			signed_bitfield.check_signature(&signing_context, validator_public).map_err(|_| Error::<T>::InvalidBitfieldSignature)?;
+			signed_bitfield.check_signature(
+				&signing_context,
+				validator_public,
+			).map_err(|_| Error::<T>::InvalidBitfieldSignature)?;

 			last_index = Some(signed_bitfield.validator_index());
 		}
 	}

-		let now = <system::Module<T>>::block_number();
-		for signed_bitfield in signed_bitfields.0 {
+		let now = <frame_system::Module<T>>::block_number();
+		for signed_bitfield in signed_bitfields {
 			for (bit_idx, _)
 				in signed_bitfield.payload().0.iter().enumerate().filter(|(_, is_av)| **is_av)
 			{
-				let record = assigned_paras_record[bit_idx]
+				let (_, pending_availability) = assigned_paras_record[bit_idx]
 					.as_mut()
 					.expect("validator bitfields checked not to contain bits corresponding to unoccupied cores; qed");

 				// defensive check - this is constructed by loading the availability bitfield record,
 				// which is always `Some` if the core is occupied - that's why we're here.
 				let val_idx = signed_bitfield.validator_index() as usize;
-				if let Some(mut bit) = record.1.as_mut()
+				if let Some(mut bit) = pending_availability.as_mut()
 					.and_then(|r| r.availability_votes.get_mut(val_idx))
 				{
 					*bit = true;
@@ -258,9 +304,25 @@ impl<T: Trait> Module<T> {
 		{
 			if pending_availability.availability_votes.count_ones() >= threshold {
 				<PendingAvailability<T>>::remove(&para_id);
+				let commitments = match <PendingAvailabilityCommitments>::take(&para_id) {
+					Some(commitments) => commitments,
+					None => {
+						debug::warn!(r#"
+						Inclusion::process_bitfields:
+							PendingAvailability and PendingAvailabilityCommitments
+							are out of sync, did someone mess with the storage?
+						"#);
+						continue;
+					}
+				};
+
+				let receipt = CommittedCandidateReceipt {
+					descriptor: pending_availability.descriptor,
+					commitments,
+				};
 				Self::enact_candidate(
 					pending_availability.relay_parent_number,
-					pending_availability.receipt,
+					receipt,
 				);

 				freed_cores.push(pending_availability.core);
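A toy model of the bitfield bookkeeping above, stripped of storage and signature checks: each validator's bitfield carries one bit per availability core, and a set bit only counts as an availability vote when that core is actually occupied. This uses `bitvec` 0.17, as the pallet does; `count_votes` and the `occupied` slice are illustrative names, not pallet APIs:

```rust
use bitvec::prelude::*;

/// Count the availability votes one validator's bitfield casts for occupied cores.
fn count_votes(bitfield: &BitVec<Lsb0, u8>, occupied: &[bool]) -> usize {
	bitfield.iter()
		.enumerate()
		.filter(|(core, bit)| **bit && occupied.get(*core).copied().unwrap_or(false))
		.count()
}

fn main() {
	// Three availability cores; only cores 0 and 2 are occupied.
	let occupied = [true, false, true];
	let mut bitfield = bitvec![Lsb0, u8; 0; 3];
	bitfield.set(0, true);
	bitfield.set(1, true); // ignored: core 1 is unoccupied
	assert_eq!(count_votes(&bitfield, &occupied), 1);
}
```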
@@ -293,9 +355,9 @@ impl<T: Trait> Module<T> {
 		}

 		let validators = Validators::get();
-		let parent_hash = <system::Module<T>>::parent_hash();
+		let parent_hash = <frame_system::Module<T>>::parent_hash();
 		let config = <configuration::Module<T>>::config();
-		let now = <system::Module<T>>::block_number();
+		let now = <frame_system::Module<T>>::block_number();
 		let relay_parent_number = now - One::one();

 		// do all checks before writing storage.
@@ -331,40 +393,69 @@ impl<T: Trait> Module<T> {
 			// list.
 			'a: for candidate in &candidates {
-				let para_id = candidate.candidate.parachain_index;
+				let para_id = candidate.descriptor().para_id;

 				// we require that the candidate is in the context of the parent block.
 				ensure!(
-					candidate.candidate.relay_parent == parent_hash,
+					candidate.descriptor().relay_parent == parent_hash,
 					Error::<T>::CandidateNotInParentContext,
 				);

-				let code_upgrade_allowed = <paras::Module<T>>::last_code_upgrade(para_id, true)
-					.map_or(
-						true,
-						|last| last <= relay_parent_number &&
-							relay_parent_number.saturating_sub(last) >= config.validation_upgrade_frequency,
-					);
+				// if any, the code upgrade attempt is allowed.
+				let valid_upgrade_attempt =
+					candidate.candidate.commitments.new_validation_code.is_none() ||
+					<paras::Module<T>>::last_code_upgrade(para_id, true)
+						.map_or(
+							true,
+							|last| last <= relay_parent_number &&
+								relay_parent_number.saturating_sub(last)
+									>= config.validation_upgrade_frequency,
+						);

-				ensure!(code_upgrade_allowed, Error::<T>::PrematureCodeUpgrade);
 				ensure!(
-					candidate.candidate.check_signature().is_ok(),
+					valid_upgrade_attempt,
+					Error::<T>::PrematureCodeUpgrade,
+				);
+				ensure!(
+					candidate.descriptor().check_collator_signature().is_ok(),
 					Error::<T>::NotCollatorSigned,
 				);

 				for (i, assignment) in scheduled[skip..].iter().enumerate() {
 					check_assignment_in_order(assignment)?;

-					if candidate.candidate.parachain_index == assignment.para_id {
+					if para_id == assignment.para_id {
 						if let Some(required_collator) = assignment.required_collator() {
 							ensure!(
-								required_collator == &candidate.candidate.collator,
+								required_collator == &candidate.descriptor().collator,
 								Error::<T>::WrongCollator,
 							);
 						}

+						{
+							// this should never fail because the para is registered
+							let persisted_validation_data =
+								match crate::util::make_persisted_validation_data::<T>(para_id) {
+									Some(l) => l,
+									None => {
+										// We don't want to error out here because it will
+										// brick the relay-chain. So we return early without
+										// doing anything.
+										return Ok(Vec::new());
+									}
+								};
+
+							let expected = persisted_validation_data.hash();
+
+							ensure!(
+								expected == candidate.descriptor().persisted_validation_data_hash,
+								Error::<T>::ValidationDataHashMismatch,
+							);
+						}
+
 						ensure!(
-							<PendingAvailability<T>>::get(&assignment.para_id).is_none(),
+							<PendingAvailability<T>>::get(&para_id).is_none() &&
+							<PendingAvailabilityCommitments>::get(&para_id).is_none(),
 							Error::<T>::CandidateScheduledBeforeParaFree,
 						);

@@ -377,7 +468,7 @@ impl<T: Trait> Module<T> {
 						// check the signatures in the backing and that it is a majority.
 						{
 							let maybe_amount_validated
-								= primitives::parachain::check_candidate_backing(
+								= primitives::v1::check_candidate_backing(
 									&candidate,
 									&signing_context,
 									group_vals.len(),
@@ -419,18 +510,30 @@ impl<T: Trait> Module<T> {

 		// one more sweep for actually writing to storage.
 		for (candidate, core) in candidates.into_iter().zip(core_indices.iter().cloned()) {
-			let para_id = candidate.candidate.parachain_index;
+			let para_id = candidate.descriptor().para_id;

 			// initialize all availability votes to 0.
 			let availability_votes: BitVec<BitOrderLsb0, u8>
 				= bitvec::bitvec![BitOrderLsb0, u8; 0; validators.len()];
+
+			Self::deposit_event(Event::<T>::CandidateBacked(
+				candidate.candidate.to_plain(),
+				candidate.candidate.commitments.head_data.clone(),
+			));
+
+			let (descriptor, commitments) = (
+				candidate.candidate.descriptor,
+				candidate.candidate.commitments,
+			);
+
 			<PendingAvailability<T>>::insert(&para_id, CandidatePendingAvailability {
 				core,
-				receipt: candidate.candidate,
+				descriptor,
 				availability_votes,
 				relay_parent_number,
 				backed_in_number: now,
 			});
+			<PendingAvailabilityCommitments>::insert(&para_id, commitments);
 		}

 		Ok(core_indices)
@@ -438,8 +541,9 @@ impl<T: Trait> Module<T> {

 	fn enact_candidate(
 		relay_parent_number: T::BlockNumber,
-		receipt: AbridgedCandidateReceipt<T::Hash>,
+		receipt: CommittedCandidateReceipt<T::Hash>,
 	) -> Weight {
+		let plain = receipt.to_plain();
 		let commitments = receipt.commitments;
 		let config = <configuration::Module<T>>::config();

@@ -447,15 +551,19 @@ impl<T: Trait> Module<T> {
 		let mut weight = T::DbWeight::get().reads_writes(1, 0);
 		if let Some(new_code) = commitments.new_validation_code {
 			weight += <paras::Module<T>>::schedule_code_upgrade(
-				receipt.parachain_index,
+				receipt.descriptor.para_id,
 				new_code,
 				relay_parent_number + config.validation_upgrade_delay,
 			);
 		}

+		Self::deposit_event(
+			Event::<T>::CandidateIncluded(plain, commitments.head_data.clone())
+		);
+
 		weight + <paras::Module<T>>::note_new_head(
-			receipt.parachain_index,
-			receipt.head_data,
+			receipt.descriptor.para_id,
+			commitments.head_data,
 			relay_parent_number,
 		)
 	}
@@ -478,11 +586,66 @@ impl<T: Trait> Module<T> {
 		}

 		for para_id in cleaned_up_ids {
-			<PendingAvailability<T>>::remove(&para_id);
+			let pending = <PendingAvailability<T>>::take(&para_id);
+			let commitments = <PendingAvailabilityCommitments>::take(&para_id);
+
+			if let (Some(pending), Some(commitments)) = (pending, commitments) {
+				// defensive: this should always be true.
+				let candidate = CandidateReceipt {
+					descriptor: pending.descriptor,
+					commitments_hash: commitments.hash(),
+				};
+
+				Self::deposit_event(Event::<T>::CandidateTimedOut(
+					candidate,
+					commitments.head_data,
+				));
+			}
 		}

 		cleaned_up_cores
 	}
+
+	/// Forcibly enact the candidate with the given ID as though it had been deemed available
+	/// by bitfields.
+	///
+	/// Is a no-op if there is no candidate pending availability for this para-id.
+	/// This should generally not be used but it is useful during execution of Runtime APIs,
+	/// where the changes to the state are expected to be discarded directly after.
+	pub(crate) fn force_enact(para: ParaId) {
+		let pending = <PendingAvailability<T>>::take(&para);
+		let commitments = <PendingAvailabilityCommitments>::take(&para);
+
+		if let (Some(pending), Some(commitments)) = (pending, commitments) {
+			let candidate = CommittedCandidateReceipt {
+				descriptor: pending.descriptor,
+				commitments,
+			};
+
+			Self::enact_candidate(
+				pending.relay_parent_number,
+				candidate,
+			);
+		}
+	}
+
+	/// Returns the CommittedCandidateReceipt pending availability for the para provided, if any.
+	pub(crate) fn candidate_pending_availability(para: ParaId)
+		-> Option<CommittedCandidateReceipt<T::Hash>>
+	{
+		<PendingAvailability<T>>::get(&para)
+			.map(|p| p.descriptor)
+			.and_then(|d| <PendingAvailabilityCommitments>::get(&para).map(move |c| (d, c)))
+			.map(|(d, c)| CommittedCandidateReceipt { descriptor: d, commitments: c })
+	}
+
+	/// Returns the metadata around the candidate pending availability for the
+	/// para provided, if any.
+	pub(crate) fn pending_availability(para: ParaId)
+		-> Option<CandidatePendingAvailability<T::Hash, T::BlockNumber>>
+	{
+		<PendingAvailability<T>>::get(&para)
+	}
 }

 const fn availability_threshold(n_validators: usize) -> usize {
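The diff elides the body of `availability_threshold`, but the tests below pin down its behaviour: with 5 validators, 4 availability votes free the core and 3 do not. A sketch consistent with that, assuming a plain more-than-two-thirds supermajority (the real body may differ in rounding details):

```rust
// Smallest vote count strictly above two thirds of the validator set.
const fn availability_threshold(n_validators: usize) -> usize {
	n_validators * 2 / 3 + 1
}

fn main() {
	// Matches the test expectations: 4 of 5 suffices, 3 of 5 leaves the core pending.
	assert_eq!(availability_threshold(5), 4);
	assert!(3 < availability_threshold(5));
}
```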
@@ -495,10 +658,10 @@ mod tests {
 	use super::*;

-	use primitives::{BlockNumber, Hash};
-	use primitives::parachain::{
+	use primitives::v1::{BlockNumber, Hash};
+	use primitives::v1::{
 		SignedAvailabilityBitfield, CompactStatement as Statement, ValidityAttestation, CollatorId,
-		CandidateCommitments, SignedStatement,
+		CandidateCommitments, SignedStatement, CandidateDescriptor, ValidationCode,
 	};
 	use frame_support::traits::{OnFinalize, OnInitialize};
 	use keyring::Sr25519Keyring;
@@ -546,22 +709,23 @@ mod tests {

 	fn collator_sign_candidate(
 		collator: Sr25519Keyring,
-		candidate: &mut AbridgedCandidateReceipt,
+		candidate: &mut CommittedCandidateReceipt,
 	) {
-		candidate.collator = collator.public().into();
+		candidate.descriptor.collator = collator.public().into();

-		let payload = primitives::parachain::collator_signature_payload(
-			&candidate.relay_parent,
-			&candidate.parachain_index,
-			&candidate.pov_block_hash,
+		let payload = primitives::v1::collator_signature_payload(
+			&candidate.descriptor.relay_parent,
+			&candidate.descriptor.para_id,
+			&candidate.descriptor.persisted_validation_data_hash,
+			&candidate.descriptor.pov_hash,
 		);

-		candidate.signature = collator.sign(&payload[..]).into();
-		assert!(candidate.check_signature().is_ok());
+		candidate.descriptor.signature = collator.sign(&payload[..]).into();
+		assert!(candidate.descriptor().check_collator_signature().is_ok());
 	}

 	fn back_candidate(
-		candidate: AbridgedCandidateReceipt,
+		candidate: CommittedCandidateReceipt,
 		validators: &[Sr25519Keyring],
 		group: &[ValidatorIndex],
 		signing_context: &SigningContext,
@@ -604,7 +768,7 @@ mod tests {
 			BackingKind::Lacking => false,
 		};

-		let successfully_backed = primitives::parachain::check_candidate_backing(
+		let successfully_backed = primitives::v1::check_candidate_backing(
 			&backed,
 			signing_context,
 			group.len(),
@@ -675,6 +839,41 @@ mod tests {
 		)
 	}

+	#[derive(Default)]
+	struct TestCandidateBuilder {
+		para_id: ParaId,
+		head_data: HeadData,
+		pov_hash: Hash,
+		relay_parent: Hash,
+		persisted_validation_data_hash: Hash,
+		new_validation_code: Option<ValidationCode>,
+	}
+
+	impl TestCandidateBuilder {
+		fn build(self) -> CommittedCandidateReceipt {
+			CommittedCandidateReceipt {
+				descriptor: CandidateDescriptor {
+					para_id: self.para_id,
+					pov_hash: self.pov_hash,
+					relay_parent: self.relay_parent,
+					persisted_validation_data_hash: self.persisted_validation_data_hash,
+					..Default::default()
+				},
+				commitments: CandidateCommitments {
+					head_data: self.head_data,
+					new_validation_code: self.new_validation_code,
+					..Default::default()
+				},
+			}
+		}
+	}
+
+	fn make_vdata_hash(para_id: ParaId) -> Option<Hash> {
+		let persisted_validation_data
+			= crate::util::make_persisted_validation_data::<Test>(para_id)?;
+		Some(persisted_validation_data.hash())
+	}
+
 	#[test]
 	fn collect_pending_cleans_up_pending() {
 		let chain_a = ParaId::from(1);
@@ -683,31 +882,38 @@ mod tests {
 		let paras = vec![(chain_a, true), (chain_b, true), (thread_a, false)];
 		new_test_ext(genesis_config(paras)).execute_with(|| {
+			let default_candidate = TestCandidateBuilder::default().build();
 			<PendingAvailability<Test>>::insert(chain_a, CandidatePendingAvailability {
 				core: CoreIndex::from(0),
-				receipt: Default::default(),
+				descriptor: default_candidate.descriptor.clone(),
 				availability_votes: default_availability_votes(),
 				relay_parent_number: 0,
 				backed_in_number: 0,
 			});
+
+			PendingAvailabilityCommitments::insert(chain_a, default_candidate.commitments.clone());

-			<PendingAvailability<Test>>::insert(chain_b, CandidatePendingAvailability {
+			<PendingAvailability<Test>>::insert(&chain_b, CandidatePendingAvailability {
 				core: CoreIndex::from(1),
-				receipt: Default::default(),
+				descriptor: default_candidate.descriptor,
 				availability_votes: default_availability_votes(),
 				relay_parent_number: 0,
 				backed_in_number: 0,
 			});
+			PendingAvailabilityCommitments::insert(chain_b, default_candidate.commitments);

 			run_to_block(5, |_| None);

 			assert!(<PendingAvailability<Test>>::get(&chain_a).is_some());
 			assert!(<PendingAvailability<Test>>::get(&chain_b).is_some());
+			assert!(<PendingAvailabilityCommitments>::get(&chain_a).is_some());
+			assert!(<PendingAvailabilityCommitments>::get(&chain_b).is_some());

 			Inclusion::collect_pending(|core, _since| core == CoreIndex::from(0));

 			assert!(<PendingAvailability<Test>>::get(&chain_a).is_none());
 			assert!(<PendingAvailability<Test>>::get(&chain_b).is_some());
+			assert!(<PendingAvailabilityCommitments>::get(&chain_a).is_none());
+			assert!(<PendingAvailabilityCommitments>::get(&chain_b).is_some());
 		});
 	}

@@ -755,7 +961,7 @@ mod tests {
 			);

 			assert!(Inclusion::process_bitfields(
-				SignedAvailabilityBitfields(vec![signed]),
+				vec![signed],
 				&core_lookup,
 			).is_err());
 		}
@@ -771,7 +977,7 @@ mod tests {
 			);

 			assert!(Inclusion::process_bitfields(
-				SignedAvailabilityBitfields(vec![signed.clone(), signed]),
+				vec![signed.clone(), signed],
 				&core_lookup,
 			).is_err());
 		}
@@ -794,7 +1000,7 @@ mod tests {
 			);

 			assert!(Inclusion::process_bitfields(
-				SignedAvailabilityBitfields(vec![signed_1, signed_0]),
+				vec![signed_1, signed_0],
 				&core_lookup,
 			).is_err());
 		}
@@ -811,7 +1017,7 @@ mod tests {
 			);

 			assert!(Inclusion::process_bitfields(
-				SignedAvailabilityBitfields(vec![signed]),
+				vec![signed],
 				&core_lookup,
 			).is_err());
 		}
@@ -827,7 +1033,7 @@ mod tests {
 			);

 			assert!(Inclusion::process_bitfields(
-				SignedAvailabilityBitfields(vec![signed]),
+				vec![signed],
 				&core_lookup,
 			).is_ok());
 		}
@@ -838,13 +1044,15 @@ mod tests {

 			assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a));

+			let default_candidate = TestCandidateBuilder::default().build();
 			<PendingAvailability<Test>>::insert(chain_a, CandidatePendingAvailability {
 				core: CoreIndex::from(0),
-				receipt: Default::default(),
+				descriptor: default_candidate.descriptor,
 				availability_votes: default_availability_votes(),
 				relay_parent_number: 0,
 				backed_in_number: 0,
 			});
+			PendingAvailabilityCommitments::insert(chain_a, default_candidate.commitments);

 			*bare_bitfield.0.get_mut(0).unwrap() = true;
 			let signed = sign_bitfield(
@@ -855,9 +1063,45 @@ mod tests {
 			);

 			assert!(Inclusion::process_bitfields(
-				SignedAvailabilityBitfields(vec![signed]),
+				vec![signed],
 				&core_lookup,
 			).is_ok());
+
+			<PendingAvailability<Test>>::remove(chain_a);
+			PendingAvailabilityCommitments::remove(chain_a);
+		}
+
+		// bitfield signed with pending bit signed, but no commitments.
+		{
+			let mut bare_bitfield = default_bitfield();
+
+			assert_eq!(core_lookup(CoreIndex::from(0)), Some(chain_a));
+
+			let default_candidate = TestCandidateBuilder::default().build();
+			<PendingAvailability<Test>>::insert(chain_a, CandidatePendingAvailability {
+				core: CoreIndex::from(0),
+				descriptor: default_candidate.descriptor,
+				availability_votes: default_availability_votes(),
+				relay_parent_number: 0,
+				backed_in_number: 0,
+			});
+
+			*bare_bitfield.0.get_mut(0).unwrap() = true;
+			let signed = sign_bitfield(
+				&validators[0],
+				0,
+				bare_bitfield,
+				&signing_context,
+			);
+
+			// no core is freed
+			assert_eq!(
+				Inclusion::process_bitfields(
+					vec![signed],
+					&core_lookup,
+				),
+				Ok(vec![]),
+			);
 		}
 	});
 }
@@ -894,29 +1138,35 @@ mod tests {
 				_ => panic!("Core out of bounds for 2 parachains and 1 parathread core."),
 			};

+			let candidate_a = TestCandidateBuilder {
+				para_id: chain_a,
+				head_data: vec![1, 2, 3, 4].into(),
+				..Default::default()
+			}.build();
+
 			<PendingAvailability<Test>>::insert(chain_a, CandidatePendingAvailability {
 				core: CoreIndex::from(0),
-				receipt: AbridgedCandidateReceipt {
-					parachain_index: chain_a,
-					head_data: vec![1, 2, 3, 4].into(),
-					..Default::default()
-				},
+				descriptor: candidate_a.descriptor,
 				availability_votes: default_availability_votes(),
 				relay_parent_number: 0,
 				backed_in_number: 0,
 			});
+			PendingAvailabilityCommitments::insert(chain_a, candidate_a.commitments);
+
+			let candidate_b = TestCandidateBuilder {
+				para_id: chain_b,
+				head_data: vec![5, 6, 7, 8].into(),
+				..Default::default()
+			}.build();

 			<PendingAvailability<Test>>::insert(chain_b, CandidatePendingAvailability {
 				core: CoreIndex::from(1),
-				receipt: AbridgedCandidateReceipt {
-					parachain_index: chain_b,
-					head_data: vec![5, 6, 7, 8].into(),
-					..Default::default()
-				},
+				descriptor: candidate_b.descriptor,
 				availability_votes: default_availability_votes(),
 				relay_parent_number: 0,
 				backed_in_number: 0,
 			});
+			PendingAvailabilityCommitments::insert(chain_b, candidate_b.commitments);

 			// this bitfield signals that a and b are available.
 			let a_and_b_available = {
@@ -959,13 +1209,15 @@ mod tests {
 			}).collect();

 			assert!(Inclusion::process_bitfields(
-				SignedAvailabilityBitfields(signed_bitfields),
+				signed_bitfields,
 				&core_lookup,
 			).is_ok());

 			// chain A had 4 signing off, which is >= threshold.
 			// chain B has 3 signing off, which is < threshold.
 			assert!(<PendingAvailability<Test>>::get(&chain_a).is_none());
+			assert!(<PendingAvailabilityCommitments>::get(&chain_a).is_none());
+			assert!(<PendingAvailabilityCommitments>::get(&chain_b).is_some());
 			assert_eq!(
 				<PendingAvailability<Test>>::get(&chain_b).unwrap().availability_votes,
 				{
@@ -1023,7 +1275,7 @@ mod tests {

 			let chain_a_assignment = CoreAssignment {
 				core: CoreIndex::from(0),
-				para_id: chain_b,
+				para_id: chain_a,
 				kind: AssignmentKind::Parachain,
 				group_idx: GroupIndex::from(0),
 			};
@@ -1037,19 +1289,20 @@ mod tests {

 			let thread_a_assignment = CoreAssignment {
 				core: CoreIndex::from(2),
-				para_id: chain_b,
+				para_id: thread_a,
 				kind: AssignmentKind::Parathread(thread_collator.clone(), 0),
 				group_idx: GroupIndex::from(2),
 			};

 			// unscheduled candidate.
{ - let mut candidate = AbridgedCandidateReceipt { - parachain_index: chain_a, + let mut candidate = TestCandidateBuilder { + para_id: chain_a, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([1; 32]), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), ..Default::default() - }; + }.build(); collator_sign_candidate( Sr25519Keyring::One, &mut candidate, @@ -1063,27 +1316,32 @@ mod tests { BackingKind::Threshold, ); - assert!(Inclusion::process_candidates( - vec![backed], - vec![chain_b_assignment.clone()], - &group_validators, - ).is_err()); + assert_eq!( + Inclusion::process_candidates( + vec![backed], + vec![chain_b_assignment.clone()], + &group_validators, + ), + Err(Error::::UnscheduledCandidate.into()), + ); } // candidates out of order. { - let mut candidate_a = AbridgedCandidateReceipt { - parachain_index: chain_a, + let mut candidate_a = TestCandidateBuilder { + para_id: chain_a, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([1; 32]), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), ..Default::default() - }; - let mut candidate_b = AbridgedCandidateReceipt { - parachain_index: chain_b, + }.build(); + let mut candidate_b = TestCandidateBuilder { + para_id: chain_b, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([2; 32]), + pov_hash: Hash::from([2; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), ..Default::default() - }; + }.build(); collator_sign_candidate( Sr25519Keyring::One, @@ -1111,21 +1369,26 @@ mod tests { BackingKind::Threshold, ); - assert!(Inclusion::process_candidates( - vec![backed_b, backed_a], - vec![chain_a_assignment.clone(), chain_b_assignment.clone()], - &group_validators, - ).is_err()); + // out-of-order manifests as unscheduled. + assert_eq!( + Inclusion::process_candidates( + vec![backed_b, backed_a], + vec![chain_a_assignment.clone(), chain_b_assignment.clone()], + &group_validators, + ), + Err(Error::::UnscheduledCandidate.into()), + ); } // candidate not backed. { - let mut candidate = AbridgedCandidateReceipt { - parachain_index: chain_a, + let mut candidate = TestCandidateBuilder { + para_id: chain_a, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([1; 32]), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), ..Default::default() - }; + }.build(); collator_sign_candidate( Sr25519Keyring::One, &mut candidate, @@ -1139,11 +1402,14 @@ mod tests { BackingKind::Lacking, ); - assert!(Inclusion::process_candidates( - vec![backed], - vec![chain_a_assignment.clone()], - &group_validators, - ).is_err()); + assert_eq!( + Inclusion::process_candidates( + vec![backed], + vec![chain_a_assignment.clone()], + &group_validators, + ), + Err(Error::::InsufficientBacking.into()), + ); } // candidate not in parent context. 
@@ -1151,12 +1417,13 @@ mod tests { let wrong_parent_hash = Hash::from([222; 32]); assert!(System::parent_hash() != wrong_parent_hash); - let mut candidate = AbridgedCandidateReceipt { - parachain_index: chain_a, + let mut candidate = TestCandidateBuilder { + para_id: chain_a, relay_parent: wrong_parent_hash, - pov_block_hash: Hash::from([1; 32]), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), ..Default::default() - }; + }.build(); collator_sign_candidate( Sr25519Keyring::One, &mut candidate, @@ -1170,21 +1437,25 @@ mod tests { BackingKind::Threshold, ); - assert!(Inclusion::process_candidates( - vec![backed], - vec![chain_a_assignment.clone()], - &group_validators, - ).is_err()); + assert_eq!( + Inclusion::process_candidates( + vec![backed], + vec![chain_a_assignment.clone()], + &group_validators, + ), + Err(Error::::CandidateNotInParentContext.into()), + ); } // candidate has wrong collator. { - let mut candidate = AbridgedCandidateReceipt { - parachain_index: thread_a, + let mut candidate = TestCandidateBuilder { + para_id: thread_a, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([1; 32]), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(thread_a).unwrap(), ..Default::default() - }; + }.build(); assert!(CollatorId::from(Sr25519Keyring::One.public()) != thread_collator); collator_sign_candidate( @@ -1200,25 +1471,29 @@ mod tests { BackingKind::Threshold, ); - assert!(Inclusion::process_candidates( - vec![backed], - vec![ - chain_a_assignment.clone(), - chain_b_assignment.clone(), - thread_a_assignment.clone(), - ], - &group_validators, - ).is_err()); + assert_eq!( + Inclusion::process_candidates( + vec![backed], + vec![ + chain_a_assignment.clone(), + chain_b_assignment.clone(), + thread_a_assignment.clone(), + ], + &group_validators, + ), + Err(Error::::WrongCollator.into()), + ); } // candidate not well-signed by collator. { - let mut candidate = AbridgedCandidateReceipt { - parachain_index: thread_a, + let mut candidate = TestCandidateBuilder { + para_id: thread_a, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([1; 32]), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(thread_a).unwrap(), ..Default::default() - }; + }.build(); assert_eq!(CollatorId::from(Sr25519Keyring::Two.public()), thread_collator); collator_sign_candidate( @@ -1226,7 +1501,8 @@ mod tests { &mut candidate, ); - candidate.pov_block_hash = Hash::from([2; 32]); + // change the candidate after signing. + candidate.descriptor.pov_hash = Hash::from([2; 32]); let backed = back_candidate( candidate, @@ -1236,21 +1512,25 @@ mod tests { BackingKind::Threshold, ); - assert!(Inclusion::process_candidates( - vec![backed], - vec![thread_a_assignment.clone()], - &group_validators, - ).is_err()); + assert_eq!( + Inclusion::process_candidates( + vec![backed], + vec![thread_a_assignment.clone()], + &group_validators, + ), + Err(Error::::NotCollatorSigned.into()), + ); } // para occupied - reject. 
{ - let mut candidate = AbridgedCandidateReceipt { - parachain_index: chain_a, + let mut candidate = TestCandidateBuilder { + para_id: chain_a, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([1; 32]), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), ..Default::default() - }; + }.build(); collator_sign_candidate( Sr25519Keyring::One, @@ -1265,35 +1545,77 @@ mod tests { BackingKind::Threshold, ); + let candidate = TestCandidateBuilder::default().build(); >::insert(&chain_a, CandidatePendingAvailability { core: CoreIndex::from(0), - receipt: Default::default(), + descriptor: candidate.descriptor, availability_votes: default_availability_votes(), relay_parent_number: 3, backed_in_number: 4, }); - - assert!(Inclusion::process_candidates( - vec![backed], - vec![chain_a_assignment.clone()], - &group_validators, - ).is_err()); + ::insert(&chain_a, candidate.commitments); + + assert_eq!( + Inclusion::process_candidates( + vec![backed], + vec![chain_a_assignment.clone()], + &group_validators, + ), + Err(Error::::CandidateScheduledBeforeParaFree.into()), + ); >::remove(&chain_a); + ::remove(&chain_a); + } + + // messed up commitments storage - do not panic - reject. + { + let mut candidate = TestCandidateBuilder { + para_id: chain_a, + relay_parent: System::parent_hash(), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + ..Default::default() + }.build(); + + collator_sign_candidate( + Sr25519Keyring::One, + &mut candidate, + ); + + // this is not supposed to happen + ::insert(&chain_a, candidate.commitments.clone()); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + &signing_context, + BackingKind::Threshold, + ); + + assert_eq!( + Inclusion::process_candidates( + vec![backed], + vec![chain_a_assignment.clone()], + &group_validators, + ), + Err(Error::::CandidateScheduledBeforeParaFree.into()), + ); + + ::remove(&chain_a); } // interfering code upgrade - reject { - let mut candidate = AbridgedCandidateReceipt { - parachain_index: chain_a, + let mut candidate = TestCandidateBuilder { + para_id: chain_a, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([1; 32]), - commitments: CandidateCommitments { - new_validation_code: Some(vec![5, 6, 7, 8].into()), - ..Default::default() - }, + pov_hash: Hash::from([1; 32]), + new_validation_code: Some(vec![5, 6, 7, 8].into()), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), ..Default::default() - }; + }.build(); collator_sign_candidate( Sr25519Keyring::One, @@ -1316,11 +1638,47 @@ mod tests { assert_eq!(Paras::last_code_upgrade(chain_a, true), Some(10)); - assert!(Inclusion::process_candidates( - vec![backed], - vec![thread_a_assignment.clone()], - &group_validators, - ).is_err()); + assert_eq!( + Inclusion::process_candidates( + vec![backed], + vec![chain_a_assignment.clone()], + &group_validators, + ), + Err(Error::::PrematureCodeUpgrade.into()), + ); + } + + // Bad validation data hash - reject + { + let mut candidate = TestCandidateBuilder { + para_id: chain_a, + relay_parent: System::parent_hash(), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: [42u8; 32].into(), + ..Default::default() + }.build(); + + collator_sign_candidate( + Sr25519Keyring::One, + &mut candidate, + ); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + 
&signing_context, + BackingKind::Threshold, + ); + + assert_eq!( + Inclusion::process_candidates( + vec![backed], + vec![chain_a_assignment.clone()], + &group_validators, + ), + Err(Error::::ValidationDataHashMismatch.into()), + ); } }); } @@ -1382,34 +1740,37 @@ mod tests { group_idx: GroupIndex::from(2), }; - let mut candidate_a = AbridgedCandidateReceipt { - parachain_index: chain_a, + let mut candidate_a = TestCandidateBuilder { + para_id: chain_a, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([1; 32]), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), ..Default::default() - }; + }.build(); collator_sign_candidate( Sr25519Keyring::One, &mut candidate_a, ); - let mut candidate_b = AbridgedCandidateReceipt { - parachain_index: chain_b, + let mut candidate_b = TestCandidateBuilder { + para_id: chain_b, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([2; 32]), + pov_hash: Hash::from([2; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), ..Default::default() - }; + }.build(); collator_sign_candidate( Sr25519Keyring::One, &mut candidate_b, ); - let mut candidate_c = AbridgedCandidateReceipt { - parachain_index: thread_a, + let mut candidate_c = TestCandidateBuilder { + para_id: thread_a, relay_parent: System::parent_hash(), - pov_block_hash: Hash::from([3; 32]), + pov_hash: Hash::from([3; 32]), + persisted_validation_data_hash: make_vdata_hash(thread_a).unwrap(), ..Default::default() - }; + }.build(); collator_sign_candidate( Sr25519Keyring::Two, &mut candidate_c, @@ -1455,34 +1816,131 @@ mod tests { >::get(&chain_a), Some(CandidatePendingAvailability { core: CoreIndex::from(0), - receipt: candidate_a, + descriptor: candidate_a.descriptor, availability_votes: default_availability_votes(), relay_parent_number: System::block_number() - 1, backed_in_number: System::block_number(), }) ); + assert_eq!( + ::get(&chain_a), + Some(candidate_a.commitments), + ); assert_eq!( >::get(&chain_b), Some(CandidatePendingAvailability { core: CoreIndex::from(1), - receipt: candidate_b, + descriptor: candidate_b.descriptor, availability_votes: default_availability_votes(), relay_parent_number: System::block_number() - 1, backed_in_number: System::block_number(), }) ); + assert_eq!( + ::get(&chain_b), + Some(candidate_b.commitments), + ); assert_eq!( >::get(&thread_a), Some(CandidatePendingAvailability { core: CoreIndex::from(2), - receipt: candidate_c, + descriptor: candidate_c.descriptor, availability_votes: default_availability_votes(), relay_parent_number: System::block_number() - 1, backed_in_number: System::block_number(), }) ); + assert_eq!( + ::get(&thread_a), + Some(candidate_c.commitments), + ); + }); + } + + #[test] + fn can_include_candidate_with_ok_code_upgrade() { + let chain_a = ParaId::from(1); + + let paras = vec![(chain_a, true)]; + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + let validator_public = validator_pubkeys(&validators); + + new_test_ext(genesis_config(paras)).execute_with(|| { + Validators::set(validator_public.clone()); + CurrentSessionIndex::set(5); + + run_to_block(5, |_| None); + + let signing_context = SigningContext { + parent_hash: System::parent_hash(), + session_index: 5, + }; + + let group_validators = |group_index: GroupIndex| match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1, 2, 3, 4]), + _ => panic!("Group index 
out of bounds for 1 parachain"), + }; + + let chain_a_assignment = CoreAssignment { + core: CoreIndex::from(0), + para_id: chain_a, + kind: AssignmentKind::Parachain, + group_idx: GroupIndex::from(0), + }; + + let mut candidate_a = TestCandidateBuilder { + para_id: chain_a, + relay_parent: System::parent_hash(), + pov_hash: Hash::from([1; 32]), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + new_validation_code: Some(vec![1, 2, 3].into()), + ..Default::default() + }.build(); + collator_sign_candidate( + Sr25519Keyring::One, + &mut candidate_a, + ); + + let backed_a = back_candidate( + candidate_a.clone(), + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + &signing_context, + BackingKind::Threshold, + ); + + let occupied_cores = Inclusion::process_candidates( + vec![backed_a], + vec![ + chain_a_assignment.clone(), + ], + &group_validators, + ).expect("candidates scheduled, in order, and backed"); + + assert_eq!(occupied_cores, vec![CoreIndex::from(0)]); + + assert_eq!( + >::get(&chain_a), + Some(CandidatePendingAvailability { + core: CoreIndex::from(0), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + }) + ); + assert_eq!( + ::get(&chain_a), + Some(candidate_a.commitments), + ); }); } @@ -1540,21 +1998,24 @@ mod tests { }, ); + let candidate = TestCandidateBuilder::default().build(); >::insert(&chain_a, CandidatePendingAvailability { core: CoreIndex::from(0), - receipt: Default::default(), + descriptor: candidate.descriptor.clone(), availability_votes: default_availability_votes(), relay_parent_number: 5, backed_in_number: 6, }); + ::insert(&chain_a, candidate.commitments.clone()); >::insert(&chain_b, CandidatePendingAvailability { core: CoreIndex::from(1), - receipt: Default::default(), + descriptor: candidate.descriptor, availability_votes: default_availability_votes(), relay_parent_number: 6, backed_in_number: 7, }); + ::insert(&chain_b, candidate.commitments); run_to_block(11, |_| None); @@ -1567,6 +2028,8 @@ mod tests { assert!(>::get(&chain_a).is_some()); assert!(>::get(&chain_b).is_some()); + assert!(::get(&chain_a).is_some()); + assert!(::get(&chain_b).is_some()); run_to_block(12, |n| match n { 12 => Some(SessionChangeNotification { @@ -1589,10 +2052,12 @@ mod tests { assert!(>::get(&chain_a).is_none()); assert!(>::get(&chain_b).is_none()); + assert!(::get(&chain_a).is_none()); + assert!(::get(&chain_b).is_none()); assert!(>::iter().collect::>().is_empty()); assert!(>::iter().collect::>().is_empty()); - + assert!(::iter().collect::>().is_empty()); }); } } diff --git a/runtime/parachains/src/inclusion_inherent.rs b/runtime/parachains/src/inclusion_inherent.rs index 46fe1fee469e185b871f3ba652dab0e33ee2e0b8..f9a7465d91282d60274c53d121af203df8aaaa2e 100644 --- a/runtime/parachains/src/inclusion_inherent.rs +++ b/runtime/parachains/src/inclusion_inherent.rs @@ -22,19 +22,23 @@ //! this module. 
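//!
//! Illustratively (an assumption for exposition, not part of this module),
//! a block author supplies the inherent under `INCLUSION_INHERENT_IDENTIFIER`
//! as an encoded pair of signed bitfields and backed candidates:
//!
//! ```ignore
//! let mut inherent_data = InherentData::new();
//! inherent_data.put_data(
//!     INCLUSION_INHERENT_IDENTIFIER,
//!     &(signed_bitfields, backed_candidates),
//! )?;
//! ```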
use sp_std::prelude::*; -use primitives::{ - parachain::{BackedCandidate, SignedAvailabilityBitfields}, +use primitives::v1::{ + BackedCandidate, SignedAvailabilityBitfields, INCLUSION_INHERENT_IDENTIFIER, }; use frame_support::{ - decl_storage, decl_module, decl_error, ensure, + decl_error, decl_module, decl_storage, ensure, dispatch::DispatchResult, weights::{DispatchClass, Weight}, traits::Get, }; -use system::ensure_none; -use crate::{inclusion, scheduler::{self, FreedReason}}; +use frame_system::ensure_none; +use crate::{ + inclusion, + scheduler::{self, FreedReason}, +}; +use inherents::{InherentIdentifier, InherentData, MakeFatalError, ProvideInherent}; -pub trait Trait: inclusion::Trait + scheduler::Trait { } +pub trait Trait: inclusion::Trait + scheduler::Trait {} decl_storage! { trait Store for Module as ParaInclusionInherent { @@ -57,7 +61,7 @@ decl_error! { decl_module! { /// The inclusion inherent module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; fn on_initialize() -> Weight { @@ -118,3 +122,23 @@ decl_module! { } } } + +impl ProvideInherent for Module { + type Call = Call; + type Error = MakeFatalError<()>; + const INHERENT_IDENTIFIER: InherentIdentifier = INCLUSION_INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + data.get_data(&Self::INHERENT_IDENTIFIER) + .expect("inclusion inherent data failed to decode") + .map(|(signed_bitfields, backed_candidates): (SignedAvailabilityBitfields, Vec>)| { + // Sanity check: session changes can invalidate an inherent, and we _really_ don't want that to happen. + // See github.com/paritytech/polkadot/issues/1327 + if Self::inclusion(frame_system::RawOrigin::None.into(), signed_bitfields.clone(), backed_candidates.clone()).is_ok() { + Call::inclusion(signed_bitfields, backed_candidates) + } else { + Call::inclusion(Vec::new().into(), Vec::new()) + } + }) + } +} diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index 5668a216f061f6d1113be535e064a751e02bddb9..11481e7ffdf02696b549d85ee209b14a7f5f216e 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -21,12 +21,12 @@ use sp_std::prelude::*; use frame_support::weights::Weight; -use primitives::{ - parachain::{ValidatorId}, -}; +use primitives::v1::ValidatorId; use frame_support::{ decl_storage, decl_module, decl_error, traits::Randomness, }; +use sp_runtime::traits::One; +use codec::{Encode, Decode}; use crate::{configuration::{self, HostConfiguration}, paras, scheduler, inclusion}; /// Information about a session change that has just occurred. @@ -46,8 +46,16 @@ pub struct SessionChangeNotification { pub session_index: sp_staking::SessionIndex, } +#[derive(Encode, Decode)] +struct BufferedSessionChange { + apply_at: N, + validators: Vec, + queued: Vec, + session_index: sp_staking::SessionIndex, +} + pub trait Trait: - system::Trait + configuration::Trait + paras::Trait + scheduler::Trait + inclusion::Trait + frame_system::Trait + configuration::Trait + paras::Trait + scheduler::Trait + inclusion::Trait { /// A randomness beacon. type Randomness: Randomness; @@ -64,6 +72,14 @@ decl_storage! { /// them writes to the trie and one does not. This confusion makes `Option<()>` more suitable for /// the semantics of this variable. HasInitialized: Option<()>; + /// Buffered session changes along with the block number at which they should be applied. 
+ /// + /// Typically this will be empty or one element long, with the single element having a block + /// number of the next block. + /// + /// However this is a `Vec` regardless to handle various edge cases that may occur at runtime + /// upgrade boundaries or if governance intervenes. + BufferedSessionChanges: Vec<BufferedSessionChange<T::BlockNumber>>; } } @@ -73,10 +89,25 @@ decl_error! { decl_module! { /// The initializer module. - pub struct Module<T: Trait> for enum Call where origin: <T as system::Trait>::Origin { + pub struct Module<T: Trait> for enum Call where origin: <T as frame_system::Trait>::Origin { type Error = Error<T>; fn on_initialize(now: T::BlockNumber) -> Weight { + // Apply buffered session changes before initializing modules, so they + // can be initialized with respect to the current validator set. + <BufferedSessionChanges<T>>::mutate(|v| { + let drain_up_to = v.iter().take_while(|b| b.apply_at <= now).count(); + + // apply only the last session change; any earlier buffered ones lasted less than a block and are skipped. + if let Some(buffered) = v.drain(..drain_up_to).last() { + Self::apply_new_session( + buffered.session_index, + buffered.validators, + buffered.queued, + ); + } + }); + // The other modules are initialized in this order: // - Configuration // - Paras @@ -106,27 +137,11 @@ decl_module! { } impl<T: Trait> Module<T> { - /// Should be called when a new session occurs. Forwards the session notification to all - /// wrapped modules. If `queued` is `None`, the `validators` are considered queued. - /// - /// Panics if the modules have already been initialized. - fn on_new_session<'a, I: 'a>( - _changed: bool, + fn apply_new_session( session_index: sp_staking::SessionIndex, - validators: I, - queued: Option<I>, - ) - where I: Iterator<Item=(&'a T::AccountId, ValidatorId)> - { - assert!(HasInitialized::get().is_none()); - - let validators: Vec<_> = validators.map(|(_, v)| v).collect(); - let queued: Vec<_> = if let Some(queued) = queued { - queued.map(|(_, v)| v).collect() - } else { - validators.clone() - }; - + validators: Vec<ValidatorId>, + queued: Vec<ValidatorId>, + ) { let prev_config = <configuration::Module<T>>::config(); let random_seed = { @@ -156,13 +171,38 @@ impl<T: Trait> Module<T> { scheduler::Module::<T>::initializer_on_new_session(&notification); inclusion::Module::<T>::initializer_on_new_session(&notification); } + + /// Should be called when a new session occurs. Buffers the session notification to be applied + /// at the next block. If `queued` is `None`, the `validators` are considered queued.
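///
/// A worked example of the buffering rule (illustrative values): if
/// `{ apply_at: 6, session_index: 1 }` and `{ apply_at: 6, session_index: 2 }`
/// are both buffered, `on_initialize(6)` drains the two entries but applies
/// only session 2; session 1 lasted less than a block and is skipped.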
+ fn on_new_session<'a, I: 'a>( + _changed: bool, + session_index: sp_staking::SessionIndex, + validators: I, + queued: Option, + ) + where I: Iterator + { + let validators: Vec<_> = validators.map(|(_, v)| v).collect(); + let queued: Vec<_> = if let Some(queued) = queued { + queued.map(|(_, v)| v).collect() + } else { + validators.clone() + }; + + >::mutate(|v| v.push(BufferedSessionChange { + apply_at: >::block_number() + One::one(), + validators, + queued, + session_index, + })); + } } impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = ValidatorId; } -impl session::OneSessionHandler for Module { +impl pallet_session::OneSessionHandler for Module { type Key = ValidatorId; fn on_genesis_session<'a, I: 'a>(_validators: I) @@ -174,7 +214,7 @@ impl session::OneSessionHandler for Mod fn on_new_session<'a, I: 'a>(changed: bool, validators: I, queued: I) where I: Iterator { - let session_index = >::current_index(); + let session_index = >::current_index(); >::on_new_session(changed, session_index, validators, Some(queued)); } @@ -184,21 +224,47 @@ impl session::OneSessionHandler for Mod #[cfg(test)] mod tests { use super::*; - use crate::mock::{new_test_ext, Initializer}; + use crate::mock::{new_test_ext, Initializer, Test, System}; use frame_support::traits::{OnFinalize, OnInitialize}; #[test] - #[should_panic] - fn panics_if_session_changes_after_on_initialize() { + fn session_change_before_initialize_is_still_buffered_after() { + new_test_ext(Default::default()).execute_with(|| { + Initializer::on_new_session( + false, + 1, + Vec::new().into_iter(), + Some(Vec::new().into_iter()), + ); + + let now = System::block_number(); + Initializer::on_initialize(now); + + let v = >::get(); + assert_eq!(v.len(), 1); + + let apply_at = now + 1; + assert_eq!(v[0].apply_at, apply_at); + }); + } + + #[test] + fn session_change_applied_on_initialize() { new_test_ext(Default::default()).execute_with(|| { Initializer::on_initialize(1); + + let now = System::block_number(); Initializer::on_new_session( false, 1, Vec::new().into_iter(), Some(Vec::new().into_iter()), ); + + Initializer::on_initialize(now + 1); + + assert!(>::get().is_empty()); }); } diff --git a/runtime/parachains/src/lib.rs b/runtime/parachains/src/lib.rs index 44554322e4a4177f87b0ce208190ac699a87e8d4..b707c3e39eaf4ff1b6b7b8a83dd86f8a3249f1c0 100644 --- a/runtime/parachains/src/lib.rs +++ b/runtime/parachains/src/lib.rs @@ -20,13 +20,44 @@ //! particular the `Initializer` module, as it is responsible for initializing the state //! of the other modules. -mod configuration; -mod inclusion; -mod inclusion_inherent; -mod initializer; -mod paras; -mod scheduler; -mod validity; + +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_std::result; +use sp_runtime::traits::BadOrigin; +use primitives::v1::Id as ParaId; +use codec::{Decode, Encode}; + +pub mod configuration; +pub mod inclusion; +pub mod inclusion_inherent; +pub mod initializer; +pub mod paras; +pub mod scheduler; +pub mod validity; + +pub mod runtime_api_impl; + +mod util; #[cfg(test)] mod mock; + +/// Origin for the parachains. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug))] +pub enum Origin { + /// It comes from a parachain. + Parachain(ParaId), +} + +/// Ensure that the origin `o` represents a parachain. +/// Returns `Ok` with the parachain ID that effected the extrinsic or an `Err` otherwise. 
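///
/// A usage sketch (the caller and its generic origin bound below are assumed
/// context for exposition, not items defined in this crate):
///
/// ```ignore
/// fn note_para_action<O: Into<Result<Origin, O>>>(origin: O) -> Result<(), BadOrigin> {
///     let para: ParaId = ensure_parachain(origin)?;
///     // ... act on behalf of `para` ...
///     Ok(())
/// }
/// ```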
+pub fn ensure_parachain<OuterOrigin>(o: OuterOrigin) -> result::Result<ParaId, BadOrigin> + where OuterOrigin: Into<Result<Origin, OuterOrigin>> +{ + match o.into() { + Ok(Origin::Parachain(id)) => Ok(id), + _ => Err(BadOrigin), + } +} diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs index fd0d8e75e20ddc87eddde55c98935fedc8729cf3..7001b1c1df9c09e4ce384f7b48b2dcfd808657b0 100644 --- a/runtime/parachains/src/mock.rs +++ b/runtime/parachains/src/mock.rs @@ -24,14 +24,12 @@ use sp_runtime::{ BlakeTwo256, IdentityLookup, }, }; -use primitives::{ - BlockNumber, - Header, -}; +use primitives::v1::{BlockNumber, Header}; use frame_support::{ - impl_outer_origin, impl_outer_dispatch, parameter_types, + impl_outer_origin, impl_outer_dispatch, impl_outer_event, parameter_types, weights::Weight, traits::Randomness as RandomnessT, }; +use crate::inclusion; /// A test runtime struct. #[derive(Clone, Eq, PartialEq)] @@ -47,6 +45,13 @@ impl_outer_dispatch! { } } +impl_outer_event! { + pub enum TestEvent for Test { + frame_system, + inclusion, + } +} + pub struct TestRandomness; impl RandomnessT for TestRandomness { @@ -62,7 +67,7 @@ parameter_types! { pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } -impl system::Trait for Test { +impl frame_system::Trait for Test { type BaseCallFilter = (); type Origin = Origin; type Call = Call; @@ -73,7 +78,7 @@ impl system::Trait for Test { type AccountId = u64; type Lookup = IdentityLookup<u64>; type Header = Header; - type Event = (); + type Event = TestEvent; type BlockHashCount = BlockHashCount; type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); @@ -84,9 +89,10 @@ impl system::Trait for Test { type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type ModuleToIndex = (); - type AccountData = balances::AccountData; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } impl crate::initializer::Trait for Test { @@ -99,9 +105,11 @@ impl crate::paras::Trait for Test { } impl crate::scheduler::Trait for Test { } -impl crate::inclusion::Trait for Test { } +impl crate::inclusion::Trait for Test { + type Event = TestEvent; +} -pub type System = system::Module<Test>; +pub type System = frame_system::Module<Test>; /// Mocked initializer. pub type Initializer = crate::initializer::Module<Test>; @@ -129,7 +137,7 @@ pub fn new_test_ext(state: GenesisConfig) -> TestExternalities { #[derive(Default)] pub struct GenesisConfig { - pub system: system::GenesisConfig, + pub system: frame_system::GenesisConfig, pub configuration: crate::configuration::GenesisConfig<Test>, pub paras: crate::paras::GenesisConfig<Test>, } diff --git a/runtime/parachains/src/paras.rs b/runtime/parachains/src/paras.rs index 3bd55e277803b284fe25df1933a5e61b987b6eb5..f2a64de6c3d0c877535d3f2dd36922859ebb5f57 100644 --- a/runtime/parachains/src/paras.rs +++ b/runtime/parachains/src/paras.rs @@ -24,11 +24,12 @@ //! only occur at session boundaries.
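//!
//! For example (an illustrative sequence; the tests below exercise the real
//! thing):
//!
//! ```ignore
//! Paras::schedule_para_initialize(id, ParaGenesisArgs {
//!     genesis_head: vec![1].into(),
//!     validation_code: vec![1].into(),
//!     parachain: true,
//! });
//! // no effect on `Parachains` until the next session boundary.
//! ```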
use sp_std::prelude::*; +#[cfg(feature = "std")] use sp_std::marker::PhantomData; -use sp_runtime::traits::One; -use primitives::{ - parachain::{Id as ParaId, ValidationCode, HeadData}, +use primitives::v1::{ + Id as ParaId, ValidationCode, HeadData, }; +use sp_runtime::traits::One; use frame_support::{ decl_storage, decl_module, decl_error, traits::Get, @@ -36,11 +37,12 @@ use frame_support::{ }; use codec::{Encode, Decode}; use crate::{configuration, initializer::SessionChangeNotification}; +use sp_core::RuntimeDebug; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; -pub trait Trait: system::Trait + configuration::Trait { } +pub trait Trait: frame_system::Trait + configuration::Trait { } // the two key times necessary to track for every code replacement. #[derive(Default, Encode, Decode)] @@ -155,7 +157,7 @@ impl ParaPastCodeMeta { } /// Arguments for initializing a para. -#[derive(Encode, Decode)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct ParaGenesisArgs { /// The initial head data to use. @@ -223,7 +225,7 @@ fn build(config: &GenesisConfig) { .cloned() .collect(); - parachains.sort_unstable(); + parachains.sort(); parachains.dedup(); Parachains::put(¶chains); @@ -240,7 +242,7 @@ decl_error! { decl_module! { /// The parachains configuration module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; } } @@ -256,7 +258,7 @@ impl Module { /// Called by the initializer to note that a new session has started. pub(crate) fn initializer_on_new_session(_notification: &SessionChangeNotification) { - let now = >::block_number(); + let now = >::block_number(); let mut parachains = Self::clean_up_outgoing(now); Self::apply_incoming(&mut parachains); ::Parachains::set(parachains); @@ -387,8 +389,7 @@ impl Module { } /// Schedule a para to be initialized at the start of the next session. - #[allow(unused)] - pub(crate) fn schedule_para_initialize(id: ParaId, genesis: ParaGenesisArgs) -> Weight { + pub fn schedule_para_initialize(id: ParaId, genesis: ParaGenesisArgs) -> Weight { let dup = UpcomingParas::mutate(|v| { match v.binary_search(&id) { Ok(_) => true, @@ -410,9 +411,20 @@ impl Module { } /// Schedule a para to be cleaned up at the start of the next session. 
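///
/// If the para is still only pending (present in `UpcomingParas`), it is
/// removed from the upcoming set and its stored genesis dropped instead of
/// being moved to `OutgoingParas`; the returned weight accounts for both
/// checks.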
- #[allow(unused)] - pub(crate) fn schedule_para_cleanup(id: ParaId) -> Weight { - OutgoingParas::mutate(|v| { + pub fn schedule_para_cleanup(id: ParaId) -> Weight { + let upcoming_weight = UpcomingParas::mutate(|v| { + match v.binary_search(&id) { + Ok(i) => { + v.remove(i); + UpcomingParasGenesis::remove(id); + // If a para was only in the pending state it should not be moved to `Outgoing` + return T::DbWeight::get().reads_writes(2, 2); + } + Err(_) => T::DbWeight::get().reads_writes(1, 0), + } + }); + + let outgoing_weight = OutgoingParas::mutate(|v| { match v.binary_search(&id) { Ok(_) => T::DbWeight::get().reads_writes(1, 0), Err(i) => { @@ -420,7 +432,9 @@ impl Module { T::DbWeight::get().reads_writes(1, 1) } } - }) + }); + + outgoing_weight + upcoming_weight } /// Schedule a future code upgrade of the given parachain, to be applied after inclusion @@ -464,7 +478,7 @@ impl Module { CurrentCode::insert(&id, &new_code); // `now` is only used for registering pruning as part of `fn note_past_code` - let now = >::block_number(); + let now = >::block_number(); let weight = Self::note_past_code( id, @@ -496,7 +510,7 @@ impl Module { at: T::BlockNumber, assume_intermediate: Option, ) -> Option { - let now = >::block_number(); + let now = >::block_number(); let config = >::config(); if assume_intermediate.as_ref().map_or(false, |i| &at <= i) { @@ -541,7 +555,7 @@ impl Module { #[cfg(test)] mod tests { use super::*; - use primitives::BlockNumber; + use primitives::v1::BlockNumber; use frame_support::traits::{OnFinalize, OnInitialize}; use crate::mock::{new_test_ext, Paras, System, GenesisConfig as MockGenesisConfig}; @@ -1119,6 +1133,70 @@ mod tests { }) } + #[test] + fn para_cleanup_removes_upcoming() { + new_test_ext(Default::default()).execute_with(|| { + run_to_block(1, None); + + let b = ParaId::from(525); + let a = ParaId::from(999); + let c = ParaId::from(333); + + Paras::schedule_para_initialize( + b, + ParaGenesisArgs { + parachain: true, + genesis_head: vec![1].into(), + validation_code: vec![1].into(), + }, + ); + + Paras::schedule_para_initialize( + a, + ParaGenesisArgs { + parachain: false, + genesis_head: vec![2].into(), + validation_code: vec![2].into(), + }, + ); + + Paras::schedule_para_initialize( + c, + ParaGenesisArgs { + parachain: true, + genesis_head: vec![3].into(), + validation_code: vec![3].into(), + }, + ); + + assert_eq!(::UpcomingParas::get(), vec![c, b, a]); + assert!(::Parathreads::get(&a).is_none()); + + + // run to block without session change. 
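// Aside: `UpcomingParas` is kept sorted by `ParaId` via binary-search
// insertion, which is why the expected order asserted above is `[c, b, a]`,
// i.e. ids 333, 525, 999 in ascending order.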
+ run_to_block(2, None); + + assert_eq!(Paras::parachains(), Vec::new()); + assert_eq!(::UpcomingParas::get(), vec![c, b, a]); + assert!(::Parathreads::get(&a).is_none()); + + Paras::schedule_para_cleanup(c); + + run_to_block(3, Some(vec![3])); + + assert_eq!(Paras::parachains(), vec![b]); + assert_eq!(::OutgoingParas::get(), vec![]); + assert_eq!(::UpcomingParas::get(), Vec::new()); + assert!(::UpcomingParasGenesis::get(a).is_none()); + + assert!(::Parathreads::get(&a).is_some()); + + assert_eq!(Paras::current_code(&a), Some(vec![2].into())); + assert_eq!(Paras::current_code(&b), Some(vec![1].into())); + assert!(Paras::current_code(&c).is_none()); + }); + } + #[test] fn code_at_with_intermediate() { let acceptance_period = 10; diff --git a/runtime/parachains/src/runtime_api_impl/mod.rs b/runtime/parachains/src/runtime_api_impl/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..35c26fe35def8fd5bccf8f62b0d61a13d70bc696 --- /dev/null +++ b/runtime/parachains/src/runtime_api_impl/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Runtime API implementations for Parachains. +//! +//! These are exposed as different modules using different sets of primitives. +//! At the moment there is only a v1 module and it is not completely clear how migration +//! to a v2 would be done. + +pub mod v1; diff --git a/runtime/parachains/src/runtime_api_impl/v1.rs b/runtime/parachains/src/runtime_api_impl/v1.rs new file mode 100644 index 0000000000000000000000000000000000000000..716d3eed9cb1d0036a9921c2c24bfc3d759ac4d9 --- /dev/null +++ b/runtime/parachains/src/runtime_api_impl/v1.rs @@ -0,0 +1,268 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! A module exporting runtime API implementation functions for all runtime APIs using v1 +//! primitives. +//! +//! Runtimes implementing the v1 runtime API are recommended to forward directly to these +//! functions. 
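//!
//! For instance, a concrete runtime might forward like this (a sketch:
//! `Runtime` and the enclosing `impl_runtime_apis!` block are assumed
//! context, not defined here):
//!
//! ```ignore
//! fn validators() -> Vec<ValidatorId> {
//!     runtime_api_impl::v1::validators::<Runtime>()
//! }
//! ```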
+ +use sp_std::prelude::*; +use primitives::v1::{ + ValidatorId, ValidatorIndex, GroupRotationInfo, CoreState, ValidationData, + Id as ParaId, OccupiedCoreAssumption, SessionIndex, ValidationCode, + CommittedCandidateReceipt, ScheduledCore, OccupiedCore, CoreOccupied, CoreIndex, + GroupIndex, CandidateEvent, PersistedValidationData, +}; +use sp_runtime::traits::Zero; +use frame_support::debug; +use crate::{initializer, inclusion, scheduler, configuration, paras}; + +/// Implementation for the `validators` function of the runtime API. +pub fn validators() -> Vec { + >::validators() +} + +/// Implementation for the `validator_groups` function of the runtime API. +pub fn validator_groups() -> ( + Vec>, + GroupRotationInfo, +) { + let groups = >::validator_groups(); + let rotation_info = >::group_rotation_info(); + + (groups, rotation_info) +} + +/// Implementation for the `availability_cores` function of the runtime API. +pub fn availability_cores() -> Vec> { + let cores = >::availability_cores(); + let parachains = >::parachains(); + let config = >::config(); + + let rotation_info = >::group_rotation_info(); + + let time_out_at = |backed_in_number, availability_period| { + let time_out_at = backed_in_number + availability_period; + + if rotation_info.group_rotation_frequency == Zero::zero() { + return time_out_at; + } + + let current_window = rotation_info.last_rotation_at() + availability_period; + let next_rotation = rotation_info.next_rotation_at(); + + // If we are within `period` blocks of rotation, timeouts are being checked + // actively. We could even time out this block. + if time_out_at < current_window { + time_out_at + } else if time_out_at <= next_rotation { + // Otherwise, it will time out at the sooner of the next rotation + next_rotation + } else { + // or the scheduled time-out. This is by definition within `period` blocks + // of `next_rotation` and is thus a valid timeout block. 
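// Worked example (illustrative numbers): with rotation frequency 10,
// availability period 3, and the last rotation at block 20, a candidate
// backed at block 18 times out at 21 (inside the active window ending
// at 23), while one backed at block 25 is deferred to the rotation at 30.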
+ time_out_at + } + }; + + let group_responsible_for = |backed_in_number, core_index| { + match >::group_assigned_to_core(core_index, backed_in_number) { + Some(g) => g, + None => { + debug::warn!("Could not determine the group responsible for core extracted \ + from list of cores for some prior block in same session"); + + GroupIndex(0) + } + } + }; + + let mut core_states: Vec<_> = cores.into_iter().enumerate().map(|(i, core)| match core { + Some(occupied) => { + CoreState::Occupied(match occupied { + CoreOccupied::Parachain => { + let para_id = parachains[i]; + let pending_availability = > + ::pending_availability(para_id) + .expect("Occupied core always has pending availability; qed"); + + let backed_in_number = pending_availability.backed_in_number().clone(); + OccupiedCore { + para_id, + next_up_on_available: >::next_up_on_available( + CoreIndex(i as u32) + ), + occupied_since: backed_in_number, + time_out_at: time_out_at( + backed_in_number, + config.chain_availability_period, + ), + next_up_on_time_out: >::next_up_on_time_out( + CoreIndex(i as u32) + ), + availability: pending_availability.availability_votes().clone(), + group_responsible: group_responsible_for( + backed_in_number, + pending_availability.core_occupied(), + ), + } + } + CoreOccupied::Parathread(p) => { + let para_id = p.claim.0; + let pending_availability = > + ::pending_availability(para_id) + .expect("Occupied core always has pending availability; qed"); + + let backed_in_number = pending_availability.backed_in_number().clone(); + OccupiedCore { + para_id, + next_up_on_available: >::next_up_on_available( + CoreIndex(i as u32) + ), + occupied_since: backed_in_number, + time_out_at: time_out_at( + backed_in_number, + config.thread_availability_period, + ), + next_up_on_time_out: >::next_up_on_time_out( + CoreIndex(i as u32) + ), + availability: pending_availability.availability_votes().clone(), + group_responsible: group_responsible_for( + backed_in_number, + pending_availability.core_occupied(), + ), + } + } + }) + } + None => CoreState::Free, + }).collect(); + + // This will overwrite only `Free` cores if the scheduler module is working as intended. + for scheduled in >::scheduled() { + core_states[scheduled.core.0 as usize] = CoreState::Scheduled(ScheduledCore { + para_id: scheduled.para_id, + collator: scheduled.required_collator().map(|c| c.clone()), + }); + } + + core_states +} + +fn with_assumption( + para_id: ParaId, + assumption: OccupiedCoreAssumption, + build: F, +) -> Option where + Trait: inclusion::Trait, + F: FnOnce() -> Option, +{ + match assumption { + OccupiedCoreAssumption::Included => { + >::force_enact(para_id); + build() + } + OccupiedCoreAssumption::TimedOut => { + build() + } + OccupiedCoreAssumption::Free => { + if >::pending_availability(para_id).is_some() { + None + } else { + build() + } + } + } +} + +/// Implementation for the `full_validation_data` function of the runtime API. +pub fn full_validation_data( + para_id: ParaId, + assumption: OccupiedCoreAssumption, +) + -> Option> +{ + with_assumption::( + para_id, + assumption, + || Some(ValidationData { + persisted: crate::util::make_persisted_validation_data::(para_id)?, + transient: crate::util::make_transient_validation_data::(para_id)?, + }), + ) +} + +/// Implementation for the `persisted_validation_data` function of the runtime API. 
+pub fn persisted_validation_data( + para_id: ParaId, + assumption: OccupiedCoreAssumption, +) -> Option> { + with_assumption::( + para_id, + assumption, + || crate::util::make_persisted_validation_data::(para_id), + ) +} + +/// Implementation for the `session_index_for_child` function of the runtime API. +pub fn session_index_for_child() -> SessionIndex { + // Just returns the session index from `inclusion`. Runtime APIs follow + // initialization so the initializer will have applied any pending session change + // which is expected at the child of the block whose context the runtime API was invoked + // in. + // + // Incidentally, this is also the rationale for why it is OK to query validators or + // occupied cores or etc. and expect the correct response "for child". + >::session_index() +} + +/// Implementation for the `validation_code` function of the runtime API. +pub fn validation_code( + para_id: ParaId, + assumption: OccupiedCoreAssumption, +) -> Option { + with_assumption::( + para_id, + assumption, + || >::current_code(¶_id), + ) +} + +/// Implementation for the `candidate_pending_availability` function of the runtime API. +pub fn candidate_pending_availability(para_id: ParaId) + -> Option> +{ + >::candidate_pending_availability(para_id) +} + +/// Implementation for the `candidate_events` function of the runtime API. +// NOTE: this runs without block initialization, as it accesses events. +// this means it can run in a different session than other runtime APIs at the same block. +pub fn candidate_events(extract_event: F) -> Vec> +where + T: initializer::Trait, + F: Fn(::Event) -> Option>, +{ + use inclusion::Event as RawEvent; + + >::events().into_iter() + .filter_map(|record| extract_event(record.event)) + .map(|event| match event { + RawEvent::::CandidateBacked(c, h) => CandidateEvent::CandidateBacked(c, h), + RawEvent::::CandidateIncluded(c, h) => CandidateEvent::CandidateIncluded(c, h), + RawEvent::::CandidateTimedOut(c, h) => CandidateEvent::CandidateTimedOut(c, h), + }) + .collect() +} diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 6539915946c845b5e328ad54c781058a912a763e..455a07f94e7493b890e83042f3a77e9610472a08 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -37,8 +37,9 @@ use sp_std::prelude::*; use sp_std::convert::TryInto; -use primitives::{ - parachain::{Id as ParaId, CollatorId, ValidatorIndex}, +use primitives::v1::{ + Id as ParaId, ValidatorIndex, CoreOccupied, CoreIndex, CollatorId, + GroupIndex, ParathreadClaim, ParathreadEntry, GroupRotationInfo, ScheduledCore, }; use frame_support::{ decl_storage, decl_module, decl_error, @@ -52,41 +53,6 @@ use rand_chacha::ChaCha20Rng; use crate::{configuration, paras, initializer::SessionChangeNotification}; -/// The unique (during session) index of a core. -#[derive(Encode, Decode, Default, PartialOrd, Ord, Eq, PartialEq, Clone, Copy)] -#[cfg_attr(test, derive(Debug))] -pub struct CoreIndex(u32); - -impl From for CoreIndex { - fn from(i: u32) -> CoreIndex { - CoreIndex(i) - } -} - -/// The unique (during session) index of a validator group. -#[derive(Encode, Decode, Default, Clone, Copy)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub struct GroupIndex(u32); - -impl From for GroupIndex { - fn from(i: u32) -> GroupIndex { - GroupIndex(i) - } -} - -/// A claim on authoring the next block for a given parathread. 
-#[derive(Clone, Encode, Decode, Default)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub struct ParathreadClaim(pub ParaId, pub CollatorId); - -/// An entry tracking a claim to ensure it does not pass the maximum number of retries. -#[derive(Clone, Encode, Decode, Default)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub struct ParathreadEntry { - claim: ParathreadClaim, - retries: u32, -} - /// A queued parathread entry, pre-assigned to a core. #[derive(Encode, Decode, Default)] #[cfg_attr(test, derive(PartialEq, Debug))] @@ -118,32 +84,41 @@ impl ParathreadClaimQueue { }) } - // Take next queued entry with given core offset, if any. + /// Take next queued entry with given core offset, if any. fn take_next_on_core(&mut self, core_offset: u32) -> Option { let pos = self.queue.iter().position(|queued| queued.core_offset == core_offset); pos.map(|i| self.queue.remove(i).claim) } + + /// Get the next queued entry with given core offset, if any. + fn get_next_on_core(&self, core_offset: u32) -> Option<&ParathreadEntry> { + let pos = self.queue.iter().position(|queued| queued.core_offset == core_offset); + pos.map(|i| &self.queue[i].claim) + } } -/// What is occupying a specific availability core. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(test, derive(PartialEq, Debug))] -pub(crate) enum CoreOccupied { - Parathread(ParathreadEntry), - Parachain, +/// Reasons a core might be freed +pub enum FreedReason { + /// The core's work concluded and the parablock assigned to it is considered available. + Concluded, + /// The core's work timed out. + TimedOut, } + /// The assignment type. #[derive(Clone, Encode, Decode)] -#[cfg_attr(test, derive(PartialEq, Debug))] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] pub enum AssignmentKind { + /// A parachain. Parachain, + /// A parathread. Parathread(CollatorId, u32), } /// How a free core is scheduled to be assigned. #[derive(Clone, Encode, Decode)] -#[cfg_attr(test, derive(PartialEq, Debug))] +#[cfg_attr(feature = "std", derive(PartialEq, Debug))] pub struct CoreAssignment { /// The core that is assigned. pub core: CoreIndex, @@ -157,14 +132,15 @@ pub struct CoreAssignment { impl CoreAssignment { /// Get the ID of a collator who is required to collate this block. - pub(crate) fn required_collator(&self) -> Option<&CollatorId> { + pub fn required_collator(&self) -> Option<&CollatorId> { match self.kind { AssignmentKind::Parachain => None, AssignmentKind::Parathread(ref id, _) => Some(id), } } - fn to_core_occupied(&self) -> CoreOccupied { + /// Get the `CoreOccupied` from this. + pub fn to_core_occupied(&self) -> CoreOccupied { match self.kind { AssignmentKind::Parachain => CoreOccupied::Parachain, AssignmentKind::Parathread(ref collator, retries) => CoreOccupied::Parathread( @@ -177,15 +153,7 @@ impl CoreAssignment { } } -/// Reasons a core might be freed -pub enum FreedReason { - /// The core's work concluded and the parablock assigned to it is considered available. - Concluded, - /// The core's work timed out. - TimedOut, -} - -pub trait Trait: system::Trait + configuration::Trait + paras::Trait { } +pub trait Trait: frame_system::Trait + configuration::Trait + paras::Trait { } decl_storage! { trait Store for Module as ParaScheduler { @@ -193,7 +161,7 @@ decl_storage! { /// /// Bound: The number of cores is the sum of the numbers of parachains and parathread multiplexers. /// Reasonably, 100-1000. The dominant factor is the number of validators: safe upper bound at 10k. 
- ValidatorGroups: Vec>; + ValidatorGroups get(fn validator_groups): Vec>; /// A queue of upcoming claims and which core they should be mapped onto. /// @@ -206,14 +174,14 @@ decl_storage! { /// parathread-multiplexers. /// /// Bounded by the number of cores: one for each parachain and parathread multiplexer. - AvailabilityCores: Vec>; + AvailabilityCores get(fn availability_cores): Vec>; /// An index used to ensure that only one claim on a parathread exists in the queue or is /// currently being handled by an occupied core. /// /// Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = 500. ParathreadClaimIndex: Vec; /// The block number where the session start occurred. Used to track how many group rotations have occurred. - SessionStartBlock: T::BlockNumber; + SessionStartBlock get(fn session_start_block): T::BlockNumber; /// Currently scheduled cores - free but up to be occupied. Ephemeral storage item that's wiped on finalization. /// /// Bounded by the number of cores: one for each parachain and parathread multiplexer. @@ -227,7 +195,7 @@ decl_error! { decl_module! { /// The scheduler module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; } } @@ -275,7 +243,7 @@ impl Module { let n_parachains = >::parachains().len() as u32; let n_cores = n_parachains + config.parathread_cores; - >::set(>::block_number()); + >::set(>::block_number()); AvailabilityCores::mutate(|cores| { // clear all occupied cores. for maybe_occupied in cores.iter_mut() { @@ -435,7 +403,7 @@ impl Module { let parachains = >::parachains(); let mut scheduled = Scheduled::get(); let mut parathread_queue = ParathreadQueue::get(); - let now = >::block_number(); + let now = >::block_number(); if ValidatorGroups::get().is_empty() { return } @@ -590,7 +558,7 @@ impl Module { if at < session_start_block { return None } - if config.parachain_rotation_frequency.is_zero() { + if config.group_rotation_frequency.is_zero() { // interpret this as "no rotations" return Some(GroupIndex(core.0)); } @@ -600,7 +568,7 @@ impl Module { if core.0 as usize >= validator_groups.len() { return None } let rotations_since_session_start: T::BlockNumber = - (at - session_start_block) / config.parachain_rotation_frequency.into(); + (at - session_start_block) / config.group_rotation_frequency.into(); let rotations_since_session_start = match >::try_into(rotations_since_session_start) @@ -619,18 +587,26 @@ impl Module { /// If `None`, no timing-out should be done. The predicate accepts the index of the core, and the /// block number since which it has been occupied, and the respective parachain and parathread /// timeouts, i.e. only within `max(config.chain_availability_period, config.thread_availability_period)` - /// of the last rotation would this return `Some`. + /// of the last rotation would this return `Some`, unless there are no rotations. + /// + /// If there are no rotations (config.group_rotation_frequency == 0), + /// availability timeouts can occur at any block. /// /// This really should not be a box, but is working around a compiler limitation filed here: /// https://github.com/rust-lang/rust/issues/73226 /// which prevents us from testing the code if using `impl Trait`. 
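///
/// As a worked example, using the values from `default_config()` in the tests
/// below: with `group_rotation_frequency = 10`, `chain_availability_period = 3`
/// and `thread_availability_period = 5`, a predicate is only returned during
/// the first `max(3, 5) = 5` blocks after each rotation; with
/// `group_rotation_frequency = 0` it is returned at every block.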
pub(crate) fn availability_timeout_predicate() -> Option bool>> { - let now = >::block_number(); + let now = >::block_number(); let config = >::config(); let session_start = >::get(); let blocks_since_session_start = now.saturating_sub(session_start); - let blocks_since_last_rotation = blocks_since_session_start % config.parachain_rotation_frequency; + let no_rotation = config.group_rotation_frequency.is_zero(); + let blocks_since_last_rotation = if no_rotation { + ::zero() + } else { + blocks_since_session_start % config.group_rotation_frequency + }; let absolute_cutoff = sp_std::cmp::max( config.chain_availability_period, @@ -664,13 +640,93 @@ impl Module { })) } } + + /// Returns a helper for determining group rotation. + pub(crate) fn group_rotation_info() -> GroupRotationInfo { + let session_start_block = Self::session_start_block(); + let now = >::block_number(); + let group_rotation_frequency = >::config() + .group_rotation_frequency; + + GroupRotationInfo { + session_start_block, + now, + group_rotation_frequency, + } + } + + /// Return the next thing that will be scheduled on this core assuming it is currently + /// occupied and the candidate occupying it became available. + /// + /// For parachains, this is always the ID of the parachain and no specified collator. + /// For parathreads, this is based on the next item in the ParathreadQueue assigned to that + /// core, and is None if there isn't one. + pub(crate) fn next_up_on_available(core: CoreIndex) -> Option { + let parachains = >::parachains(); + if (core.0 as usize) < parachains.len() { + Some(ScheduledCore { + para_id: parachains[core.0 as usize], + collator: None, + }) + } else { + let queue = ParathreadQueue::get(); + let core_offset = (core.0 as usize - parachains.len()) as u32; + queue.get_next_on_core(core_offset).map(|entry| ScheduledCore { + para_id: entry.claim.0, + collator: Some(entry.claim.1.clone()), + }) + } + } + + /// Return the next thing that will be scheduled on this core assuming it is currently + /// occupied and the candidate occupying it became available. + /// + /// For parachains, this is always the ID of the parachain and no specified collator. + /// For parathreads, this is based on the next item in the ParathreadQueue assigned to that + /// core, or if there isn't one, the claim that is currently occupying the core, as long + /// as the claim's retries would not exceed the limit. Otherwise None. + pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { + let parachains = >::parachains(); + if (core.0 as usize) < parachains.len() { + Some(ScheduledCore { + para_id: parachains[core.0 as usize], + collator: None, + }) + } else { + let queue = ParathreadQueue::get(); + + // This is the next scheduled para on this core. + let core_offset = (core.0 as usize - parachains.len()) as u32; + queue.get_next_on_core(core_offset) + .map(|entry| ScheduledCore { + para_id: entry.claim.0, + collator: Some(entry.claim.1.clone()), + }) + .or_else(|| { + // Or, if none, the claim currently occupying the core, + // as it would be put back on the queue after timing out. + let cores = AvailabilityCores::get(); + cores.get(core.0 as usize).and_then(|c| c.as_ref()).and_then(|o| { + match o { + CoreOccupied::Parathread(entry) => { + Some(ScheduledCore { + para_id: entry.claim.0, + collator: Some(entry.claim.1.clone()), + }) + } + CoreOccupied::Parachain => None, // defensive; not possible. 
+ } + }) + }) + } + } } #[cfg(test)] mod tests { use super::*; - use primitives::{BlockNumber, parachain::ValidatorId}; + use primitives::v1::{BlockNumber, ValidatorId, CollatorId}; use frame_support::traits::{OnFinalize, OnInitialize}; use keyring::Sr25519Keyring; @@ -707,7 +763,7 @@ mod tests { fn default_config() -> HostConfiguration { HostConfiguration { parathread_cores: 3, - parachain_rotation_frequency: 10, + group_rotation_frequency: 10, chain_availability_period: 3, thread_availability_period: 5, scheduling_lookahead: 2, @@ -1278,12 +1334,12 @@ mod tests { let mut config = default_config(); // make sure parathread requests don't retry-out - config.parathread_retries = config.parachain_rotation_frequency * 3; + config.parathread_retries = config.group_rotation_frequency * 3; config.parathread_cores = 2; config }; - let rotation_frequency = config.parachain_rotation_frequency; + let rotation_frequency = config.group_rotation_frequency; let parathread_cores = config.parathread_cores; let genesis_config = MockGenesisConfig { @@ -1429,7 +1485,7 @@ mod tests { }; let HostConfiguration { - parachain_rotation_frequency, + group_rotation_frequency, chain_availability_period, thread_availability_period, .. @@ -1437,7 +1493,7 @@ mod tests { let collator = CollatorId::from(Sr25519Keyring::Alice.public()); assert!(chain_availability_period < thread_availability_period && - thread_availability_period < parachain_rotation_frequency); + thread_availability_period < group_rotation_frequency); let chain_a = ParaId::from(1); let thread_a = ParaId::from(2); @@ -1482,7 +1538,7 @@ mod tests { run_to_block(1 + thread_availability_period, |_| None); assert!(Scheduler::availability_timeout_predicate().is_none()); - run_to_block(1 + parachain_rotation_frequency, |_| None); + run_to_block(1 + group_rotation_frequency, |_| None); { let pred = Scheduler::availability_timeout_predicate() @@ -1509,7 +1565,7 @@ mod tests { assert!(!pred(CoreIndex(1), now - thread_availability_period + 1)); } - run_to_block(1 + parachain_rotation_frequency + chain_availability_period, |_| None); + run_to_block(1 + group_rotation_frequency + chain_availability_period, |_| None); { let pred = Scheduler::availability_timeout_predicate() @@ -1521,9 +1577,392 @@ mod tests { assert!(pred(CoreIndex(1), would_be_timed_out)); // but threads can. } - run_to_block(1 + parachain_rotation_frequency + thread_availability_period, |_| None); + run_to_block(1 + group_rotation_frequency + thread_availability_period, |_| None); assert!(Scheduler::availability_timeout_predicate().is_none()); }); } + + #[test] + fn availability_predicate_no_rotation() { + let genesis_config = MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: HostConfiguration { + group_rotation_frequency: 0, // no rotation + ..default_config() + }, + ..Default::default() + }, + ..Default::default() + }; + let HostConfiguration { + chain_availability_period, + thread_availability_period, + .. + } = default_config(); + let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + + let chain_a = ParaId::from(1); + let thread_a = ParaId::from(2); + + let schedule_blank_para = |id, is_chain| Paras::schedule_para_initialize(id, ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: Vec::new().into(), + parachain: is_chain, + }); + + new_test_ext(genesis_config).execute_with(|| { + schedule_blank_para(chain_a, true); + schedule_blank_para(thread_a, false); + + // start a new session with our chain & thread registered. 
+ run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: HostConfiguration{ + // Note: the `group_rotation_frequency` config change + // is not accounted for on session change + // group_rotation_frequency: 0, + ..default_config() + }, + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), + ValidatorId::from(Sr25519Keyring::Charlie.public()), + ValidatorId::from(Sr25519Keyring::Dave.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + // assign some availability cores. + { + AvailabilityCores::mutate(|cores| { + cores[0] = Some(CoreOccupied::Parachain); + cores[1] = Some(CoreOccupied::Parathread(ParathreadEntry { + claim: ParathreadClaim(thread_a, collator), + retries: 0, + })) + }); + } + run_to_block(1 + 1, |_| None); + run_to_block(1 + 1 + 100500, |_| None); + { + let pred = Scheduler::availability_timeout_predicate() + .expect("predicate exists with no rotation"); + + let now = System::block_number(); + + assert!(!pred(CoreIndex(0), now)); // assigned: chain + assert!(!pred(CoreIndex(1), now)); // assigned: thread + assert!(pred(CoreIndex(2), now)); + + // check the tighter bound on chains vs threads. + assert!(pred(CoreIndex(0), now - chain_availability_period)); + assert!(pred(CoreIndex(1), now - thread_availability_period)); + + // check the threshold is exact. + assert!(!pred(CoreIndex(0), now - chain_availability_period + 1)); + assert!(!pred(CoreIndex(1), now - thread_availability_period + 1)); + } + }); + } + + #[test] + fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { + let mut config = default_config(); + config.parathread_cores = 1; + + let genesis_config = MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: config.clone(), + ..Default::default() + }, + ..Default::default() + }; + + let thread_a = ParaId::from(1); + let thread_b = ParaId::from(2); + + let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + + let schedule_blank_para = |id, is_chain| Paras::schedule_para_initialize(id, ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: Vec::new().into(), + parachain: is_chain, + }); + + new_test_ext(genesis_config).execute_with(|| { + schedule_blank_para(thread_a, false); + schedule_blank_para(thread_b, false); + + // start a new session to activate, 5 validators for 5 cores. 
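+		// `next_up_on_available` only peeks at the parathread queue; it never pops
+		// the claim. A hedged usage sketch (caller code assumed, not in this patch):
+		//
+		//   if let Some(ScheduledCore { para_id, .. }) = Scheduler::next_up_on_available(core) {
+		//       // advertise `para_id` as the likely next occupant of `core`.
+		//   }
+		//
+		// The claim itself is only consumed when the scheduler later assigns it.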
+ run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: config.clone(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + let thread_claim_a = ParathreadClaim(thread_a, collator.clone()); + let thread_claim_b = ParathreadClaim(thread_b, collator.clone()); + + Scheduler::add_parathread_claim(thread_claim_a.clone()); + + run_to_block(2, |_| None); + + { + assert_eq!(Scheduler::scheduled().len(), 1); + assert_eq!(Scheduler::availability_cores().len(), 1); + + Scheduler::occupied(&[CoreIndex(0)]); + + let cores = Scheduler::availability_cores(); + match cores[0].as_ref().unwrap() { + CoreOccupied::Parathread(entry) => assert_eq!(entry.claim, thread_claim_a), + _ => panic!("with no chains, only core should be a thread core"), + } + + assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); + + Scheduler::add_parathread_claim(thread_claim_b); + + let queue = ParathreadQueue::get(); + assert_eq!( + queue.get_next_on_core(0).unwrap().claim, + ParathreadClaim(thread_b, collator.clone()), + ); + + assert_eq!( + Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), + ScheduledCore { + para_id: thread_b, + collator: Some(collator.clone()), + } + ); + } + }); + } + + #[test] + fn next_up_on_time_out_reuses_claim_if_nothing_queued() { + let mut config = default_config(); + config.parathread_cores = 1; + + let genesis_config = MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: config.clone(), + ..Default::default() + }, + ..Default::default() + }; + + let thread_a = ParaId::from(1); + let thread_b = ParaId::from(2); + + let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + + let schedule_blank_para = |id, is_chain| Paras::schedule_para_initialize(id, ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: Vec::new().into(), + parachain: is_chain, + }); + + new_test_ext(genesis_config).execute_with(|| { + schedule_blank_para(thread_a, false); + schedule_blank_para(thread_b, false); + + // start a new session to activate, 5 validators for 5 cores. 
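+		// The two lookahead helpers differ only in their parathread fallback. A
+		// sketch of the contrast this test pins down (assuming an occupied core
+		// and an empty queue):
+		//
+		//   Scheduler::next_up_on_available(core)  == None
+		//   Scheduler::next_up_on_time_out(core)   == Some(<the occupying claim>)
+		//
+		// With something queued for the core, both prefer the queued claim.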
+ run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: config.clone(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + let thread_claim_a = ParathreadClaim(thread_a, collator.clone()); + let thread_claim_b = ParathreadClaim(thread_b, collator.clone()); + + Scheduler::add_parathread_claim(thread_claim_a.clone()); + + run_to_block(2, |_| None); + + { + assert_eq!(Scheduler::scheduled().len(), 1); + assert_eq!(Scheduler::availability_cores().len(), 1); + + Scheduler::occupied(&[CoreIndex(0)]); + + let cores = Scheduler::availability_cores(); + match cores[0].as_ref().unwrap() { + CoreOccupied::Parathread(entry) => assert_eq!(entry.claim, thread_claim_a), + _ => panic!("with no chains, only core should be a thread core"), + } + + let queue = ParathreadQueue::get(); + assert!(queue.get_next_on_core(0).is_none()); + assert_eq!( + Scheduler::next_up_on_time_out(CoreIndex(0)).unwrap(), + ScheduledCore { + para_id: thread_a, + collator: Some(collator.clone()), + } + ); + + Scheduler::add_parathread_claim(thread_claim_b); + + let queue = ParathreadQueue::get(); + assert_eq!( + queue.get_next_on_core(0).unwrap().claim, + ParathreadClaim(thread_b, collator.clone()), + ); + + // Now that there is an earlier next-up, we use that. + assert_eq!( + Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), + ScheduledCore { + para_id: thread_b, + collator: Some(collator.clone()), + } + ); + } + }); + } + + #[test] + fn next_up_on_available_is_parachain_always() { + let mut config = default_config(); + config.parathread_cores = 0; + + let genesis_config = MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: config.clone(), + ..Default::default() + }, + ..Default::default() + }; + + let chain_a = ParaId::from(1); + + let schedule_blank_para = |id, is_chain| Paras::schedule_para_initialize(id, ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: Vec::new().into(), + parachain: is_chain, + }); + + new_test_ext(genesis_config).execute_with(|| { + schedule_blank_para(chain_a, true); + + // start a new session to activate, 5 validators for 5 cores. + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: config.clone(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + run_to_block(2, |_| None); + + { + assert_eq!(Scheduler::scheduled().len(), 1); + assert_eq!(Scheduler::availability_cores().len(), 1); + + Scheduler::occupied(&[CoreIndex(0)]); + + let cores = Scheduler::availability_cores(); + match cores[0].as_ref().unwrap() { + CoreOccupied::Parachain => {}, + _ => panic!("with no threads, only core should be a chain core"), + } + + // Now that there is an earlier next-up, we use that. 
+				assert_eq!(
+					Scheduler::next_up_on_available(CoreIndex(0)).unwrap(),
+					ScheduledCore {
+						para_id: chain_a,
+						collator: None,
+					}
+				);
+			}
+		});
+	}
+
+	#[test]
+	fn next_up_on_time_out_is_parachain_always() {
+		let mut config = default_config();
+		config.parathread_cores = 0;
+
+		let genesis_config = MockGenesisConfig {
+			configuration: crate::configuration::GenesisConfig {
+				config: config.clone(),
+				..Default::default()
+			},
+			..Default::default()
+		};
+
+		let chain_a = ParaId::from(1);
+
+		let schedule_blank_para = |id, is_chain| Paras::schedule_para_initialize(id, ParaGenesisArgs {
+			genesis_head: Vec::new().into(),
+			validation_code: Vec::new().into(),
+			parachain: is_chain,
+		});
+
+		new_test_ext(genesis_config).execute_with(|| {
+			schedule_blank_para(chain_a, true);
+
+			// start a new session to activate; two validators suffice for the single core.
+			run_to_block(1, |number| match number {
+				1 => Some(SessionChangeNotification {
+					new_config: config.clone(),
+					validators: vec![
+						ValidatorId::from(Sr25519Keyring::Alice.public()),
+						ValidatorId::from(Sr25519Keyring::Eve.public()),
+					],
+					..Default::default()
+				}),
+				_ => None,
+			});
+
+			run_to_block(2, |_| None);
+
+			{
+				assert_eq!(Scheduler::scheduled().len(), 1);
+				assert_eq!(Scheduler::availability_cores().len(), 1);
+
+				Scheduler::occupied(&[CoreIndex(0)]);
+
+				let cores = Scheduler::availability_cores();
+				match cores[0].as_ref().unwrap() {
+					CoreOccupied::Parachain => {},
+					_ => panic!("with no threads, only core should be a chain core"),
+				}
+
+				// On time-out, a parachain core is always re-assigned to the same parachain.
+				assert_eq!(
+					Scheduler::next_up_on_time_out(CoreIndex(0)).unwrap(),
+					ScheduledCore {
+						para_id: chain_a,
+						collator: None,
+					}
+				);
+			}
+		});
+	}
 }
diff --git a/runtime/parachains/src/util.rs b/runtime/parachains/src/util.rs
new file mode 100644
index 0000000000000000000000000000000000000000..028d65c5d9579dd0332e35c6fec206048e100d5f
--- /dev/null
+++ b/runtime/parachains/src/util.rs
@@ -0,0 +1,71 @@
+// Copyright 2017-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Utilities that don't belong to any particular module but may draw
+//! on all modules.
+
+use sp_runtime::traits::{One, Saturating};
+use primitives::v1::{Id as ParaId, PersistedValidationData, TransientValidationData};
+use sp_std::prelude::*;
+
+use crate::{configuration, paras};
+
+/// Make the persisted validation data for a particular parachain.
+///
+/// This ties together the storage of several modules.
+pub fn make_persisted_validation_data<T: paras::Trait>(
+	para_id: ParaId,
+) -> Option<PersistedValidationData<T::BlockNumber>> {
+	let relay_parent_number = <frame_system::Module<T>>::block_number() - One::one();
+
+	Some(PersistedValidationData {
+		parent_head: <paras::Module<T>>::para_head(&para_id)?,
+		block_number: relay_parent_number,
+		hrmp_mqc_heads: Vec::new(),
+	})
+}
+
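+// A hedged usage sketch (the wiring below is assumed, not part of this file):
+// a runtime API implementation could surface the helper above as, e.g.,
+//
+//   fn persisted_validation_data(para_id: ParaId) -> Option<PersistedValidationData<BlockNumber>> {
+//       util::make_persisted_validation_data::<Runtime>(para_id)
+//   }
+//
+// `None` falls out naturally when `para_head` has no entry for `para_id`.
+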
+/// Make the transient validation data for a particular parachain.
+///
+/// This ties together the storage of several modules.
+pub fn make_transient_validation_data<T: paras::Trait>(
+	para_id: ParaId,
+) -> Option<TransientValidationData<T::BlockNumber>> {
+	let config = <configuration::Module<T>>::config();
+	let relay_parent_number = <frame_system::Module<T>>::block_number() - One::one();
+
+	let freq = config.validation_upgrade_frequency;
+	let delay = config.validation_upgrade_delay;
+
+	let last_code_upgrade = <paras::Module<T>>::last_code_upgrade(para_id, true);
+	let can_upgrade_code = last_code_upgrade.map_or(
+		true,
+		|l| { l <= relay_parent_number && relay_parent_number.saturating_sub(l) >= freq },
+	);
+
+	let code_upgrade_allowed = if can_upgrade_code {
+		Some(relay_parent_number + delay)
+	} else {
+		None
+	};
+
+	Some(TransientValidationData {
+		max_code_size: config.max_code_size,
+		max_head_data_size: config.max_head_data_size,
+		balance: 0,
+		code_upgrade_allowed,
+	})
+}
diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml
index ae3a7e05b8ace41fbaf1a3c0e86593b0eb21c230..42e679a87700da5d068542a2800e6f4caf25a99c 100644
--- a/runtime/polkadot/Cargo.toml
+++ b/runtime/polkadot/Cargo.toml
@@ -1,73 +1,73 @@
 [package]
 name = "polkadot-runtime"
-version = "0.8.12"
+version = "0.8.22"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 build = "build.rs"
 
 [dependencies]
 bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] }
-codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
 log = { version = "0.3.9", optional = true }
 rustc-hex = { version = "2.0.1", default-features = false }
 serde = { version = "1.0.102", default-features = false }
 serde_derive = { version = "1.0.102", optional = true }
 static_assertions = "1.1.0"
-smallvec = "1.4.0"
+smallvec = "1.4.1"
 
 authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-version = { package = "sp-version", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 tx-pool-api = { package = "sp-transaction-pool", git =
"https://github.com/paritytech/substrate", branch = "master", default-features = false } block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -authority-discovery = { package = "pallet-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -authorship = { package = "pallet-authorship", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -babe = { package = "pallet-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -balances = { package = "pallet-balances", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -transaction-payment = { package = "pallet-transaction-payment", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -transaction-payment-rpc-runtime-api = { package = "pallet-transaction-payment-rpc-runtime-api", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -collective = { package = "pallet-collective", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -democracy = { package = "pallet-democracy", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -elections-phragmen = { package = "pallet-elections-phragmen", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -executive = { package = "frame-executive", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -finality-tracker = { package = "pallet-finality-tracker", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -grandpa = { package = "pallet-grandpa", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -identity = { package = "pallet-identity", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -im-online = { package = "pallet-im-online", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -indices = { package = "pallet-indices", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -membership = { package = "pallet-membership", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -multisig = { package = "pallet-multisig", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -nicks = { package = "pallet-nicks", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offences = { package = "pallet-offences", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -proxy = { package = "pallet-proxy", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -randomness-collective-flip = { package = "pallet-randomness-collective-flip", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -scheduler = { package = "pallet-scheduler", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -session = { package = "pallet-session", git = "https://github.com/paritytech/substrate", branch = "master", 
default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-finality-tracker = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -staking = { package = "pallet-staking", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "master" } -system = { package = "frame-system", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -system_rpc_runtime_api = { package = "frame-system-rpc-runtime-api", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -timestamp = { 
package = "pallet-timestamp", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -treasury = { package = "pallet-treasury", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sudo = { package = "pallet-sudo", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -utility = { package = "pallet-utility", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -vesting = { package = "pallet-vesting", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } pallet-session-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -hex-literal = { version = "0.2.1", optional = true } +hex-literal = { version = "0.2.1" } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } @@ -78,7 +78,7 @@ libsecp256k1 = "0.3.2" tiny-keccak = "1.5.0" keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } -trie-db = "0.20.0" +trie-db = "0.22.0" serde_json = "1.0.41" [build-dependencies] @@ -90,7 +90,7 @@ no_std = [] only-staking = [] std = [ "authority-discovery-primitives/std", - "authority-discovery/std", + "pallet-authority-discovery/std", "bitvec/std", "primitives/std", "rustc-hex/std", @@ -103,64 +103,70 @@ std = [ "offchain-primitives/std", "sp-std/std", "frame-support/std", - "authorship/std", - "balances/std", - "transaction-payment/std", - "transaction-payment-rpc-runtime-api/std", - "collective/std", - "elections-phragmen/std", - "democracy/std", - "executive/std", - "finality-tracker/std", - "grandpa/std", - "identity/std", - "im-online/std", - "indices/std", - "membership/std", - "multisig/std", - "nicks/std", - "offences/std", - "proxy/std", + "pallet-authorship/std", + "pallet-balances/std", + "pallet-transaction-payment/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-collective/std", + "pallet-elections-phragmen/std", + "pallet-democracy/std", + "frame-executive/std", + "pallet-finality-tracker/std", + "pallet-grandpa/std", 
+ "pallet-identity/std", + "pallet-im-online/std", + "pallet-indices/std", + "pallet-membership/std", + "pallet-multisig/std", + "pallet-nicks/std", + "pallet-offences/std", + "pallet-proxy/std", "sp-runtime/std", "sp-staking/std", - "scheduler/std", - "session/std", - "staking/std", - "system/std", - "system_rpc_runtime_api/std", - "timestamp/std", - "treasury/std", - "version/std", + "pallet-scheduler/std", + "pallet-session/std", + "pallet-staking/std", + "frame-system/std", + "frame-system-rpc-runtime-api/std", + "pallet-timestamp/std", + "pallet-treasury/std", + "sp-version/std", "serde_derive", "serde/std", "log", - "babe/std", + "pallet-babe/std", "babe-primitives/std", "sp-session/std", - "randomness-collective-flip/std", + "pallet-randomness-collective-flip/std", "runtime-common/std", - "sudo/std", - "vesting/std", - "utility/std", + "pallet-vesting/std", + "pallet-utility/std", ] runtime-benchmarks = [ "runtime-common/runtime-benchmarks", "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system-benchmarking", - "system/runtime-benchmarks", + "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "balances/runtime-benchmarks", - "collective/runtime-benchmarks", - "democracy/runtime-benchmarks", - "elections-phragmen/runtime-benchmarks", - "im-online/runtime-benchmarks", - "scheduler/runtime-benchmarks", - "staking/runtime-benchmarks", - "timestamp/runtime-benchmarks", - "treasury/runtime-benchmarks", - "vesting/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collective/runtime-benchmarks", + "pallet-democracy/runtime-benchmarks", + "pallet-elections-phragmen/runtime-benchmarks", + "pallet-im-online/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", + "pallet-vesting/runtime-benchmarks", "pallet-offences-benchmarking", "pallet-session-benchmarking", - "hex-literal", + # renable when optional + # "hex-literal", ] +# When enabled, the runtime api will not be build. +# +# This is required by Cumulus to access certain types of the +# runtime without clashing with the runtime api exported functions +# in WASM. +disable-runtime-api = [] diff --git a/runtime/polkadot/build.rs b/runtime/polkadot/build.rs index 4ad34b2b5293d25d67ef971531a3963080489e96..f65f04914e538504463cacf744926cb97d0fd574 100644 --- a/runtime/polkadot/build.rs +++ b/runtime/polkadot/build.rs @@ -19,7 +19,7 @@ use wasm_builder_runner::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("1.0.11") + .with_wasm_builder_from_crates("2.0.0") .import_memory() .export_heap_base() .build() diff --git a/runtime/polkadot/src/constants.rs b/runtime/polkadot/src/constants.rs index 331d97f364cf52ac7597c571d5f7e93ba661c0c1..f784e9fca1ac43e3fb3c0864df2dd4876bf6a61a 100644 --- a/runtime/polkadot/src/constants.rs +++ b/runtime/polkadot/src/constants.rs @@ -16,7 +16,7 @@ /// Money matters. pub mod currency { - use primitives::Balance; + use primitives::v0::Balance; pub const DOTS: Balance = 1_000_000_000_000; pub const DOLLARS: Balance = DOTS / 100; // 10_000_000_000 @@ -30,7 +30,7 @@ pub mod currency { /// Time and blocks. 
pub mod time { - use primitives::{Moment, BlockNumber}; + use primitives::v0::{Moment, BlockNumber}; pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 4 * HOURS; @@ -47,7 +47,7 @@ pub mod time { /// Fee-related. pub mod fee { pub use sp_runtime::Perbill; - use primitives::Balance; + use primitives::v0::Balance; use runtime_common::ExtrinsicBaseWeight; use frame_support::weights::{ WeightToFeePolynomial, WeightToFeeCoefficient, WeightToFeeCoefficients, @@ -61,7 +61,7 @@ pub mod fee { /// node's balance type. /// /// This should typically create a mapping between the following ranges: - /// - [0, system::MaximumBlockWeight] + /// - [0, frame_system::MaximumBlockWeight] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 38ca6527facf77ec35c4531fecdc54466a26e650..6db036ab566645c672b1b81fc0f702b7ab428b25 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -21,66 +21,61 @@ #![recursion_limit="256"] use runtime_common::{ - attestations, claims, parachains, registrar, slots, SlowAdjustingFeeUpdate, + dummy, claims, SlowAdjustingFeeUpdate, impls::{CurrencyToVoteHandler, ToAuthor}, NegativeImbalance, BlockHashCount, MaximumBlockWeight, AvailableBlockRatio, MaximumBlockLength, BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, - MaximumExtrinsicWeight, + MaximumExtrinsicWeight, purchase, ParachainSessionKeyPlaceholder, }; use sp_std::prelude::*; use sp_core::u32_trait::{_1, _2, _3, _4, _5}; use codec::{Encode, Decode}; -use primitives::{ +use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, Hash, Nonce, Signature, Moment, - parachain::{self, ActiveParas, AbridgedCandidateReceipt, SigningContext}, -}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, ModuleId, - ApplyExtrinsicResult, KeyTypeId, Percent, Permill, Perbill, - transaction_validity::{ - TransactionValidity, TransactionSource, TransactionPriority, - }, - curve::PiecewiseLinear, - traits::{ - BlakeTwo256, Block as BlockT, OpaqueKeys, ConvertInto, IdentityLookup, - Extrinsic as ExtrinsicT, SaturatedConversion, Verify, - }, }; +use primitives::v0 as p_v0; +use sp_runtime::{create_runtime_str, generic, impl_opaque_keys, ModuleId, ApplyExtrinsicResult, KeyTypeId, Percent, Permill, Perbill, transaction_validity::{ + TransactionValidity, TransactionSource, TransactionPriority, +}, curve::PiecewiseLinear, traits::{ + BlakeTwo256, Block as BlockT, OpaqueKeys, ConvertInto, IdentityLookup, + Extrinsic as ExtrinsicT, SaturatedConversion, Verify, +}}; #[cfg(feature = "runtime-benchmarks")] use sp_runtime::RuntimeString; -use version::RuntimeVersion; -use grandpa::{AuthorityId as GrandpaId, fg_primitives}; +use sp_version::RuntimeVersion; +use pallet_grandpa::{AuthorityId as GrandpaId, fg_primitives}; #[cfg(any(feature = "std", test))] -use version::NativeVersion; +use sp_version::NativeVersion; use sp_core::OpaqueMetadata; use sp_staking::SessionIndex; use frame_support::{ - parameter_types, construct_runtime, debug, RuntimeDebug, + parameter_types, ord_parameter_types, construct_runtime, debug, RuntimeDebug, traits::{KeyOwnerProofSystem, SplitTwoWays, Randomness, LockIdentifier, Filter}, weights::Weight, }; -use system::{EnsureRoot, EnsureOneOf}; -use im_online::sr25519::AuthorityId as ImOnlineId; +use frame_system::{EnsureRoot, 
EnsureOneOf, EnsureSignedBy}; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; -use session::historical as session_historical; +use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; +use pallet_session::historical as session_historical; use static_assertions::const_assert; #[cfg(feature = "std")] -pub use staking::StakerStatus; +pub use pallet_staking::StakerStatus; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; -pub use timestamp::Call as TimestampCall; -pub use balances::Call as BalancesCall; -pub use attestations::{Call as AttestationsCall, MORE_ATTESTATIONS_IDENTIFIER}; -pub use parachains::Call as ParachainsCall; +pub use pallet_timestamp::Call as TimestampCall; +pub use pallet_balances::Call as BalancesCall; /// Constant values used within the runtime. pub mod constants; use constants::{time::*, currency::*, fee::*}; use frame_support::traits::InstanceFilter; +// Weights used in the runtime. +mod weights; + // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); @@ -91,10 +86,13 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("polkadot"), impl_name: create_runtime_str!("parity-polkadot"), authoring_version: 0, - spec_version: 12, + spec_version: 23, impl_version: 0, + #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, - transaction_version: 0, + #[cfg(feature = "disable-runtime-api")] + apis: version::create_apis_vec![[]], + transaction_version: 5, }; /// Native version. @@ -110,26 +108,21 @@ pub struct BaseFilter; impl Filter for BaseFilter { fn filter(call: &Call) -> bool { match call { - Call::Parachains(parachains::Call::set_heads(..)) => true, - - // Governance stuff - Call::Democracy(_) | Call::Council(_) | Call::TechnicalCommittee(_) | - Call::ElectionsPhragmen(_) | Call::TechnicalMembership(_) | Call::Treasury(_) | // Parachains stuff - Call::Parachains(_) | Call::Attestations(_) | Call::Slots(_) | Call::Registrar(_) | - // Balances and Vesting's transfer (which can be used to transfer) - Call::Balances(_) | Call::Vesting(vesting::Call::vested_transfer(..)) | - Call::Indices(indices::Call::transfer(..)) => + Call::DummyParachains(_) | Call::DummyAttestations(_) | Call::DummySlots(_) | Call::DummyRegistrar(_) => false, // These modules are all allowed to be called by transactions: + Call::Democracy(_) | Call::Council(_) | Call::TechnicalCommittee(_) | + Call::TechnicalMembership(_) | Call::Treasury(_) | Call::ElectionsPhragmen(_) | Call::System(_) | Call::Scheduler(_) | Call::Indices(_) | - Call::Babe(_) | Call::Timestamp(_) | + Call::Babe(_) | Call::Timestamp(_) | Call::Balances(_) | Call::Authorship(_) | Call::Staking(_) | Call::Offences(_) | Call::Session(_) | Call::FinalityTracker(_) | Call::Grandpa(_) | Call::ImOnline(_) | Call::AuthorityDiscovery(_) | - Call::Utility(_) | Call::Claims(_) | Call::Vesting(_) | Call::Sudo(_) | - Call::Identity(_) | Call::Proxy(_) | Call::Multisig(_) => + Call::Utility(_) | Call::Claims(_) | Call::Vesting(_) | + Call::Identity(_) | Call::Proxy(_) | Call::Multisig(_) | + Call::Purchase(_) => true, } } @@ -138,14 +131,14 @@ impl Filter for BaseFilter { type MoreThanHalfCouncil = EnsureOneOf< AccountId, EnsureRoot, - collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> + pallet_collective::EnsureProportionMoreThan<_1, _2, 
AccountId, CouncilCollective> >; parameter_types! { pub const Version: RuntimeVersion = VERSION; } -impl system::Trait for Runtime { +impl frame_system::Trait for Runtime { type BaseCallFilter = BaseFilter; type Origin = Origin; type Call = Call; @@ -167,16 +160,20 @@ impl system::Trait for Runtime { type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type ModuleToIndex = ModuleToIndex; - type AccountData = balances::AccountData; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = weights::frame_system::WeightInfo; } -impl scheduler::Trait for Runtime { +impl pallet_scheduler::Trait for Runtime { type Event = Event; type Origin = Origin; + type PalletsOrigin = OriginCaller; type Call = Call; type MaximumWeight = MaximumBlockWeight; + type ScheduleOrigin = EnsureRoot; + type WeightInfo = (); } parameter_types! { @@ -184,23 +181,39 @@ parameter_types! { pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl babe::Trait for Runtime { +impl pallet_babe::Trait for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; // session module is the trigger - type EpochChangeTrigger = babe::ExternalTrigger; + type EpochChangeTrigger = pallet_babe::ExternalTrigger; + + type KeyOwnerProofSystem = Historical; + + type KeyOwnerProof = >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; + + type HandleEquivocation = + pallet_babe::EquivocationHandler; } parameter_types! { pub const IndexDeposit: Balance = 10 * DOLLARS; } -impl indices::Trait for Runtime { +impl pallet_indices::Trait for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; type Event = Event; + type WeightInfo = (); } parameter_types! { @@ -215,20 +228,20 @@ pub type DealWithFees = SplitTwoWays< _1, ToAuthor, // 1 part (20%) goes to the block author. >; -impl balances::Trait for Runtime { +impl pallet_balances::Trait for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = weights::pallet_balances::WeightInfo; } parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; } - -impl transaction_payment::Trait for Runtime { +impl pallet_transaction_payment::Trait for Runtime { type Currency = Balances; type OnTransactionPayment = DealWithFees; type TransactionByteFee = TransactionByteFee; @@ -239,10 +252,11 @@ impl transaction_payment::Trait for Runtime { parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl timestamp::Trait for Runtime { +impl pallet_timestamp::Trait for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; + type WeightInfo = weights::pallet_timestamp::WeightInfo; } parameter_types! { @@ -250,8 +264,8 @@ parameter_types! { } // TODO: substrate#2986 implement this properly -impl authorship::Trait for Runtime { - type FindAuthor = session::FindAccountFromAuthorIndex; +impl pallet_authorship::Trait for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); type EventHandler = (Staking, ImOnline); @@ -262,7 +276,7 @@ impl_opaque_keys! 
{ pub grandpa: Grandpa, pub babe: Babe, pub im_online: ImOnline, - pub parachain_validator: Parachains, + pub parachain_validator: ParachainSessionKeyPlaceholder, pub authority_discovery: AuthorityDiscovery, } } @@ -271,21 +285,22 @@ parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl session::Trait for Runtime { +impl pallet_session::Trait for Runtime { type Event = Event; type ValidatorId = AccountId; - type ValidatorIdOf = staking::StashOf; + type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; - type SessionManager = session::historical::NoteHistoricalRoot; + type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); } -impl session::historical::Trait for Runtime { - type FullIdentification = staking::Exposure; - type FullIdentificationOf = staking::ExposureOf; +impl pallet_session::historical::Trait for Runtime { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } // TODO #6469: This shouldn't be static, but a lazily cached value, not built unless needed, and @@ -308,11 +323,11 @@ parameter_types! { // Six sessions in an era (24 hours). pub const SessionsPerEra: SessionIndex = 6; // 28 eras for unbonding (28 days). - pub const BondingDuration: staking::EraIndex = 28; - pub const SlashDeferDuration: staking::EraIndex = 28; + pub const BondingDuration: pallet_staking::EraIndex = 28; + pub const SlashDeferDuration: pallet_staking::EraIndex = 27; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 64; - // quarter of the last session will be for election. + // last 15 minutes of the last session will be for election. pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 16; pub const MaxIterations: u32 = 10; pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); @@ -321,10 +336,10 @@ parameter_types! { type SlashCancelOrigin = EnsureOneOf< AccountId, EnsureRoot, - collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective> >; -impl staking::Trait for Runtime { +impl pallet_staking::Trait for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVoteHandler; @@ -346,6 +361,7 @@ impl staking::Trait for Runtime { type UnsignedPriority = StakingUnsignedPriority; type MaxIterations = MaxIterations; type MinSolutionScoreBump = MinSolutionScoreBump; + type WeightInfo = (); } parameter_types! { @@ -358,7 +374,7 @@ parameter_types! { pub const MaxRegistrars: u32 = 20; } -impl identity::Trait for Runtime { +impl pallet_identity::Trait for Runtime { type Event = Event; type Currency = Balances; type BasicDeposit = BasicDeposit; @@ -370,6 +386,7 @@ impl identity::Trait for Runtime { type Slashed = Treasury; type ForceOrigin = MoreThanHalfCouncil; type RegistrarOrigin = MoreThanHalfCouncil; + type WeightInfo = (); } parameter_types! { @@ -377,7 +394,7 @@ parameter_types! 
{ pub const VotingPeriod: BlockNumber = 28 * DAYS; pub const FastTrackVotingPeriod: BlockNumber = 3 * HOURS; pub const MinimumDeposit: Balance = 100 * DOLLARS; - pub const EnactmentPeriod: BlockNumber = 8 * DAYS; + pub const EnactmentPeriod: BlockNumber = 28 * DAYS; pub const CooloffPeriod: BlockNumber = 7 * DAYS; // One cent: $10,000 / MB pub const PreimageByteDeposit: Balance = 1 * CENTS; @@ -385,7 +402,7 @@ parameter_types! { pub const MaxVotes: u32 = 100; } -impl democracy::Trait for Runtime { +impl pallet_democracy::Trait for Runtime { type Proposal = Call; type Event = Event; type Currency = Balances; @@ -394,29 +411,49 @@ impl democracy::Trait for Runtime { type VotingPeriod = VotingPeriod; type MinimumDeposit = MinimumDeposit; /// A straight majority of the council can decide what their next motion is. - type ExternalOrigin = collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; + type ExternalOrigin = frame_system::EnsureOneOf, + frame_system::EnsureRoot, + >; /// A 60% super-majority can have the next scheduled referendum be a straight majority-carries vote. - type ExternalMajorityOrigin = collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective>; + type ExternalMajorityOrigin = frame_system::EnsureOneOf, + frame_system::EnsureRoot, + >; /// A unanimous council can have the next scheduled referendum be a straight default-carries /// (NTB) vote. - type ExternalDefaultOrigin = collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; + type ExternalDefaultOrigin = frame_system::EnsureOneOf, + frame_system::EnsureRoot, + >; /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote /// be tabled immediately and with a shorter voting/enactment period. - type FastTrackOrigin = collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; - type InstantOrigin = collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; + type FastTrackOrigin = frame_system::EnsureOneOf, + frame_system::EnsureRoot, + >; + type InstantOrigin = frame_system::EnsureOneOf, + frame_system::EnsureRoot, + >; type InstantAllowed = InstantAllowed; type FastTrackVotingPeriod = FastTrackVotingPeriod; // To cancel a proposal which has been passed, 2/3 of the council must agree to it. - type CancellationOrigin = collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; + type CancellationOrigin = frame_system::EnsureOneOf, + frame_system::EnsureRoot, + >; // Any single technical committee member may veto a coming council proposal, however they can // only do it once and it lasts only for the cooloff period. - type VetoOrigin = collective::EnsureMember; + type VetoOrigin = pallet_collective::EnsureMember; type CooloffPeriod = CooloffPeriod; type PreimageByteDeposit = PreimageByteDeposit; - type OperationalPreimageOrigin = collective::EnsureMember; + type OperationalPreimageOrigin = pallet_collective::EnsureMember; type Slash = Treasury; type Scheduler = Scheduler; + type PalletsOrigin = OriginCaller; type MaxVotes = MaxVotes; + type WeightInfo = weights::pallet_democracy::WeightInfo; } parameter_types! { @@ -424,19 +461,20 @@ parameter_types! 
{ pub const CouncilMaxProposals: u32 = 100; } -type CouncilCollective = collective::Instance1; -impl collective::Trait for Runtime { +type CouncilCollective = pallet_collective::Instance1; +impl pallet_collective::Trait for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; type MotionDuration = CouncilMotionDuration; type MaxProposals = CouncilMaxProposals; + type WeightInfo = (); } parameter_types! { pub const CandidacyBond: Balance = 100 * DOLLARS; pub const VotingBond: Balance = 5 * DOLLARS; - /// Weekly council elections initially, later monthly. + /// Weekly council elections; scaling up to monthly eventually. pub const TermDuration: BlockNumber = 7 * DAYS; /// 13 members initially, to be increased to 23 eventually. pub const DesiredMembers: u32 = 13; @@ -444,9 +482,9 @@ parameter_types! { pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect"; } // Make sure that there are no more than MAX_MEMBERS members elected via phragmen. -const_assert!(DesiredMembers::get() <= collective::MAX_MEMBERS); +const_assert!(DesiredMembers::get() <= pallet_collective::MAX_MEMBERS); -impl elections_phragmen::Trait for Runtime { +impl pallet_elections_phragmen::Trait for Runtime { type Event = Event; type ModuleId = ElectionsPhragmenModuleId; type Currency = Balances; @@ -461,6 +499,7 @@ impl elections_phragmen::Trait for Runtime { type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type TermDuration = TermDuration; + type WeightInfo = (); } parameter_types! { @@ -468,16 +507,17 @@ parameter_types! { pub const TechnicalMaxProposals: u32 = 100; } -type TechnicalCollective = collective::Instance2; -impl collective::Trait for Runtime { +type TechnicalCollective = pallet_collective::Instance2; +impl pallet_collective::Trait for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; type MotionDuration = TechnicalMotionDuration; type MaxProposals = TechnicalMaxProposals; + type WeightInfo = (); } -impl membership::Trait for Runtime { +impl pallet_membership::Trait for Runtime { type Event = Event; type AddOrigin = MoreThanHalfCouncil; type RemoveOrigin = MoreThanHalfCouncil; @@ -504,10 +544,10 @@ parameter_types! { type ApproveOrigin = EnsureOneOf< AccountId, EnsureRoot, - collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> >; -impl treasury::Trait for Runtime { +impl pallet_treasury::Trait for Runtime { type ModuleId = TreasuryModuleId; type Currency = Balances; type ApproveOrigin = ApproveOrigin; @@ -523,20 +563,23 @@ impl treasury::Trait for Runtime { type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; + type BurnDestination = (); + type WeightInfo = (); } parameter_types! { pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); } -impl offences::Trait for Runtime { +impl pallet_offences::Trait for Runtime { type Event = Event; - type IdentificationTuple = session::historical::IdentificationTuple; + type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); } -impl authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Trait for Runtime {} parameter_types! { pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _; @@ -547,20 +590,21 @@ parameter_types! 
{ pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); } -impl im_online::Trait for Runtime { +impl pallet_im_online::Trait for Runtime { type AuthorityId = ImOnlineId; type Event = Event; type SessionDuration = SessionDuration; type ReportUnresponsiveness = Offences; type UnsignedPriority = ImOnlineUnsignedPriority; + type WeightInfo = (); } -impl grandpa::Trait for Runtime { +impl pallet_grandpa::Trait for Runtime { type Event = Event; type Call = Call; type KeyOwnerProof = - >::Proof; + >::Proof; type KeyOwnerIdentification = ; + type HandleEquivocation = pallet_grandpa::EquivocationHandler; } parameter_types! { - pub WindowSize: BlockNumber = finality_tracker::DEFAULT_WINDOW_SIZE.into(); - pub ReportLatency: BlockNumber = finality_tracker::DEFAULT_REPORT_LATENCY.into(); + pub WindowSize: BlockNumber = pallet_finality_tracker::DEFAULT_WINDOW_SIZE.into(); + pub ReportLatency: BlockNumber = pallet_finality_tracker::DEFAULT_REPORT_LATENCY.into(); } -impl finality_tracker::Trait for Runtime { +impl pallet_finality_tracker::Trait for Runtime { type OnFinalizationStalled = (); type WindowSize = WindowSize; type ReportLatency = ReportLatency; } -parameter_types! { - pub const AttestationPeriod: BlockNumber = 50; -} - -impl attestations::Trait for Runtime { - type AttestationPeriod = AttestationPeriod; - type ValidatorIdentities = parachains::ValidatorIdentities; - type RewardAttestation = Staking; -} - -parameter_types! { - pub const MaxCodeSize: u32 = 10 * 1024 * 1024; // 10 MB - pub const MaxHeadDataSize: u32 = 20 * 1024; // 20 KB - - pub const ValidationUpgradeFrequency: BlockNumber = 7 * DAYS; - pub const ValidationUpgradeDelay: BlockNumber = 1 * DAYS; - pub const SlashPeriod: BlockNumber = 28 * DAYS; -} - -impl parachains::Trait for Runtime { - type AuthorityId = primitives::fisherman::FishermanAppCrypto; - type Origin = Origin; - type Call = Call; - type ParachainCurrency = Balances; - type BlockNumberConversion = sp_runtime::traits::Identity; - type Randomness = RandomnessCollectiveFlip; - type ActiveParachains = Registrar; - type Registrar = Registrar; - type MaxCodeSize = MaxCodeSize; - type MaxHeadDataSize = MaxHeadDataSize; - - type ValidationUpgradeFrequency = ValidationUpgradeFrequency; - type ValidationUpgradeDelay = ValidationUpgradeDelay; - type SlashPeriod = SlashPeriod; - - type Proof = sp_session::MembershipProof; - type KeyOwnerProofSystem = session::historical::Module; - type IdentificationTuple = )>>::IdentificationTuple; - type ReportOffence = Offences; - type BlockHashConversion = sp_runtime::traits::Identity; -} - /// Submits a transaction with the node's public and signature type. Adheres to the signed extension /// format of the chain. -impl system::offchain::CreateSignedTransaction for Runtime where +impl frame_system::offchain::CreateSignedTransaction for Runtime where Call: From, { - fn create_transaction>( + fn create_transaction>( call: Call, public: ::Signer, account: AccountId, - nonce: ::Index, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { // take the biggest period possible. 
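 		// (A worked example with assumed values, not taken from this runtime:
 		// `generic::Era::mortal(period, current_block)` below normalizes `period`
 		// to a power of two in [4, 65536], so period = 2048 and current_block =
 		// 10_000 yield a transaction valid for roughly 2048 blocks from its
 		// birth block.)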
let period = BlockHashCount::get() @@ -654,16 +651,13 @@ impl system::offchain::CreateSignedTransaction for Runtime .saturating_sub(1); let tip = 0; let extra: SignedExtra = ( - system::CheckSpecVersion::::new(), - system::CheckTxVersion::::new(), - system::CheckGenesis::::new(), - system::CheckMortality::::from(generic::Era::mortal(period, current_block)), - system::CheckNonce::::from(nonce), - system::CheckWeight::::new(), - transaction_payment::ChargeTransactionPayment::::from(tip), - registrar::LimitParathreadCommits::::new(), - parachains::ValidateDoubleVoteReports::::new(), - grandpa::ValidateEquivocationReport::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckMortality::::from(generic::Era::mortal(period, current_block)), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), claims::PrevalidateAttests::::new(), ); let raw_payload = SignedPayload::new(call, extra).map_err(|e| { @@ -677,12 +671,12 @@ impl system::offchain::CreateSignedTransaction for Runtime } } -impl system::offchain::SigningTypes for Runtime { +impl frame_system::offchain::SigningTypes for Runtime { type Public = ::Signer; type Signature = Signature; } -impl system::offchain::SendTransactionTypes for Runtime where Call: From { +impl frame_system::offchain::SendTransactionTypes for Runtime where Call: From { type Extrinsic = UncheckedExtrinsic; type OverarchingCall = Call; } @@ -693,30 +687,6 @@ parameter_types! { pub const MaxRetries: u32 = 3; } -impl registrar::Trait for Runtime { - type Event = Event; - type Origin = Origin; - type Currency = Balances; - type ParathreadDeposit = ParathreadDeposit; - type SwapAux = Slots; - type QueueSize = QueueSize; - type MaxRetries = MaxRetries; -} - -parameter_types! { - pub const LeasePeriod: BlockNumber = 100_000; - pub const EndingPeriod: BlockNumber = 1000; -} - -impl slots::Trait for Runtime { - type Event = Event; - type Currency = Balances; - type Parachains = Registrar; - type EndingPeriod = EndingPeriod; - type LeasePeriod = LeasePeriod; - type Randomness = RandomnessCollectiveFlip; -} - parameter_types! { pub Prefix: &'static [u8] = b"Pay DOTs to the Polkadot account:"; } @@ -726,23 +696,25 @@ impl claims::Trait for Runtime { type VestingSchedule = Vesting; type Prefix = Prefix; /// At least 3/4 of the council must agree to a claim move before it can happen. - type MoveClaimOrigin = collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; + type MoveClaimOrigin = pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; } parameter_types! { pub const MinVestedTransfer: Balance = 100 * DOLLARS; } -impl vesting::Trait for Runtime { +impl pallet_vesting::Trait for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); } -impl utility::Trait for Runtime { +impl pallet_utility::Trait for Runtime { type Event = Event; type Call = Call; + type WeightInfo = weights::pallet_utility::WeightInfo; } parameter_types! { @@ -753,18 +725,14 @@ parameter_types! 
{ pub const MaxSignatories: u16 = 100; } -impl multisig::Trait for Runtime { +impl pallet_multisig::Trait for Runtime { type Event = Event; type Call = Call; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; -} - -impl sudo::Trait for Runtime { - type Event = Event; - type Call = Call; + type WeightInfo = (); } parameter_types! { @@ -773,17 +741,53 @@ parameter_types! { // Additional storage item size of 33 bytes. pub const ProxyDepositFactor: Balance = deposit(0, 33); pub const MaxProxies: u16 = 32; + pub const AnnouncementDepositBase: Balance = deposit(1, 8); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; } +impl dummy::Trait for Runtime { } + /// The type used to represent the kinds of proxying allowed. #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug)] pub enum ProxyType { - Any, - NonTransfer, - Governance, - Staking, - SudoBalances, + Any = 0, + NonTransfer = 1, + Governance = 2, + Staking = 3, + // Skip 4 as it is now removed (was SudoBalances) + IdentityJudgement = 5, +} + +#[cfg(test)] +mod proxt_type_tests { + use super::*; + + #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug)] + pub enum OldProxyType { + Any, + NonTransfer, + Governance, + Staking, + SudoBalances, + IdentityJudgement, + } + + #[test] + fn proxy_type_decodes_correctly() { + for (i, j) in vec![ + (OldProxyType::Any, ProxyType::Any), + (OldProxyType::NonTransfer, ProxyType::NonTransfer), + (OldProxyType::Governance, ProxyType::Governance), + (OldProxyType::Staking, ProxyType::Staking), + (OldProxyType::IdentityJudgement, ProxyType::IdentityJudgement), + ].into_iter() { + assert_eq!(i.encode(), j.encode()); + } + assert!(ProxyType::decode(&mut &OldProxyType::SudoBalances.encode()[..]).is_err()); + } } + impl Default for ProxyType { fn default() -> Self { Self::Any } } impl InstanceFilter for ProxyType { fn filter(&self, c: &Call) -> bool { @@ -794,9 +798,9 @@ impl InstanceFilter for ProxyType { Call::Scheduler(..) | Call::Babe(..) | Call::Timestamp(..) | - Call::Indices(indices::Call::claim(..)) | - Call::Indices(indices::Call::free(..)) | - Call::Indices(indices::Call::freeze(..)) | + Call::Indices(pallet_indices::Call::claim(..)) | + Call::Indices(pallet_indices::Call::free(..)) | + Call::Indices(pallet_indices::Call::freeze(..)) | // Specifically omitting Indices `transfer`, `force_transfer` // Specifically omitting the entire Balances pallet Call::Authorship(..) | @@ -813,35 +817,30 @@ impl InstanceFilter for ProxyType { Call::ElectionsPhragmen(..) | Call::TechnicalMembership(..) | Call::Treasury(..) | - Call::Parachains(..) | - Call::Attestations(..) | - Call::Slots(..) | - Call::Registrar(..) | + Call::DummyParachains(..) | + Call::DummyAttestations(..) | + Call::DummySlots(..) | + Call::DummyRegistrar(..) | Call::Claims(..) | - Call::Vesting(vesting::Call::vest(..)) | - Call::Vesting(vesting::Call::vest_other(..)) | + Call::Vesting(pallet_vesting::Call::vest(..)) | + Call::Vesting(pallet_vesting::Call::vest_other(..)) | // Specifically omitting Vesting `vested_transfer`, and `force_vested_transfer` Call::Utility(..) | - // Specifically omitting Sudo pallet Call::Identity(..) | Call::Proxy(..) | Call::Multisig(..) ), ProxyType::Governance => matches!(c, Call::Democracy(..) | Call::Council(..) | Call::TechnicalCommittee(..) - | Call::ElectionsPhragmen(..) | Call::Treasury(..) 
 impl Default for ProxyType { fn default() -> Self { Self::Any } }
 impl InstanceFilter<Call> for ProxyType {
 	fn filter(&self, c: &Call) -> bool {
@@ -794,9 +798,9 @@ impl InstanceFilter<Call> for ProxyType {
 				Call::Scheduler(..) |
 				Call::Babe(..) |
 				Call::Timestamp(..) |
-				Call::Indices(indices::Call::claim(..)) |
-				Call::Indices(indices::Call::free(..)) |
-				Call::Indices(indices::Call::freeze(..)) |
+				Call::Indices(pallet_indices::Call::claim(..)) |
+				Call::Indices(pallet_indices::Call::free(..)) |
+				Call::Indices(pallet_indices::Call::freeze(..)) |
 				// Specifically omitting Indices `transfer`, `force_transfer`
 				// Specifically omitting the entire Balances pallet
 				Call::Authorship(..) |
@@ -813,35 +817,30 @@ impl InstanceFilter<Call> for ProxyType {
 				Call::ElectionsPhragmen(..) |
 				Call::TechnicalMembership(..) |
 				Call::Treasury(..) |
-				Call::Parachains(..) |
-				Call::Attestations(..) |
-				Call::Slots(..) |
-				Call::Registrar(..) |
+				Call::DummyParachains(..) |
+				Call::DummyAttestations(..) |
+				Call::DummySlots(..) |
+				Call::DummyRegistrar(..) |
 				Call::Claims(..) |
-				Call::Vesting(vesting::Call::vest(..)) |
-				Call::Vesting(vesting::Call::vest_other(..)) |
+				Call::Vesting(pallet_vesting::Call::vest(..)) |
+				Call::Vesting(pallet_vesting::Call::vest_other(..)) |
 				// Specifically omitting Vesting `vested_transfer`, and `force_vested_transfer`
 				Call::Utility(..) |
-				// Specifically omitting Sudo pallet
 				Call::Identity(..) |
 				Call::Proxy(..) |
 				Call::Multisig(..)
 			),
 			ProxyType::Governance => matches!(c,
 				Call::Democracy(..) | Call::Council(..) | Call::TechnicalCommittee(..)
-					| Call::ElectionsPhragmen(..) | Call::Treasury(..)
-					| Call::Utility(utility::Call::batch(..))
-					| Call::Utility(utility::Call::as_limited_sub(..))
+					| Call::ElectionsPhragmen(..) | Call::Treasury(..) | Call::Utility(..)
 			),
 			ProxyType::Staking => matches!(c,
-				Call::Staking(..) | Call::Utility(utility::Call::batch(..))
-					| Call::Utility(utility::Call::as_limited_sub(..))
+				Call::Staking(..) | Call::Utility(pallet_utility::Call::batch(..)) | Call::Utility(..)
 			),
-			ProxyType::SudoBalances => match c {
-				Call::Sudo(sudo::Call::sudo(ref x)) => matches!(x.as_ref(), &Call::Balances(..)),
-				Call::Utility(utility::Call::batch(..)) => true,
-				_ => false,
-			},
+			ProxyType::IdentityJudgement => matches!(c,
+				Call::Identity(pallet_identity::Call::provide_judgement(..))
+					| Call::Utility(pallet_utility::Call::batch(..))
+			)
 		}
 	}
 	fn is_superset(&self, o: &Self) -> bool {
@@ -855,7 +854,7 @@ impl InstanceFilter<Call> for ProxyType {
 	}
 }
 
-impl proxy::Trait for Runtime {
+impl pallet_proxy::Trait for Runtime {
 	type Event = Event;
 	type Call = Call;
 	type Currency = Balances;
@@ -863,73 +862,222 @@ impl proxy::Trait for Runtime {
 	type ProxyDepositBase = ProxyDepositBase;
 	type ProxyDepositFactor = ProxyDepositFactor;
 	type MaxProxies = MaxProxies;
+	type WeightInfo = weights::pallet_proxy::WeightInfo;
+	type MaxPending = MaxPending;
+	type CallHasher = BlakeTwo256;
+	type AnnouncementDepositBase = AnnouncementDepositBase;
+	type AnnouncementDepositFactor = AnnouncementDepositFactor;
+}
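// Editor's sketch (an assumption about pallet_proxy's deposit pricing, not
// part of the diff): an announcement reserves the base deposit plus the
// per-announcement factor, using the constants configured above.
fn announcement_deposit(pending_announcements: u32) -> Balance {
	AnnouncementDepositBase::get()
		.saturating_add(AnnouncementDepositFactor::get().saturating_mul(pending_announcements as Balance))
}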
+pub struct CustomOnRuntimeUpgrade;
+impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade {
+	fn on_runtime_upgrade() -> frame_support::weights::Weight {
+		use frame_support::storage::{StorageMap, IterableStorageMap};
+		use pallet_democracy::{VotingOf, Conviction, Voting::Direct, AccountVote::Standard};
+		// Cancel convictions for Referendum Zero (for removing Sudo - this is something we would
+		// have done anyway).
+		for (who, mut voting) in VotingOf::<Runtime>::iter() {
+			if let Direct { ref mut votes, .. } = voting {
+				if let Some((0, Standard { ref mut vote, .. })) = votes.first_mut() {
+					vote.conviction = Conviction::None
+				}
+			}
+			VotingOf::<Runtime>::insert(who, voting);
+		}
+
+		<Runtime as frame_system::Trait>::MaximumBlockWeight::get()
+	}
+}
+
+#[test]
+fn test_rm_ref_0() {
+	use sp_runtime::AccountId32;
+	use frame_support::{traits::OnRuntimeUpgrade, storage::StorageMap};
+	use pallet_democracy::{VotingOf, Vote, Voting::{Direct, Delegating}, AccountVote::{Standard, Split}};
+	use pallet_democracy::Conviction::{Locked1x, Locked2x, Locked3x, None as NoConviction};
+	let t = frame_system::GenesisConfig::default().build_storage::<Runtime>().unwrap();
+	let mut ext = sp_io::TestExternalities::new(t);
+	ext.execute_with(|| {
+		let a = |i| AccountId32::from([i; 32]);
+		VotingOf::<Runtime>::insert(a(1), Direct {
+			votes: vec![(0, Standard {
+				vote: Vote { aye: true, conviction: Locked1x },
+				balance: 1,
+			})],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		VotingOf::<Runtime>::insert(a(2), Direct {
+			votes: vec![
+				(0, Standard { vote: Vote { aye: true, conviction: Locked2x }, balance: 2 }),
+				(1, Standard { vote: Vote { aye: true, conviction: Locked2x }, balance: 2 })
+			],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		VotingOf::<Runtime>::insert(a(3), Direct {
+			votes: vec![(1, Standard { vote: Vote { aye: true, conviction: Locked3x }, balance: 3 })],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		VotingOf::<Runtime>::insert(a(4), Direct {
+			votes: vec![],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		VotingOf::<Runtime>::insert(a(5), Delegating {
+			balance: 5,
+			target: a(0),
+			conviction: Locked1x,
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		VotingOf::<Runtime>::insert(a(6), Direct {
+			votes: vec![(0, Split { aye: 6, nay: 6 }), (1, Split { aye: 6, nay: 6 })],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		CustomOnRuntimeUpgrade::on_runtime_upgrade();
+		assert_eq!(VotingOf::<Runtime>::get(a(1)), Direct {
+			votes: vec![(0, Standard { vote: Vote { aye: true, conviction: NoConviction }, balance: 1, })],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		assert_eq!(VotingOf::<Runtime>::get(a(2)), Direct {
+			votes: vec![
+				(0, Standard { vote: Vote { aye: true, conviction: NoConviction }, balance: 2, }),
+				(1, Standard { vote: Vote { aye: true, conviction: Locked2x }, balance: 2, })
+			],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		assert_eq!(VotingOf::<Runtime>::get(a(3)), Direct {
+			votes: vec![(1, Standard { vote: Vote { aye: true, conviction: Locked3x }, balance: 3, })],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		assert_eq!(VotingOf::<Runtime>::get(a(4)), Direct {
+			votes: vec![],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		assert_eq!(VotingOf::<Runtime>::get(a(5)), Delegating {
+			balance: 5,
+			target: a(0),
+			conviction: Locked1x,
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+		assert_eq!(VotingOf::<Runtime>::get(a(6)), Direct {
+			votes: vec![(0, Split { aye: 6, nay: 6 }), (1, Split { aye: 6, nay: 6 })],
+			delegations: Default::default(),
+			prior: Default::default(),
+		});
+	});
+}
+
+parameter_types! {
+	pub const MaxStatementLength: usize = 1_000;
+	pub const UnlockedProportion: Permill = Permill::zero();
+	pub const MaxUnlocked: Balance = 0;
+}
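// Editor's sketch (illustrative arithmetic only): with UnlockedProportion at
// zero and MaxUnlocked at 0, no part of a purchased balance is liquid at claim
// time — everything flows through the vesting schedule configured above.
fn unlocked_at_claim(purchased: Balance) -> Balance {
	(UnlockedProportion::get() * purchased).min(MaxUnlocked::get())
}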
+ord_parameter_types! {
+	pub const W3FValidity: AccountId = AccountId::from(
+		// 142wAF65SK7PxhyzzrWz5m5PXDtooehgePBd7rc2NWpfc8Wa
+		hex_literal::hex!("862e432e0cf75693899c62691ac0f48967f815add97ae85659dcde8332708551")
+	);
+	pub const W3FConfiguration: AccountId = AccountId::from(
+		// 1KvKReVmUiTc2LW2a4qyHsaJJ9eE9LRsywZkMk5hyBeyHgw
+		hex_literal::hex!("0e6de68b13b82479fbe988ab9ecb16bad446b67b993cdd9198cd41c7c6259c49")
+	);
+}
+
+type ValidityOrigin = EnsureOneOf<
+	AccountId,
+	EnsureRoot<AccountId>,
+	EnsureSignedBy<W3FValidity, AccountId>,
+>;
+
+type ConfigurationOrigin = EnsureOneOf<
+	AccountId,
+	EnsureRoot<AccountId>,
+	EnsureSignedBy<W3FConfiguration, AccountId>,
+>;
+
+impl purchase::Trait for Runtime {
+	type Event = Event;
+	type Currency = Balances;
+	type VestingSchedule = Vesting;
+	type ValidityOrigin = ValidityOrigin;
+	type ConfigurationOrigin = ConfigurationOrigin;
+	type MaxStatementLength = MaxStatementLength;
+	type UnlockedProportion = UnlockedProportion;
+	type MaxUnlocked = MaxUnlocked;
+}
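// Editor's sketch (not part of the diff): what the EnsureOneOf compositions
// above admit — Root, or a signed transaction from the designated W3F account;
// any other signed origin is rejected.
#[test]
fn validity_origin_sketch() {
	use frame_support::traits::EnsureOrigin;
	assert!(ValidityOrigin::try_origin(Origin::root()).is_ok());
	assert!(ValidityOrigin::try_origin(Origin::signed(W3FValidity::get())).is_ok());
	assert!(ValidityOrigin::try_origin(Origin::signed(Default::default())).is_err());
}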
 construct_runtime! {
 	pub enum Runtime where
 		Block = Block,
-		NodeBlock = primitives::Block,
+		NodeBlock = primitives::v1::Block,
 		UncheckedExtrinsic = UncheckedExtrinsic
 	{
 		// Basic stuff; balances is uncallable initially.
-		System: system::{Module, Call, Storage, Config, Event<T>},
-		RandomnessCollectiveFlip: randomness_collective_flip::{Module, Storage},
-		Scheduler: scheduler::{Module, Call, Storage, Event<T>},
+		System: frame_system::{Module, Call, Storage, Config, Event<T>},
+		RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Storage},
+		Scheduler: pallet_scheduler::{Module, Call, Storage, Event<T>},
 
 		// Must be before session.
-		Babe: babe::{Module, Call, Storage, Config, Inherent(Timestamp)},
+		Babe: pallet_babe::{Module, Call, Storage, Config, Inherent, ValidateUnsigned},
 
-		Timestamp: timestamp::{Module, Call, Storage, Inherent},
-		Indices: indices::{Module, Call, Storage, Config<T>, Event<T>},
-		Balances: balances::{Module, Call, Storage, Config<T>, Event<T>},
-		TransactionPayment: transaction_payment::{Module, Storage},
+		Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent},
+		Indices: pallet_indices::{Module, Call, Storage, Config<T>, Event<T>},
+		Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
+		TransactionPayment: pallet_transaction_payment::{Module, Storage},
 
 		// Consensus support.
-		Authorship: authorship::{Module, Call, Storage},
-		Staking: staking::{Module, Call, Storage, Config<T>, Event<T>, ValidateUnsigned},
-		Offences: offences::{Module, Call, Storage, Event},
+		Authorship: pallet_authorship::{Module, Call, Storage},
+		Staking: pallet_staking::{Module, Call, Storage, Config<T>, Event<T>, ValidateUnsigned},
+		Offences: pallet_offences::{Module, Call, Storage, Event},
 		Historical: session_historical::{Module},
-		Session: session::{Module, Call, Storage, Event, Config<T>},
-		FinalityTracker: finality_tracker::{Module, Call, Storage, Inherent},
-		Grandpa: grandpa::{Module, Call, Storage, Config, Event},
-		ImOnline: im_online::{Module, Call, Storage, Event<T>, ValidateUnsigned, Config<T>},
-		AuthorityDiscovery: authority_discovery::{Module, Call, Config},
-
-		// Governance stuff; uncallable initially. Calls should be uncommented once we're ready to
-		// enable governance.
-		Democracy: democracy::{Module, Call, Storage, Config, Event<T>},
-		Council: collective::<Instance1>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
-		TechnicalCommittee: collective::<Instance2>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
-		ElectionsPhragmen: elections_phragmen::{Module, Call, Storage, Event<T>, Config<T>},
-		TechnicalMembership: membership::<Instance1>::{Module, Call, Storage, Event<T>, Config<T>},
-		Treasury: treasury::{Module, Call, Storage, Event<T>},
-
-		// Parachains stuff; slots are disabled (no auctions initially). The rest are safe as they
-		// have no public dispatchables. Disabled `Call` on all of them, but this should be
-		// uncommented once we're ready to start parachains.
-		Parachains: parachains::{Module, Call, Storage, Config, Inherent, Origin},
-		Attestations: attestations::{Module, Call, Storage},
-		Slots: slots::{Module, Call, Storage, Event<T>},
-		Registrar: registrar::{Module, Call, Storage, Event, Config<T>},
+		Session: pallet_session::{Module, Call, Storage, Event, Config<T>},
+		FinalityTracker: pallet_finality_tracker::{Module, Call, Storage, Inherent},
+		Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event, ValidateUnsigned},
+		ImOnline: pallet_im_online::{Module, Call, Storage, Event<T>, ValidateUnsigned, Config<T>},
+		AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config},
+
+		// Governance stuff.
+		Democracy: pallet_democracy::{Module, Call, Storage, Config, Event<T>},
+		Council: pallet_collective::<Instance1>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
+		TechnicalCommittee: pallet_collective::<Instance2>::{Module, Call, Storage, Origin<T>, Event<T>, Config<T>},
+		ElectionsPhragmen: pallet_elections_phragmen::{Module, Call, Storage, Event<T>, Config<T>},
+		TechnicalMembership: pallet_membership::<Instance1>::{Module, Call, Storage, Event<T>, Config<T>},
+		Treasury: pallet_treasury::{Module, Call, Storage, Event<T>},
+
+		// Old parachains stuff. All dummies to avoid messing up the transaction indices.
+		DummyParachains: dummy::<Instance1>::{Module, Call},
+		DummyAttestations: dummy::<Instance2>::{Module, Call},
+		DummySlots: dummy::<Instance3>::{Module, Call},
+		DummyRegistrar: dummy::<Instance4>::{Module, Call},
 
 		// Claims. Usable initially.
 		Claims: claims::{Module, Call, Storage, Event<T>, Config<T>, ValidateUnsigned},
 		// Vesting. Usable initially, but removed once all vesting is finished.
-		Vesting: vesting::{Module, Call, Storage, Event<T>, Config<T>},
+		Vesting: pallet_vesting::{Module, Call, Storage, Event<T>, Config<T>},
 		// Cunning utilities. Usable initially.
-		Utility: utility::{Module, Call, Event},
+		Utility: pallet_utility::{Module, Call, Event},
 
-		// Sudo. Last module. Usable initially, but removed once governance enabled.
-		Sudo: sudo::{Module, Call, Storage, Config<T>, Event<T>},
+		// DOT Purchase module. Late addition; this is in place of Sudo.
+		Purchase: purchase::{Module, Call, Storage, Event<T>},
 
 		// Identity. Late addition.
-		Identity: identity::{Module, Call, Storage, Event<T>},
+		Identity: pallet_identity::{Module, Call, Storage, Event<T>},
 
 		// Proxy module. Late addition.
-		Proxy: proxy::{Module, Call, Storage, Event<T>},
+		Proxy: pallet_proxy::{Module, Call, Storage, Event<T>},
 
 		// Multisig dispatch. Late addition.
-		Multisig: multisig::{Module, Call, Storage, Event<T>},
+		Multisig: pallet_multisig::{Module, Call, Storage, Event<T>},
 	}
 }
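// Editor's note (sketch, not part of the diff): the four Dummy* entries above
// exist because SCALE encodes an outer Call with a variant index assigned by
// declaration order in construct_runtime!; keeping placeholder Call slots for
// the removed pallets leaves every later pallet's index — and thus decoding of
// historical transactions — unchanged.
fn vesting_call_index_sketch() -> u8 {
	use codec::Encode;
	let call = Call::Vesting(pallet_vesting::Call::vest());
	// With the dummies in place this leading byte matches the index the
	// pre-upgrade runtime used for Vesting.
	call.encode()[0]
}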
@@ -945,16 +1093,13 @@
 pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 /// The SignedExtension to the basic transaction logic.
 pub type SignedExtra = (
-	system::CheckSpecVersion<Runtime>,
-	system::CheckTxVersion<Runtime>,
-	system::CheckGenesis<Runtime>,
-	system::CheckMortality<Runtime>,
-	system::CheckNonce<Runtime>,
-	system::CheckWeight<Runtime>,
-	transaction_payment::ChargeTransactionPayment<Runtime>,
-	registrar::LimitParathreadCommits<Runtime>,
-	parachains::ValidateDoubleVoteReports<Runtime>,
-	grandpa::ValidateEquivocationReport<Runtime>,
+	frame_system::CheckSpecVersion<Runtime>,
+	frame_system::CheckTxVersion<Runtime>,
+	frame_system::CheckGenesis<Runtime>,
+	frame_system::CheckMortality<Runtime>,
+	frame_system::CheckNonce<Runtime>,
+	frame_system::CheckWeight<Runtime>,
+	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
 	claims::PrevalidateAttests<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
@@ -962,10 +1107,18 @@
 pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
 /// Executive: handles dispatch to the various modules.
-pub type Executive = executive::Executive<Runtime, Block, system::ChainContext<Runtime>, Runtime, AllModules>;
+pub type Executive = frame_executive::Executive<
+	Runtime,
+	Block,
+	frame_system::ChainContext<Runtime>,
+	Runtime,
+	AllModules,
+	CustomOnRuntimeUpgrade
+>;
 /// The payload being signed in transactions.
 pub type SignedPayload = generic::SignedPayload<Call, SignedExtra>;
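// Editor's sketch (an assumption about frame_executive's behaviour, not stated
// in the diff): the new trailing Executive parameter is an OnRuntimeUpgrade
// hook that is executed alongside the modules' own upgrade hooks, contributing
// its returned weight to the upgrade block — roughly:
fn on_runtime_upgrade_weight_sketch() -> frame_support::weights::Weight {
	use frame_support::traits::OnRuntimeUpgrade;
	<CustomOnRuntimeUpgrade as OnRuntimeUpgrade>::on_runtime_upgrade()
		.saturating_add(<AllModules as OnRuntimeUpgrade>::on_runtime_upgrade())
}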
+#[cfg(not(feature = "disable-runtime-api"))]
 sp_api::impl_runtime_apis! {
 	impl sp_api::Core<Block> for Runtime {
 		fn version() -> RuntimeVersion {
@@ -1026,43 +1179,55 @@ sp_api::impl_runtime_apis! {
 			Executive::offchain_worker(header)
 		}
 	}
-
-	impl parachain::ParachainHost<Block> for Runtime {
-		fn validators() -> Vec<parachain::ValidatorId> {
-			Parachains::authorities()
+	// Dummy implementation to continue supporting old parachains runtime temporarily.
+	impl p_v0::ParachainHost<Block> for Runtime {
+		fn validators() -> Vec<p_v0::ValidatorId> {
+			// this is a compile-time check of size equality. note that we don't invoke
+			// the function and nothing here is unsafe.
+			let _ = core::mem::transmute::<p_v0::ValidatorId, AccountId>;
+
+			// Yes, these aren't actually the parachain session keys.
+			// It doesn't matter, but we shouldn't return a zero-sized vector here.
+			// As there are no parachains
+			Session::validators()
+				.into_iter()
+				.map(|k| k.using_encoded(|s| Decode::decode(&mut &s[..]))
+					.expect("correct size and raw-bytes; qed"))
+				.collect()
 		}
-		fn duty_roster() -> parachain::DutyRoster {
-			Parachains::calculate_duty_roster().0
+		fn duty_roster() -> p_v0::DutyRoster {
+			let v = Session::validators();
+			p_v0::DutyRoster { validator_duty: (0..v.len()).map(|_| p_v0::Chain::Relay).collect() }
 		}
-		fn active_parachains() -> Vec<(parachain::Id, Option<(parachain::CollatorId, parachain::Retriable)>)> {
-			Registrar::active_paras()
+		fn active_parachains() -> Vec<(p_v0::Id, Option<(p_v0::CollatorId, p_v0::Retriable)>)> {
+			Vec::new()
 		}
-		fn global_validation_schedule() -> parachain::GlobalValidationSchedule {
-			Parachains::global_validation_schedule()
+		fn global_validation_data() -> p_v0::GlobalValidationData {
+			p_v0::GlobalValidationData {
+				max_code_size: 1,
+				max_head_data_size: 1,
+				block_number: System::block_number().saturating_sub(1),
+			}
 		}
-		fn local_validation_data(id: parachain::Id) -> Option<parachain::LocalValidationData> {
-			Parachains::current_local_validation_data(&id)
+		fn local_validation_data(_id: p_v0::Id) -> Option<p_v0::LocalValidationData> {
+			None
 		}
-		fn parachain_code(id: parachain::Id) -> Option<parachain::ValidationCode> {
-			Parachains::parachain_code(&id)
+		fn parachain_code(_id: p_v0::Id) -> Option<p_v0::ValidationCode> {
+			None
 		}
-		fn get_heads(extrinsics: Vec<<Block as BlockT>::Extrinsic>)
-			-> Option<Vec<AbridgedCandidateReceipt>>
+		fn get_heads(_extrinsics: Vec<<Block as BlockT>::Extrinsic>)
+			-> Option<Vec<p_v0::AbridgedCandidateReceipt>>
 		{
-			extrinsics
-				.into_iter()
-				.find_map(|ex| match UncheckedExtrinsic::decode(&mut ex.encode().as_slice()) {
-					Ok(ex) => match ex.function {
-						Call::Parachains(ParachainsCall::set_heads(heads)) => {
-							Some(heads.into_iter().map(|c| c.candidate).collect())
-						}
-						_ => None,
-					}
-					Err(_) => None,
-				})
+			None
 		}
-		fn signing_context() -> SigningContext {
-			Parachains::signing_context()
+		fn signing_context() -> p_v0::SigningContext {
+			p_v0::SigningContext {
+				parent_hash: System::parent_hash(),
+				session_index: Session::current_index(),
+			}
+		}
+		fn downward_messages(_id: p_v0::Id) -> Vec<p_v0::DownwardMessage> {
+			Vec::new()
 		}
 	}
 
@@ -1071,7 +1236,7 @@ sp_api::impl_runtime_apis! {
 			Grandpa::grandpa_authorities()
 		}
 
-		fn submit_report_equivocation_extrinsic(
+		fn submit_report_equivocation_unsigned_extrinsic(
 			equivocation_proof: fg_primitives::EquivocationProof<
 				<Block as BlockT>::Hash,
 				sp_runtime::traits::NumberFor<Block>,
 			>,
@@ -1080,7 +1245,7 @@ sp_api::impl_runtime_apis! {
 		) -> Option<()> {
 			let key_owner_proof = key_owner_proof.decode()?;
 
-			Grandpa::submit_report_equivocation_extrinsic(
+			Grandpa::submit_unsigned_equivocation_report(
 				equivocation_proof,
 				key_owner_proof,
 			)
@@ -1118,6 +1283,29 @@ sp_api::impl_runtime_apis! {
 		fn current_epoch_start() -> babe_primitives::SlotNumber {
 			Babe::current_epoch_start()
 		}
+
+		fn generate_key_ownership_proof(
+			_slot_number: babe_primitives::SlotNumber,
+			authority_id: babe_primitives::AuthorityId,
+		) -> Option<babe_primitives::OpaqueKeyOwnershipProof> {
+			use codec::Encode;
+
+			Historical::prove((babe_primitives::KEY_TYPE, authority_id))
+				.map(|p| p.encode())
+				.map(babe_primitives::OpaqueKeyOwnershipProof::new)
+		}
+
+		fn submit_report_equivocation_unsigned_extrinsic(
+			equivocation_proof: babe_primitives::EquivocationProof<<Block as BlockT>::Header>,
+			key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof,
+		) -> Option<()> {
+			let key_owner_proof = key_owner_proof.decode()?;
+
+			Babe::submit_unsigned_equivocation_report(
+				equivocation_proof,
+				key_owner_proof,
+			)
+		}
 	}
 
 	impl authority_discovery_primitives::AuthorityDiscoveryApi<Block> for Runtime {
@@ -1138,18 +1326,17 @@ sp_api::impl_runtime_apis! {
 		}
 	}
 
-	impl system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
+	impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
 		fn account_nonce(account: AccountId) -> Nonce {
 			System::account_nonce(account)
 		}
 	}
 
-	impl transaction_payment_rpc_runtime_api::TransactionPaymentApi<
+	impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<
 		Block,
 		Balance,
-		UncheckedExtrinsic,
 	> for Runtime {
-		fn query_info(uxt: UncheckedExtrinsic, len: u32) -> RuntimeDispatchInfo<Balance> {
+		fn query_info(uxt: <Block as BlockT>::Extrinsic, len: u32) -> RuntimeDispatchInfo<Balance> {
 			TransactionPayment::query_info(uxt, len)
 		}
 	}
@@ -1157,14 +1344,9 @@ sp_api::impl_runtime_apis! {
 	#[cfg(feature = "runtime-benchmarks")]
 	impl frame_benchmarking::Benchmark<Block> for Runtime {
 		fn dispatch_benchmark(
-			pallet: Vec<u8>,
-			benchmark: Vec<u8>,
-			lowest_range_values: Vec<u32>,
-			highest_range_values: Vec<u32>,
-			steps: Vec<u32>,
-			repeat: u32,
+			config: frame_benchmarking::BenchmarkConfig
 		) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, RuntimeString> {
-			use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark};
+			use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};
 			// Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues.
 			// To get around that, we separated the Session benchmarks into its own crate, which is why
 			// we need these two lines below.
 			impl pallet_offences_benchmarking::Trait for Runtime {}
 			impl frame_system_benchmarking::Trait for Runtime {}
 
-			let whitelist: Vec<Vec<u8>> = vec![
+			let whitelist: Vec<TrackedStorageKey> = vec![
 				// Block Number
-				// frame_system::Number::<Runtime>::hashed_key().to_vec(),
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
 				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(),
+				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
 				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
 				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
 				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec(),
-				// Caller 0 Account
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
 				// Treasury Account
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec(),
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec().into(),
 			];
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
-			let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist);
+			let params = (&config, &whitelist);
 
 			// Polkadot
-
add_benchmark!(params, batches, b"claims", Claims); + add_benchmark!(params, batches, claims, Claims); // Substrate - add_benchmark!(params, batches, b"balances", Balances); - add_benchmark!(params, batches, b"collective", Council); - add_benchmark!(params, batches, b"democracy", Democracy); - add_benchmark!(params, batches, b"elections-phragmen", ElectionsPhragmen); - add_benchmark!(params, batches, b"im-online", ImOnline); - add_benchmark!(params, batches, b"offences", OffencesBench::); - add_benchmark!(params, batches, b"scheduler", Scheduler); - add_benchmark!(params, batches, b"session", SessionBench::); - add_benchmark!(params, batches, b"staking", Staking); - add_benchmark!(params, batches, b"system", SystemBench::); - add_benchmark!(params, batches, b"timestamp", Timestamp); - add_benchmark!(params, batches, b"treasury", Treasury); - add_benchmark!(params, batches, b"vesting", Vesting); + add_benchmark!(params, batches, pallet_balances, Balances); + add_benchmark!(params, batches, pallet_collective, Council); + add_benchmark!(params, batches, pallet_democracy, Democracy); + add_benchmark!(params, batches, pallet_elections_phragmen, ElectionsPhragmen); + add_benchmark!(params, batches, pallet_im_online, ImOnline); + add_benchmark!(params, batches, pallet_offences, OffencesBench::); + add_benchmark!(params, batches, pallet_scheduler, Scheduler); + add_benchmark!(params, batches, pallet_session, SessionBench::); + add_benchmark!(params, batches, pallet_staking, Staking); + add_benchmark!(params, batches, frame_system, SystemBench::); + add_benchmark!(params, batches, pallet_timestamp, Timestamp); + add_benchmark!(params, batches, pallet_treasury, Treasury); + add_benchmark!(params, batches, pallet_vesting, Vesting); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) diff --git a/runtime/polkadot/src/weights/frame_system.rs b/runtime/polkadot/src/weights/frame_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..9522fa75203906ab3c7264154a4b33835375843c --- /dev/null +++ b/runtime/polkadot/src/weights/frame_system.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl frame_system::WeightInfo for WeightInfo { + // WARNING! Some components were not used: ["b"] + fn remark() -> Weight { + (1305000 as Weight) + } + fn set_heap_pages() -> Weight { + (2023000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! 
Some components were not used: ["d"]
+	fn set_changes_trie_config() -> Weight {
+		(10026000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn set_storage(i: u32, ) -> Weight {
+		(0 as Weight)
+			.saturating_add((656000 as Weight).saturating_mul(i as Weight))
+			.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
+	}
+	fn kill_storage(i: u32, ) -> Weight {
+		(4327000 as Weight)
+			.saturating_add((478000 as Weight).saturating_mul(i as Weight))
+			.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
+	}
+	fn kill_prefix(p: u32, ) -> Weight {
+		(8349000 as Weight)
+			.saturating_add((838000 as Weight).saturating_mul(p as Weight))
+			.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight)))
+	}
+	fn suicide() -> Weight {
+		(29247000 as Weight)
+	}
+}
diff --git a/runtime/polkadot/src/weights/mod.rs b/runtime/polkadot/src/weights/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a54417c2d0de4b280952f03db51edd61c73ac29d
--- /dev/null
+++ b/runtime/polkadot/src/weights/mod.rs
@@ -0,0 +1,24 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.

+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.

+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.

+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

+/// A collection of weight modules used for pallets in the runtime.

+pub mod frame_system;
+pub mod pallet_balances;
+pub mod pallet_democracy;
+pub mod pallet_timestamp;
+pub mod pallet_utility;
+pub mod pallet_proxy;
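// Editor's sketch (not part of the generated files): how to read these
// figures — a flat base cost plus per-item execution time plus database
// writes priced by RocksDbWeight. For frame_system::kill_storage above with
// i = 2, and assuming the stock constants (25µs per read, 100µs per write,
// in 1ns weight units), the total is
//   4_327_000 + 2 * 478_000 + 2 * 100_000_000 = 205_283_000.
use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};

fn kill_storage_weight_for(i: u32) -> Weight {
	(4327000 as Weight)
		.saturating_add((478000 as Weight).saturating_mul(i as Weight))
		.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
}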
diff --git a/runtime/polkadot/src/weights/pallet_balances.rs b/runtime/polkadot/src/weights/pallet_balances.rs
new file mode 100644
index 0000000000000000000000000000000000000000..152f8cb8e981726052c2ce16b2087f54f1245556
--- /dev/null
+++ b/runtime/polkadot/src/weights/pallet_balances.rs
@@ -0,0 +1,47 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.

+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.

+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.

+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5

+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};
+pub struct WeightInfo;
+impl pallet_balances::WeightInfo for WeightInfo {
+	fn transfer() -> Weight {
+		(65949000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn transfer_keep_alive() -> Weight {
+		(46665000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn set_balance_creating() -> Weight {
+		(27086000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn set_balance_killing() -> Weight {
+		(33424000 as Weight)
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn force_transfer() -> Weight {
+		(65343000 as Weight)
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+}
diff --git a/runtime/polkadot/src/weights/pallet_democracy.rs b/runtime/polkadot/src/weights/pallet_democracy.rs
new file mode 100644
index 0000000000000000000000000000000000000000..676281309c3fac02e24e52dc74ba710fc33a4802
--- /dev/null
+++ b/runtime/polkadot/src/weights/pallet_democracy.rs
@@ -0,0 +1,156 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.

+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.

+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.

+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

+//! Weights for the Democracy Pallet
+//!
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_democracy::WeightInfo for WeightInfo { + fn propose() -> Weight { + (49113000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn second(s: u32, ) -> Weight { + (42067000 as Weight) + .saturating_add((220000 as Weight).saturating_mul(s as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn vote_new(r: u32, ) -> Weight { + (54159000 as Weight) + .saturating_add((252000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn vote_existing(r: u32, ) -> Weight { + (54145000 as Weight) + .saturating_add((262000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn emergency_cancel() -> Weight { + (31071000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn external_propose(v: u32, ) -> Weight { + (14282000 as Weight) + .saturating_add((109000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn external_propose_majority() -> Weight { + (3478000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn external_propose_default() -> Weight { + (3442000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn fast_track() -> Weight { + (30820000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(3 as Weight)) + } + fn veto_external(v: u32, ) -> Weight { + (30971000 as Weight) + .saturating_add((184000 as Weight).saturating_mul(v as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn cancel_referendum() -> Weight { + (20431000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn cancel_queued(r: u32, ) -> Weight { + (42438000 as Weight) + .saturating_add((3284000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn on_initialize_base(r: u32, ) -> Weight { + (70826000 as Weight) + .saturating_add((10716000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(6 as Weight)) + .saturating_add(DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) + .saturating_add(DbWeight::get().writes(5 as Weight)) + } + fn delegate(r: u32, ) -> Weight { + (72046000 as Weight) + .saturating_add((7837000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(4 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(DbWeight::get().writes(4 as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + } + fn undelegate(r: u32, ) -> Weight { + (41028000 as Weight) + .saturating_add((7810000 as Weight).saturating_mul(r as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r 
as Weight)))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+			.saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight)))
+	}
+	fn clear_public_proposals() -> Weight {
+		(3643000 as Weight)
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn note_preimage(b: u32, ) -> Weight {
+		(46629000 as Weight)
+			.saturating_add((4000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn note_imminent_preimage(b: u32, ) -> Weight {
+		(31147000 as Weight)
+			.saturating_add((3000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(DbWeight::get().reads(1 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn reap_preimage(b: u32, ) -> Weight {
+		(42848000 as Weight)
+			.saturating_add((3000 as Weight).saturating_mul(b as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	fn unlock_remove(r: u32, ) -> Weight {
+		(45333000 as Weight)
+			.saturating_add((171000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(3 as Weight))
+			.saturating_add(DbWeight::get().writes(3 as Weight))
+	}
+	fn unlock_set(r: u32, ) -> Weight {
+		(44424000 as Weight)
+			.saturating_add((291000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(3 as Weight))
+			.saturating_add(DbWeight::get().writes(3 as Weight))
+	}
+	fn remove_vote(r: u32, ) -> Weight {
+		(28250000 as Weight)
+			.saturating_add((283000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+	fn remove_other_vote(r: u32, ) -> Weight {
+		(28250000 as Weight)
+			.saturating_add((283000 as Weight).saturating_mul(r as Weight))
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(2 as Weight))
+	}
+}
diff --git a/runtime/polkadot/src/weights/pallet_proxy.rs b/runtime/polkadot/src/weights/pallet_proxy.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5d8655e6c3b0fa31d618b6b112e75c44eaf64f23
--- /dev/null
+++ b/runtime/polkadot/src/weights/pallet_proxy.rs
@@ -0,0 +1,86 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.

+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.

+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.

+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

+//!
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_proxy::WeightInfo for WeightInfo { + fn proxy(p: u32, ) -> Weight { + (26127000 as Weight) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } + fn proxy_announced(a: u32, p: u32, ) -> Weight { + (55405000 as Weight) + .saturating_add((774000 as Weight).saturating_mul(a as Weight)) + .saturating_add((209000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_announcement(a: u32, p: u32, ) -> Weight { + (35879000 as Weight) + .saturating_add((783000 as Weight).saturating_mul(a as Weight)) + .saturating_add((20000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn reject_announcement(a: u32, p: u32, ) -> Weight { + (36097000 as Weight) + .saturating_add((780000 as Weight).saturating_mul(a as Weight)) + .saturating_add((12000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn announce(a: u32, p: u32, ) -> Weight { + (53769000 as Weight) + .saturating_add((675000 as Weight).saturating_mul(a as Weight)) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn add_proxy(p: u32, ) -> Weight { + (36082000 as Weight) + .saturating_add((234000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxy(p: u32, ) -> Weight { + (32885000 as Weight) + .saturating_add((267000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxies(p: u32, ) -> Weight { + (31735000 as Weight) + .saturating_add((215000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn anonymous(p: u32, ) -> Weight { + (50907000 as Weight) + .saturating_add((61000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn kill_anonymous(p: u32, ) -> Weight { + (33926000 as Weight) + .saturating_add((208000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/runtime/polkadot/src/weights/pallet_timestamp.rs b/runtime/polkadot/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000000000000000000000000000000..cfd5f192d35298b512ee75e4d26acf11355ce3ba --- /dev/null +++ b/runtime/polkadot/src/weights/pallet_timestamp.rs @@ -0,0 +1,34 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5

+#![allow(unused_parens)]

+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};

+pub struct WeightInfo;
+impl pallet_timestamp::WeightInfo for WeightInfo {
+	// WARNING! Some components were not used: ["t"]
+	fn set() -> Weight {
+		(9133000 as Weight)
+			.saturating_add(DbWeight::get().reads(2 as Weight))
+			.saturating_add(DbWeight::get().writes(1 as Weight))
+	}
+	// WARNING! Some components were not used: ["t"]
+	fn on_finalize() -> Weight {
+		(5915000 as Weight)
+	}
+}
diff --git a/runtime/polkadot/src/weights/pallet_utility.rs b/runtime/polkadot/src/weights/pallet_utility.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c9ae0d7d2333b19bec65e4f5c1556df65b21e086
--- /dev/null
+++ b/runtime/polkadot/src/weights/pallet_utility.rs
@@ -0,0 +1,35 @@
+// This file is part of Substrate.

+// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0

+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5

+#![allow(unused_parens)]
+#![allow(unused_imports)]

+use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};

+pub struct WeightInfo;
+impl pallet_utility::WeightInfo for WeightInfo {
+	fn batch(c: u32, ) -> Weight {
+		(16461000 as Weight)
+			.saturating_add((1982000 as Weight).saturating_mul(c as Weight))
+	}
+	// WARNING! Some components were not used: ["u"]
+	fn as_derivative() -> Weight {
+		(4086000 as Weight)
+	}
+}
diff --git a/runtime/polkadot/tests/weights.rs b/runtime/polkadot/tests/weights.rs
index 533783a4e4917b9a92a40c6c5550b528c299e78f..3583a614e3c0386901eb6742033f0d97e9ccd65e 100644
--- a/runtime/polkadot/tests/weights.rs
+++ b/runtime/polkadot/tests/weights.rs
@@ -19,7 +19,7 @@
 //!
 //! These test are not meant to be exhaustive, as it is inevitable that
 //! weights in Substrate will change. Instead they are supposed to provide
-//! some sort of indicator that calls we consider important (e.g Balances::transfer)
+//! some sort of indicator that calls we consider important (e.g. pallet_balances::transfer)
 //! have not suddenly changed from under us.
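// Editor's sketch (hypothetical test, not in the diff): with the generated
// WeightInfo files wired into the runtime, a regression check compares the
// dispatch weight against the generated figure instead of a hand-written
// constant — assuming frame_system is configured with
// weights::frame_system::WeightInfo as above, and using this file's imports.
#[test]
fn weight_of_system_remark_tracks_generated_value() {
	use frame_support::weights::GetDispatchInfo;
	let weight = SystemCall::remark::<Runtime>(vec![]).get_dispatch_info().weight;
	// 1_305_000 is the remark() figure in weights/frame_system.rs.
	assert_eq!(weight, 1_305_000);
}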
 use frame_support::{
@@ -29,17 +29,16 @@ use frame_support::{
 use keyring::AccountKeyring;
 use polkadot_runtime::constants::currency::*;
 use polkadot_runtime::{self, Runtime};
-use primitives::AccountId;
+use primitives::v0::AccountId;
 use runtime_common::MaximumBlockWeight;
 
-use democracy::Call as DemocracyCall;
-use elections_phragmen::Call as PhragmenCall;
-use session::Call as SessionCall;
-use staking::Call as StakingCall;
-use system::Call as SystemCall;
-use treasury::Call as TreasuryCall;
+use pallet_elections_phragmen::Call as PhragmenCall;
+use pallet_session::Call as SessionCall;
+use pallet_staking::Call as StakingCall;
+use frame_system::Call as SystemCall;
+use pallet_treasury::Call as TreasuryCall;
 
-type DbWeight = <Runtime as system::Trait>::DbWeight;
+type DbWeight = <Runtime as frame_system::Trait>::DbWeight;
 
 #[test]
 fn sanity_check_weight_per_time_constants_are_as_expected() {
@@ -51,38 +50,6 @@ fn sanity_check_weight_per_time_constants_are_as_expected() {
 	assert_eq!(WEIGHT_PER_NANOS, WEIGHT_PER_MICROS / 1000);
 }
 
-#[test]
-fn weight_of_balances_transfer_is_correct() {
-	// #[weight = T::DbWeight::get().reads_writes(1, 1) + 70_000_000]
-	let expected_weight = DbWeight::get().read + DbWeight::get().write + 70_000_000;
-
-	let weight = polkadot_runtime::BalancesCall::transfer::<Runtime>(Default::default(), Default::default())
-		.get_dispatch_info()
-		.weight;
-	assert_eq!(weight, expected_weight);
-}
-
-#[test]
-fn weight_of_balances_transfer_keep_alive_is_correct() {
-	// #[weight = T::DbWeight::get().reads_writes(1, 1) + 50_000_000]
-	let expected_weight = DbWeight::get().read + DbWeight::get().write + 50_000_000;
-
-	let weight = polkadot_runtime::BalancesCall::transfer_keep_alive::<Runtime>(Default::default(), Default::default())
-		.get_dispatch_info()
-		.weight;
-
-	assert_eq!(weight, expected_weight);
-}
-
-#[test]
-fn weight_of_timestamp_set_is_correct() {
-	// #[weight = T::DbWeight::get().reads_writes(2, 1) + 8_000_000]
-	let expected_weight = (2 * DbWeight::get().read) + DbWeight::get().write + 8_000_000;
-	let weight = polkadot_runtime::TimestampCall::set::<Runtime>(Default::default()).get_dispatch_info().weight;
-
-	assert_eq!(weight, expected_weight);
-}
-
 #[test]
 fn weight_of_staking_bond_is_correct() {
 	let controller: AccountId = AccountKeyring::Alice.into();
@@ -129,34 +96,6 @@ fn weight_of_system_set_code_is_correct() {
 	assert_eq!(weight, expected_weight);
 }
 
-#[test]
-fn weight_of_system_set_storage_is_correct() {
-	let storage_items = vec![(vec![12], vec![34]), (vec![45], vec![83])];
-	let len = storage_items.len() as Weight;
-
-	// #[weight = FunctionOf(
-	//	|(items,): (&Vec<KeyValue>,)| {
-	//		T::DbWeight::get().writes(items.len() as Weight)
-	//			.saturating_add((items.len() as Weight).saturating_mul(600_000))
-	//	},
-	//	DispatchClass::Operational,
-	//	Pays::Yes,
-	// )]
-	let expected_weight = (DbWeight::get().write * len).saturating_add(len.saturating_mul(600_000));
-	let weight = SystemCall::set_storage::<Runtime>(storage_items).get_dispatch_info().weight;
-
-	assert_eq!(weight, expected_weight);
-}
-
-#[test]
-fn weight_of_system_remark_is_correct() {
-	// #[weight = 700_000]
-	let expected_weight = 700_000;
-	let weight = SystemCall::remark::<Runtime>(vec![]).get_dispatch_info().weight;
-
-	assert_eq!(weight, expected_weight);
-}
-
 #[test]
 fn weight_of_session_set_keys_is_correct() {
 	// #[weight = 200_000_000
@@ -182,40 +121,6 @@ fn weight_of_session_purge_keys_is_correct() {
 	assert_eq!(weight, expected_weight);
 }
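// Editor's note (sketch, not part of the diff): the surviving tests below lean
// on the identity DbWeight::get().reads_writes(r, w) == r * read + w * write;
// e.g. the renounce_candidacy expectation is 46µs of execution plus two reads
// and two writes:
fn renounce_candidacy_expected_weight() -> Weight {
	46 * WEIGHT_PER_MICROS + DbWeight::get().reads_writes(2, 2)
}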
-#[test]
-fn weight_of_democracy_propose_is_correct() {
-	// #[weight = 50_000_000 + T::DbWeight::get().reads_writes(2, 3)]
-	let expected_weight = 50_000_000 + (DbWeight::get().read * 2) + (DbWeight::get().write * 3);
-	let weight = DemocracyCall::propose::<Runtime>(Default::default(), Default::default()).get_dispatch_info().weight;
-
-	assert_eq!(weight, expected_weight);
-}
-
-#[test]
-fn weight_of_democracy_vote_is_correct() {
-	use democracy::AccountVote;
-	let vote = AccountVote::Standard { vote: Default::default(), balance: Default::default() };
-
-	// #[weight = 50_000_000 + 350_000 * Weight::from(T::MaxVotes::get()) + T::DbWeight::get().reads_writes(3, 3)]
-	let expected_weight = 50_000_000
-		+ 350_000 * (Weight::from(polkadot_runtime::MaxVotes::get()))
-		+ (DbWeight::get().read * 3)
-		+ (DbWeight::get().write * 3);
-	let weight = DemocracyCall::vote::<Runtime>(Default::default(), vote).get_dispatch_info().weight;
-
-	assert_eq!(weight, expected_weight);
-}
-
-#[test]
-fn weight_of_democracy_enact_proposal_is_correct() {
-	// #[weight = T::MaximumBlockWeight::get()]
-	let expected_weight = MaximumBlockWeight::get();
-	let weight =
-		DemocracyCall::enact_proposal::<Runtime>(Default::default(), Default::default()).get_dispatch_info().weight;
-
-	assert_eq!(weight, expected_weight);
-}
-
 #[test]
 fn weight_of_phragmen_vote_is_correct() {
 	// #[weight = 100_000_000]
@@ -236,7 +141,7 @@ fn weight_of_phragmen_submit_candidacy_is_correct() {
 #[test]
 fn weight_of_phragmen_renounce_candidacy_is_correct() {
 	let expected_weight = 46 * WEIGHT_PER_MICROS + DbWeight::get().reads_writes(2, 2);
-	let weight = PhragmenCall::renounce_candidacy::<Runtime>(elections_phragmen::Renouncing::Member)
+	let weight = PhragmenCall::renounce_candidacy::<Runtime>(pallet_elections_phragmen::Renouncing::Member)
 		.get_dispatch_info().weight;
 
 	assert_eq!(weight, expected_weight);
@@ -263,7 +168,7 @@ fn weight_of_treasury_approve_proposal_is_correct() {
 
 #[test]
 fn weight_of_treasury_tip_is_correct() {
-	let max_len: Weight = <Runtime as treasury::Trait>::Tippers::max_len() as Weight;
+	let max_len: Weight = <Runtime as pallet_treasury::Trait>::Tippers::max_len() as Weight;
 
 	// #[weight = 68_000_000 + 2_000_000 * T::Tippers::max_len() as Weight
 	//	+ T::DbWeight::get().reads_writes(2, 1)]
diff --git a/runtime/rococo-v1/Cargo.toml b/runtime/rococo-v1/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..25e49e6e29d9249938cd4e8f1cec251920bb9195
--- /dev/null
+++ b/runtime/rococo-v1/Cargo.toml
@@ -0,0 +1,106 @@
+[package]
+name = "rococo-v1-runtime"
+version = "0.8.22"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+build = "build.rs"
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] }
+serde = { version = "1.0.102", default-features = false }
+serde_derive = { version = "1.0.102", optional = true }
+smallvec = "1.4.1"
+
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git =
"https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", git = "https://github.com/paritytech/substrate", branch = "master" } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } +primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } +polkadot-parachain = { path = "../../parachain", default-features = false } +runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } + 
+[build-dependencies]
+wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "1.0.6" }
+
+[features]
+default = ["std"]
+no_std = []
+std = [
+	"authority-discovery-primitives/std",
+	"pallet-authority-discovery/std",
+	"pallet-authorship/std",
+	"pallet-babe/std",
+	"babe-primitives/std",
+	"pallet-balances/std",
+	"codec/std",
+	"frame-executive/std",
+	"pallet-grandpa/std",
+	"pallet-indices/std",
+	"pallet-im-online/std",
+	"inherents/std",
+	"frame-support/std",
+	"polkadot-parachain/std",
+	"primitives/std",
+	"runtime-common/std",
+	"runtime-parachains/std",
+	"pallet-session/std",
+	"sp-api/std",
+	"sp-core/std",
+	"sp-io/std",
+	"sp-runtime/std",
+	"sp-session/std",
+	"sp-staking/std",
+	"sp-std/std",
+	"pallet-staking/std",
+	"frame-system/std",
+	"frame-system-rpc-runtime-api/std",
+	"offchain-primitives/std",
+	"pallet-offences/std",
+	"pallet-timestamp/std",
+	"pallet-transaction-payment/std",
+	"pallet-transaction-payment-rpc-runtime-api/std",
+	"block-builder-api/std",
+	"tx-pool-api/std",
+	"sp-version/std",
+	"serde_derive",
+	"serde/std",
+]
+# When enabled, the runtime api will not be built.
+#
+# This is required by Cumulus to access certain types of the
+# runtime without clashing with the runtime api exported functions
+# in WASM.
+disable-runtime-api = []
diff --git a/parachain/test-parachains/code-upgrader/build.rs b/runtime/rococo-v1/build.rs
similarity index 88%
rename from parachain/test-parachains/code-upgrader/build.rs
rename to runtime/rococo-v1/build.rs
index 9a2e2c8fddbe3fe521d58a223f621eaaee6ac93a..dff1419829974d5c86b0c765974413d040d661a8 100644
--- a/parachain/test-parachains/code-upgrader/build.rs
+++ b/runtime/rococo-v1/build.rs
@@ -1,4 +1,4 @@
-// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// Copyright 2020 Parity Technologies (UK) Ltd.
 // This file is part of Polkadot.
 
 // Substrate is free software: you can redistribute it and/or modify
@@ -19,7 +19,8 @@ use wasm_builder_runner::WasmBuilder;
 fn main() {
 	WasmBuilder::new()
 		.with_current_project()
-		.with_wasm_builder_from_crates("1.0.11")
+		.with_wasm_builder_from_crates("2.0.0")
+		.import_memory()
 		.export_heap_base()
 		.build()
 }
diff --git a/runtime/rococo-v1/src/constants.rs b/runtime/rococo-v1/src/constants.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a565ca4dbcfde6dc9d1e625e938bf69928b017b1
--- /dev/null
+++ b/runtime/rococo-v1/src/constants.rs
@@ -0,0 +1,113 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+/// Money matters.
+pub mod currency {
+	use primitives::v0::Balance;
+
+	pub const DOTS: Balance = 1_000_000_000_000;
+	pub const DOLLARS: Balance = DOTS;
+	pub const CENTS: Balance = DOLLARS / 100;
+	pub const MILLICENTS: Balance = CENTS / 1_000;
+
+	pub const fn deposit(items: u32, bytes: u32) -> Balance {
+		items as Balance * 1 * DOLLARS + (bytes as Balance) * 5 * MILLICENTS
+	}
+}
+
+/// Time and blocks.
+pub mod time {
+	use primitives::v0::{Moment, BlockNumber};
+	pub const MILLISECS_PER_BLOCK: Moment = 6000;
+	pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK;
+	pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 1 * HOURS;
+
+	// These time units are defined in number of blocks.
+	pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
+	pub const HOURS: BlockNumber = MINUTES * 60;
+	pub const DAYS: BlockNumber = HOURS * 24;
+
+	// 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks.
+	pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);
+}
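// Editor's sketch (not part of the diff): quick arithmetic check of the block
// time constants above — 6s blocks give 10 blocks per minute and a 600-block
// (one-hour) epoch.
#[test]
fn epoch_is_one_hour_of_blocks() {
	use super::time::*;
	assert_eq!(MINUTES, 10);
	assert_eq!(EPOCH_DURATION_IN_BLOCKS, 600);
}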
+	pub struct WeightToFee;
+	impl WeightToFeePolynomial for WeightToFee {
+		type Balance = Balance;
+		fn polynomial() -> WeightToFeeCoefficients<Self::Balance> {
+			// in Rococo, the extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT:
+			let p = super::currency::CENTS;
+			let q = 10 * Balance::from(ExtrinsicBaseWeight::get());
+			smallvec![WeightToFeeCoefficient {
+				degree: 1,
+				negative: false,
+				coeff_frac: Perbill::from_rational_approximation(p % q, q),
+				coeff_integer: p / q,
+			}]
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use frame_support::weights::WeightToFeePolynomial;
+	use runtime_common::{MaximumBlockWeight, ExtrinsicBaseWeight};
+	use super::fee::WeightToFee;
+	use super::currency::{CENTS, DOLLARS, MILLICENTS};
+
+	#[test]
+	// This test checks that the fee for a full block of `MaximumBlockWeight` weight is correct.
+	fn full_block_fee_is_correct() {
+		// A full block should cost 16 DOLLARS.
+		println!("Base: {}", ExtrinsicBaseWeight::get());
+		let x = WeightToFee::calc(&MaximumBlockWeight::get());
+		let y = 16 * DOLLARS;
+		assert!(x.max(y) - x.min(y) < MILLICENTS);
+	}
+
+	#[test]
+	// This test checks that the fee for an extrinsic of `ExtrinsicBaseWeight` weight is correct.
+	fn extrinsic_base_fee_is_correct() {
+		// `ExtrinsicBaseWeight` should cost 1/10 of a CENT.
+		println!("Base: {}", ExtrinsicBaseWeight::get());
+		let x = WeightToFee::calc(&ExtrinsicBaseWeight::get());
+		let y = CENTS / 10;
+		assert!(x.max(y) - x.min(y) < MILLICENTS);
+	}
+}
diff --git a/runtime/rococo-v1/src/lib.rs b/runtime/rococo-v1/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..fe812a1389c918865db4f855736622096d3516c3
--- /dev/null
+++ b/runtime/rococo-v1/src/lib.rs
@@ -0,0 +1,730 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! The Rococo runtime for v1 parachains.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
+#![recursion_limit="256"] + +use sp_std::prelude::*; +use codec::Encode; +use primitives::v1::{ + AccountId, AccountIndex, Balance, BlockNumber, Hash, Nonce, Signature, Moment, + GroupRotationInfo, CoreState, Id, ValidationData, ValidationCode, CandidateEvent, + ValidatorId, ValidatorIndex, CommittedCandidateReceipt, OccupiedCoreAssumption, + PersistedValidationData, +}; +use runtime_common::{ + SlowAdjustingFeeUpdate, + impls::{CurrencyToVoteHandler, ToAuthor}, + BlockHashCount, MaximumBlockWeight, AvailableBlockRatio, MaximumBlockLength, + BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, MaximumExtrinsicWeight, +}; +use runtime_parachains::{ + self, + runtime_api_impl::v1 as runtime_api_impl, +}; +use frame_support::{ + parameter_types, construct_runtime, debug, + traits::{KeyOwnerProofSystem, Filter}, + weights::Weight, +}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + ApplyExtrinsicResult, KeyTypeId, Perbill, curve::PiecewiseLinear, + transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}, + traits::{ + BlakeTwo256, Block as BlockT, OpaqueKeys, IdentityLookup, + Extrinsic as ExtrinsicT, SaturatedConversion, Verify, + }, +}; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; +use sp_version::RuntimeVersion; +use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; +use pallet_grandpa::{AuthorityId as GrandpaId, fg_primitives}; +use sp_core::OpaqueMetadata; +use sp_staking::SessionIndex; +use pallet_session::historical as session_historical; +use frame_system::EnsureRoot; +use runtime_common::paras_sudo_wrapper as paras_sudo_wrapper; + +use runtime_parachains::configuration as parachains_configuration; +use runtime_parachains::inclusion as parachains_inclusion; +use runtime_parachains::inclusion_inherent as parachains_inclusion_inherent; +use runtime_parachains::initializer as parachains_initializer; +use runtime_parachains::paras as parachains_paras; +use runtime_parachains::scheduler as parachains_scheduler; + +pub use pallet_balances::Call as BalancesCall; + +/// Constant values used within the runtime. +pub mod constants; +use constants::{time::*, currency::*, fee::*}; + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +/// The address format for describing accounts. +pub type Address = AccountId; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); + +#[cfg(not(feature = "disable-runtime-api"))] +sp_api::impl_runtime_apis! 
{ + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + Runtime::metadata().into() + } + } + + impl block_builder_api::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: inherents::InherentData, + ) -> inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + + fn random_seed() -> ::Hash { + Babe::randomness().into() + } + } + + impl tx_pool_api::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx) + } + } + + impl offchain_primitives::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl primitives::v1::ParachainHost for Runtime { + fn validators() -> Vec { + runtime_api_impl::validators::() + } + + fn validator_groups() -> (Vec>, GroupRotationInfo) { + runtime_api_impl::validator_groups::() + } + + fn availability_cores() -> Vec> { + runtime_api_impl::availability_cores::() + } + + fn full_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option> { + runtime_api_impl::full_validation_data::(para_id, assumption) + } + + fn persisted_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option> { + runtime_api_impl::persisted_validation_data::(para_id, assumption) + } + + fn session_index_for_child() -> SessionIndex { + runtime_api_impl::session_index_for_child::() + } + + fn validation_code(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option { + runtime_api_impl::validation_code::(para_id, assumption) + } + + fn candidate_pending_availability(para_id: Id) -> Option> { + runtime_api_impl::candidate_pending_availability::(para_id) + } + + fn candidate_events() -> Vec> { + runtime_api_impl::candidate_events::(|ev| { + match ev { + Event::parachains_inclusion(ev) => { + Some(ev) + } + _ => None, + } + }) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> Vec<(GrandpaId, u64)> { + Grandpa::grandpa_authorities() + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: fg_primitives::EquivocationProof< + ::Hash, + sp_runtime::traits::NumberFor, + >, + key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Grandpa::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + } + + fn generate_key_ownership_proof( + _set_id: fg_primitives::SetId, + authority_id: fg_primitives::AuthorityId, + ) -> Option { + use codec::Encode; + + Historical::prove((fg_primitives::KEY_TYPE, authority_id)) + .map(|p| p.encode()) + .map(fg_primitives::OpaqueKeyOwnershipProof::new) + } + } + + impl babe_primitives::BabeApi for Runtime { + fn configuration() -> babe_primitives::BabeGenesisConfiguration { + // The choice of `c` parameter (where `1 - c` represents the + // probability of a slot being 
empty) is made in accordance with the
+			// slot duration and the expected target block time, so that the
+			// network can safely resist delays of up to two seconds.
+			//
+			babe_primitives::BabeGenesisConfiguration {
+				slot_duration: Babe::slot_duration(),
+				epoch_length: EpochDuration::get(),
+				c: PRIMARY_PROBABILITY,
+				genesis_authorities: Babe::authorities(),
+				randomness: Babe::randomness(),
+				allowed_slots: babe_primitives::AllowedSlots::PrimaryAndSecondaryPlainSlots,
+			}
+		}
+
+		fn current_epoch_start() -> babe_primitives::SlotNumber {
+			Babe::current_epoch_start()
+		}
+
+		fn generate_key_ownership_proof(
+			_slot_number: babe_primitives::SlotNumber,
+			authority_id: babe_primitives::AuthorityId,
+		) -> Option<babe_primitives::OpaqueKeyOwnershipProof> {
+			use codec::Encode;
+
+			Historical::prove((babe_primitives::KEY_TYPE, authority_id))
+				.map(|p| p.encode())
+				.map(babe_primitives::OpaqueKeyOwnershipProof::new)
+		}
+
+		fn submit_report_equivocation_unsigned_extrinsic(
+			equivocation_proof: babe_primitives::EquivocationProof<<Block as BlockT>::Header>,
+			key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof,
+		) -> Option<()> {
+			let key_owner_proof = key_owner_proof.decode()?;
+
+			Babe::submit_unsigned_equivocation_report(
+				equivocation_proof,
+				key_owner_proof,
+			)
+		}
+	}
+
+	impl authority_discovery_primitives::AuthorityDiscoveryApi<Block> for Runtime {
+		fn authorities() -> Vec<AuthorityDiscoveryId> {
+			AuthorityDiscovery::authorities()
+		}
+	}
+
+	impl sp_session::SessionKeys<Block> for Runtime {
+		fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
+			SessionKeys::generate(seed)
+		}
+
+		fn decode_session_keys(
+			encoded: Vec<u8>,
+		) -> Option<Vec<(Vec<u8>, sp_core::crypto::KeyTypeId)>> {
+			SessionKeys::decode_into_raw_public_keys(&encoded)
+		}
+	}
+
+	impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
+		fn account_nonce(account: AccountId) -> Nonce {
+			System::account_nonce(account)
+		}
+	}
+
+	impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<
+		Block,
+		Balance,
+	> for Runtime {
+		fn query_info(uxt: <Block as BlockT>::Extrinsic, len: u32) -> RuntimeDispatchInfo<Balance> {
+			TransactionPayment::query_info(uxt, len)
+		}
+	}
+}
+/// Unchecked extrinsic type as expected by this runtime.
+pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
+/// Extrinsic type that has already been checked.
+pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
+/// Executive: handles dispatch to the various modules.
+pub type Executive = frame_executive::Executive<Runtime, Block, frame_system::ChainContext<Runtime>, Runtime, AllModules>;
+/// The payload being signed in transactions.
+pub type SignedPayload = generic::SignedPayload<Call, SignedExtra>;
+
+impl_opaque_keys! {
+	pub struct SessionKeys {
+		pub babe: Babe,
+		pub im_online: ImOnline,
+		pub parachain_validator: Initializer,
+	}
+}
+
+construct_runtime! {
+	pub enum Runtime where
+		Block = Block,
+		NodeBlock = primitives::v1::Block,
+		UncheckedExtrinsic = UncheckedExtrinsic
+	{
+		System: frame_system::{Module, Call, Storage, Config, Event<T>},
+
+		// Must be before session.
+		Babe: pallet_babe::{Module, Call, Storage, Config, Inherent, ValidateUnsigned},
+
+		Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent},
+		Indices: pallet_indices::{Module, Call, Storage, Config<T>, Event<T>},
+		Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
+		TransactionPayment: pallet_transaction_payment::{Module, Storage},
+
+		// Consensus support.
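+		// (A hedged aside: at the time of this change, `construct_runtime!`
+		// assigns module indices implicitly by declaration order, so reordering
+		// the entries below would alter the SCALE encoding of `Call` and `Event`.)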
+ Authorship: pallet_authorship::{Module, Call, Storage}, + Staking: pallet_staking::{Module, Call, Storage, Config, Event, ValidateUnsigned}, + Offences: pallet_offences::{Module, Call, Storage, Event}, + Historical: session_historical::{Module}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event, ValidateUnsigned}, + ImOnline: pallet_im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, + + // Parachains modules. + Config: parachains_configuration::{Module, Call, Storage}, + Inclusion: parachains_inclusion::{Module, Call, Storage, Event}, + InclusionInherent: parachains_inclusion_inherent::{Module, Call, Storage}, + Scheduler: parachains_scheduler::{Module, Call, Storage}, + Paras: parachains_paras::{Module, Call, Storage}, + Initializer: parachains_initializer::{Module, Call, Storage}, + + ParasSudoWrapper: paras_sudo_wrapper::{Module, Call}, + } +} + +pub struct BaseFilter; +impl Filter for BaseFilter { + fn filter(_call: &Call) -> bool { + true + } +} + +/// Runtime version (Rococo). +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("rococo-v1"), + impl_name: create_runtime_str!("parity-rococo-v1"), + authoring_version: 0, + spec_version: 1, + impl_version: 0, + #[cfg(not(feature = "disable-runtime-api"))] + apis: RUNTIME_API_VERSIONS, + #[cfg(feature = "disable-runtime-api")] + apis: sp_version::create_apis_vec![[]], + transaction_version: 2, +}; + +parameter_types! { + pub const Version: RuntimeVersion = VERSION; +} + +impl frame_system::Trait for Runtime { + type BaseCallFilter = BaseFilter; + type Origin = Origin; + type Call = Call; + type Index = Nonce; + type BlockNumber = BlockNumber; + type Hash = Hash; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = generic::Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = RocksDbWeight; + type BlockExecutionWeight = BlockExecutionWeight; + type ExtrinsicBaseWeight = ExtrinsicBaseWeight; + type MaximumExtrinsicWeight = MaximumExtrinsicWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = Version; + type ModuleToIndex = ModuleToIndex; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); +} + +parameter_types! { + pub const MaxCodeSize: u32 = 10 * 1024 * 1024; // 10 MB + pub const MaxHeadDataSize: u32 = 20 * 1024; // 20 KB + pub const ValidationUpgradeFrequency: BlockNumber = 2 * DAYS; + pub const ValidationUpgradeDelay: BlockNumber = 8 * HOURS; + pub const SlashPeriod: BlockNumber = 7 * DAYS; +} + +/// Submits a transaction with the node's public and signature type. Adheres to the signed extension +/// format of the chain. +impl frame_system::offchain::CreateSignedTransaction for Runtime where + Call: From, +{ + fn create_transaction>( + call: Call, + public: ::Signer, + account: AccountId, + nonce: ::Index, + ) -> Option<(Call, ::SignaturePayload)> { + // take the biggest period possible. 
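+		// (For a concrete feel, assuming the `BlockHashCount` of 2400 defined in
+		// `runtime-common`: the next power of two is 4096, so `period` becomes
+		// 2048 blocks, i.e. roughly 3.4 hours of mortality at 6-second blocks.)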
+		let period = BlockHashCount::get()
+			.checked_next_power_of_two()
+			.map(|c| c / 2)
+			.unwrap_or(2) as u64;
+
+		let current_block = System::block_number()
+			.saturated_into::<u64>()
+			// The `System::block_number` is initialized with `n+1`,
+			// so the actual block number is `n`.
+			.saturating_sub(1);
+		let tip = 0;
+		let extra: SignedExtra = (
+			frame_system::CheckSpecVersion::<Runtime>::new(),
+			frame_system::CheckTxVersion::<Runtime>::new(),
+			frame_system::CheckGenesis::<Runtime>::new(),
+			frame_system::CheckMortality::<Runtime>::from(generic::Era::mortal(period, current_block)),
+			frame_system::CheckNonce::<Runtime>::from(nonce),
+			frame_system::CheckWeight::<Runtime>::new(),
+			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
+		);
+		let raw_payload = SignedPayload::new(call, extra).map_err(|e| {
+			debug::warn!("Unable to create signed payload: {:?}", e);
+		}).ok()?;
+		let signature = raw_payload.using_encoded(|payload| {
+			C::sign(payload, public)
+		})?;
+		let (call, extra, _) = raw_payload.deconstruct();
+		Some((call, (account, signature, extra)))
+	}
+}
+
+impl frame_system::offchain::SigningTypes for Runtime {
+	type Public = <Signature as Verify>::Signer;
+	type Signature = Signature;
+}
+
+impl pallet_session::historical::Trait for Runtime {
+	type FullIdentification = pallet_staking::Exposure<AccountId, Balance>;
+	type FullIdentificationOf = pallet_staking::ExposureOf<Runtime>;
+}
+
+pallet_staking_reward_curve::build! {
+	const REWARD_CURVE: PiecewiseLinear<'static> = curve!(
+		min_inflation: 0_025_000,
+		max_inflation: 0_100_000,
+		ideal_stake: 0_500_000,
+		falloff: 0_050_000,
+		max_piece_count: 40,
+		test_precision: 0_005_000,
+	);
+}
+
+parameter_types! {
+	// Six sessions in an era (6 hours).
+	pub const SessionsPerEra: SessionIndex = 6;
+	// 28 eras for unbonding (7 days).
+	pub const BondingDuration: pallet_staking::EraIndex = 28;
+	// 27 eras in which slashes can be cancelled (~7 days).
+	pub const SlashDeferDuration: pallet_staking::EraIndex = 27;
+	pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE;
+	pub const MaxNominatorRewardedPerValidator: u32 = 64;
+	// A quarter of the last session is reserved for the election.
+	pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4;
+	pub const MaxIterations: u32 = 10;
+	pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000);
+}
+
+parameter_types! {
+	pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _;
+}
+
+parameter_types! {
+	pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2;
+	pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value();
+}
+
+impl pallet_im_online::Trait for Runtime {
+	type AuthorityId = ImOnlineId;
+	type Event = Event;
+	type ReportUnresponsiveness = Offences;
+	type SessionDuration = SessionDuration;
+	type UnsignedPriority = StakingUnsignedPriority;
+	type WeightInfo = ();
+}
+
+impl pallet_staking::Trait for Runtime {
+	type Currency = Balances;
+	type UnixTime = Timestamp;
+	type CurrencyToVote = CurrencyToVoteHandler;
+	type RewardRemainder = ();
+	type Event = Event;
+	type Slash = ();
+	type Reward = ();
+	type SessionsPerEra = SessionsPerEra;
+	type BondingDuration = BondingDuration;
+	type SlashDeferDuration = SlashDeferDuration;
+	// Only Root can cancel a pending slash; this runtime includes no council.
+ type SlashCancelOrigin = EnsureRoot; + type SessionInterface = Self; + type RewardCurve = RewardCurve; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type NextNewSession = Session; + type ElectionLookahead = ElectionLookahead; + type Call = Call; + type UnsignedPriority = StakingUnsignedPriority; + type MaxIterations = MaxIterations; + type MinSolutionScoreBump = MinSolutionScoreBump; + type WeightInfo = (); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = 1 * CENTS; +} + +impl pallet_balances::Trait for Runtime { + type Balance = Balance; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +impl frame_system::offchain::SendTransactionTypes for Runtime where + Call: From, +{ + type OverarchingCall = Call; + type Extrinsic = UncheckedExtrinsic; +} + +parameter_types! { + pub const ParathreadDeposit: Balance = 5 * DOLLARS; + pub const QueueSize: usize = 2; + pub const MaxRetries: u32 = 3; +} + +parameter_types! { + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); +} + +impl pallet_offences::Trait for Runtime { + type Event = Event; + type IdentificationTuple = pallet_session::historical::IdentificationTuple; + type OnOffenceHandler = Staking; + type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); +} + +impl pallet_authority_discovery::Trait for Runtime {} + +parameter_types! { + pub const MinimumPeriod: u64 = SLOT_DURATION / 2; +} +impl pallet_timestamp::Trait for Runtime { + type Moment = u64; + type OnTimestampSet = Babe; + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +parameter_types! { + pub const TransactionByteFee: Balance = 10 * MILLICENTS; +} + +impl pallet_transaction_payment::Trait for Runtime { + type Currency = Balances; + type OnTransactionPayment = ToAuthor; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = WeightToFee; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; +} + +parameter_types! { + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); +} + +impl pallet_session::Trait for Runtime { + type Event = Event; + type ValidatorId = AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type ShouldEndSession = Babe; + type NextSessionRotation = Babe; + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); +} + +parameter_types! { + pub const EpochDuration: u64 = EPOCH_DURATION_IN_BLOCKS as u64; + pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; +} + +impl pallet_babe::Trait for Runtime { + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; + + // session module is the trigger + type EpochChangeTrigger = pallet_babe::ExternalTrigger; + + type KeyOwnerProofSystem = Historical; + + type KeyOwnerProof = >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; + + type HandleEquivocation = + pallet_babe::EquivocationHandler; +} + +parameter_types! { + pub const IndexDeposit: Balance = 1 * DOLLARS; +} + +impl pallet_indices::Trait for Runtime { + type AccountIndex = AccountIndex; + type Currency = Balances; + type Deposit = IndexDeposit; + type Event = Event; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const AttestationPeriod: BlockNumber = 50; +} + +impl pallet_grandpa::Trait for Runtime { + type Event = Event; + type Call = Call; + + type KeyOwnerProofSystem = Historical; + + type KeyOwnerProof = + >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; + + type HandleEquivocation = pallet_grandpa::EquivocationHandler; +} + +parameter_types! { + pub const UncleGenerations: u32 = 0; +} + +// TODO: substrate#2986 implement this properly +impl pallet_authorship::Trait for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type UncleGenerations = UncleGenerations; + type FilterUncle = (); + type EventHandler = (Staking, ImOnline); +} + +impl parachains_configuration::Trait for Runtime { } + +impl parachains_inclusion::Trait for Runtime { + type Event = Event; +} + +impl parachains_paras::Trait for Runtime { } + +impl parachains_inclusion_inherent::Trait for Runtime { } + +impl parachains_scheduler::Trait for Runtime { } + +impl parachains_initializer::Trait for Runtime { + type Randomness = Babe; +} + +impl paras_sudo_wrapper::Trait for Runtime { } diff --git a/runtime/test-runtime/Cargo.toml b/runtime/test-runtime/Cargo.toml index 4326e11dffd3d1b40242a8089aef4fc7b5b93bb9..4dc975c68992b8be3f55dc75baa7adeea359c5a9 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -1,52 +1,56 @@ [package] name = "polkadot-test-runtime" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } log = { version = "0.3.9", optional = true } rustc-hex = { version = "2.0.1", default-features = false } serde = { version = "1.0.102", default-features = false } serde_derive = { version = "1.0.102", optional = true } -smallvec = "1.4.0" +smallvec = "1.4.1" +authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } rstd = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -version = { package = "sp-version", git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -authorship = { package = "pallet-authorship", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -babe = { package = "pallet-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -balances = { package = "pallet-balances", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -transaction-payment = { package = "pallet-transaction-payment", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -executive = { package = "frame-executive", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -finality-tracker = { package = "pallet-finality-tracker", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -grandpa = { package = "pallet-grandpa", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -indices = { package = "pallet-indices", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -nicks = { package = "pallet-nicks", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offences = { package = "pallet-offences", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -randomness-collective-flip = { package = "pallet-randomness-collective-flip", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -session = { package = "pallet-session", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-finality-tracker = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-offences = { 
git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -staking = { package = "pallet-staking", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "master" } -system = { package = "frame-system", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -system_rpc_runtime_api = { package = "frame-system-rpc-runtime-api", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -timestamp = { package = "pallet-timestamp", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -vesting = { package = "pallet-vesting", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } @@ -68,6 +72,8 @@ default = ["std"] no_std = [] only-staking = [] std = [ + "authority-discovery-primitives/std", + "pallet-authority-discovery/std", "bitvec/std", "primitives/std", "rustc-hex/std", @@ -78,34 +84,35 @@ std = [ "sp-api/std", "tx-pool-api/std", "block-builder-api/std", + "offchain-primitives/std", "rstd/std", "sp-io/std", "frame-support/std", - "authorship/std", - "balances/std", - "transaction-payment/std", + "pallet-authorship/std", + "pallet-balances/std", + "pallet-transaction-payment/std", "pallet-transaction-payment-rpc-runtime-api/std", - "executive/std", - "finality-tracker/std", - "grandpa/std", - "indices/std", - "nicks/std", - "offences/std", + "frame-executive/std", + "pallet-finality-tracker/std", + "pallet-grandpa/std", + "pallet-indices/std", + "pallet-nicks/std", + "pallet-offences/std", "sp-runtime/std", "sp-staking/std", - "session/std", - "staking/std", - "system/std", - "system_rpc_runtime_api/std", - "timestamp/std", - "version/std", - "vesting/std", + "pallet-session/std", + "pallet-staking/std", + "frame-system/std", + "frame-system-rpc-runtime-api/std", + "pallet-timestamp/std", + "sp-version/std", + "pallet-vesting/std", "serde_derive", "serde/std", "log", - "babe/std", + "pallet-babe/std", "babe-primitives/std", "sp-session/std", - "randomness-collective-flip/std", + "pallet-randomness-collective-flip/std", 
"runtime-common/std", ] diff --git a/runtime/test-runtime/build.rs b/runtime/test-runtime/build.rs index 56051bd627f6c37b55324eaa425df106bbdaa2cb..af219a29319898d2f6180ef13bbe5263cd114727 100644 --- a/runtime/test-runtime/build.rs +++ b/runtime/test-runtime/build.rs @@ -19,7 +19,7 @@ use wasm_builder_runner::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("1.0.11") + .with_wasm_builder_from_crates("2.0.0") .import_memory() .export_heap_base() .build() diff --git a/runtime/test-runtime/client/Cargo.toml b/runtime/test-runtime/client/Cargo.toml index a9439e060968a1a4c02c0a4a9329aaecc6f8be5d..ce00e4a51d375d6dc8d65a1309ccd8b658f777ec 100644 --- a/runtime/test-runtime/client/Cargo.toml +++ b/runtime/test-runtime/client/Cargo.toml @@ -6,19 +6,24 @@ edition = "2018" license = "GPL-3.0" [dependencies] +futures = "0.3.1" +codec = { package = "parity-scale-codec", version = "1.3.4" } + +# Polkadot dependencies +polkadot-primitives = { path = "../../../primitives" } +polkadot-runtime-common = { path = "../../common" } +polkadot-test-runtime = { path = ".." } +polkadot-test-service = { path = "../../../node/test-service" } + +# Substrate dependencies +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-light = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["test-helpers"], default-features = false } -substrate-test-client = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -polkadot-test-runtime = { path = ".." 
} -polkadot-runtime-common = { path = "../../common" } -polkadot-primitives = { path = "../../../primitives" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -codec = { package = "parity-scale-codec", version = "1.0.0" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } -futures = "0.3.1" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-test-client = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/runtime/test-runtime/client/src/lib.rs b/runtime/test-runtime/client/src/lib.rs index 2e026647195fa5f8a1b160739597481936fd5df4..698de3d3cf1f15f0b93665a6fd782417d46583ca 100644 --- a/runtime/test-runtime/client/src/lib.rs +++ b/runtime/test-runtime/client/src/lib.rs @@ -20,13 +20,18 @@ use std::sync::Arc; use std::collections::BTreeMap; +use std::convert::TryFrom; pub use substrate_test_client::*; pub use polkadot_test_runtime as runtime; -use sp_core::{sr25519, ChangesTrieConfiguration, map, twox_128}; +use sp_core::{ChangesTrieConfiguration, map, twox_128}; use sp_core::storage::{ChildInfo, Storage, StorageChild}; -use polkadot_test_runtime::genesismap::GenesisConfig; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, HashFor}; +use polkadot_test_runtime::GenesisConfig; +use polkadot_test_service::polkadot_local_testnet_genesis; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT, Hash as HashT, HashFor}, + BuildStorage, +}; use sc_consensus::LongestChain; use sc_client_api::light::{RemoteCallRequest, RemoteBodyRequest}; use sc_service::client::{ @@ -89,16 +94,10 @@ pub struct GenesisParameters { impl GenesisParameters { fn genesis_config(&self) -> GenesisConfig { - GenesisConfig::new( - self.changes_trie_config.clone(), - vec![ - sr25519::Public::from(Sr25519Keyring::Alice).into(), - sr25519::Public::from(Sr25519Keyring::Bob).into(), - sr25519::Public::from(Sr25519Keyring::Charlie).into(), - ], - 1000, - self.extra_storage.clone(), - ) + let config = polkadot_local_testnet_genesis(self.changes_trie_config.clone()); + config.assimilate_storage(&mut self.extra_storage.clone()).expect("Adding `system::GensisConfig` to the genesis"); + + config } } @@ -112,7 +111,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { fn genesis_storage(&self) -> Storage { use codec::Encode; - let mut storage = self.genesis_config().genesis_map(); + let mut storage = self.genesis_config().build_storage().unwrap(); let child_roots = storage.children_default.iter().map(|(sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( @@ -296,7 +295,7 @@ pub fn new_light() -> ( let local_call_executor = LocalCallExecutor::new( backend.clone(), executor, - sp_core::tasks::executor(), + Box::new(sp_core::testing::TaskExecutor::new()), Default::default() ); let call_executor = LightExecutor::new( @@ -323,20 +322,21 @@ pub fn new_native_executor() -> sc_executor::NativeExecutor { } /// Extrinsics that must be included in each block. 
-pub fn needed_extrinsics(heads: Vec) -> Vec { - use polkadot_runtime_common::parachains; +/// +/// The index of the block must be provided to calculate a valid timestamp for the block. The value starts at 0 and +/// should be incremented by one for every block produced. +pub fn needed_extrinsics( + i: u64, +) -> Vec { + let timestamp = std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) + .expect("now always later than unix epoch; qed") + .as_millis() + (i * polkadot_test_runtime::constants::time::SLOT_DURATION / 2) as u128; vec![ polkadot_test_runtime::UncheckedExtrinsic { - function: polkadot_test_runtime::Call::Parachains(parachains::Call::set_heads(heads)), - signature: None, - }, - polkadot_test_runtime::UncheckedExtrinsic { - function: polkadot_test_runtime::Call::Timestamp(pallet_timestamp::Call::set({ - std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) - .expect("now always later than unix epoch; qed") - .as_millis() as u64 - })), + function: polkadot_test_runtime::Call::Timestamp(pallet_timestamp::Call::set( + u64::try_from(timestamp).expect("unexpected big timestamp"), + )), signature: None, } ] diff --git a/runtime/test-runtime/src/constants.rs b/runtime/test-runtime/src/constants.rs index b0431e55f26820379765bdad0a89efebf431cf42..b18501b714b1f903d9d1144d833f96adff55db83 100644 --- a/runtime/test-runtime/src/constants.rs +++ b/runtime/test-runtime/src/constants.rs @@ -16,7 +16,7 @@ /// Money matters. pub mod currency { - use primitives::Balance; + use primitives::v0::Balance; pub const DOTS: Balance = 1_000_000_000_000; pub const DOLLARS: Balance = DOTS; @@ -26,7 +26,7 @@ pub mod currency { /// Time and blocks. pub mod time { - use primitives::{Moment, BlockNumber}; + use primitives::v0::{Moment, BlockNumber}; // Testnet pub const MILLISECS_PER_BLOCK: Moment = 1000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; @@ -45,7 +45,7 @@ pub mod time { /// Fee-related. pub mod fee { pub use sp_runtime::Perbill; - use primitives::Balance; + use primitives::v0::Balance; use runtime_common::ExtrinsicBaseWeight; use frame_support::weights::{ WeightToFeePolynomial, WeightToFeeCoefficient, WeightToFeeCoefficients, @@ -59,7 +59,7 @@ pub mod fee { /// node's balance type. /// /// This should typically create a mapping between the following ranges: - /// - [0, system::MaximumBlockWeight] + /// - [0, frame_system::MaximumBlockWeight] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: diff --git a/runtime/test-runtime/src/genesismap.rs b/runtime/test-runtime/src/genesismap.rs deleted file mode 100644 index f3ae9693d149ef5290372eefe77b3bc8b542d936..0000000000000000000000000000000000000000 --- a/runtime/test-runtime/src/genesismap.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! 
Tool for creating the genesis block. - -use std::collections::BTreeMap; -use super::{AccountId, WASM_BINARY, constants::currency}; -use sp_core::ChangesTrieConfiguration; -use sp_core::storage::Storage; -use sp_runtime::BuildStorage; - -/// Configuration of a general Substrate test genesis block. -pub struct GenesisConfig { - changes_trie_config: Option, - balances: Vec<(AccountId, u128)>, - /// Additional storage key pairs that will be added to the genesis map. - extra_storage: Storage, -} - -impl GenesisConfig { - pub fn new( - changes_trie_config: Option, - endowed_accounts: Vec, - balance: u128, - extra_storage: Storage, - ) -> Self { - GenesisConfig { - changes_trie_config, - balances: endowed_accounts.into_iter().map(|a| (a, balance * currency::DOLLARS)).collect(), - extra_storage, - } - } - - pub fn genesis_map(&self) -> Storage { - // Assimilate the system genesis config. - let mut storage = Storage { - top: BTreeMap::new(), - children_default: self.extra_storage.children_default.clone(), - }; - let config = crate::GenesisConfig { - system: Some(system::GenesisConfig { - changes_trie_config: self.changes_trie_config.clone(), - code: WASM_BINARY.to_vec(), - }), - babe: None, - indices: None, - balances: Some(balances::GenesisConfig { - balances: self.balances.clone() - }), - staking: None, - session: None, - grandpa: None, - claims: None, - parachains: None, - registrar: None, - vesting: None, - }; - config.assimilate_storage(&mut storage).expect("Adding `system::GensisConfig` to the genesis"); - - storage - } -} diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 8b79ff22d66db85463ca888df24c1a19de2c0e36..ce20c720e177636a91ec67785bf7adbda4d7356e 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -22,32 +22,31 @@ use rstd::prelude::*; use codec::{Encode, Decode}; -use primitives::{ +use primitives::v0 as p_v0; +use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, Hash as HashT, Nonce, Signature, Moment, - parachain::{self, ActiveParas, AbridgedCandidateReceipt, SigningContext}, ValidityError, }; use runtime_common::{ - attestations, claims, parachains, registrar, slots, SlowAdjustingFeeUpdate, - impls::CurrencyToVoteHandler, + claims, SlowAdjustingFeeUpdate, impls::CurrencyToVoteHandler, BlockHashCount, MaximumBlockWeight, AvailableBlockRatio, - MaximumBlockLength, BlockExecutionWeight, ExtrinsicBaseWeight, + MaximumBlockLength, BlockExecutionWeight, ExtrinsicBaseWeight, ParachainSessionKeyPlaceholder, }; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - ApplyExtrinsicResult, Perbill, RuntimeDebug, KeyTypeId, + ApplyExtrinsicResult, Perbill, KeyTypeId, transaction_validity::{ - TransactionValidity, InvalidTransaction, TransactionValidityError, TransactionSource, TransactionPriority, + TransactionValidity, TransactionSource, TransactionPriority, }, curve::PiecewiseLinear, traits::{ - BlakeTwo256, Block as BlockT, StaticLookup, SignedExtension, OpaqueKeys, ConvertInto, - DispatchInfoOf, Extrinsic as ExtrinsicT, SaturatedConversion, Verify, + BlakeTwo256, Block as BlockT, StaticLookup, OpaqueKeys, ConvertInto, + Extrinsic as ExtrinsicT, SaturatedConversion, Verify, }, }; -use version::RuntimeVersion; -use grandpa::{AuthorityId as GrandpaId, fg_primitives}; +use sp_version::RuntimeVersion; +use pallet_grandpa::{AuthorityId as GrandpaId, fg_primitives}; #[cfg(any(feature = "std", test))] -use version::NativeVersion; +use sp_version::NativeVersion; use sp_core::OpaqueMetadata; use 
sp_staking::SessionIndex; use frame_support::{ @@ -55,34 +54,31 @@ use frame_support::{ traits::{KeyOwnerProofSystem, Randomness}, weights::Weight, }; +use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; -use session::historical as session_historical; +use pallet_session::historical as session_historical; #[cfg(feature = "std")] -pub use staking::StakerStatus; +pub use pallet_staking::StakerStatus; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; -pub use timestamp::Call as TimestampCall; -pub use balances::Call as BalancesCall; -pub use attestations::{Call as AttestationsCall, MORE_ATTESTATIONS_IDENTIFIER}; -pub use parachains::Call as ParachainsCall; +pub use pallet_timestamp::Call as TimestampCall; +pub use pallet_balances::Call as BalancesCall; /// Constant values used within the runtime. pub mod constants; -#[cfg(feature = "std")] -pub mod genesismap; use constants::{time::*, currency::*, fee::*}; // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -/// Runtime version (Kusama). +/// Runtime version (Test). pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("polkadot-test-runtime"), impl_name: create_runtime_str!("parity-polkadot-test-runtime"), authoring_version: 2, - spec_version: 1053, + spec_version: 1054, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -97,40 +93,11 @@ pub fn native_version() -> NativeVersion { } } -/// Avoid processing transactions from slots and parachain registrar. -#[derive(Default, Encode, Decode, Clone, Eq, PartialEq, RuntimeDebug)] -pub struct RestrictFunctionality; -impl SignedExtension for RestrictFunctionality { - const IDENTIFIER: &'static str = "RestrictFunctionality"; - type AccountId = AccountId; - type Call = Call; - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed(&self) -> rstd::result::Result<(), TransactionValidityError> { Ok(()) } - - fn validate( - &self, - _: &Self::AccountId, - call: &Self::Call, - _: &DispatchInfoOf, - _: usize - ) - -> TransactionValidity - { - match call { - Call::Slots(_) | Call::Registrar(_) - => Err(InvalidTransaction::Custom(ValidityError::NoPermission.into()).into()), - _ => Ok(Default::default()), - } - } -} - parameter_types! { pub const Version: RuntimeVersion = VERSION; } -impl system::Trait for Runtime { +impl frame_system::Trait for Runtime { type BaseCallFilter = (); type Origin = Origin; type Call = Call; @@ -152,12 +119,13 @@ impl system::Trait for Runtime { type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type ModuleToIndex = ModuleToIndex; - type AccountData = balances::AccountData; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = (); } -impl system::offchain::SendTransactionTypes for Runtime where +impl frame_system::offchain::SendTransactionTypes for Runtime where Call: From, { type OverarchingCall = Call; @@ -165,46 +133,62 @@ impl system::offchain::SendTransactionTypes for Runtime where } parameter_types! 
{ - pub const EpochDuration: u64 = EPOCH_DURATION_IN_BLOCKS as u64; - pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; + pub storage EpochDuration: u64 = EPOCH_DURATION_IN_BLOCKS as u64; + pub storage ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl babe::Trait for Runtime { +impl pallet_babe::Trait for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; // session module is the trigger - type EpochChangeTrigger = babe::ExternalTrigger; + type EpochChangeTrigger = pallet_babe::ExternalTrigger; + + type KeyOwnerProofSystem = (); + + type KeyOwnerProof = >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; + + type HandleEquivocation = (); } parameter_types! { - pub const IndexDeposit: Balance = 1 * DOLLARS; + pub storage IndexDeposit: Balance = 1 * DOLLARS; } -impl indices::Trait for Runtime { +impl pallet_indices::Trait for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; type Event = Event; + type WeightInfo = (); } parameter_types! { - pub const ExistentialDeposit: Balance = 1 * CENTS; + pub storage ExistentialDeposit: Balance = 1 * CENTS; } -impl balances::Trait for Runtime { +impl pallet_balances::Trait for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = (); } parameter_types! { - pub const TransactionByteFee: Balance = 10 * MILLICENTS; + pub storage TransactionByteFee: Balance = 10 * MILLICENTS; } -impl transaction_payment::Trait for Runtime { +impl pallet_transaction_payment::Trait for Runtime { type Currency = Balances; type OnTransactionPayment = (); type TransactionByteFee = TransactionByteFee; @@ -213,58 +197,61 @@ impl transaction_payment::Trait for Runtime { } parameter_types! { - pub const MinimumPeriod: u64 = 0; + pub storage SlotDuration: u64 = SLOT_DURATION; + pub storage MinimumPeriod: u64 = SlotDuration::get() / 2; } -impl timestamp::Trait for Runtime { +impl pallet_timestamp::Trait for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); } parameter_types! { - pub const UncleGenerations: u32 = 0; + pub storage UncleGenerations: u32 = 0; } // TODO: substrate#2986 implement this properly -impl authorship::Trait for Runtime { - type FindAuthor = session::FindAccountFromAuthorIndex; +impl pallet_authorship::Trait for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); type EventHandler = Staking; } parameter_types! { - pub const Period: BlockNumber = 10 * MINUTES; - pub const Offset: BlockNumber = 0; + pub storage Period: BlockNumber = 10 * MINUTES; + pub storage Offset: BlockNumber = 0; } impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, pub babe: Babe, - pub parachain_validator: Parachains, + pub parachain_validator: ParachainSessionKeyPlaceholder, } } parameter_types! 
{ - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); + pub storage DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl session::Trait for Runtime { +impl pallet_session::Trait for Runtime { type Event = Event; type ValidatorId = AccountId; - type ValidatorIdOf = staking::StashOf; + type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; type SessionManager = Staking; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); } -impl session::historical::Trait for Runtime { - type FullIdentification = staking::Exposure; - type FullIdentificationOf = staking::ExposureOf; +impl pallet_session::historical::Trait for Runtime { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } pallet_staking_reward_curve::build! { @@ -280,20 +267,20 @@ pallet_staking_reward_curve::build! { parameter_types! { // Six sessions in an era (6 hours). - pub const SessionsPerEra: SessionIndex = 6; + pub storage SessionsPerEra: SessionIndex = 6; // 28 eras for unbonding (7 days). - pub const BondingDuration: staking::EraIndex = 28; - // 28 eras in which slashes can be cancelled (7 days). - pub const SlashDeferDuration: staking::EraIndex = 28; + pub storage BondingDuration: pallet_staking::EraIndex = 28; + // 27 eras in which slashes can be cancelled (a bit less than 7 days). + pub storage SlashDeferDuration: pallet_staking::EraIndex = 27; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const MaxNominatorRewardedPerValidator: u32 = 64; - pub const ElectionLookahead: BlockNumber = 0; - pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; - pub const MaxIterations: u32 = 10; + pub storage MaxNominatorRewardedPerValidator: u32 = 64; + pub storage ElectionLookahead: BlockNumber = 0; + pub storage StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; + pub storage MaxIterations: u32 = 10; pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); } -impl staking::Trait for Runtime { +impl pallet_staking::Trait for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVoteHandler; @@ -305,7 +292,7 @@ impl staking::Trait for Runtime { type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; // A majority of the council can cancel the slash. - type SlashCancelOrigin = system::EnsureNever<()>; + type SlashCancelOrigin = frame_system::EnsureNever<()>; type SessionInterface = Self; type RewardCurve = RewardCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; @@ -315,9 +302,11 @@ impl staking::Trait for Runtime { type UnsignedPriority = StakingUnsignedPriority; type MaxIterations = MaxIterations; type MinSolutionScoreBump = MinSolutionScoreBump; + type WeightInfo = (); + } -impl grandpa::Trait for Runtime { +impl pallet_grandpa::Trait for Runtime { type Event = Event; type Call = Call; @@ -334,58 +323,14 @@ impl grandpa::Trait for Runtime { type HandleEquivocation = (); } -parameter_types! { - pub const AttestationPeriod: BlockNumber = 50; -} - -impl attestations::Trait for Runtime { - type AttestationPeriod = AttestationPeriod; - type ValidatorIdentities = parachains::ValidatorIdentities; - type RewardAttestation = Staking; -} - -parameter_types! 
{ - pub const MaxCodeSize: u32 = 10 * 1024 * 1024; // 10 MB - pub const MaxHeadDataSize: u32 = 20 * 1024; // 20 KB - - pub const ValidationUpgradeFrequency: BlockNumber = 2; - pub const ValidationUpgradeDelay: BlockNumber = 1; - pub const SlashPeriod: BlockNumber = 1 * MINUTES; -} - -impl parachains::Trait for Runtime { - type AuthorityId = primitives::fisherman::FishermanAppCrypto; - type Origin = Origin; - type Call = Call; - type ParachainCurrency = Balances; - type BlockNumberConversion = sp_runtime::traits::Identity; - type Randomness = RandomnessCollectiveFlip; - type ActiveParachains = Registrar; - type Registrar = Registrar; - type MaxCodeSize = MaxCodeSize; - type MaxHeadDataSize = MaxHeadDataSize; - - type ValidationUpgradeFrequency = ValidationUpgradeFrequency; - type ValidationUpgradeDelay = ValidationUpgradeDelay; - type SlashPeriod = SlashPeriod; - - type Proof = sp_session::MembershipProof; - type KeyOwnerProofSystem = session::historical::Module; - type IdentificationTuple = < - Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, Vec)> - >::IdentificationTuple; - type ReportOffence = Offences; - type BlockHashConversion = sp_runtime::traits::Identity; -} - -impl system::offchain::CreateSignedTransaction for Runtime where +impl frame_system::offchain::CreateSignedTransaction for Runtime where Call: From, { - fn create_transaction>( + fn create_transaction>( call: Call, public: ::Signer, account: AccountId, - nonce: ::Index, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { let period = BlockHashCount::get() .checked_next_power_of_two() @@ -397,16 +342,13 @@ impl system::offchain::CreateSignedTransaction for Runtime .saturating_sub(1); let tip = 0; let extra: SignedExtra = ( - RestrictFunctionality, - system::CheckSpecVersion::::new(), - system::CheckTxVersion::::new(), - system::CheckGenesis::::new(), - system::CheckMortality::::from(generic::Era::mortal(period, current_block)), - system::CheckNonce::::from(nonce), - system::CheckWeight::::new(), - transaction_payment::ChargeTransactionPayment::::from(tip), - registrar::LimitParathreadCommits::::new(), - parachains::ValidateDoubleVoteReports::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckMortality::::from(generic::Era::mortal(period, current_block)), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), ); let raw_payload = SignedPayload::new(call, extra).map_err(|e| { debug::warn!("Unable to create signed payload: {:?}", e); @@ -420,50 +362,28 @@ impl system::offchain::CreateSignedTransaction for Runtime } } -impl system::offchain::SigningTypes for Runtime { +impl frame_system::offchain::SigningTypes for Runtime { type Public = ::Signer; type Signature = Signature; } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub storage OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); } -impl offences::Trait for Runtime { +impl pallet_offences::Trait for Runtime { type Event = Event; - type IdentificationTuple = session::historical::IdentificationTuple; + type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); } -parameter_types! 
{ - pub const ParathreadDeposit: Balance = 5 * DOLLARS; - pub const QueueSize: usize = 2; - pub const MaxRetries: u32 = 3; -} - -impl registrar::Trait for Runtime { - type Event = Event; - type Origin = Origin; - type Currency = Balances; - type ParathreadDeposit = ParathreadDeposit; - type SwapAux = Slots; - type QueueSize = QueueSize; - type MaxRetries = MaxRetries; -} +impl pallet_authority_discovery::Trait for Runtime {} parameter_types! { - pub const LeasePeriod: BlockNumber = 100_000; - pub const EndingPeriod: BlockNumber = 1000; -} - -impl slots::Trait for Runtime { - type Event = Event; - type Currency = Balances; - type Parachains = Registrar; - type LeasePeriod = LeasePeriod; - type EndingPeriod = EndingPeriod; - type Randomness = RandomnessCollectiveFlip; + pub storage LeasePeriod: BlockNumber = 100_000; + pub storage EndingPeriod: BlockNumber = 1000; } parameter_types! { @@ -474,58 +394,61 @@ impl claims::Trait for Runtime { type Event = Event; type VestingSchedule = Vesting; type Prefix = Prefix; - type MoveClaimOrigin = system::EnsureRoot; + type MoveClaimOrigin = frame_system::EnsureRoot; } parameter_types! { - pub const MinVestedTransfer: Balance = 100 * DOLLARS; + pub storage MinVestedTransfer: Balance = 100 * DOLLARS; } -impl vesting::Trait for Runtime { +impl pallet_vesting::Trait for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); +} + +impl pallet_sudo::Trait for Runtime { + type Event = Event; + type Call = Call; } construct_runtime! { pub enum Runtime where Block = Block, - NodeBlock = primitives::Block, + NodeBlock = primitives::v1::Block, UncheckedExtrinsic = UncheckedExtrinsic { // Basic stuff; balances is uncallable initially. - System: system::{Module, Call, Storage, Config, Event}, - RandomnessCollectiveFlip: randomness_collective_flip::{Module, Storage}, + System: frame_system::{Module, Call, Storage, Config, Event}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Storage}, // Must be before session. - Babe: babe::{Module, Call, Storage, Config, Inherent(Timestamp)}, + Babe: pallet_babe::{Module, Call, Storage, Config, Inherent}, - Timestamp: timestamp::{Module, Call, Storage, Inherent}, - Indices: indices::{Module, Call, Storage, Config, Event}, - Balances: balances::{Module, Call, Storage, Config, Event}, - TransactionPayment: transaction_payment::{Module, Storage}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + Indices: pallet_indices::{Module, Call, Storage, Config, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Module, Storage}, // Consensus support. - Authorship: authorship::{Module, Call, Storage}, - Staking: staking::{Module, Call, Storage, Config, Event, ValidateUnsigned}, - Offences: offences::{Module, Call, Storage, Event}, + Authorship: pallet_authorship::{Module, Call, Storage}, + Staking: pallet_staking::{Module, Call, Storage, Config, Event, ValidateUnsigned}, + Offences: pallet_offences::{Module, Call, Storage, Event}, Historical: session_historical::{Module}, - Session: session::{Module, Call, Storage, Event, Config}, - Grandpa: grandpa::{Module, Call, Storage, Config, Event}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event}, + AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, // Claims. 
Usable initially. Claims: claims::{Module, Call, Storage, Event, Config, ValidateUnsigned}, - // Parachains stuff; slots are disabled (no auctions initially). The rest are safe as they - // have no public dispatchables. - Parachains: parachains::{Module, Call, Storage, Config, Inherent, Origin}, - Attestations: attestations::{Module, Call, Storage}, - Slots: slots::{Module, Call, Storage, Event}, - Registrar: registrar::{Module, Call, Storage, Event, Config}, - // Vesting. Usable initially, but removed once all vesting is finished. - Vesting: vesting::{Module, Call, Storage, Event, Config}, + Vesting: pallet_vesting::{Module, Call, Storage, Event, Config}, + + // Sudo. Last module. + Sudo: pallet_sudo::{Module, Call, Storage, Config, Event}, } } @@ -541,23 +464,20 @@ pub type SignedBlock = generic::SignedBlock; pub type BlockId = generic::BlockId; /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( - RestrictFunctionality, - system::CheckSpecVersion, - system::CheckTxVersion, - system::CheckGenesis, - system::CheckMortality, - system::CheckNonce, - system::CheckWeight, - transaction_payment::ChargeTransactionPayment::, - registrar::LimitParathreadCommits, - parachains::ValidateDoubleVoteReports, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment::, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Extrinsic type that has already been checked. pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. -pub type Executive = executive::Executive, Runtime, AllModules>; +pub type Executive = frame_executive::Executive, Runtime, AllModules>; /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; @@ -619,42 +539,67 @@ sp_api::impl_runtime_apis! { } } - impl parachain::ParachainHost for Runtime { - fn validators() -> Vec { - Parachains::authorities() + impl offchain_primitives::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) } - fn duty_roster() -> parachain::DutyRoster { - Parachains::calculate_duty_roster().0 + } + + impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { + fn authorities() -> Vec { + Vec::new() } - fn active_parachains() -> Vec<(parachain::Id, Option<(parachain::CollatorId, parachain::Retriable)>)> { - Registrar::active_paras() + } + + // Dummy implementation to continue supporting old parachains runtime temporarily. + impl p_v0::ParachainHost for Runtime { + fn validators() -> Vec { + // this is a compile-time check of size equality. note that we don't invoke + // the function and nothing here is unsafe. + let _ = core::mem::transmute::; + + // Yes, these aren't actually the parachain session keys. + // It doesn't matter, but we shouldn't return a zero-sized vector here. 
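The `core::mem::transmute` line above lost its type arguments to formatting; in the source it names a transmute between the legacy parachain `ValidatorId` type and the session's `AccountId` without ever calling it. That is a compile-time size check: the function item only exists when the two types have identical sizes. A self-contained sketch of the trick, with stand-in types:

```rust
// Two stand-in key types with identical layout.
pub struct SessionKey(pub [u8; 32]);
pub struct LegacyValidatorId(pub [u8; 32]);

fn _assert_same_size() {
	// Never called, and nothing here is unsafe: merely naming this
	// transmute instance forces the compiler to verify the sizes match.
	// Change one array to [u8; 31] and this stops compiling.
	let _ = core::mem::transmute::<SessionKey, LegacyValidatorId>;
}
```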
+ // As there are no parachains + Session::validators() + .into_iter() + .map(|k| k.using_encoded(|s| Decode::decode(&mut &s[..])) + .expect("correct size and raw-bytes; qed")) + .collect() } - fn global_validation_schedule() -> parachain::GlobalValidationSchedule { - Parachains::global_validation_schedule() + fn duty_roster() -> p_v0::DutyRoster { + let v = Session::validators(); + p_v0::DutyRoster { validator_duty: (0..v.len()).map(|_| p_v0::Chain::Relay).collect() } } - fn local_validation_data(id: parachain::Id) -> Option { - Parachains::current_local_validation_data(&id) + fn active_parachains() -> Vec<(p_v0::Id, Option<(p_v0::CollatorId, p_v0::Retriable)>)> { + Vec::new() } - fn parachain_code(id: parachain::Id) -> Option { - Parachains::parachain_code(&id) + fn global_validation_data() -> p_v0::GlobalValidationData { + p_v0::GlobalValidationData { + max_code_size: 1, + max_head_data_size: 1, + block_number: System::block_number().saturating_sub(1), + } } - fn get_heads(extrinsics: Vec<::Extrinsic>) - -> Option> + fn local_validation_data(_id: p_v0::Id) -> Option { + None + } + fn parachain_code(_id: p_v0::Id) -> Option { + None + } + fn get_heads(_extrinsics: Vec<::Extrinsic>) + -> Option> { - extrinsics - .into_iter() - .find_map(|ex| match UncheckedExtrinsic::decode(&mut ex.encode().as_slice()) { - Ok(ex) => match ex.function { - Call::Parachains(ParachainsCall::set_heads(heads)) => { - Some(heads.into_iter().map(|c| c.candidate).collect()) - } - _ => None, - } - Err(_) => None, - }) - } - fn signing_context() -> SigningContext { - Parachains::signing_context() + None + } + fn signing_context() -> p_v0::SigningContext { + p_v0::SigningContext { + parent_hash: System::parent_hash(), + session_index: Session::current_index(), + } + } + fn downward_messages(_id: p_v0::Id) -> Vec { + Vec::new() } } @@ -663,7 +608,7 @@ sp_api::impl_runtime_apis! { Grandpa::grandpa_authorities() } - fn submit_report_equivocation_extrinsic( + fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: fg_primitives::EquivocationProof< ::Hash, sp_runtime::traits::NumberFor, @@ -701,6 +646,20 @@ sp_api::impl_runtime_apis! { fn current_epoch_start() -> babe_primitives::SlotNumber { Babe::current_epoch_start() } + + fn generate_key_ownership_proof( + _slot_number: babe_primitives::SlotNumber, + _authority_id: babe_primitives::AuthorityId, + ) -> Option { + None + } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: babe_primitives::EquivocationProof<::Header>, + _key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + None + } } impl sp_session::SessionKeys for Runtime { @@ -715,7 +674,7 @@ sp_api::impl_runtime_apis! { } } - impl system_rpc_runtime_api::AccountNonceApi for Runtime { + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { fn account_nonce(account: AccountId) -> Nonce { System::account_nonce(account) } @@ -724,9 +683,8 @@ sp_api::impl_runtime_apis! 
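The `using_encoded`/`Decode` round trip in `validators()` is the companion to that size check: it converts `Session::validators()` into the legacy key type by re-interpreting the SCALE bytes, which is only sound because both sides are plain 32-byte wrappers with identical encodings. Sketched with the same stand-in types:

```rust
use codec::{Decode, Encode};

#[derive(Encode)]
pub struct SessionKey(pub [u8; 32]);

#[derive(Decode)]
pub struct LegacyValidatorId(pub [u8; 32]);

fn convert(key: SessionKey) -> LegacyValidatorId {
	// Encode to raw SCALE bytes, then decode as the target type.
	key.using_encoded(|bytes| LegacyValidatorId::decode(&mut &bytes[..]))
		.expect("correct size and raw-bytes; qed")
}
```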
{ impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< Block, Balance, - UncheckedExtrinsic, > for Runtime { - fn query_info(uxt: UncheckedExtrinsic, len: u32) -> RuntimeDispatchInfo { + fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { TransactionPayment::query_info(uxt, len) } } diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index d32def670f354b75c119035d4cfd68f55b7a4ad2..739bbcc8eaadd9ad41079b22b13eb84dc5fa7427 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -1,18 +1,18 @@ [package] name = "westend-runtime" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } log = { version = "0.3.9", optional = true } rustc-hex = { version = "2.0.1", default-features = false } serde = { version = "1.0.102", default-features = false } serde_derive = { version = "1.0.102", optional = true } -smallvec = "1.4.0" +smallvec = "1.4.1" static_assertions = "1.1.0" authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -26,51 +26,51 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -version = { package = "sp-version", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -authority-discovery = { package = "pallet-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -authorship = { package = "pallet-authorship", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -babe = { package = "pallet-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -balances = { package = "pallet-balances", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -transaction-payment = { package = "pallet-transaction-payment", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -transaction-payment-rpc-runtime-api = { package = "pallet-transaction-payment-rpc-runtime-api", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -collective = { package = "pallet-collective", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -democracy = { package = 
"pallet-democracy", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -elections-phragmen = { package = "pallet-elections-phragmen", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -executive = { package = "frame-executive", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -finality-tracker = { package = "pallet-finality-tracker", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -grandpa = { package = "pallet-grandpa", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -identity = { package = "pallet-identity", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -im-online = { package = "pallet-im-online", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -indices = { package = "pallet-indices", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -membership = { package = "pallet-membership", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -multisig = { package = "pallet-multisig", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -nicks = { package = "pallet-nicks", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offences = { package = "pallet-offences", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -proxy = { package = "pallet-proxy", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -randomness-collective-flip = { package = "pallet-randomness-collective-flip", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -recovery = { package = "pallet-recovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -scheduler = { package = "pallet-scheduler", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -session = { package = "pallet-session", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -society = { package = "pallet-society", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-elections-phragmen = { package = 
"pallet-elections-phragmen", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-finality-tracker = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-recovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-society = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -staking = { package = "pallet-staking", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", git = "https://github.com/paritytech/substrate", branch = "master" } -sudo = { package = "pallet-sudo", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -system = { package = "frame-system", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -system_rpc_runtime_api = { package = "frame-system-rpc-runtime-api", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -timestamp = { package = "pallet-timestamp", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -treasury = { package = "pallet-treasury", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -utility = { package = "pallet-utility", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -vesting = { package = "pallet-vesting", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", 
branch = "master", default-features = false } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } pallet-session-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -hex-literal = { version = "0.2.1", optional = true } +hex-literal = { version = "0.2.1" } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } @@ -93,7 +93,7 @@ no_std = [] only-staking = [] std = [ "authority-discovery-primitives/std", - "authority-discovery/std", + "pallet-authority-discovery/std", "bitvec/std", "primitives/std", "rustc-hex/std", @@ -108,46 +108,46 @@ std = [ "sp-std/std", "sp-io/std", "frame-support/std", - "authorship/std", - "balances/std", - "transaction-payment/std", - "transaction-payment-rpc-runtime-api/std", - "collective/std", - "elections-phragmen/std", - "democracy/std", - "executive/std", - "finality-tracker/std", - "grandpa/std", - "identity/std", - "im-online/std", - "indices/std", - "membership/std", - "multisig/std", - "nicks/std", - "offences/std", - "proxy/std", - "recovery/std", + "pallet-authorship/std", + "pallet-balances/std", + "pallet-transaction-payment/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-collective/std", + "pallet-elections-phragmen/std", + "pallet-democracy/std", + "frame-executive/std", + "pallet-finality-tracker/std", + "pallet-grandpa/std", + "pallet-identity/std", + "pallet-im-online/std", + "pallet-indices/std", + "pallet-membership/std", + "pallet-multisig/std", + "pallet-nicks/std", + "pallet-offences/std", + "pallet-proxy/std", + "pallet-recovery/std", "sp-runtime/std", "sp-staking/std", - "scheduler/std", - "session/std", - "society/std", - "staking/std", - "sudo/std", - "system/std", - "system_rpc_runtime_api/std", - "timestamp/std", - "treasury/std", - "version/std", - "utility/std", - "vesting/std", + "pallet-scheduler/std", + "pallet-session/std", + "pallet-society/std", + "pallet-staking/std", + "pallet-sudo/std", + "frame-system/std", + "frame-system-rpc-runtime-api/std", + "pallet-timestamp/std", + "pallet-treasury/std", + "sp-version/std", + "pallet-utility/std", + "pallet-vesting/std", "serde_derive", "serde/std", "log", - "babe/std", + "pallet-babe/std", "babe-primitives/std", "sp-session/std", - "randomness-collective-flip/std", + 
"pallet-randomness-collective-flip/std", "runtime-common/std", ] runtime-benchmarks = [ @@ -155,22 +155,29 @@ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system-benchmarking", - "system/runtime-benchmarks", + "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", - "balances/runtime-benchmarks", - "collective/runtime-benchmarks", - "democracy/runtime-benchmarks", - "elections-phragmen/runtime-benchmarks", - "identity/runtime-benchmarks", - "im-online/runtime-benchmarks", - "scheduler/runtime-benchmarks", - "society/runtime-benchmarks", - "staking/runtime-benchmarks", - "timestamp/runtime-benchmarks", - "treasury/runtime-benchmarks", - "utility/runtime-benchmarks", - "vesting/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collective/runtime-benchmarks", + "pallet-democracy/runtime-benchmarks", + "pallet-elections-phragmen/runtime-benchmarks", + "pallet-identity/runtime-benchmarks", + "pallet-im-online/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks", + "pallet-society/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-vesting/runtime-benchmarks", "pallet-offences-benchmarking", "pallet-session-benchmarking", - "hex-literal", + # uncomment when it is made optional again + # "hex-literal", ] +# When enabled, the runtime api will not be build. +# +# This is required by Cumulus to access certain types of the +# runtime without clashing with the runtime api exported functions +# in WASM. +disable-runtime-api = [] diff --git a/runtime/westend/build.rs b/runtime/westend/build.rs index 56051bd627f6c37b55324eaa425df106bbdaa2cb..af219a29319898d2f6180ef13bbe5263cd114727 100644 --- a/runtime/westend/build.rs +++ b/runtime/westend/build.rs @@ -19,7 +19,7 @@ use wasm_builder_runner::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("1.0.11") + .with_wasm_builder_from_crates("2.0.0") .import_memory() .export_heap_base() .build() diff --git a/runtime/westend/src/constants.rs b/runtime/westend/src/constants.rs index 57250b6d9e595719399edb10a6ce4125782bf460..6fb7e934e1f1d166056ea21f6ca3fe9ff8199fb0 100644 --- a/runtime/westend/src/constants.rs +++ b/runtime/westend/src/constants.rs @@ -16,7 +16,7 @@ /// Money matters. pub mod currency { - use primitives::Balance; + use primitives::v0::Balance; pub const DOTS: Balance = 1_000_000_000_000; pub const DOLLARS: Balance = DOTS; @@ -24,13 +24,13 @@ pub mod currency { pub const MILLICENTS: Balance = CENTS / 1_000; pub const fn deposit(items: u32, bytes: u32) -> Balance { - items as Balance * 20 * DOLLARS + (bytes as Balance) * 100 * MILLICENTS + items as Balance * 1 * DOLLARS + (bytes as Balance) * 5 * MILLICENTS } } /// Time and blocks. pub mod time { - use primitives::{Moment, BlockNumber}; + use primitives::v0::{Moment, BlockNumber}; pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 1 * HOURS; @@ -47,7 +47,7 @@ pub mod time { /// Fee-related. pub mod fee { pub use sp_runtime::Perbill; - use primitives::Balance; + use primitives::v0::Balance; use runtime_common::ExtrinsicBaseWeight; use frame_support::weights::{ WeightToFeePolynomial, WeightToFeeCoefficient, WeightToFeeCoefficients, @@ -61,7 +61,7 @@ pub mod fee { /// node's balance type. 
/// /// This should typically create a mapping between the following ranges: - /// - [0, system::MaximumBlockWeight] + /// - [0, frame_system::MaximumBlockWeight] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index bb3d74550302423ddc1318c4102cccbbdfda54e1..0581eb4ece24000d648aba0bdc7882b56fd2ef66 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -22,19 +22,20 @@ use sp_std::prelude::*; use codec::{Encode, Decode}; -use primitives::{ +use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, Hash, Nonce, Signature, Moment, - parachain::{self, ActiveParas, AbridgedCandidateReceipt, SigningContext}, }; +use primitives::v0 as p_v0; use runtime_common::{ - attestations, parachains, registrar, SlowAdjustingFeeUpdate, + dummy, purchase, SlowAdjustingFeeUpdate, impls::{CurrencyToVoteHandler, ToAuthor}, BlockHashCount, MaximumBlockWeight, AvailableBlockRatio, MaximumBlockLength, BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, MaximumExtrinsicWeight, + ParachainSessionKeyPlaceholder, }; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - ApplyExtrinsicResult, KeyTypeId, Perbill, curve::PiecewiseLinear, + ApplyExtrinsicResult, KeyTypeId, Permill, Perbill, curve::PiecewiseLinear, transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}, traits::{ BlakeTwo256, Block as BlockT, OpaqueKeys, ConvertInto, IdentityLookup, @@ -43,48 +44,53 @@ use sp_runtime::{ }; #[cfg(feature = "runtime-benchmarks")] use sp_runtime::RuntimeString; -use version::RuntimeVersion; -use grandpa::{AuthorityId as GrandpaId, fg_primitives}; +use sp_version::RuntimeVersion; +use pallet_grandpa::{AuthorityId as GrandpaId, fg_primitives}; #[cfg(any(feature = "std", test))] -use version::NativeVersion; +use sp_version::NativeVersion; use sp_core::OpaqueMetadata; use sp_staking::SessionIndex; use frame_support::{ - parameter_types, construct_runtime, debug, RuntimeDebug, + parameter_types, ord_parameter_types, construct_runtime, debug, RuntimeDebug, traits::{KeyOwnerProofSystem, Randomness, Filter, InstanceFilter}, weights::Weight, }; -use im_online::sr25519::AuthorityId as ImOnlineId; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; -use transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; -use session::historical as session_historical; +use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; +use pallet_session::historical as session_historical; +use frame_system::{EnsureRoot, EnsureSignedBy, EnsureOneOf}; #[cfg(feature = "std")] -pub use staking::StakerStatus; +pub use pallet_staking::StakerStatus; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; -pub use timestamp::Call as TimestampCall; -pub use balances::Call as BalancesCall; -pub use attestations::{Call as AttestationsCall, MORE_ATTESTATIONS_IDENTIFIER}; -pub use parachains::Call as ParachainsCall; +pub use pallet_timestamp::Call as TimestampCall; +pub use pallet_balances::Call as BalancesCall; /// Constant values used within the runtime. pub mod constants; use constants::{time::*, currency::*, fee::*}; +// Weights used in the runtime +mod weights; + // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -/// Runtime version (Kusama). +/// Runtime version (Westend). 
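The `fee` module's doc comment describes mapping `[0, MaximumBlockWeight]` into the balance range; concretely, these runtimes do that by implementing `WeightToFeePolynomial` (the hunk above imports it). A sketch of the usual single-coefficient form; the calibration below (one base-weight extrinsic maps to 1/10 CENT) and the base-weight value are illustrative, not read from this diff:

```rust
use frame_support::weights::{
	Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
};
use smallvec::smallvec;
use sp_runtime::Perbill;

type Balance = u128;
const CENTS: Balance = 1_000_000_000_000 / 100;
// Stand-in for runtime_common::ExtrinsicBaseWeight (value illustrative).
const EXTRINSIC_BASE_WEIGHT: Weight = 125_000_000;

pub struct WeightToFee;
impl WeightToFeePolynomial for WeightToFee {
	type Balance = Balance;
	fn polynomial() -> WeightToFeeCoefficients<Self::Balance> {
		// Calibration: one base-weight extrinsic costs p/q, here 1/10 CENT.
		let p = CENTS;
		let q = 10 * Balance::from(EXTRINSIC_BASE_WEIGHT);
		smallvec![WeightToFeeCoefficient {
			degree: 1, // linear term: the fee grows proportionally with weight
			negative: false,
			coeff_frac: Perbill::from_rational_approximation(p % q, q),
			coeff_integer: p / q,
		}]
	}
}
```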
pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 32, + spec_version: 43, impl_version: 0, + #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + #[cfg(feature = "disable-runtime-api")] + apis: version::create_apis_vec![[]], + transaction_version: 3, }; /// Native version. @@ -96,11 +102,11 @@ pub fn native_version() -> NativeVersion { } } -/// Avoid processing transactions from slots and parachain registrar. +/// Accept all transactions. pub struct BaseFilter; impl Filter for BaseFilter { - fn filter(call: &Call) -> bool { - !matches!(call, Call::Registrar(_)) + fn filter(_: &Call) -> bool { + true } } @@ -108,7 +114,7 @@ parameter_types! { pub const Version: RuntimeVersion = VERSION; } -impl system::Trait for Runtime { +impl frame_system::Trait for Runtime { type BaseCallFilter = BaseFilter; type Origin = Origin; type Call = Call; @@ -130,16 +136,20 @@ impl system::Trait for Runtime { type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type ModuleToIndex = ModuleToIndex; - type AccountData = balances::AccountData; + type AccountData = pallet_balances::AccountData; type OnNewAccount = (); type OnKilledAccount = (); + type SystemWeightInfo = weights::frame_system::WeightInfo; } -impl scheduler::Trait for Runtime { +impl pallet_scheduler::Trait for Runtime { type Event = Event; type Origin = Origin; + type PalletsOrigin = OriginCaller; type Call = Call; type MaximumWeight = MaximumBlockWeight; + type ScheduleOrigin = EnsureRoot; + type WeightInfo = (); } parameter_types! { @@ -147,42 +157,59 @@ parameter_types! { pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl babe::Trait for Runtime { +impl pallet_babe::Trait for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; // session module is the trigger - type EpochChangeTrigger = babe::ExternalTrigger; + type EpochChangeTrigger = pallet_babe::ExternalTrigger; + + type KeyOwnerProofSystem = Historical; + + type KeyOwnerProof = >::Proof; + + type KeyOwnerIdentification = >::IdentificationTuple; + + type HandleEquivocation = + pallet_babe::EquivocationHandler; } parameter_types! { pub const IndexDeposit: Balance = 1 * DOLLARS; } -impl indices::Trait for Runtime { +impl pallet_indices::Trait for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; type Event = Event; + type WeightInfo = (); } parameter_types! { pub const ExistentialDeposit: Balance = 1 * CENTS; } -impl balances::Trait for Runtime { +impl pallet_balances::Trait for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; + type WeightInfo = weights::pallet_balances::WeightInfo; } parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; } -impl transaction_payment::Trait for Runtime { +impl pallet_transaction_payment::Trait for Runtime { type Currency = Balances; type OnTransactionPayment = ToAuthor; type TransactionByteFee = TransactionByteFee; @@ -193,10 +220,11 @@ impl transaction_payment::Trait for Runtime { parameter_types! 
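`BaseFilter` now admits every call, where it previously rejected `Call::Registrar(_)`. For contrast, a filter in the same shape that still blocks one pallet; the stand-in `Call` enum replaces the runtime's, and blocking `Sudo` is purely illustrative:

```rust
use frame_support::traits::Filter;

// Stand-in for the runtime's outer call enum.
pub enum Call {
	Sudo(()),
	Balances(()),
}

/// Example only: admit everything except sudo calls.
pub struct BlockSudoCalls;
impl Filter<Call> for BlockSudoCalls {
	fn filter(call: &Call) -> bool {
		!matches!(call, Call::Sudo(..))
	}
}
```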
{ pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl timestamp::Trait for Runtime { +impl pallet_timestamp::Trait for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; + type WeightInfo = weights::pallet_timestamp::WeightInfo; } parameter_types! { @@ -204,8 +232,8 @@ parameter_types! { } // TODO: substrate#2986 implement this properly -impl authorship::Trait for Runtime { - type FindAuthor = session::FindAccountFromAuthorIndex; +impl pallet_authorship::Trait for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); type EventHandler = (Staking, ImOnline); @@ -221,7 +249,7 @@ impl_opaque_keys! { pub grandpa: Grandpa, pub babe: Babe, pub im_online: ImOnline, - pub parachain_validator: Parachains, + pub parachain_validator: ParachainSessionKeyPlaceholder, pub authority_discovery: AuthorityDiscovery, } } @@ -230,21 +258,22 @@ parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl session::Trait for Runtime { +impl pallet_session::Trait for Runtime { type Event = Event; type ValidatorId = AccountId; - type ValidatorIdOf = staking::StashOf; + type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; - type SessionManager = session::historical::NoteHistoricalRoot; + type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type WeightInfo = (); } -impl session::historical::Trait for Runtime { - type FullIdentification = staking::Exposure; - type FullIdentificationOf = staking::ExposureOf; +impl pallet_session::historical::Trait for Runtime { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } pallet_staking_reward_curve::build! { @@ -262,9 +291,9 @@ parameter_types! { // Six sessions in an era (6 hours). pub const SessionsPerEra: SessionIndex = 6; // 28 eras for unbonding (7 days). - pub const BondingDuration: staking::EraIndex = 28; - // 28 eras in which slashes can be cancelled (7 days). - pub const SlashDeferDuration: staking::EraIndex = 28; + pub const BondingDuration: pallet_staking::EraIndex = 28; + // 27 eras in which slashes can be cancelled (slightly less than 7 days). + pub const SlashDeferDuration: pallet_staking::EraIndex = 27; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 64; // quarter of the last session will be for election. @@ -273,7 +302,7 @@ parameter_types! { pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); } -impl staking::Trait for Runtime { +impl pallet_staking::Trait for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVoteHandler; @@ -285,7 +314,7 @@ impl staking::Trait for Runtime { type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; // A majority of the council can cancel the slash. 
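Note that `SlashDeferDuration` drops from 28 to 27 eras here, as it did in the test runtime earlier: the deferral window has to end strictly before unbonding completes, so that a deferred slash is always applied before the offender can withdraw. An invariant like that can be pinned at compile time with `static_assertions`, which is already a dependency of these runtime crates:

```rust
use static_assertions::const_assert;

const BONDING_DURATION: u32 = 28; // eras to unbond (7 days)
const SLASH_DEFER_DURATION: u32 = 27; // eras in which a slash can be cancelled

// Fails to compile if the deferral window ever reaches the bonding duration.
const_assert!(SLASH_DEFER_DURATION < BONDING_DURATION);
```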
- type SlashCancelOrigin = system::EnsureRoot; + type SlashCancelOrigin = EnsureRoot; type SessionInterface = Self; type RewardCurve = RewardCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; @@ -295,6 +324,7 @@ impl staking::Trait for Runtime { type UnsignedPriority = StakingUnsignedPriority; type MaxIterations = MaxIterations; type MinSolutionScoreBump = MinSolutionScoreBump; + type WeightInfo = (); } parameter_types! { @@ -313,14 +343,15 @@ parameter_types! { pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); } -impl offences::Trait for Runtime { +impl pallet_offences::Trait for Runtime { type Event = Event; - type IdentificationTuple = session::historical::IdentificationTuple; + type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; + type WeightInfo = (); } -impl authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Trait for Runtime {} parameter_types! { pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _; @@ -331,15 +362,16 @@ parameter_types! { pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); } -impl im_online::Trait for Runtime { +impl pallet_im_online::Trait for Runtime { type AuthorityId = ImOnlineId; type Event = Event; type ReportUnresponsiveness = Offences; type SessionDuration = SessionDuration; type UnsignedPriority = StakingUnsignedPriority; + type WeightInfo = (); } -impl grandpa::Trait for Runtime { +impl pallet_grandpa::Trait for Runtime { type Event = Event; type Call = Call; @@ -353,76 +385,30 @@ impl grandpa::Trait for Runtime { GrandpaId, )>>::IdentificationTuple; - type HandleEquivocation = grandpa::EquivocationHandler< - Self::KeyOwnerIdentification, - primitives::fisherman::FishermanAppCrypto, - Runtime, - Offences, - >; + type HandleEquivocation = pallet_grandpa::EquivocationHandler; } parameter_types! { - pub WindowSize: BlockNumber = finality_tracker::DEFAULT_WINDOW_SIZE.into(); - pub ReportLatency: BlockNumber = finality_tracker::DEFAULT_REPORT_LATENCY.into(); + pub WindowSize: BlockNumber = pallet_finality_tracker::DEFAULT_WINDOW_SIZE.into(); + pub ReportLatency: BlockNumber = pallet_finality_tracker::DEFAULT_REPORT_LATENCY.into(); } -impl finality_tracker::Trait for Runtime { +impl pallet_finality_tracker::Trait for Runtime { type OnFinalizationStalled = (); type WindowSize = WindowSize; type ReportLatency = ReportLatency; } -parameter_types! { - pub const AttestationPeriod: BlockNumber = 50; -} - -impl attestations::Trait for Runtime { - type AttestationPeriod = AttestationPeriod; - type ValidatorIdentities = parachains::ValidatorIdentities; - type RewardAttestation = Staking; -} - -parameter_types! 
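`OffencesWeightSoftLimit` is `Perbill::from_percent(60) * MaximumBlockWeight`; `Perbill` scales any integer-like quantity by a parts-per-billion fraction, so the soft limit is simply 60% of the maximum block weight. A tiny check (the weight figure is illustrative, not Westend's real value):

```rust
use sp_runtime::Perbill;

fn main() {
	let max_block_weight: u64 = 2_000_000_000_000; // illustrative only
	let soft_limit = Perbill::from_percent(60) * max_block_weight;
	assert_eq!(soft_limit, 1_200_000_000_000);
}
```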
{ - pub const MaxCodeSize: u32 = 10 * 1024 * 1024; // 10 MB - pub const MaxHeadDataSize: u32 = 20 * 1024; // 20 KB - pub const ValidationUpgradeFrequency: BlockNumber = 2 * DAYS; - pub const ValidationUpgradeDelay: BlockNumber = 8 * HOURS; - pub const SlashPeriod: BlockNumber = 7 * DAYS; -} - -impl parachains::Trait for Runtime { - type AuthorityId = primitives::fisherman::FishermanAppCrypto; - type Origin = Origin; - type Call = Call; - type ParachainCurrency = Balances; - type BlockNumberConversion = sp_runtime::traits::Identity; - type Randomness = RandomnessCollectiveFlip; - type ActiveParachains = Registrar; - type Registrar = Registrar; - type MaxCodeSize = MaxCodeSize; - type MaxHeadDataSize = MaxHeadDataSize; - - type ValidationUpgradeFrequency = ValidationUpgradeFrequency; - type ValidationUpgradeDelay = ValidationUpgradeDelay; - type SlashPeriod = SlashPeriod; - - type Proof = sp_session::MembershipProof; - type KeyOwnerProofSystem = session::historical::Module; - type IdentificationTuple = )>>::IdentificationTuple; - type ReportOffence = Offences; - type BlockHashConversion = sp_runtime::traits::Identity; -} - /// Submits a transaction with the node's public and signature type. Adheres to the signed extension /// format of the chain. -impl system::offchain::CreateSignedTransaction for Runtime where +impl frame_system::offchain::CreateSignedTransaction for Runtime where Call: From, { - fn create_transaction>( + fn create_transaction>( call: Call, public: ::Signer, account: AccountId, - nonce: ::Index, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { // take the biggest period possible. let period = BlockHashCount::get() @@ -437,16 +423,13 @@ impl system::offchain::CreateSignedTransaction for Runtime .saturating_sub(1); let tip = 0; let extra: SignedExtra = ( - system::CheckSpecVersion::::new(), - system::CheckTxVersion::::new(), - system::CheckGenesis::::new(), - system::CheckMortality::::from(generic::Era::mortal(period, current_block)), - system::CheckNonce::::from(nonce), - system::CheckWeight::::new(), - transaction_payment::ChargeTransactionPayment::::from(tip), - registrar::LimitParathreadCommits::::new(), - parachains::ValidateDoubleVoteReports::::new(), - grandpa::ValidateEquivocationReport::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckMortality::::from(generic::Era::mortal(period, current_block)), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), ); let raw_payload = SignedPayload::new(call, extra).map_err(|e| { debug::warn!("Unable to create signed payload: {:?}", e); @@ -459,34 +442,18 @@ impl system::offchain::CreateSignedTransaction for Runtime } } -impl system::offchain::SigningTypes for Runtime { +impl frame_system::offchain::SigningTypes for Runtime { type Public = ::Signer; type Signature = Signature; } -impl system::offchain::SendTransactionTypes for Runtime where +impl frame_system::offchain::SendTransactionTypes for Runtime where Call: From, { type OverarchingCall = Call; type Extrinsic = UncheckedExtrinsic; } -parameter_types! 
{ - pub const ParathreadDeposit: Balance = 5 * DOLLARS; - pub const QueueSize: usize = 2; - pub const MaxRetries: u32 = 3; -} - -impl registrar::Trait for Runtime { - type Event = Event; - type Origin = Origin; - type Currency = Balances; - type ParathreadDeposit = ParathreadDeposit; - type SwapAux = (); - type QueueSize = QueueSize; - type MaxRetries = MaxRetries; -} - parameter_types! { // Minimum 100 bytes/KSM deposited (1 CENT/byte) pub const BasicDeposit: Balance = 10 * DOLLARS; // 258 bytes on-chain @@ -497,7 +464,7 @@ parameter_types! { pub const MaxRegistrars: u32 = 20; } -impl identity::Trait for Runtime { +impl pallet_identity::Trait for Runtime { type Event = Event; type Currency = Balances; type Slashed = (); @@ -507,13 +474,15 @@ impl identity::Trait for Runtime { type MaxSubAccounts = MaxSubAccounts; type MaxAdditionalFields = MaxAdditionalFields; type MaxRegistrars = MaxRegistrars; - type RegistrarOrigin = system::EnsureRoot; - type ForceOrigin = system::EnsureRoot; + type RegistrarOrigin = frame_system::EnsureRoot; + type ForceOrigin = frame_system::EnsureRoot; + type WeightInfo = (); } -impl utility::Trait for Runtime { +impl pallet_utility::Trait for Runtime { type Event = Event; type Call = Call; + type WeightInfo = weights::pallet_utility::WeightInfo; } parameter_types! { @@ -524,13 +493,14 @@ parameter_types! { pub const MaxSignatories: u16 = 100; } -impl multisig::Trait for Runtime { +impl pallet_multisig::Trait for Runtime { type Event = Event; type Call = Call; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; + type WeightInfo = (); } parameter_types! { @@ -540,7 +510,7 @@ parameter_types! { pub const RecoveryDeposit: Balance = 5 * DOLLARS; } -impl recovery::Trait for Runtime { +impl pallet_recovery::Trait for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -554,14 +524,15 @@ parameter_types! { pub const MinVestedTransfer: Balance = 100 * DOLLARS; } -impl vesting::Trait for Runtime { +impl pallet_vesting::Trait for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = (); } -impl sudo::Trait for Runtime { +impl pallet_sudo::Trait for Runtime { type Event = Event; type Call = Call; } @@ -572,6 +543,9 @@ parameter_types! { // Additional storage item size of 33 bytes. pub const ProxyDepositFactor: Balance = deposit(0, 33); pub const MaxProxies: u16 = 32; + pub const AnnouncementDepositBase: Balance = deposit(1, 8); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; } /// The type used to represent the kinds of proxying allowed. @@ -581,6 +555,7 @@ pub enum ProxyType { NonTransfer, Staking, SudoBalances, + IdentityJudgement, } impl Default for ProxyType { fn default() -> Self { Self::Any } } impl InstanceFilter for ProxyType { @@ -591,9 +566,9 @@ impl InstanceFilter for ProxyType { Call::System(..) | Call::Babe(..) | Call::Timestamp(..) | - Call::Indices(indices::Call::claim(..)) | - Call::Indices(indices::Call::free(..)) | - Call::Indices(indices::Call::freeze(..)) | + Call::Indices(pallet_indices::Call::claim(..)) | + Call::Indices(pallet_indices::Call::free(..)) | + Call::Indices(pallet_indices::Call::freeze(..)) | // Specifically omitting Indices `transfer`, `force_transfer` // Specifically omitting the entire Balances pallet Call::Authorship(..) 
| @@ -604,20 +579,20 @@ impl InstanceFilter for ProxyType { Call::Grandpa(..) | Call::ImOnline(..) | Call::AuthorityDiscovery(..) | - Call::Parachains(..) | - Call::Attestations(..) | - Call::Registrar(..) | + Call::DummyParachains(..) | + Call::DummyAttestations(..) | + Call::DummyRegistrar(..) | Call::Utility(..) | Call::Identity(..) | - Call::Recovery(recovery::Call::as_recovered(..)) | - Call::Recovery(recovery::Call::vouch_recovery(..)) | - Call::Recovery(recovery::Call::claim_recovery(..)) | - Call::Recovery(recovery::Call::close_recovery(..)) | - Call::Recovery(recovery::Call::remove_recovery(..)) | - Call::Recovery(recovery::Call::cancel_recovered(..)) | + Call::Recovery(pallet_recovery::Call::as_recovered(..)) | + Call::Recovery(pallet_recovery::Call::vouch_recovery(..)) | + Call::Recovery(pallet_recovery::Call::claim_recovery(..)) | + Call::Recovery(pallet_recovery::Call::close_recovery(..)) | + Call::Recovery(pallet_recovery::Call::remove_recovery(..)) | + Call::Recovery(pallet_recovery::Call::cancel_recovered(..)) | // Specifically omitting Recovery `create_recovery`, `initiate_recovery` - Call::Vesting(vesting::Call::vest(..)) | - Call::Vesting(vesting::Call::vest_other(..)) | + Call::Vesting(pallet_vesting::Call::vest(..)) | + Call::Vesting(pallet_vesting::Call::vest_other(..)) | // Specifically omitting Vesting `vested_transfer`, and `force_vested_transfer` Call::Scheduler(..) | // Specifically omitting Sudo pallet @@ -625,14 +600,17 @@ impl InstanceFilter for ProxyType { Call::Multisig(..) ), ProxyType::Staking => matches!(c, - Call::Staking(..) | Call::Utility(utility::Call::batch(..)) - | Call::Utility(utility::Call::as_limited_sub(..)) + Call::Staking(..) | Call::Utility(..) ), ProxyType::SudoBalances => match c { - Call::Sudo(sudo::Call::sudo(ref x)) => matches!(x.as_ref(), &Call::Balances(..)), - Call::Utility(utility::Call::batch(..)) => true, + Call::Sudo(pallet_sudo::Call::sudo(ref x)) => matches!(x.as_ref(), &Call::Balances(..)), + Call::Utility(..) => true, _ => false, }, + ProxyType::IdentityJudgement => matches!(c, + Call::Identity(pallet_identity::Call::provide_judgement(..)) + | Call::Utility(pallet_utility::Call::batch(..)) + ) } } fn is_superset(&self, o: &Self) -> bool { @@ -646,7 +624,7 @@ impl InstanceFilter for ProxyType { } } -impl proxy::Trait for Runtime { +impl pallet_proxy::Trait for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -654,66 +632,115 @@ impl proxy::Trait for Runtime { type ProxyDepositBase = ProxyDepositBase; type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = MaxProxies; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; } +parameter_types! { + pub const MaxStatementLength: usize = 1_000; + pub const UnlockedProportion: Permill = Permill::zero(); + pub const MaxUnlocked: Balance = 0; +} + +ord_parameter_types! 
{ + pub const PurchaseValidity: AccountId = AccountId::from( + // 5CqSB6zNHcp3mvTAyh5Vr2MbSdb7DgLi9yWoAppHRveGcYQh + hex_literal::hex!("221d409ba60508368d4448ccda40182aca2744bcdfa0881944c08108a9fd966d") + ); + pub const PurchaseConfiguration: AccountId = AccountId::from( + // 5FUP4BwQzi8F5WBTmaHsoobGbMSUTiX7Exwb7QzTjgNQypo1 + hex_literal::hex!("96c34c8c60b3690701176bdbc9b16aced2898d754385a84ee0cfe7fb015db800") + ); +} + +type ValidityOrigin = EnsureOneOf< + AccountId, + EnsureRoot, + EnsureSignedBy, +>; + +type ConfigurationOrigin = EnsureOneOf< + AccountId, + EnsureRoot, + EnsureSignedBy, +>; + +impl purchase::Trait for Runtime { + type Event = Event; + type Currency = Balances; + type VestingSchedule = Vesting; + type ValidityOrigin = ValidityOrigin; + type ConfigurationOrigin = ConfigurationOrigin; + type MaxStatementLength = MaxStatementLength; + type UnlockedProportion = UnlockedProportion; + type MaxUnlocked = MaxUnlocked; +} + +impl dummy::Trait for Runtime { } + construct_runtime! { pub enum Runtime where Block = Block, - NodeBlock = primitives::Block, + NodeBlock = primitives::v1::Block, UncheckedExtrinsic = UncheckedExtrinsic { // Basic stuff; balances is uncallable initially. - System: system::{Module, Call, Storage, Config, Event}, - RandomnessCollectiveFlip: randomness_collective_flip::{Module, Storage}, + System: frame_system::{Module, Call, Storage, Config, Event}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Storage}, // Must be before session. - Babe: babe::{Module, Call, Storage, Config, Inherent(Timestamp)}, + Babe: pallet_babe::{Module, Call, Storage, Config, Inherent, ValidateUnsigned}, - Timestamp: timestamp::{Module, Call, Storage, Inherent}, - Indices: indices::{Module, Call, Storage, Config, Event}, - Balances: balances::{Module, Call, Storage, Config, Event}, - TransactionPayment: transaction_payment::{Module, Storage}, + Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, + Indices: pallet_indices::{Module, Call, Storage, Config, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Module, Storage}, // Consensus support. - Authorship: authorship::{Module, Call, Storage}, - Staking: staking::{Module, Call, Storage, Config, Event, ValidateUnsigned}, - Offences: offences::{Module, Call, Storage, Event}, + Authorship: pallet_authorship::{Module, Call, Storage}, + Staking: pallet_staking::{Module, Call, Storage, Config, Event, ValidateUnsigned}, + Offences: pallet_offences::{Module, Call, Storage, Event}, Historical: session_historical::{Module}, - Session: session::{Module, Call, Storage, Event, Config}, - FinalityTracker: finality_tracker::{Module, Call, Storage, Inherent}, - Grandpa: grandpa::{Module, Call, Storage, Config, Event}, - ImOnline: im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, - AuthorityDiscovery: authority_discovery::{Module, Call, Config}, - - // Parachains stuff; slots are disabled (no auctions initially). The rest are safe as they - // have no public dispatchables. 
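The purchase pallet's gating origins above had their generic parameters mangled by formatting. Spelled out as they read in the Westend source (a reconstruction, not visible verbatim in this excerpt), each one passes for either Root or a signed transaction from one hard-coded account:

```rust
use frame_system::{EnsureOneOf, EnsureRoot, EnsureSignedBy};

// `PurchaseValidity` and `PurchaseConfiguration` are the ord_parameter_types
// accounts declared just above.
type ValidityOrigin = EnsureOneOf<
	AccountId,
	EnsureRoot<AccountId>,
	EnsureSignedBy<PurchaseValidity, AccountId>,
>;

type ConfigurationOrigin = EnsureOneOf<
	AccountId,
	EnsureRoot<AccountId>,
	EnsureSignedBy<PurchaseConfiguration, AccountId>,
>;
```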
- Parachains: parachains::{Module, Call, Storage, Config, Inherent, Origin}, - Attestations: attestations::{Module, Call, Storage}, - Registrar: registrar::{Module, Call, Storage, Event, Config}, + Session: pallet_session::{Module, Call, Storage, Event, Config}, + FinalityTracker: pallet_finality_tracker::{Module, Call, Storage, Inherent}, + Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event, ValidateUnsigned}, + ImOnline: pallet_im_online::{Module, Call, Storage, Event, ValidateUnsigned, Config}, + AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, + + // Old Parachains stuff. All dummies to avoid messing up the transaction indices. + DummyParachains: dummy::::{Module, Call}, + DummyAttestations: dummy::::{Module, Call}, + DummyRegistrar: dummy::::{Module, Call}, // Utility module. - Utility: utility::{Module, Call, Event}, + Utility: pallet_utility::{Module, Call, Event}, // Less simple identity module. - Identity: identity::{Module, Call, Storage, Event}, + Identity: pallet_identity::{Module, Call, Storage, Event}, // Social recovery module. - Recovery: recovery::{Module, Call, Storage, Event}, + Recovery: pallet_recovery::{Module, Call, Storage, Event}, // Vesting. Usable initially, but removed once all vesting is finished. - Vesting: vesting::{Module, Call, Storage, Event, Config}, + Vesting: pallet_vesting::{Module, Call, Storage, Event, Config}, // System scheduler. - Scheduler: scheduler::{Module, Call, Storage, Event}, + Scheduler: pallet_scheduler::{Module, Call, Storage, Event}, // Sudo. - Sudo: sudo::{Module, Call, Storage, Event, Config}, + Sudo: pallet_sudo::{Module, Call, Storage, Event, Config}, // Proxy module. Late addition. - Proxy: proxy::{Module, Call, Storage, Event}, + Proxy: pallet_proxy::{Module, Call, Storage, Event}, // Multisig module. Late addition. - Multisig: multisig::{Module, Call, Storage, Event}, + Multisig: pallet_multisig::{Module, Call, Storage, Event}, + + // Purchase module. Late addition. + Purchase: purchase::{Module, Call, Storage, Event}, } } @@ -729,26 +756,24 @@ pub type SignedBlock = generic::SignedBlock; pub type BlockId = generic::BlockId; /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( - system::CheckSpecVersion, - system::CheckTxVersion, - system::CheckGenesis, - system::CheckMortality, - system::CheckNonce, - system::CheckWeight, - transaction_payment::ChargeTransactionPayment, - registrar::LimitParathreadCommits, - parachains::ValidateDoubleVoteReports, - grandpa::ValidateEquivocationReport, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Extrinsic type that has already been checked. pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. -pub type Executive = executive::Executive, Runtime, AllModules>; +pub type Executive = frame_executive::Executive, Runtime, AllModules>; /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; +#[cfg(not(feature = "disable-runtime-api"))] sp_api::impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { @@ -810,42 +835,55 @@ sp_api::impl_runtime_apis! 
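The `DummyParachains`/`DummyAttestations`/`DummyRegistrar` entries exist, as the comment says, to avoid disturbing transaction indices: a `Call` encodes as the pallet's position in `construct_runtime!` followed by a call index, so deleting a pallet outright would renumber every later pallet and make previously signed extrinsics decode as the wrong calls. Schematically (indices invented for illustration):

```rust
// SCALE encoding of a call starts with the pallet's position in
// construct_runtime!. If the pallet at position 14 were removed outright,
// everything after it would shift down and old signatures would decode
// against the wrong pallets; a do-nothing placeholder keeps the slot.
#[allow(dead_code)]
enum CallLayout {
	DummyParachains = 14,   // placeholder holding Parachains' old slot
	DummyAttestations = 15, // placeholder holding Attestations' old slot
	DummyRegistrar = 16,    // placeholder holding Registrar's old slot
	Utility = 17,           // unchanged position, so old extrinsics still match
}
```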
{ } } - impl parachain::ParachainHost for Runtime { - fn validators() -> Vec { - Parachains::authorities() + // Dummy implementation to continue supporting old parachains runtime temporarily. + impl p_v0::ParachainHost for Runtime { + fn validators() -> Vec { + // this is a compile-time check of size equality. note that we don't invoke + // the function and nothing here is unsafe. + let _ = core::mem::transmute::; + + // Yes, these aren't actually the parachain session keys. + // It doesn't matter, but we shouldn't return a zero-sized vector here. + // As there are no parachains + Session::validators() + .into_iter() + .map(|k| k.using_encoded(|s| Decode::decode(&mut &s[..])) + .expect("correct size and raw-bytes; qed")) + .collect() } - fn duty_roster() -> parachain::DutyRoster { - Parachains::calculate_duty_roster().0 + fn duty_roster() -> p_v0::DutyRoster { + let v = Session::validators(); + p_v0::DutyRoster { validator_duty: (0..v.len()).map(|_| p_v0::Chain::Relay).collect() } } - fn active_parachains() -> Vec<(parachain::Id, Option<(parachain::CollatorId, parachain::Retriable)>)> { - Registrar::active_paras() + fn active_parachains() -> Vec<(p_v0::Id, Option<(p_v0::CollatorId, p_v0::Retriable)>)> { + Vec::new() } - fn global_validation_schedule() -> parachain::GlobalValidationSchedule { - Parachains::global_validation_schedule() + fn global_validation_data() -> p_v0::GlobalValidationData { + p_v0::GlobalValidationData { + max_code_size: 1, + max_head_data_size: 1, + block_number: System::block_number().saturating_sub(1), + } } - fn local_validation_data(id: parachain::Id) -> Option { - Parachains::current_local_validation_data(&id) + fn local_validation_data(_id: p_v0::Id) -> Option { + None } - fn parachain_code(id: parachain::Id) -> Option { - Parachains::parachain_code(&id) + fn parachain_code(_id: p_v0::Id) -> Option { + None } - fn get_heads(extrinsics: Vec<::Extrinsic>) - -> Option> + fn get_heads(_extrinsics: Vec<::Extrinsic>) + -> Option> { - extrinsics - .into_iter() - .find_map(|ex| match UncheckedExtrinsic::decode(&mut ex.encode().as_slice()) { - Ok(ex) => match ex.function { - Call::Parachains(ParachainsCall::set_heads(heads)) => { - Some(heads.into_iter().map(|c| c.candidate).collect()) - } - _ => None, - } - Err(_) => None, - }) - } - fn signing_context() -> SigningContext { - Parachains::signing_context() + None + } + fn signing_context() -> p_v0::SigningContext { + p_v0::SigningContext { + parent_hash: System::parent_hash(), + session_index: Session::current_index(), + } + } + fn downward_messages(_id: p_v0::Id) -> Vec { + Vec::new() } } @@ -854,7 +892,7 @@ sp_api::impl_runtime_apis! { Grandpa::grandpa_authorities() } - fn submit_report_equivocation_extrinsic( + fn submit_report_equivocation_unsigned_extrinsic( equivocation_proof: fg_primitives::EquivocationProof< ::Hash, sp_runtime::traits::NumberFor, @@ -863,7 +901,7 @@ sp_api::impl_runtime_apis! { ) -> Option<()> { let key_owner_proof = key_owner_proof.decode()?; - Grandpa::submit_report_equivocation_extrinsic( + Grandpa::submit_unsigned_equivocation_report( equivocation_proof, key_owner_proof, ) @@ -901,6 +939,29 @@ sp_api::impl_runtime_apis! 
@@ -901,6 +939,29 @@ sp_api::impl_runtime_apis! {
         fn current_epoch_start() -> babe_primitives::SlotNumber {
             Babe::current_epoch_start()
         }
+
+        fn generate_key_ownership_proof(
+            _slot_number: babe_primitives::SlotNumber,
+            authority_id: babe_primitives::AuthorityId,
+        ) -> Option<babe_primitives::OpaqueKeyOwnershipProof> {
+            use codec::Encode;
+
+            Historical::prove((babe_primitives::KEY_TYPE, authority_id))
+                .map(|p| p.encode())
+                .map(babe_primitives::OpaqueKeyOwnershipProof::new)
+        }
+
+        fn submit_report_equivocation_unsigned_extrinsic(
+            equivocation_proof: babe_primitives::EquivocationProof<<Block as BlockT>::Header>,
+            key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof,
+        ) -> Option<()> {
+            let key_owner_proof = key_owner_proof.decode()?;
+
+            Babe::submit_unsigned_equivocation_report(
+                equivocation_proof,
+                key_owner_proof,
+            )
+        }
     }

     impl authority_discovery_primitives::AuthorityDiscoveryApi<Block> for Runtime {
@@ -921,18 +982,17 @@ sp_api::impl_runtime_apis! {
         }
     }

-    impl system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
+    impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
         fn account_nonce(account: AccountId) -> Nonce {
             System::account_nonce(account)
         }
     }

-    impl transaction_payment_rpc_runtime_api::TransactionPaymentApi<
+    impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<
         Block,
         Balance,
-        UncheckedExtrinsic,
     > for Runtime {
-        fn query_info(uxt: UncheckedExtrinsic, len: u32) -> RuntimeDispatchInfo<Balance> {
+        fn query_info(uxt: <Block as BlockT>::Extrinsic, len: u32) -> RuntimeDispatchInfo<Balance> {
             TransactionPayment::query_info(uxt, len)
         }
     }
@@ -940,14 +1000,9 @@ {
     #[cfg(feature = "runtime-benchmarks")]
     impl frame_benchmarking::Benchmark<Block> for Runtime {
         fn dispatch_benchmark(
-            pallet: Vec<u8>,
-            benchmark: Vec<u8>,
-            lowest_range_values: Vec<u32>,
-            highest_range_values: Vec<u32>,
-            steps: Vec<u32>,
-            repeat: u32,
+            config: frame_benchmarking::BenchmarkConfig,
         ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, RuntimeString> {
-            use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark};
+            use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};
             // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues.
             // To get around that, we separated the Session benchmarks into its own crate, which is why
             // we need these two lines below.
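The two new BABE calls mirror the GRANDPA pair above: a client first asks the runtime for an opaque proof that the offending authority key belonged to a historical session, then hands that proof back together with the equivocation proof as an unsigned report. A rough sketch of that round-trip shape, using stand-in types rather than the real `sp_consensus_babe` API:

```rust
// (1) prove key ownership, (2) submit proof + equivocation as a report.
struct OpaqueKeyOwnershipProof(Vec<u8>);
struct EquivocationProof; // stand-in: two conflicting headers by one authority

fn generate_key_ownership_proof(authority_id: u32) -> Option<OpaqueKeyOwnershipProof> {
    // Stand-in for `Historical::prove((KEY_TYPE, authority_id))`: membership
    // proof of the key in some past session's validator set.
    Some(OpaqueKeyOwnershipProof(authority_id.to_le_bytes().to_vec()))
}

fn submit_report(_eq: EquivocationProof, proof: OpaqueKeyOwnershipProof) -> Option<()> {
    // Stand-in for decoding the proof and dispatching the unsigned extrinsic.
    if proof.0.is_empty() { None } else { Some(()) }
}

fn main() {
    let proof = generate_key_ownership_proof(7).expect("proof for a live authority");
    assert_eq!(submit_report(EquivocationProof, proof), Some(()));
}
```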
@@ -959,38 +1014,35 @@ sp_api::impl_runtime_apis! {
             impl pallet_offences_benchmarking::Trait for Runtime {}
             impl frame_system_benchmarking::Trait for Runtime {}

-            let whitelist: Vec<Vec<u8>> = vec![
+            let whitelist: Vec<TrackedStorageKey> = vec![
                 // Block Number
-                // frame_system::Number::<Runtime>::hashed_key().to_vec(),
-                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec(),
+                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
                 // Total Issuance
-                hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec(),
+                hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
                 // Execution Phase
-                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec(),
+                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
                 // Event Count
-                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec(),
+                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
                 // System Events
-                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec(),
-                // Caller 0 Account
-                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec(),
+                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
                 // Treasury Account
-                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec(),
+                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec().into(),
             ];

             let mut batches = Vec::<BenchmarkBatch>::new();
-            let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat, &whitelist);
-
-            add_benchmark!(params, batches, b"balances", Balances);
-            add_benchmark!(params, batches, b"identity", Identity);
-            add_benchmark!(params, batches, b"im-online", ImOnline);
-            add_benchmark!(params, batches, b"offences", OffencesBench::<Runtime>);
-            add_benchmark!(params, batches, b"scheduler", Scheduler);
-            add_benchmark!(params, batches, b"session", SessionBench::<Runtime>);
-            add_benchmark!(params, batches, b"staking", Staking);
-            add_benchmark!(params, batches, b"system", SystemBench::<Runtime>);
-            add_benchmark!(params, batches, b"timestamp", Timestamp);
-            add_benchmark!(params, batches, b"utility", Utility);
-            add_benchmark!(params, batches, b"vesting", Vesting);
+            let params = (&config, &whitelist);
+
+            add_benchmark!(params, batches, pallet_balances, Balances);
+            add_benchmark!(params, batches, pallet_identity, Identity);
+            add_benchmark!(params, batches, pallet_im_online, ImOnline);
+            add_benchmark!(params, batches, pallet_offences, OffencesBench::<Runtime>);
+            add_benchmark!(params, batches, pallet_scheduler, Scheduler);
+            add_benchmark!(params, batches, pallet_session, SessionBench::<Runtime>);
+            add_benchmark!(params, batches, pallet_staking, Staking);
+            add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
+            add_benchmark!(params, batches, pallet_timestamp, Timestamp);
+            add_benchmark!(params, batches, pallet_utility, Utility);
+            add_benchmark!(params, batches, pallet_vesting, Vesting);

             if batches.is_empty() { return Err("Benchmark not found for this
pallet.".into()) } Ok(batches) diff --git a/runtime/westend/src/weights/frame_system.rs b/runtime/westend/src/weights/frame_system.rs new file mode 100644 index 0000000000000000000000000000000000000000..9522fa75203906ab3c7264154a4b33835375843c --- /dev/null +++ b/runtime/westend/src/weights/frame_system.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl frame_system::WeightInfo for WeightInfo { + // WARNING! Some components were not used: ["b"] + fn remark() -> Weight { + (1305000 as Weight) + } + fn set_heap_pages() -> Weight { + (2023000 as Weight) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! Some components were not used: ["d"] + fn set_changes_trie_config() -> Weight { + (10026000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn set_storage(i: u32, ) -> Weight { + (0 as Weight) + .saturating_add((656000 as Weight).saturating_mul(i as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + fn kill_storage(i: u32, ) -> Weight { + (4327000 as Weight) + .saturating_add((478000 as Weight).saturating_mul(i as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } + fn kill_prefix(p: u32, ) -> Weight { + (8349000 as Weight) + .saturating_add((838000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + } + fn suicide() -> Weight { + (29247000 as Weight) + } +} diff --git a/runtime/westend/src/weights/mod.rs b/runtime/westend/src/weights/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..76368abe6b437322cf7e197c127c6d906e78a1ad --- /dev/null +++ b/runtime/westend/src/weights/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +/// A collection of weight modules used for pallets in the runtime. 
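All of these generated `WeightInfo` implementations share one shape: a measured base weight, plus a per-item term scaled by the benchmark component, plus `RocksDbWeight` read/write constants. A minimal, self-contained sketch of how such a formula composes, reusing the `kill_storage` numbers from frame_system.rs above; `DB_WRITE` is an assumed stand-in value, not the real per-write constant:

```rust
// Sketch only: mirrors the shape of the generated `kill_storage(i)` formula.
type Weight = u64;

const BASE: Weight = 4_327_000;       // measured base execution time
const PER_KEY: Weight = 478_000;      // marginal execution time per removed key
const DB_WRITE: Weight = 100_000_000; // assumed stand-in for one RocksDB write

fn kill_storage_weight(i: u32) -> Weight {
    BASE.saturating_add(PER_KEY.saturating_mul(i as Weight))
        .saturating_add(DB_WRITE.saturating_mul(i as Weight))
}

fn main() {
    // Removing 10 keys: the ten database writes dominate the total.
    assert_eq!(kill_storage_weight(10), 4_327_000 + 10 * (478_000 + 100_000_000));
}
```

Saturating arithmetic is used throughout so a pathological component value cannot overflow the weight type.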
+ +pub mod frame_system; +pub mod pallet_balances; +pub mod pallet_timestamp; +pub mod pallet_utility; +pub mod pallet_proxy; diff --git a/runtime/westend/src/weights/pallet_balances.rs b/runtime/westend/src/weights/pallet_balances.rs new file mode 100644 index 0000000000000000000000000000000000000000..53431ba48f2f4d878476122b30daaf481ac03487 --- /dev/null +++ b/runtime/westend/src/weights/pallet_balances.rs @@ -0,0 +1,47 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +/// Weights for the Balances Pallet + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; +pub struct WeightInfo; +impl pallet_balances::WeightInfo for WeightInfo { + fn transfer() -> Weight { + (65949000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn transfer_keep_alive() -> Weight { + (46665000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_balance_creating() -> Weight { + (27086000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn set_balance_killing() -> Weight { + (33424000 as Weight) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn force_transfer() -> Weight { + (65343000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } +} diff --git a/runtime/westend/src/weights/pallet_proxy.rs b/runtime/westend/src/weights/pallet_proxy.rs new file mode 100644 index 0000000000000000000000000000000000000000..5d8655e6c3b0fa31d618b6b112e75c44eaf64f23 --- /dev/null +++ b/runtime/westend/src/weights/pallet_proxy.rs @@ -0,0 +1,86 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_proxy::WeightInfo for WeightInfo { + fn proxy(p: u32, ) -> Weight { + (26127000 as Weight) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + } + fn proxy_announced(a: u32, p: u32, ) -> Weight { + (55405000 as Weight) + .saturating_add((774000 as Weight).saturating_mul(a as Weight)) + .saturating_add((209000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn remove_announcement(a: u32, p: u32, ) -> Weight { + (35879000 as Weight) + .saturating_add((783000 as Weight).saturating_mul(a as Weight)) + .saturating_add((20000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn reject_announcement(a: u32, p: u32, ) -> Weight { + (36097000 as Weight) + .saturating_add((780000 as Weight).saturating_mul(a as Weight)) + .saturating_add((12000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn announce(a: u32, p: u32, ) -> Weight { + (53769000 as Weight) + .saturating_add((675000 as Weight).saturating_mul(a as Weight)) + .saturating_add((214000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(3 as Weight)) + .saturating_add(DbWeight::get().writes(2 as Weight)) + } + fn add_proxy(p: u32, ) -> Weight { + (36082000 as Weight) + .saturating_add((234000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxy(p: u32, ) -> Weight { + (32885000 as Weight) + .saturating_add((267000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn remove_proxies(p: u32, ) -> Weight { + (31735000 as Weight) + .saturating_add((215000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn anonymous(p: u32, ) -> Weight { + (50907000 as Weight) + .saturating_add((61000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + fn kill_anonymous(p: u32, ) -> Weight { + (33926000 as Weight) + .saturating_add((208000 as Weight).saturating_mul(p as Weight)) + .saturating_add(DbWeight::get().reads(1 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } +} diff --git a/runtime/westend/src/weights/pallet_timestamp.rs b/runtime/westend/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000000000000000000000000000000..cfd5f192d35298b512ee75e4d26acf11355ce3ba --- /dev/null +++ b/runtime/westend/src/weights/pallet_timestamp.rs @@ -0,0 +1,34 @@ +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_timestamp::WeightInfo for WeightInfo { + // WARNING! Some components were not used: ["t"] + fn set() -> Weight { + (9133000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! Some components were not used: ["t"] + fn on_finalize() -> Weight { + (5915000 as Weight) + } +} diff --git a/runtime/westend/src/weights/pallet_utility.rs b/runtime/westend/src/weights/pallet_utility.rs new file mode 100644 index 0000000000000000000000000000000000000000..c9ae0d7d2333b19bec65e4f5c1556df65b21e086 --- /dev/null +++ b/runtime/westend/src/weights/pallet_utility.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +pub struct WeightInfo; +impl pallet_utility::WeightInfo for WeightInfo { + fn batch(c: u32, ) -> Weight { + (16461000 as Weight) + .saturating_add((1982000 as Weight).saturating_mul(c as Weight)) + } + // WARNING! Some components were not used: ["u"] + fn as_derivative() -> Weight { + (4086000 as Weight) + } +} diff --git a/scripts/github/generate_release_text.rb b/scripts/github/generate_release_text.rb new file mode 100644 index 0000000000000000000000000000000000000000..d113c5b1f1bdce6ddbc23669614ffebdff7caf5c --- /dev/null +++ b/scripts/github/generate_release_text.rb @@ -0,0 +1,85 @@ +# frozen_string_literal: true + +require 'changelogerator' +require 'git' +require 'erb' +require 'toml' +require 'json' +require_relative './lib.rb' + +version = ENV['GITHUB_REF'] +token = ENV['GITHUB_TOKEN'] + +polkadot_path = ENV['GITHUB_WORKSPACE'] + '/polkadot/' +pg = Git.open(polkadot_path) + +# Generate an ERB renderer based on the template .erb file +renderer = ERB.new( + File.read(ENV['GITHUB_WORKSPACE'] + '/polkadot/scripts/github/polkadot_release.erb'), + trim_mode: '<>' +) + +# get last polkadot version. 
Use handy Gem::Version for sorting by version +last_version = pg + .tags + .map(&:name) + .grep(/^v\d+\.\d+\.\d+.*$/) + .sort_by { |v| Gem::Version.new(v.slice(1...)) }[-2] + +polkadot_cl = Changelog.new( + 'paritytech/polkadot', last_version, version, token: token +) + +# Get prev and cur substrate SHAs - parse the old and current Cargo.lock for +# polkadot and extract the sha that way. +prev_cargo = TOML::Parser.new(pg.show("#{last_version}:Cargo.lock")).parsed +current_cargo = TOML::Parser.new(pg.show("#{version}:Cargo.lock")).parsed + +substrate_prev_sha = prev_cargo['package'] + .find { |p| p['name'] == 'sc-cli' }['source'] + .split('#').last + +substrate_cur_sha = current_cargo['package'] + .find { |p| p['name'] == 'sc-cli' }['source'] + .split('#').last + +substrate_cl = Changelog.new( + 'paritytech/substrate', substrate_prev_sha, substrate_cur_sha, + token: token, + prefix: true +) + +all_changes = polkadot_cl.changes + substrate_cl.changes + +# Set all the variables needed for a release + +misc_changes = Changelog.changes_with_label(all_changes, 'B1-releasenotes') +client_changes = Changelog.changes_with_label(all_changes, 'B5-clientnoteworthy') +runtime_changes = Changelog.changes_with_label(all_changes, 'B7-runtimenoteworthy') + +release_priority = Changelog.highest_priority_for_changes(all_changes) + +# Pulled from the previous Github step +rustc_stable = ENV['RUSTC_STABLE'] +rustc_nightly = ENV['RUSTC_NIGHTLY'] + +polkadot_runtime = get_runtime('polkadot', polkadot_path) +kusama_runtime = get_runtime('kusama', polkadot_path) +westend_runtime = get_runtime('westend', polkadot_path) + +# These json files should have been downloaded as part of the build-runtimes +# github action + +polkadot_json = JSON.parse( + File.read( + ENV['GITHUB_WORKSPACE'] + '/polkadot-srtool-json/srtool_output.json' + ) +) + +kusama_json = JSON.parse( + File.read( + ENV['GITHUB_WORKSPACE'] + '/kusama-srtool-json/srtool_output.json' + ) +) + +puts renderer.result diff --git a/scripts/github/lib.rb b/scripts/github/lib.rb new file mode 100644 index 0000000000000000000000000000000000000000..35ebd3b6e7a9e4b771cbbd0af629d0a812c72c4c --- /dev/null +++ b/scripts/github/lib.rb @@ -0,0 +1,12 @@ +# frozen_string_literal: true + +# A collection of helper functions that might be useful for various scripts + +# Gets the runtime version for a given runtime. +# Optionally accepts a path that is the root of the project which defaults to +# the current working directory +def get_runtime(runtime, path = '.') + File.open(path + "/runtime/#{runtime}/src/lib.rs") do |f| + f.find { |l| l =~ /spec_version/ }.match(/[0-9]+/)[0] + end +end diff --git a/scripts/github/polkadot_release.erb b/scripts/github/polkadot_release.erb new file mode 100644 index 0000000000000000000000000000000000000000..dde7165e92efda4b5789f1a20361050b5dc1808b --- /dev/null +++ b/scripts/github/polkadot_release.erb @@ -0,0 +1,42 @@ +<%= print release_priority[:text] %> <%= puts " due to changes: *#{Changelog.changes_with_label(all_changes, release_priority[:label]).map(&:pretty_title).join(", ")}*" if release_priority[:priority] > 1 %> + +Native runtimes: + +- Polkadot: **<%= polkadot_runtime %>** +- Kusama: **<%= kusama_runtime %>** +- Westend: **<%= westend_runtime %>** + +This release was tested against the following versions of `rustc`. Other versions may work. + +- <%= rustc_stable %> +- <%= rustc_nightly %> + +WASM runtimes built with [srtool](https://gitlab.com/chevdor/srtool) using `<%= polkadot_json['rustc'] %>`. 
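The Ruby script above picks the previous release by grepping `v*.*.*` tags, sorting them as versions with `Gem::Version`, and taking the second-to-last entry. A rough Rust rendering of that selection logic, with hypothetical tags:

```rust
// Parse "vMAJOR.MINOR.PATCH" tags into comparable triples; the real script
// delegates all of this to Gem::Version.
fn parse(tag: &str) -> Option<(u32, u32, u32)> {
    let t = tag.strip_prefix('v')?;
    let mut it = t.split('.');
    Some((
        it.next()?.parse().ok()?,
        it.next()?.parse().ok()?,
        // tolerate suffixes such as "-rc1" on the patch component
        it.next()?.split(|c: char| !c.is_ascii_digit()).next()?.parse().ok()?,
    ))
}

fn main() {
    let mut tags = vec!["v0.8.22", "v0.8.12", "v0.8.21"];
    tags.sort_by_key(|t| parse(t));
    // With the tag being released sorted last, the previous release is the
    // second-to-last entry, matching the script's `[-2]`.
    assert_eq!(tags[tags.len() - 2], "v0.8.21");
}
```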
+ +Proposal hashes: +* `polkadot_runtime-v<%= polkadot_runtime %>.compact.wasm - <%= polkadot_json['prop'] %>` +* `kusama_runtime-v<%= kusama_runtime %>.compact.wasm - <%= kusama_json['prop'] %>` + +<% unless misc_changes.empty? %> +## Changes + +<% misc_changes.each do |c| %> +* <%= c[:pretty_title] %> +<% end %> +<% end %> + +<% unless client_changes.empty? %> +## Client + +<% client_changes.each do |c| %> +* <%= c[:pretty_title] %> +<% end %> +<% end %> + +<% unless runtime_changes.empty? %> +## Runtime + +<% runtime_changes.each do |c| %> +* <%= c[:pretty_title] %> +<% end %> +<% end %> diff --git a/scripts/gitlab/check_labels.sh b/scripts/gitlab/check_labels.sh index b2feebc09c054f06a932d5c96285491bdaaf7400..c70599912ce4ad04f27e71fe2ce093e1867d7e49 100755 --- a/scripts/gitlab/check_labels.sh +++ b/scripts/gitlab/check_labels.sh @@ -18,7 +18,7 @@ ensure_labels() { releasenotes_labels=( 'B0-silent' 'B1-releasenotes' - 'B2-runtimenoteworthy' + 'B7-runtimenoteworthy' ) priority_labels=( diff --git a/scripts/gitlab/check_runtime.sh b/scripts/gitlab/check_runtime.sh index 2cedd2f656df59f796cf23ece6991f49a1bdf2ac..0c635c88246c58076a48fd960216b8967a73aef7 100755 --- a/scripts/gitlab/check_runtime.sh +++ b/scripts/gitlab/check_runtime.sh @@ -1,18 +1,17 @@ -#!/bin/sh +#!/usr/bin/env bash + +# Check for any changes in any runtime directories (e.g., ^runtime/polkadot) as +# well as directories common to all runtimes (e.g., ^runtime/common). If there +# are no changes, check if the Substrate git SHA in Cargo.lock has been +# changed. If so, pull the repo and verify if {spec,impl}_versions have been +# altered since the previous Substrate version used. Also, if any of the +# Substrate changes between the previous and current version referenced by +# Cargo.lock were labelled with 'D2-breaksapi', label this PR the same. # -# -# check for any changes in the ^runtime/ tree. if there are no changes check -# if the substrate reference in the Cargo.lock has been changed. If so pull -# the repo and verify if the {spec,impl}_version s have been altered since the -# last reference. If there were changes the script will continue to check if -# the spec_version resp impl_version of polkadot have been altered as well. -# this will also be checked if there were changes to the runtime source files. -# -# If there are any changes found, it will mark the PR breaksapi and -# "auto-fail" the PR if there isn't a change in the -# runtime/{polkadot,kusama}/src/lib.rs file -# that alters the version since the last release tag. - +# If there were changes to any runtimes or common dirs, we iterate over each +# runtime (defined in the $runtimes() array), and check if {spec,impl}_version +# have been changed since the last release. Also, if there have been changes to +# the runtime since the last commit to master, label the PR with 'D2-breaksapi' set -e # fail on any error @@ -21,188 +20,210 @@ set -e # fail on any error . 
"$(dirname "${0}")/lib.sh" SUBSTRATE_REPO="https://github.com/paritytech/substrate" -SUBSTRATE_REPO_CARGO="git\+${SUBSTRATE_REPO}\?branch=polkadot-master" +SUBSTRATE_REPO_CARGO="git\+${SUBSTRATE_REPO}" SUBSTRATE_VERSIONS_FILE="bin/node/runtime/src/lib.rs" -boldprint () { printf "|\n| \033[1m%s\033[0m\n|\n" "${@}"; } -boldcat () { printf "|\n"; while read -r l; do printf "| \033[1m%s\033[0m\n" "${l}"; done; printf "|\n" ; } - - # figure out the latest release tag LATEST_TAG="$(git tag -l | sort -V | tail -n 1)" boldprint "latest release tag ${LATEST_TAG}" - boldprint "latest 10 commits of ${CI_COMMIT_REF_NAME}" git --no-pager log --graph --oneline --decorate=short -n 10 boldprint "make sure the master branch is available in shallow clones" -git fetch --depth=${GIT_DEPTH:-100} origin master +git fetch --depth="${GIT_DEPTH:-100}" origin master +runtimes=( + "kusama" + "polkadot" + "westend" +) -github_label () { - echo - echo "# run github-api job for labeling it ${1}" - curl -sS -X POST \ - -F "token=${CI_JOB_TOKEN}" \ - -F "ref=master" \ - -F "variables[LABEL]=${1}" \ - -F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \ - -F "variables[PROJECT]=paritytech/polkadot" \ - ${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline -} +common_dirs=( + "common" +) +# Helper function to join elements in an array with a multi-char delimiter +# https://stackoverflow.com/questions/1527049/how-can-i-join-elements-of-an-array-in-bash +function join_by { local d=$1; shift; echo -n "$1"; shift; printf "%s" "${@/#/$d}"; } + +# Construct a regex to search for any changes to runtime or common directories +runtime_regex="^runtime/$(join_by '|^runtime/' "${runtimes[@]}" "${common_dirs[@]}")" boldprint "check if the wasm sources changed since ${LATEST_TAG}" -if ! git diff --name-only refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA} \ - | grep -q -e '^runtime/' +if ! git diff --name-only "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" \ + | grep -E -q -e "$runtime_regex" then - boldprint "no changes to the polkadot runtime source code detected" - # continue checking if Cargo.lock was updated with a new substrate reference - # and if that change includes a {spec|impl}_version update. 
- - SUBSTRATE_REFS_CHANGED="$(git diff refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA} Cargo.lock \ - | sed -n -r "s~^[\+\-]source = \"${SUBSTRATE_REPO_CARGO}#([a-f0-9]+)\".*$~\1~p" | sort -u | wc -l)" - - # check Cargo.lock for substrate ref change - case "${SUBSTRATE_REFS_CHANGED}" in - (0) - boldprint "substrate refs not changed in Cargo.lock" - exit 0 - ;; - (2) - boldprint "substrate refs updated since ${LATEST_TAG}" - ;; - (*) - boldprint "check unsupported: more than one commit targeted in repo ${SUBSTRATE_REPO_CARGO}" - exit 1 - esac - - - SUBSTRATE_PREV_REF="$(git diff refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA} Cargo.lock \ - | sed -n -r "s~^\-source = \"${SUBSTRATE_REPO_CARGO}#([a-f0-9]+)\".*$~\1~p" | sort -u | head -n 1)" - - SUBSTRATE_NEW_REF="$(git diff refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA} Cargo.lock \ - | sed -n -r "s~^\+source = \"${SUBSTRATE_REPO_CARGO}#([a-f0-9]+)\".*$~\1~p" | sort -u | head -n 1)" - - - boldcat <<-EOT - previous substrate commit id ${SUBSTRATE_PREV_REF} - new substrate commit id ${SUBSTRATE_NEW_REF} - EOT - - # okay so now need to fetch the substrate repository and check whether spec_version or impl_version has changed there - SUBSTRATE_CLONE_DIR="$(mktemp -t -d substrate-XXXXXX)" - trap "rm -rf ${SUBSTRATE_CLONE_DIR}" INT QUIT TERM ABRT EXIT - - - git clone --branch polkadot-master --depth 100 --no-tags \ - ${SUBSTRATE_REPO} ${SUBSTRATE_CLONE_DIR} - - - # check if there are changes to the spec|impl versions - git -C ${SUBSTRATE_CLONE_DIR} diff \ - ${SUBSTRATE_PREV_REF}..${SUBSTRATE_NEW_REF} ${SUBSTRATE_VERSIONS_FILE} \ - | grep -E '^[\+\-][[:space:]]+(spec|impl)_version: +([0-9]+),$' || exit 0 - - boldcat <<-EOT - spec_version or or impl_version have changed in substrate after updating Cargo.lock - please make sure versions are bumped in polkadot accordingly - EOT - - # Now check if any of the substrate changes have been tagged D2-breaksapi - ( - cd "${SUBSTRATE_CLONE_DIR}" - substrate_changes="$(sanitised_git_logs "${SUBSTRATE_PREV_REF}" "${SUBSTRATE_NEW_REF}")" - echo "$substrate_changes" | while read -r line; do - pr_id=$(echo "$line" | sed -E 's/.*#([0-9]+)\)$/\1/') - - if has_label 'paritytech/substrate' "$pr_id" 'D2-breaksapi'; then - boldprint "Substrate change labelled with D2-breaksapi. Labelling..." - github_label "D2-breaksapi" - exit 1 - fi - done - ) + boldprint "no changes to any runtime source code detected" + # continue checking if Cargo.lock was updated with a new substrate reference + # and if that change includes a {spec|impl}_version update. 
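Both the old and new variants of this check rely on the same trick: count the distinct substrate commit hashes appearing in added/removed `source =` lines of the Cargo.lock diff. Zero means the ref is unchanged, exactly two means one old and one new ref, and anything else aborts as unsupported. A hedged Rust sketch of that extraction over a made-up two-line diff:

```rust
use std::collections::BTreeSet;

// Collect the unique commit hashes from +/- source lines of a diff.
fn changed_refs(diff: &str) -> BTreeSet<&str> {
    diff.lines()
        .filter(|l| l.starts_with('+') || l.starts_with('-'))
        .filter_map(|l| {
            let (_, rest) = l.split_once("substrate#")?;
            rest.split('"').next()
        })
        .collect()
}

fn main() {
    let diff = r#"-source = "git+https://github.com/paritytech/substrate#aaaa"
+source = "git+https://github.com/paritytech/substrate#bbbb""#;
    // 0 = no change, 2 = exactly one old and one new ref, else unsupported.
    assert_eq!(changed_refs(diff).len(), 2);
}
```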
+ + SUBSTRATE_REFS_CHANGED="$( + git diff "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" Cargo.lock \ + | sed -n -r "s~^[\+\-]source = \"${SUBSTRATE_REPO_CARGO}#([a-f0-9]+)\".*$~\1~p" | sort -u | wc -l + )" + + # check Cargo.lock for substrate ref change + case "${SUBSTRATE_REFS_CHANGED}" in + (0) + boldprint "substrate refs not changed in Cargo.lock" + exit 0 + ;; + (2) + boldprint "substrate refs updated since ${LATEST_TAG}" + ;; + (*) + boldprint "check unsupported: more than one commit targeted in repo ${SUBSTRATE_REPO_CARGO}" + exit 1 + esac + + + SUBSTRATE_PREV_REF="$( + git diff "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" Cargo.lock \ + | sed -n -r "s~^\-source = \"${SUBSTRATE_REPO_CARGO}#([a-f0-9]+)\".*$~\1~p" | sort -u | head -n 1 + )" + + SUBSTRATE_NEW_REF="$( + git diff "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" Cargo.lock \ + | sed -n -r "s~^\+source = \"${SUBSTRATE_REPO_CARGO}#([a-f0-9]+)\".*$~\1~p" | sort -u | head -n 1 + )" + + + boldcat < ${add_spec_version} + boldcat < ${add_spec_version} - add_impl_version="$(git diff refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ - | sed -n -r 's/^\+[[:space:]]+impl_version: +([0-9]+),$/\1/p')" - sub_impl_version="$(git diff refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ - | sed -n -r 's/^\-[[:space:]]+impl_version: +([0-9]+),$/\1/p')" +EOT + continue + else + # check for impl_version updates: if only the impl versions changed, we assume + # there is no consensus-critical logic that has changed. - # see if the impl version changed - if [ "${add_impl_version}" != "${sub_impl_version}" ] - then - boldcat <<-EOT + add_impl_version="$( + git diff refs/tags/"${LATEST_TAG}...${CI_COMMIT_SHA}" "runtime/${RUNTIME}/src/lib.rs" \ + | sed -n -r 's/^\+[[:space:]]+impl_version: +([0-9]+),$/\1/p' + )" + sub_impl_version="$( + git diff refs/tags/"${LATEST_TAG}...${CI_COMMIT_SHA}" "runtime/${RUNTIME}/src/lib.rs" \ + | sed -n -r 's/^\-[[:space:]]+impl_version: +([0-9]+),$/\1/p' + )" - changes to the runtime sources and changes in the impl version. 
- impl_version: ${sub_impl_version} -> ${add_impl_version} + # see if the impl version changed + if [ "${add_impl_version}" != "${sub_impl_version}" ] + then + boldcat < ${add_impl_version} - source file directories: - - runtime +EOT + continue + fi - versions file: ${VERSIONS_FILE} + failed_runtime_checks+=("$RUNTIME") + fi +done - EOT +if [ ${#failed_runtime_checks} -gt 0 ]; then + boldcat </dev/null 2>&1 && pwd )/lib.sh" + +time cargo check --features runtime-benchmarks diff --git a/scripts/gitlab/check_web_wasm.sh b/scripts/gitlab/check_web_wasm.sh new file mode 100755 index 0000000000000000000000000000000000000000..9d9006e908a73957cc1556e3eee0d14ea5a74a6e --- /dev/null +++ b/scripts/gitlab/check_web_wasm.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +#shellcheck source=lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" + +time cargo build --locked --target=wasm32-unknown-unknown --manifest-path runtime/polkadot/Cargo.toml +time cargo build --locked --target=wasm32-unknown-unknown --manifest-path runtime/kusama/Cargo.toml +time cargo build --locked --target=wasm32-unknown-unknown --manifest-path erasure-coding/Cargo.toml +time cargo build --locked --target=wasm32-unknown-unknown --manifest-path parachain/Cargo.toml +time cargo build --locked --target=wasm32-unknown-unknown --manifest-path primitives/Cargo.toml +time cargo build --locked --target=wasm32-unknown-unknown --manifest-path rpc/Cargo.toml +time cargo build --locked --target=wasm32-unknown-unknown --manifest-path statement-table/Cargo.toml +time cargo build --locked --target=wasm32-unknown-unknown --manifest-path cli/Cargo.toml --no-default-features --features browser diff --git a/scripts/gitlab/lib.sh b/scripts/gitlab/lib.sh index 0f1c74193bdc0be858fec662ed326b91cd8d0f8a..993cab35e3406af7a1f0e49650d1f13b4ea23754 100755 --- a/scripts/gitlab/lib.sh +++ b/scripts/gitlab/lib.sh @@ -59,9 +59,21 @@ has_label(){ [ -n "$(echo "$out" | tr -d '\r\n' | jq ".labels | .[] | select(.name==\"$label\")")" ] } +github_label () { + echo + echo "# run github-api job for labeling it ${1}" + curl -sS -X POST \ + -F "token=${CI_JOB_TOKEN}" \ + -F "ref=master" \ + -F "variables[LABEL]=${1}" \ + -F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \ + -F "variables[PROJECT]=paritytech/polkadot" \ + "${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline" +} + # Formats a message into a JSON string for posting to Matrix # message: 'any plaintext message' -# formatted_message: 'optional message formatted in html' +# formatted_message: 'optional message formatted in html' # Usage: structure_message $content $formatted_content (optional) structure_message() { if [ -z "$2" ]; then @@ -80,3 +92,22 @@ structure_message() { send_message() { curl -XPOST -d "$1" "https://matrix.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3" } + +# Pretty-printing functions +boldprint () { printf "|\n| \033[1m%s\033[0m\n|\n" "${@}"; } +boldcat () { printf "|\n"; while read -r l; do printf "| \033[1m%s\033[0m\n" "${l}"; done; printf "|\n" ; } + +skip_if_companion_pr() { + url="https://api.github.com/repos/paritytech/polkadot/pulls/${CI_COMMIT_REF_NAME}" + echo "[+] API URL: $url" + + pr_title=$(curl -sSL -H "Authorization: token ${GITHUB_PR_TOKEN}" "$url" | jq -r .title) + echo "[+] PR title: $pr_title" + + if echo "$pr_title" | grep -qi '^companion'; then + echo "[!] PR is a companion PR. Build is already done in substrate" + exit 0 + else + echo "[+] PR is not a companion PR. 
Proceeding test" + fi +} diff --git a/scripts/gitlab/publish_draft_release.sh b/scripts/gitlab/publish_draft_release.sh deleted file mode 100755 index c91846bc884719e2541fb82718d8562982dc2af1..0000000000000000000000000000000000000000 --- a/scripts/gitlab/publish_draft_release.sh +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/env bash - -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" - -# Set initial variables -substrate_repo="https://github.com/paritytech/substrate" -substrate_dir='./substrate' - -# Cloning repos to ensure freshness -echo "[+] Cloning substrate to generate list of changes" -git clone $substrate_repo $substrate_dir -echo "[+] Finished cloning substrate into $substrate_dir" - -version="$CI_COMMIT_TAG" -last_version=$(git tag -l | sort -V | grep -B 1 -x "$CI_COMMIT_TAG" | head -n 1) -echo "[+] Version: $version; Previous version: $last_version" - -# Check that a signed tag exists on github for this version -echo '[+] Checking tag has been signed' -check_tag "paritytech/polkadot" "$version" -case $? in - 0) echo '[+] Tag found and has been signed' - ;; - 1) echo '[!] Tag found but has not been signed. Aborting release.'; exit 1 - ;; - 2) echo '[!] Tag not found. Aborting release.'; exit 1 -esac - -# Pull rustc version used by rust-builder for stable and nightly -stable_rustc="$(rustc +stable --version)" -nightly_rustc="$(rustc +nightly --version)" - -# Start with referencing current native runtime -# and find any referenced PRs since last release -# Note: Drop any changes that begin with '[contracts]' or 'contracts:' -polkadot_spec=$(grep spec_version runtime/polkadot/src/lib.rs | tail -n 1 | grep -Eo '[0-9]+') -echo "[+] Polkadot spec version: $polkadot_spec" -kusama_spec=$(grep spec_version runtime/kusama/src/lib.rs | tail -n 1 | grep -Eo '[0-9]+') -echo "[+] Kusama spec version: $kusama_spec" -westend_spec=$(grep spec_version runtime/westend/src/lib.rs | tail -n 1 | grep -Eo '[0-9]+') -echo "[+] Westend spec version: $westend_spec" -release_text="Native runtimes: -- Polkadot: **$polkadot_spec** -- Kusama: **$kusama_spec** -- Westend: **$westend_spec** - -This release was built with the following versions of \`rustc\`. Other versions may work. -- $stable_rustc -- $nightly_rustc -" - -declare -a misc_changes -declare -a runtime_changes -declare -a client_changes - -# Following variables are for tracking the priority of the release (i.e., -# how important it is for the user to upgrade). -# It's frustrating that we need to make an array of indexes (in this case the -# labels), but it's necessary to maintain the correct order. 
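For context on the release-priority loop in this deleted script (the equivalent behaviour now lives in the changelogerator gem): it tracks the highest `C*` label seen across PRs and resets the list of "responsible" changes whenever the priority rises. A simplified Rust sketch with hypothetical PR data:

```rust
fn main() {
    let priority_labels = ["C1-low", "C3-medium", "C7-high", "C9-critical"];
    // Hypothetical PRs as (id, index into priority_labels).
    let prs: &[(&str, usize)] = &[("#101", 1), ("#102", 3), ("#103", 0)];

    let mut max_label = 0; // default to C1-low
    let mut priority_changes: Vec<&str> = Vec::new();

    for &(pr, label_idx) in prs {
        if label_idx > max_label {
            // Priority increased: earlier changes no longer set the headline.
            max_label = label_idx;
            priority_changes = vec![pr];
        } else if label_idx == max_label {
            // Same priority: this PR shares responsibility for the headline.
            priority_changes.push(pr);
        }
    }

    assert_eq!(priority_labels[max_label], "C9-critical");
    assert_eq!(priority_changes, ["#102"]);
}
```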
Labels and -# descriptions *must* be kept in lockstep - -priority_labels=( - 'C1-low' - 'C3-medium' - 'C7-high' - 'C9-critical' -) - -declare -A priority_descriptions=( -['C1-low']="Upgrade priority: **Low** (upgrade at your convenience)" -['C3-medium']="Upgrade priority: **Medium** (timely upgrade recommended)" -['C7-high']="Upgrade priority:❗ **HIGH** ❗ Please upgrade your node as soon as possible" -['C9-critical']="Upgrade priority: ❗❗ **URGENT** ❗❗ PLEASE UPGRADE IMMEDIATELY" -) - -# We don't actually take any action on C1-low, so we can start at medium -# But set C1-low as the default -max_label=1 -priority="${priority_descriptions['C1-low']}" -declare -a priority_changes - -# Iterate through every PR -while IFS= read -r line; do - pr_id=$(echo "$line" | sed -E 's/.*#([0-9]+)\)$/\1/') - - # Release priority check: - # For each PR, we look for every label equal to or higher than the current highest - # I.e., if there has already been a PR marked as 'medium', we only need - # to look for priorities medium or above. If we find one, we set the - # priority to that level. - for ((index=max_label; index<${#priority_labels[@]}; index++)) ; do - cur_label="${priority_labels[$index]}" - echo "[+] Checking #$pr_id for presence of $cur_label label" - if has_label 'paritytech/polkadot' "$pr_id" "$cur_label" ; then - echo "[+] #$pr_id has label $cur_label. Setting max." - prev_label="$max_label" - max_label="$index" - priority="${priority_descriptions[$cur_label]}" - - # If it's not an increase in priority, we just append the PR to the list - if [ "$prev_label" == "$max_label" ]; then - priority_changes+=("${line/\* /}") - fi - # If the priority has increased, we override previous changes with new changes - if [ "$prev_label" != "$max_label" ]; then - priority_changes=("${line/\* /}") - fi - - # Append priority to change - # Skip first 3 chars - note=${cur_label:3} - # And capitalise - line=" \`${note^}\` $line" - fi - done - - # If the PR is labelled silent, we can do an early continue to save a little work - if has_label 'paritytech/polkadot' "$pr_id" 'B0-silent'; then - continue - fi - - # If the PR has a runtimenoteworthy label, add to the runtime_changes section - if has_label 'paritytech/polkadot' "$pr_id" 'B2-runtimenoteworthy'; then - runtime_changes+=("$line") - fi - # If the PR has a releasenotes label, add to the release section - if has_label 'paritytech/polkadot' "$pr_id" 'B1-releasenotes'; then - misc_changes+=("$line") - fi -done <<< "$(sanitised_git_logs "$last_version" "$version" | \ - sed '/^\[contracts\].*/d' | \ - sed '/^contracts:.*/d' )" - -# Get substrate changes between last polkadot version and current -# By grepping the Cargo.lock for a substrate crate, and grepping out the commit hash -cur_substrate_commit=$(grep -A 2 'name = "sc-cli"' Cargo.lock | grep -E -o '[a-f0-9]{40}') -old_substrate_commit=$(git diff "refs/tags/$last_version" Cargo.lock |\ - grep -A 2 'name = "sc-cli"' | grep -E -o '[a-f0-9]{40}') -pushd $substrate_dir || exit - git checkout master > /dev/null - git pull > /dev/null - all_substrate_changes="$(sanitised_git_logs "$old_substrate_commit" "$cur_substrate_commit" | sed 's/(#/(paritytech\/substrate#/')" - - echo "[+] Iterating through substrate changes to find labelled PRs" - while IFS= read -r line; do - pr_id=$(echo "$line" | sed -E 's/.*#([0-9]+)\)$/\1/') - - # Basically same check as Polkadot priority - # We only need to check for any labels of the current priority or higher - for ((index=max_label; index<${#priority_labels[@]}; index++)) ; do - 
cur_label="${priority_labels[$index]}" - echo "[+] Checking substrate/#$pr_id for presence of $cur_label label" - if has_label 'paritytech/substrate' "$pr_id" "$cur_label" ; then - echo "[+] #$pr_id has label $cur_label. Setting max." - prev_label="$max_label" - max_label="$index" - priority="${priority_descriptions[$cur_label]}" - - # If it's not an increase in priority, we just append - if [ "$prev_label" == "$max_label" ]; then - priority_changes+=("${line/\* /}") - fi - # If the priority has increased, we override previous changes with new changes - if [ "$prev_label" != "$max_label" ]; then - priority_changes=("${line/\* /}") - fi - - # Append priority to change - # Skip first 3 chars - note=${cur_label:3} - # And capitalise - line=" \`${note^}\` $line" - fi - done - - # Skip if the PR has the silent label - this allows us to skip a few requests - if has_label 'paritytech/substrate' "$pr_id" 'B0-silent'; then - continue - fi - if has_label 'paritytech/substrate' "$pr_id" 'B5-clientnoteworthy'; then - client_changes+=("$line") - fi - if has_label 'paritytech/substrate' "$pr_id" 'B7-runtimenoteworthy'; then - runtime_changes+=("$line") - fi - done <<< "$all_substrate_changes" -popd || exit - - -# Add the priorities to the *start* of the release notes -# If polkadot and substrate priority = low, no need for list of changes -if [ "$priority" == "${priority_descriptions['C1-low']}" ]; then - release_text="$priority - -$release_text" -else - release_text="$priority - due to change(s): *${priority_changes[*]}* - -$release_text" -fi - -# Append all notable changes to the release notes - -if [ "${#misc_changes[*]}" -gt 0 ] ; then - release_text="$release_text - -## Changes -$(printf '* %s\n' "${misc_changes[@]}")" -fi - -if [ "${#client_changes[*]}" -gt 0 ] ; then - release_text="$release_text - -## Client -$(printf '* %s\n' "${client_changes[@]}")" -fi - -if [ "${#runtime_changes[*]}" -gt 0 ] ; then - release_text="$release_text - -## Runtime -$(printf '* %s\n' "${runtime_changes[@]}")" -fi - -echo "[+] Release text generated: " -echo "$release_text" - -echo "[+] Pushing release to github" -# Create release on github -release_name="Polkadot CC1 $version" -data=$(jq -Rs --arg version "$version" \ - --arg release_name "$release_name" \ - --arg release_text "$release_text" \ -'{ - "tag_name": $version, - "target_commitish": "master", - "name": $release_name, - "body": $release_text, - "draft": true, - "prerelease": false -}' < /dev/null) - -out=$(curl -s -X POST --data "$data" -H "Authorization: token $GITHUB_RELEASE_TOKEN" "$api_base/paritytech/polkadot/releases") - -html_url=$(echo "$out" | jq -r .html_url) - -if [ "$html_url" == "null" ] -then - echo "[!] Something went wrong posting:" - echo "$out" - # If we couldn't post, don't want to announce in Matrix - exit 1 -else - echo "[+] Release draft created: $html_url" -fi - -echo '[+] Sending draft release URL to Matrix' - -msg_body=$(cat <New version of polkadot tagged: $CI_COMMIT_TAG
-Gav: Draft release created: $html_url
-Build pipeline: $CI_PIPELINE_URL -EOF -) -send_message "$(structure_message "$msg_body" "$formatted_msg_body")" "$MATRIX_ROOM_ID" "$MATRIX_ACCESS_TOKEN" - -echo "[+] Done! Maybe the release worked..." diff --git a/scripts/gitlab/test_deterministic_wasm.sh b/scripts/gitlab/test_deterministic_wasm.sh new file mode 100755 index 0000000000000000000000000000000000000000..db391ca0a2fde43b64c5412eb06570e9978f15b2 --- /dev/null +++ b/scripts/gitlab/test_deterministic_wasm.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +#shellcheck source=lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" + +# build runtime +WASM_BUILD_NO_COLOR=1 cargo build --verbose --release -p kusama-runtime -p polkadot-runtime -p westend-runtime +# make checksum +sha256sum target/release/wbuild/target/wasm32-unknown-unknown/release/*.wasm > checksum.sha256 +# clean up - FIXME: can we reuse some of the artifacts? +cargo clean +# build again +WASM_BUILD_NO_COLOR=1 cargo build --verbose --release -p kusama-runtime -p polkadot-runtime -p westend-runtime +# confirm checksum +sha256sum -c checksum.sha256 diff --git a/scripts/gitlab/test_linux_stable.sh b/scripts/gitlab/test_linux_stable.sh new file mode 100755 index 0000000000000000000000000000000000000000..a18ff43874097811ef2d26a68b22d53f0234a5a9 --- /dev/null +++ b/scripts/gitlab/test_linux_stable.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +#shellcheck source=lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" + +time cargo test --all --release --verbose --locked --features runtime-benchmarks diff --git a/scripts/prepare-test-net.sh b/scripts/prepare-test-net.sh new file mode 100755 index 0000000000000000000000000000000000000000..6499a1199bde20ae251d7c1ae6271406eb5cd51a --- /dev/null +++ b/scripts/prepare-test-net.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +set -e + +if [ "$#" -ne 1 ]; then + echo "Please provide the number of initial validators!" 
+ exit 1 +fi + +generate_account_id() { + subkey ${3:-} inspect "$SECRET//$1//$2" | grep "Account ID" | awk '{ print $3 }' +} + +generate_address() { + subkey ${3:-} inspect "$SECRET//$1//$2" | grep "SS58 Address" | awk '{ print $3 }' +} + +generate_address_and_account_id() { + ACCOUNT=$(generate_account_id $1 $2 $3) + ADDRESS=$(generate_address $1 $2 $3) + if ${4:-false}; then + INTO="unchecked_into" + else + INTO="into" + fi + + printf "//$ADDRESS\nhex![\"${ACCOUNT#'0x'}\"].$INTO()," +} + +V_NUM=$1 + +AUTHORITIES="" + +for i in $(seq 1 $V_NUM); do + AUTHORITIES+="(\n" + AUTHORITIES+="$(generate_address_and_account_id $i stash)\n" + AUTHORITIES+="$(generate_address_and_account_id $i controller)\n" + AUTHORITIES+="$(generate_address_and_account_id $i babe '--sr25519' true)\n" + AUTHORITIES+="$(generate_address_and_account_id $i grandpa '--ed25519' true)\n" + AUTHORITIES+="$(generate_address_and_account_id $i im_online '--sr25519' true)\n" + AUTHORITIES+="$(generate_address_and_account_id $i parachains '--sr25519' true)\n" + AUTHORITIES+="$(generate_address_and_account_id $i authority_discovery '--sr25519' true)\n" + AUTHORITIES+="),\n" +done + +printf "$AUTHORITIES" diff --git a/service/Cargo.toml b/service/Cargo.toml index bdd869da46d57812393f5b750f767bb3e21e3ae5..e377aee2a5f910bcab7bb9e34d3b2c31651e3792 100644 --- a/service/Cargo.toml +++ b/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-service" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" @@ -12,13 +12,11 @@ log = "0.4.8" futures = "0.3.4" slog = "2.5.2" hex-literal = "0.2.1" -av_store = { package = "polkadot-availability-store", path = "../availability-store", optional = true } consensus = { package = "polkadot-validation", path = "../validation", optional = true } polkadot-primitives = { path = "../primitives" } polkadot-runtime = { path = "../runtime/polkadot" } kusama-runtime = { path = "../runtime/kusama" } westend-runtime = { path = "../runtime/westend" } -polkadot-network = { path = "../network", optional = true } polkadot-rpc = { path = "../rpc" } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -31,6 +29,7 @@ sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "mas sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" } grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } grandpa_primitives = { package = "sp-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } @@ -43,15 +42,15 @@ sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master" } -im-online = { package = "pallet-im-online", git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-im-online = { git 
= "https://github.com/paritytech/substrate", branch = "master" } authority-discovery = { package = "sc-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master" } authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master" } babe = { package = "sc-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" } sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -system_rpc_runtime_api = { package = "frame-system-rpc-runtime-api", git = "https://github.com/paritytech/substrate", branch = "master" } -codec = { package = "parity-scale-codec", version = "1.3.0" } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +codec = { package = "parity-scale-codec", version = "1.3.4" } sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-offchain = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "master" } @@ -66,4 +65,4 @@ env_logger = "0.7.0" default = ["db", "full-node"] db = ["service/db"] runtime-benchmarks = ["polkadot-runtime/runtime-benchmarks", "kusama-runtime/runtime-benchmarks", "westend-runtime/runtime-benchmarks"] -full-node = ["av_store", "consensus", "polkadot-network"] +full-node = ["consensus"] diff --git a/service/res/kusama.json b/service/res/kusama.json index 64e14b9ec8fb0285fe92c738ee0c3724819311ae..34c2180184264a7635ef3d4a1aa5c18b99aa7a32 100644 --- a/service/res/kusama.json +++ b/service/res/kusama.json @@ -8,9 +8,9 @@ "/dns/p2p.cc3-3.kusama.network/tcp/30100/p2p/12D3KooWEGHw84b4hfvXEfyq4XWEmWCbRGuHMHQMpby4BAtZ4xJf", "/dns/p2p.cc3-4.kusama.network/tcp/30100/p2p/12D3KooWF9KDPRMN8WpeyXhEeURZGP8Dmo7go1tDqi7hTYpxV9uW", "/dns/p2p.cc3-5.kusama.network/tcp/30100/p2p/12D3KooWDiwMeqzvgWNreS9sV1HW3pZv1PA7QGA7HUCo7FzN5gcA", - "/dns/kusama-bootnode-0.paritytech.net/tcp/30333/p2p/QmTFUXWi98EADXdsUxvv7t9fhJG1XniRijahDXxdv1EbAW", - "/dns/kusama-bootnode-0.paritytech.net/tcp/30334/ws/p2p/QmTFUXWi98EADXdsUxvv7t9fhJG1XniRijahDXxdv1EbAW", - "/dns/kusama-bootnode-1.paritytech.net/tcp/30333/p2p/Qmf58BhdDSkHxGy1gX5YUuHCpdYYGACxQM3nGWa7xJa5an" + "/dns/kusama-bootnode-0.paritytech.net/tcp/30333/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h", + "/dns/kusama-bootnode-0.paritytech.net/tcp/30334/ws/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h", + "/dns/kusama-bootnode-1.paritytech.net/tcp/30333/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw" ], "telemetryEndpoints": [ [ diff --git a/service/res/polkadot.json b/service/res/polkadot.json index 90e3f529a878553c6e303248f1cf8b00672f1afa..9d6742d5cedd6a14b3256da07880b1c6cebe48c2 100644 --- a/service/res/polkadot.json +++ b/service/res/polkadot.json @@ -1,5 +1,5 @@ { - "name": "Polkadot CC1", + "name": "Polkadot", "id": "polkadot", "chainType": "Live", "bootNodes": [ @@ -22,7 +22,7 @@ "properties": { "ss58Format": 0, "tokenDecimals": 12, - "tokenSymbol": "DOT" + "tokenSymbol": "DOT (old)" }, "forkBlocks": null, "badBlocks": null, diff --git a/service/res/westend.json 
b/service/res/westend.json index 5aebe04d231b29a3b31e6b299d0c4bd06e8c5943..2f4c53ee845207469ab9bb053715b0303e94d778 100644 --- a/service/res/westend.json +++ b/service/res/westend.json @@ -2,14 +2,14 @@ "name": "Westend", "id": "westend2", "bootNodes": [ - "/ip4/104.155.79.90/tcp/30333/p2p/QmZ4HcUygvbrPaABU9MfjqFsSPUKwmH6LyXBFYX2xSXGo2", - "/ip4/104.155.79.90/tcp/30334/ws/p2p/QmZ4HcUygvbrPaABU9MfjqFsSPUKwmH6LyXBFYX2xSXGo2", - "/ip4/35.205.142.129/tcp/30333/p2p/QmaynNd1C321UowxH8a8MVBe6R8FUYQmaU6Sm4wnvdTyuq", - "/ip4/35.205.142.129/tcp/30334/ws/p2p/QmaynNd1C321UowxH8a8MVBe6R8FUYQmaU6Sm4wnvdTyuq", - "/ip4/34.73.55.183/tcp/30333/p2p/QmPDkHPQtjVQzhVUwhg1hKyV3U2rmF46aUN4pDUXopZd9j", - "/ip4/34.73.55.183/tcp/30334/ws/p2p/QmPDkHPQtjVQzhVUwhg1hKyV3U2rmF46aUN4pDUXopZd9j", - "/ip4/35.243.227.147/tcp/30333/p2p/QmfZHq4crnQyJky6vLgUECCVmLiCM1VuiGUpdw44FvLEr5", - "/ip4/35.243.227.147/tcp/30334/ws/p2p/QmfZHq4crnQyJky6vLgUECCVmLiCM1VuiGUpdw44FvLEr5" + "/dns/0.westend.paritytech.net/tcp/30333/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC", + "/dns/0.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWKer94o1REDPtAhjtYR4SdLehnSrN8PEhBnZm5NBoCrMC", + "/dns/1.westend.paritytech.net/tcp/30333/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS", + "/dns/1.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWPVPzs42GvRBShdUMtFsk4SvnByrSdWqb6aeAAHvLMSLS", + "/dns/2.westend.paritytech.net/tcp/30333/p2p/12D3KooWByVpK92hMi9CzTjyFg9cPHDU5ariTM3EPMq9vdh5S5Po", + "/dns/2.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWByVpK92hMi9CzTjyFg9cPHDU5ariTM3EPMq9vdh5S5Po", + "/dns/3.westend.paritytech.net/tcp/30333/p2p/12D3KooWGi1tCpKXLMYED9y28QXLnwgD4neYb1Arqq4QpeV1Sv3K", + "/dns/3.westend.paritytech.net/tcp/30334/ws/p2p/12D3KooWGi1tCpKXLMYED9y28QXLnwgD4neYb1Arqq4QpeV1Sv3K" ], "telemetryEndpoints": [ [ diff --git a/service/src/chain_spec.rs b/service/src/chain_spec.rs index 5e1bfbbdbab0c24a926453b793a2d4edbec0b4ae..900742c3a2404aac9140e145296beeb85136374f 100644 --- a/service/src/chain_spec.rs +++ b/service/src/chain_spec.rs @@ -17,7 +17,7 @@ //! Polkadot chain configurations. use sp_core::{Pair, Public, crypto::UncheckedInto, sr25519}; -use polkadot_primitives::{AccountId, AccountPublic, parachain::ValidatorId}; +use polkadot_primitives::v0::{AccountId, AccountPublic, ValidatorId}; use polkadot_runtime as polkadot; use kusama_runtime as kusama; use westend_runtime as westend; @@ -31,7 +31,7 @@ use telemetry::TelemetryEndpoints; use hex_literal::hex; use babe_primitives::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; -use im_online::sr25519::{AuthorityId as ImOnlineId}; +use pallet_im_online::sr25519::{AuthorityId as ImOnlineId}; use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; use pallet_staking::Forcing; @@ -48,9 +48,9 @@ const DEFAULT_PROTOCOL_ID: &str = "dot"; #[serde(rename_all = "camelCase")] pub struct Extensions { /// Block numbers with known hashes. - pub fork_blocks: sc_client_api::ForkBlocks, + pub fork_blocks: sc_client_api::ForkBlocks, /// Known bad block hashes. - pub bad_blocks: sc_client_api::BadBlocks, + pub bad_blocks: sc_client_api::BadBlocks, } /// The `ChainSpec parametrised for polkadot runtime`. 
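A pattern worth noting in these chain_spec.rs hunks: each `*_genesis` builder now receives the runtime blob as a `wasm_binary: &[u8]` argument instead of reading a `WASM_BINARY` constant inline, so a missing wasm build surfaces as a single early error rather than a failure deep inside genesis construction. A hedged, self-contained sketch of the idea; all names and types here are simplified stand-ins, not the real service API:

```rust
struct SystemConfig { code: Vec<u8> }
struct GenesisConfig { frame_system: Option<SystemConfig> }

// The builder only consumes the blob; it never decides where it comes from.
fn staging_testnet_genesis(wasm_binary: &[u8]) -> GenesisConfig {
    GenesisConfig {
        frame_system: Some(SystemConfig { code: wasm_binary.to_vec() }),
    }
}

fn build_spec() -> Result<GenesisConfig, String> {
    // Stand-in for the runtime's wasm constant, absent if the runtime was
    // built without its wasm artifact.
    let wasm_binary: Option<&[u8]> = Some(&[0x00, 0x61, 0x73, 0x6d]); // "\0asm"
    let wasm = wasm_binary.ok_or("wasm binary not available")?;
    Ok(staging_testnet_genesis(wasm))
}

fn main() {
    assert!(build_spec().is_ok());
}
```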
@@ -79,8 +79,8 @@ pub fn kusama_config() -> Result { KusamaChainSpec::from_json_bytes(&include_bytes!("../res/kusama.json")[..]) } -pub fn westend_config() -> Result { - PolkadotChainSpec::from_json_bytes(&include_bytes!("../res/westend.json")[..]) +pub fn westend_config() -> Result { + WestendChainSpec::from_json_bytes(&include_bytes!("../res/westend.json")[..]) } fn polkadot_session_keys( @@ -113,7 +113,7 @@ fn westend_session_keys( westend::SessionKeys { babe, grandpa, im_online, parachain_validator, authority_discovery } } -fn polkadot_staging_testnet_config_genesis() -> polkadot::GenesisConfig { +fn polkadot_staging_testnet_config_genesis(wasm_binary: &[u8]) -> polkadot::GenesisConfig { // subkey inspect "$SECRET" let endowed_accounts = vec![]; @@ -131,27 +131,27 @@ fn polkadot_staging_testnet_config_genesis() -> polkadot::GenesisConfig { const STASH: u128 = 100 * DOTS; polkadot::GenesisConfig { - system: Some(polkadot::SystemConfig { - code: polkadot::WASM_BINARY.to_vec(), + frame_system: Some(polkadot::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - balances: Some(polkadot::BalancesConfig { + pallet_balances: Some(polkadot::BalancesConfig { balances: endowed_accounts.iter() .map(|k: &AccountId| (k.clone(), ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), }), - indices: Some(polkadot::IndicesConfig { + pallet_indices: Some(polkadot::IndicesConfig { indices: vec![], }), - session: Some(polkadot::SessionConfig { + pallet_session: Some(polkadot::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), polkadot_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(polkadot::StakingConfig { + pallet_staking: Some(polkadot::StakingConfig { validator_count: 50, minimum_validator_count: 4, stakers: initial_authorities @@ -163,44 +163,34 @@ fn polkadot_staging_testnet_config_genesis() -> polkadot::GenesisConfig { slash_reward_fraction: Perbill::from_percent(10), .. 
Default::default() }), - elections_phragmen: Some(Default::default()), - democracy: Some(Default::default()), - collective_Instance1: Some(polkadot::CouncilConfig { + pallet_elections_phragmen: Some(Default::default()), + pallet_democracy: Some(Default::default()), + pallet_collective_Instance1: Some(polkadot::CouncilConfig { members: vec![], phantom: Default::default(), }), - collective_Instance2: Some(polkadot::TechnicalCommitteeConfig { + pallet_collective_Instance2: Some(polkadot::TechnicalCommitteeConfig { members: vec![], phantom: Default::default(), }), - membership_Instance1: Some(Default::default()), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { + pallet_membership_Instance1: Some(Default::default()), + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(polkadot::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(polkadot::RegistrarConfig { - parachains: vec![], - _phdata: Default::default(), - }), claims: Some(polkadot::ClaimsConfig { claims: vec![], vesting: vec![], }), - vesting: Some(polkadot::VestingConfig { + pallet_vesting: Some(polkadot::VestingConfig { vesting: vec![], }), - sudo: Some(polkadot::SudoConfig { - key: endowed_accounts[0].clone(), - }), } } -fn westend_staging_testnet_config_genesis() -> westend::GenesisConfig { +fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::GenesisConfig { // subkey inspect "$SECRET" let endowed_accounts = vec![ // 5ENpP27BrVdJTdUfY6djmcw3d3xEJ6NzSUU52CCPmGpMrdEY @@ -286,27 +276,27 @@ fn westend_staging_testnet_config_genesis() -> westend::GenesisConfig { const STASH: u128 = 100 * WND; westend::GenesisConfig { - system: Some(westend::SystemConfig { - code: westend::WASM_BINARY.to_vec(), + frame_system: Some(westend::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - balances: Some(westend::BalancesConfig { + pallet_balances: Some(westend::BalancesConfig { balances: endowed_accounts.iter() .map(|k: &AccountId| (k.clone(), ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), }), - indices: Some(westend::IndicesConfig { + pallet_indices: Some(westend::IndicesConfig { indices: vec![], }), - session: Some(westend::SessionConfig { + pallet_session: Some(westend::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), westend_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(westend::StakingConfig { + pallet_staking: Some(westend::StakingConfig { validator_count: 50, minimum_validator_count: 4, stakers: initial_authorities @@ -318,29 +308,22 @@ fn westend_staging_testnet_config_genesis() -> westend::GenesisConfig { slash_reward_fraction: Perbill::from_percent(10), .. 
Default::default() }), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(westend::AuthorityDiscoveryConfig { + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(westend::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(westend::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(westend::RegistrarConfig { - parachains: vec![], - _phdata: Default::default(), - }), - vesting: Some(westend::VestingConfig { + pallet_vesting: Some(westend::VestingConfig { vesting: vec![], }), - sudo: Some(westend::SudoConfig { + pallet_sudo: Some(westend::SudoConfig { key: endowed_accounts[0].clone(), }), } } -fn kusama_staging_testnet_config_genesis() -> kusama::GenesisConfig { +fn kusama_staging_testnet_config_genesis(wasm_binary: &[u8]) -> kusama::GenesisConfig { // subkey inspect "$SECRET" let endowed_accounts = vec![ // 5CVFESwfkk7NmhQ6FwHCM9roBvr9BGa4vJHFYU8DnGQxrXvz @@ -426,27 +409,27 @@ fn kusama_staging_testnet_config_genesis() -> kusama::GenesisConfig { const STASH: u128 = 100 * KSM; kusama::GenesisConfig { - system: Some(kusama::SystemConfig { - code: kusama::WASM_BINARY.to_vec(), + frame_system: Some(kusama::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - balances: Some(kusama::BalancesConfig { + pallet_balances: Some(kusama::BalancesConfig { balances: endowed_accounts.iter() .map(|k: &AccountId| (k.clone(), ENDOWMENT)) .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) .collect(), }), - indices: Some(kusama::IndicesConfig { + pallet_indices: Some(kusama::IndicesConfig { indices: vec![], }), - session: Some(kusama::SessionConfig { + pallet_session: Some(kusama::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), kusama_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(kusama::StakingConfig { + pallet_staking: Some(kusama::StakingConfig { validator_count: 50, minimum_validator_count: 4, stakers: initial_authorities @@ -458,89 +441,88 @@ fn kusama_staging_testnet_config_genesis() -> kusama::GenesisConfig { slash_reward_fraction: Perbill::from_percent(10), .. 
Default::default() }), - elections_phragmen: Some(Default::default()), - democracy: Some(Default::default()), - collective_Instance1: Some(kusama::CouncilConfig { + pallet_elections_phragmen: Some(Default::default()), + pallet_democracy: Some(Default::default()), + pallet_collective_Instance1: Some(kusama::CouncilConfig { members: vec![], phantom: Default::default(), }), - collective_Instance2: Some(kusama::TechnicalCommitteeConfig { + pallet_collective_Instance2: Some(kusama::TechnicalCommitteeConfig { members: vec![], phantom: Default::default(), }), - membership_Instance1: Some(Default::default()), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(kusama::AuthorityDiscoveryConfig { + pallet_membership_Instance1: Some(Default::default()), + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(kusama::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(kusama::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(kusama::RegistrarConfig { - parachains: vec![], - _phdata: Default::default(), - }), claims: Some(kusama::ClaimsConfig { claims: vec![], vesting: vec![], }), - vesting: Some(kusama::VestingConfig { + pallet_vesting: Some(kusama::VestingConfig { vesting: vec![], }), } } /// Polkadot staging testnet config. -pub fn polkadot_staging_testnet_config() -> PolkadotChainSpec { +pub fn polkadot_staging_testnet_config() -> Result { + let wasm_binary = polkadot::WASM_BINARY.ok_or("Polkadot development wasm not available")?; let boot_nodes = vec![]; - PolkadotChainSpec::from_genesis( + + Ok(PolkadotChainSpec::from_genesis( "Polkadot Staging Testnet", "polkadot_staging_testnet", ChainType::Live, - polkadot_staging_testnet_config_genesis, + move || polkadot_staging_testnet_config_genesis(wasm_binary), boot_nodes, Some(TelemetryEndpoints::new(vec![(POLKADOT_STAGING_TELEMETRY_URL.to_string(), 0)]) .expect("Polkadot Staging telemetry url is valid; qed")), Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Staging testnet config. -pub fn kusama_staging_testnet_config() -> KusamaChainSpec { +pub fn kusama_staging_testnet_config() -> Result { + let wasm_binary = kusama::WASM_BINARY.ok_or("Kusama development wasm not available")?; let boot_nodes = vec![]; - KusamaChainSpec::from_genesis( + + Ok(KusamaChainSpec::from_genesis( "Kusama Staging Testnet", "kusama_staging_testnet", ChainType::Live, - kusama_staging_testnet_config_genesis, + move || kusama_staging_testnet_config_genesis(wasm_binary), boot_nodes, Some(TelemetryEndpoints::new(vec![(KUSAMA_STAGING_TELEMETRY_URL.to_string(), 0)]) .expect("Kusama Staging telemetry url is valid; qed")), Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Westend staging testnet config. 
-pub fn westend_staging_testnet_config() -> WestendChainSpec { +pub fn westend_staging_testnet_config() -> Result { + let wasm_binary = westend::WASM_BINARY.ok_or("Westend development wasm not available")?; let boot_nodes = vec![]; - WestendChainSpec::from_genesis( + + Ok(WestendChainSpec::from_genesis( "Westend Staging Testnet", "westend_staging_testnet", ChainType::Live, - westend_staging_testnet_config_genesis, + move || westend_staging_testnet_config_genesis(wasm_binary), boot_nodes, Some(TelemetryEndpoints::new(vec![(WESTEND_STAGING_TELEMETRY_URL.to_string(), 0)]) .expect("Westend Staging telemetry url is valid; qed")), Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Helper function to generate a crypto pair from seed @@ -598,8 +580,9 @@ fn testnet_accounts() -> Vec { /// Helper function to create polkadot GenesisConfig for testing pub fn polkadot_testnet_genesis( + wasm_binary: &[u8], initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ImOnlineId, ValidatorId, AuthorityDiscoveryId)>, - root_key: AccountId, + _root_key: AccountId, endowed_accounts: Option>, ) -> polkadot::GenesisConfig { let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); @@ -608,24 +591,24 @@ pub fn polkadot_testnet_genesis( const STASH: u128 = 100 * DOTS; polkadot::GenesisConfig { - system: Some(polkadot::SystemConfig { - code: polkadot::WASM_BINARY.to_vec(), + frame_system: Some(polkadot::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - indices: Some(polkadot::IndicesConfig { + pallet_indices: Some(polkadot::IndicesConfig { indices: vec![], }), - balances: Some(polkadot::BalancesConfig { + pallet_balances: Some(polkadot::BalancesConfig { balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), }), - session: Some(polkadot::SessionConfig { + pallet_session: Some(polkadot::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), polkadot_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(polkadot::StakingConfig { + pallet_staking: Some(polkadot::StakingConfig { minimum_validator_count: 1, validator_count: 2, stakers: initial_authorities.iter() @@ -636,45 +619,36 @@ pub fn polkadot_testnet_genesis( slash_reward_fraction: Perbill::from_percent(10), .. 
Default::default() }), - elections_phragmen: Some(Default::default()), - democracy: Some(polkadot::DemocracyConfig::default()), - collective_Instance1: Some(polkadot::CouncilConfig { + pallet_elections_phragmen: Some(Default::default()), + pallet_democracy: Some(polkadot::DemocracyConfig::default()), + pallet_collective_Instance1: Some(polkadot::CouncilConfig { members: vec![], phantom: Default::default(), }), - collective_Instance2: Some(polkadot::TechnicalCommitteeConfig { + pallet_collective_Instance2: Some(polkadot::TechnicalCommitteeConfig { members: vec![], phantom: Default::default(), }), - membership_Instance1: Some(Default::default()), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { + pallet_membership_Instance1: Some(Default::default()), + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(polkadot::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(polkadot::RegistrarConfig{ - parachains: vec![], - _phdata: Default::default(), - }), claims: Some(polkadot::ClaimsConfig { claims: vec![], vesting: vec![], }), - vesting: Some(polkadot::VestingConfig { + pallet_vesting: Some(polkadot::VestingConfig { vesting: vec![], }), - sudo: Some(polkadot::SudoConfig { - key: root_key, - }), } } /// Helper function to create kusama GenesisConfig for testing pub fn kusama_testnet_genesis( + wasm_binary: &[u8], initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ImOnlineId, ValidatorId, AuthorityDiscoveryId)>, _root_key: AccountId, endowed_accounts: Option>, @@ -685,24 +659,24 @@ pub fn kusama_testnet_genesis( const STASH: u128 = 100 * KSM; kusama::GenesisConfig { - system: Some(kusama::SystemConfig { - code: kusama::WASM_BINARY.to_vec(), + frame_system: Some(kusama::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - indices: Some(kusama::IndicesConfig { + pallet_indices: Some(kusama::IndicesConfig { indices: vec![], }), - balances: Some(kusama::BalancesConfig { + pallet_balances: Some(kusama::BalancesConfig { balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), }), - session: Some(kusama::SessionConfig { + pallet_session: Some(kusama::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), kusama_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(kusama::StakingConfig { + pallet_staking: Some(kusama::StakingConfig { minimum_validator_count: 1, validator_count: 2, stakers: initial_authorities.iter() @@ -713,35 +687,28 @@ pub fn kusama_testnet_genesis( slash_reward_fraction: Perbill::from_percent(10), .. 
Default::default() }), - elections_phragmen: Some(Default::default()), - democracy: Some(kusama::DemocracyConfig::default()), - collective_Instance1: Some(kusama::CouncilConfig { + pallet_elections_phragmen: Some(Default::default()), + pallet_democracy: Some(kusama::DemocracyConfig::default()), + pallet_collective_Instance1: Some(kusama::CouncilConfig { members: vec![], phantom: Default::default(), }), - collective_Instance2: Some(kusama::TechnicalCommitteeConfig { + pallet_collective_Instance2: Some(kusama::TechnicalCommitteeConfig { members: vec![], phantom: Default::default(), }), - membership_Instance1: Some(Default::default()), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(kusama::AuthorityDiscoveryConfig { + pallet_membership_Instance1: Some(Default::default()), + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(kusama::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(kusama::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(kusama::RegistrarConfig{ - parachains: vec![], - _phdata: Default::default(), - }), claims: Some(kusama::ClaimsConfig { claims: vec![], vesting: vec![], }), - vesting: Some(kusama::VestingConfig { + pallet_vesting: Some(kusama::VestingConfig { vesting: vec![], }), } @@ -749,6 +716,7 @@ pub fn kusama_testnet_genesis( /// Helper function to create polkadot GenesisConfig for testing pub fn westend_testnet_genesis( + wasm_binary: &[u8], initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ImOnlineId, ValidatorId, AuthorityDiscoveryId)>, root_key: AccountId, endowed_accounts: Option>, @@ -759,24 +727,24 @@ pub fn westend_testnet_genesis( const STASH: u128 = 100 * DOTS; westend::GenesisConfig { - system: Some(westend::SystemConfig { - code: westend::WASM_BINARY.to_vec(), + frame_system: Some(westend::SystemConfig { + code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), - indices: Some(westend::IndicesConfig { + pallet_indices: Some(westend::IndicesConfig { indices: vec![], }), - balances: Some(westend::BalancesConfig { + pallet_balances: Some(westend::BalancesConfig { balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect(), }), - session: Some(westend::SessionConfig { + pallet_session: Some(westend::SessionConfig { keys: initial_authorities.iter().map(|x| ( x.0.clone(), x.0.clone(), westend_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone(), x.6.clone()), )).collect::>(), }), - staking: Some(westend::StakingConfig { + pallet_staking: Some(westend::StakingConfig { minimum_validator_count: 1, validator_count: 2, stakers: initial_authorities.iter() @@ -787,30 +755,24 @@ pub fn westend_testnet_genesis( slash_reward_fraction: Perbill::from_percent(10), .. 
Default::default() }), - babe: Some(Default::default()), - grandpa: Some(Default::default()), - im_online: Some(Default::default()), - authority_discovery: Some(westend::AuthorityDiscoveryConfig { + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(Default::default()), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(westend::AuthorityDiscoveryConfig { keys: vec![], }), - parachains: Some(westend::ParachainsConfig { - authorities: vec![], - }), - registrar: Some(westend::RegistrarConfig{ - parachains: vec![], - _phdata: Default::default(), - }), - vesting: Some(westend::VestingConfig { + pallet_vesting: Some(westend::VestingConfig { vesting: vec![], }), - sudo: Some(westend::SudoConfig { + pallet_sudo: Some(westend::SudoConfig { key: root_key, }), } } -fn polkadot_development_config_genesis() -> polkadot::GenesisConfig { +fn polkadot_development_config_genesis(wasm_binary: &[u8]) -> polkadot::GenesisConfig { polkadot_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), ], @@ -819,8 +781,9 @@ fn polkadot_development_config_genesis() -> polkadot::GenesisConfig { ) } -fn kusama_development_config_genesis() -> kusama::GenesisConfig { +fn kusama_development_config_genesis(wasm_binary: &[u8]) -> kusama::GenesisConfig { kusama_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), ], @@ -829,8 +792,9 @@ fn kusama_development_config_genesis() -> kusama::GenesisConfig { ) } -fn westend_development_config_genesis() -> westend::GenesisConfig { +fn westend_development_config_genesis(wasm_binary: &[u8]) -> westend::GenesisConfig { westend_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), ], @@ -840,52 +804,59 @@ fn westend_development_config_genesis() -> westend::GenesisConfig { } /// Polkadot development config (single validator Alice) -pub fn polkadot_development_config() -> PolkadotChainSpec { - PolkadotChainSpec::from_genesis( +pub fn polkadot_development_config() -> Result { + let wasm_binary = polkadot::WASM_BINARY.ok_or("Polkadot development wasm not available")?; + + Ok(PolkadotChainSpec::from_genesis( "Development", "dev", ChainType::Development, - polkadot_development_config_genesis, + move || polkadot_development_config_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Kusama development config (single validator Alice) -pub fn kusama_development_config() -> KusamaChainSpec { - KusamaChainSpec::from_genesis( +pub fn kusama_development_config() -> Result { + let wasm_binary = kusama::WASM_BINARY.ok_or("Kusama development wasm not available")?; + + Ok(KusamaChainSpec::from_genesis( "Development", "kusama_dev", ChainType::Development, - kusama_development_config_genesis, + move || kusama_development_config_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } /// Westend development config (single validator Alice) -pub fn westend_development_config() -> WestendChainSpec { - WestendChainSpec::from_genesis( +pub fn westend_development_config() -> Result { + let wasm_binary = westend::WASM_BINARY.ok_or("Westend development wasm not available")?; + + Ok(WestendChainSpec::from_genesis( "Development", "westend_dev", ChainType::Development, - westend_development_config_genesis, + move || westend_development_config_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } -fn polkadot_local_testnet_genesis() -> polkadot::GenesisConfig { +fn 
polkadot_local_testnet_genesis(wasm_binary: &[u8]) -> polkadot::GenesisConfig { polkadot_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), @@ -896,22 +867,25 @@ fn polkadot_local_testnet_genesis() -> polkadot::GenesisConfig { } /// Polkadot local testnet config (multivalidator Alice + Bob) -pub fn polkadot_local_testnet_config() -> PolkadotChainSpec { - PolkadotChainSpec::from_genesis( +pub fn polkadot_local_testnet_config() -> Result { + let wasm_binary = polkadot::WASM_BINARY.ok_or("Polkadot development wasm not available")?; + + Ok(PolkadotChainSpec::from_genesis( "Local Testnet", "local_testnet", ChainType::Local, - polkadot_local_testnet_genesis, + move || polkadot_local_testnet_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } -fn kusama_local_testnet_genesis() -> kusama::GenesisConfig { +fn kusama_local_testnet_genesis(wasm_binary: &[u8]) -> kusama::GenesisConfig { kusama_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), @@ -922,22 +896,25 @@ fn kusama_local_testnet_genesis() -> kusama::GenesisConfig { } /// Kusama local testnet config (multivalidator Alice + Bob) -pub fn kusama_local_testnet_config() -> KusamaChainSpec { - KusamaChainSpec::from_genesis( +pub fn kusama_local_testnet_config() -> Result { + let wasm_binary = kusama::WASM_BINARY.ok_or("Kusama development wasm not available")?; + + Ok(KusamaChainSpec::from_genesis( "Kusama Local Testnet", "kusama_local_testnet", ChainType::Local, - kusama_local_testnet_genesis, + move || kusama_local_testnet_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } -fn westend_local_testnet_genesis() -> westend::GenesisConfig { +fn westend_local_testnet_genesis(wasm_binary: &[u8]) -> westend::GenesisConfig { westend_testnet_genesis( + wasm_binary, vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), @@ -948,16 +925,18 @@ fn westend_local_testnet_genesis() -> westend::GenesisConfig { } /// Westend local testnet config (multivalidator Alice + Bob) -pub fn westend_local_testnet_config() -> WestendChainSpec { - WestendChainSpec::from_genesis( +pub fn westend_local_testnet_config() -> Result { + let wasm_binary = westend::WASM_BINARY.ok_or("Westend development wasm not available")?; + + Ok(WestendChainSpec::from_genesis( "Westend Local Testnet", "westend_local_testnet", ChainType::Local, - westend_local_testnet_genesis, + move || westend_local_testnet_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), None, Default::default(), - ) + )) } diff --git a/service/src/client.rs b/service/src/client.rs index 28d2bccabbe5e270dce5bdd66253e343233c8b60..563a049cbd1382de3d8ec3df8f146af0fed246d9 100644 --- a/service/src/client.rs +++ b/service/src/client.rs @@ -14,40 +14,145 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Polkadot Client meta trait +//! Polkadot Client abstractions. 
-use sp_api::{ProvideRuntimeApi, ConstructRuntimeApi, CallApiAt};
+use std::sync::Arc;
+use sp_api::{ProvideRuntimeApi, CallApiAt};
 use sp_blockchain::HeaderBackend;
-use sp_runtime::traits::Block as BlockT;
+use sp_runtime::traits::{Block as BlockT, BlakeTwo256};
 use sc_client_api::{Backend as BackendT, BlockchainEvents};
+use polkadot_primitives::v0::{Block, ParachainHost, AccountId, Nonce, Balance};
 
-/// Polkadot client abstraction, this super trait only pulls in functionality required for
-/// polkadot internal crates like polkadot-collator.
-pub trait PolkadotClient<Block, Backend, Runtime>:
+/// A set of APIs that polkadot-like runtimes must implement.
+pub trait RuntimeApiCollection:
+	sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
+	+ sp_api::ApiExt<Block, Error = sp_blockchain::Error>
+	+ babe_primitives::BabeApi<Block>
+	+ grandpa_primitives::GrandpaApi<Block>
+	+ ParachainHost<Block>
+	+ sp_block_builder::BlockBuilder<Block>
+	+ frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce>
+	+ pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance>
+	+ sp_api::Metadata<Block>
+	+ sp_offchain::OffchainWorkerApi<Block>
+	+ sp_session::SessionKeys<Block>
+	+ authority_discovery_primitives::AuthorityDiscoveryApi<Block>
+where
+	<Self as sp_api::ApiExt<Block>>::StateBackend: sp_api::StateBackend<BlakeTwo256>,
+{}
+
+impl<Api> RuntimeApiCollection for Api
+where
+	Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
+	+ sp_api::ApiExt<Block, Error = sp_blockchain::Error>
+	+ babe_primitives::BabeApi<Block>
+	+ grandpa_primitives::GrandpaApi<Block>
+	+ ParachainHost<Block>
+	+ sp_block_builder::BlockBuilder<Block>
+	+ frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce>
+	+ pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance>
+	+ sp_api::Metadata<Block>
+	+ sp_offchain::OffchainWorkerApi<Block>
+	+ sp_session::SessionKeys<Block>
+	+ authority_discovery_primitives::AuthorityDiscoveryApi<Block>,
+	<Self as sp_api::ApiExt<Block>>::StateBackend: sp_api::StateBackend<BlakeTwo256>,
+{}
+
+/// Trait that abstracts over all available client implementations.
+///
+/// For a concrete type there exists [`Client`].
+pub trait AbstractClient<Block, Backend>:
 	BlockchainEvents<Block> + Sized + Send + Sync
 	+ ProvideRuntimeApi<Block>
 	+ HeaderBackend<Block>
 	+ CallApiAt<
 		Block,
 		Error = sp_blockchain::Error,
-		StateBackend = Backend ::State
+		StateBackend = Backend::State
 	>
 	where
 		Block: BlockT,
 		Backend: BackendT<Block>,
-		Runtime: ConstructRuntimeApi<Block, Self>
+		Backend::State: sp_api::StateBackend<BlakeTwo256>,
+		Self::Api: RuntimeApiCollection<StateBackend = Backend::State>,
 {}
 
-impl<Block, Backend, Runtime, Client> PolkadotClient<Block, Backend, Runtime> for Client
+impl<Block, Backend, Client> AbstractClient<Block, Backend> for Client
 	where
 		Block: BlockT,
-		Runtime: ConstructRuntimeApi<Block, Self>,
 		Backend: BackendT<Block>,
-		Client: BlockchainEvents<Block> + ProvideRuntimeApi<Block> + HeaderBackend<Block>
+		Backend::State: sp_api::StateBackend<BlakeTwo256>,
+		Client: BlockchainEvents<Block> + ProvideRuntimeApi<Block> + HeaderBackend<Block>
 			+ Sized + Send + Sync
 			+ CallApiAt<
 				Block,
 				Error = sp_blockchain::Error,
-				StateBackend = Backend ::State
-			>
+				StateBackend = Backend::State
+			>,
+		Client::Api: RuntimeApiCollection<StateBackend = Backend::State>,
 {}
 
+/// Execute something with the client instance.
+///
+/// As there exist multiple chains inside Polkadot, like Polkadot itself, Kusama, Westend etc,
+/// there can exist different kinds of client types. As these client types differ in the generics
+/// that are being used, we can not easily return them from a function. For returning them from a
+/// function there exists [`Client`]. However, the problem on how to use this client instance still
+/// exists. This trait "solves" it in a dirty way. It requires a type to implement this trait and
+/// then the [`execute_with_client`](ExecuteWithClient::execute_with_client) function can be called
+/// with any possible client instance.
+///
+/// In a perfect world, we could make a closure work in this way.
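To make the pattern that comment describes concrete, here is a minimal sketch of a caller-side visitor; it is not part of the diff, `BestNumber` is an invented name, and the where-clause mirrors the `execute_with_client` signature of the `ExecuteWithClient` trait that the hunk continues with just below:

	// Hypothetical visitor over any concrete client variant.
	struct BestNumber;

	impl ExecuteWithClient for BestNumber {
		type Output = String;

		fn execute_with_client<Client, Api, Backend>(self, client: Arc<Client>) -> Self::Output
			where
				<Api as sp_api::ApiExt<Block>>::StateBackend: sp_api::StateBackend<BlakeTwo256>,
				Backend: sc_client_api::Backend<Block>,
				Backend::State: sp_api::StateBackend<BlakeTwo256>,
				Api: RuntimeApiCollection<StateBackend = Backend::State>,
				Client: AbstractClient<Block, Backend, Api = Api> + 'static,
		{
			// `HeaderBackend` is a supertrait of `AbstractClient`, so chain info is
			// available without naming the concrete runtime or executor.
			format!("best block: {}", client.info().best_number)
		}
	}

With the `Client` enum and `ClientHandle` introduced below, dispatch then reduces to `client.execute_with(BestNumber)`, whichever of the three runtimes the node was built for.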
+pub trait ExecuteWithClient { + /// The return type when calling this instance. + type Output; + + /// Execute whatever should be executed with the given client instance. + fn execute_with_client(self, client: Arc) -> Self::Output + where + >::StateBackend: sp_api::StateBackend, + Backend: sc_client_api::Backend, + Backend::State: sp_api::StateBackend, + Api: crate::RuntimeApiCollection, + Client: AbstractClient + 'static; +} + +/// A handle to a Polkadot client instance. +/// +/// The Polkadot service supports multiple different runtimes (Westend, Polkadot itself, etc). As each runtime has a +/// specialized client, we need to hide them behind a trait. This is this trait. +/// +/// When wanting to work with the inner client, you need to use `execute_with`. +/// +/// See [`ExecuteWithClient`](trait.ExecuteWithClient.html) for more information. +pub trait ClientHandle { + /// Execute the given something with the client. + fn execute_with(&self, t: T) -> T::Output; +} + +/// A client instance of Polkadot. +/// +/// See [`ExecuteWithClient`] for more information. +#[derive(Clone)] +pub enum Client { + Polkadot(Arc>), + Westend(Arc>), + Kusama(Arc>), +} + +impl ClientHandle for Client { + fn execute_with(&self, t: T) -> T::Output { + match self { + Self::Polkadot(client) => { + T::execute_with_client::<_, _, crate::FullBackend>(t, client.clone()) + }, + Self::Westend(client) => { + T::execute_with_client::<_, _, crate::FullBackend>(t, client.clone()) + }, + Self::Kusama(client) => { + T::execute_with_client::<_, _, crate::FullBackend>(t, client.clone()) + }, + } + } +} diff --git a/service/src/grandpa_support.rs b/service/src/grandpa_support.rs index a875c4b45a375c9513f9eef3df4c54c89e2ed76a..41179a19c46e6342420c8bf8564e674c8eab63b6 100644 --- a/service/src/grandpa_support.rs +++ b/service/src/grandpa_support.rs @@ -16,14 +16,14 @@ //! Polkadot-specific GRANDPA integration utilities. -use polkadot_primitives::Hash; +use polkadot_primitives::v0::Hash; use sp_runtime::traits::{Block as BlockT, NumberFor}; /// A custom GRANDPA voting rule that "pauses" voting (i.e. keeps voting for the /// same last finalized block) after a given block at height `N` has been /// finalized and for a delay of `M` blocks, i.e. until the best block reaches /// `N` + `M`, the voter will keep voting for block `N`. -pub(crate) struct PauseAfterBlockFor(pub(crate) N, pub(crate) N); +pub struct PauseAfterBlockFor(pub N, pub N); impl grandpa::VotingRule for PauseAfterBlockFor> where Block: BlockT, @@ -98,7 +98,7 @@ impl grandpa::VotingRule for PauseAfterBlockFor Vec<( grandpa_primitives::SetId, - (Hash, polkadot_primitives::BlockNumber), + (Hash, polkadot_primitives::v0::BlockNumber), grandpa_primitives::AuthorityList, )> { use sp_core::crypto::Ss58Codec; @@ -250,17 +250,21 @@ mod tests { let mut push_blocks = { let mut client = client.clone(); + let mut base = 0; + move |n| { - for _ in 0..n { + for i in 0..n { let mut builder = client.new_block(Default::default()).unwrap(); - for extrinsic in polkadot_test_runtime_client::needed_extrinsics(vec![]) { + for extrinsic in polkadot_test_runtime_client::needed_extrinsics(base + i) { builder.push(extrinsic).unwrap() } let block = builder.build().unwrap().block; client.import(BlockOrigin::Own, block).unwrap(); } + + base += n; } }; diff --git a/service/src/lib.rs b/service/src/lib.rs index 82e8460437a4df036957f82cae3b9b2a8af356de..d2fb3dab00af7aba6cdc5ca51813c4898d85f5a2 100644 --- a/service/src/lib.rs +++ b/service/src/lib.rs @@ -17,22 +17,23 @@ //! Polkadot service. 
Specialized wrapper over substrate service. pub mod chain_spec; -mod grandpa_support; +pub mod grandpa_support; mod client; use std::sync::Arc; use std::time::Duration; -use polkadot_primitives::{parachain, Hash, BlockId, AccountId, Nonce, Balance}; -#[cfg(feature = "full-node")] -use polkadot_network::{legacy::gossip::Known, protocol as network_protocol}; -use service::{error::Error as ServiceError, ServiceBuilder}; +use polkadot_primitives::v0 as parachain; +use service::error::Error as ServiceError; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use sc_executor::native_executor_instance; use log::info; +use sp_trie::PrefixedMemoryDB; +use sc_client_api::ExecutorProvider; +use prometheus_endpoint::Registry; pub use service::{ - AbstractService, Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, + Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, RpcHandlers, TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor, - Configuration, ChainSpec, ServiceBuilderCommand, + Configuration, ChainSpec, TaskManager, }; pub use service::config::{DatabaseConfig, PrometheusConfig}; pub use sc_executor::NativeExecutionDispatch; @@ -41,8 +42,7 @@ pub use sc_consensus::LongestChain; pub use sp_api::{Core as CoreApi, ConstructRuntimeApi, ProvideRuntimeApi, StateBackend}; pub use sp_runtime::traits::{HashFor, NumberFor}; pub use consensus_common::{SelectChain, BlockImport, block_validation::Chain}; -pub use polkadot_primitives::parachain::{CollatorId, ParachainHost}; -pub use polkadot_primitives::Block; +pub use polkadot_primitives::v0::{Block, CollatorId, ParachainHost}; pub use sp_runtime::traits::{Block as BlockT, self as runtime_traits, BlakeTwo256}; pub use chain_spec::{PolkadotChainSpec, KusamaChainSpec, WestendChainSpec}; #[cfg(feature = "full-node")] @@ -51,8 +51,7 @@ pub use codec::Codec; pub use polkadot_runtime; pub use kusama_runtime; pub use westend_runtime; -use prometheus_endpoint::Registry; -pub use self::client::PolkadotClient; +pub use self::client::*; native_executor_instance!( pub PolkadotExecutor, @@ -75,48 +74,6 @@ native_executor_instance!( frame_benchmarking::benchmarking::HostFunctions, ); -/// A set of APIs that polkadot-like runtimes must implement. 
-pub trait RuntimeApiCollection: - sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::ApiExt - + babe_primitives::BabeApi - + grandpa_primitives::GrandpaApi - + ParachainHost - + sp_block_builder::BlockBuilder - + system_rpc_runtime_api::AccountNonceApi - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + sp_api::Metadata - + sp_offchain::OffchainWorkerApi - + sp_session::SessionKeys - + authority_discovery_primitives::AuthorityDiscoveryApi -where - Extrinsic: RuntimeExtrinsic, - >::StateBackend: sp_api::StateBackend, -{} - -impl RuntimeApiCollection for Api -where - Api: - sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::ApiExt - + babe_primitives::BabeApi - + grandpa_primitives::GrandpaApi - + ParachainHost - + sp_block_builder::BlockBuilder - + system_rpc_runtime_api::AccountNonceApi - + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi - + sp_api::Metadata - + sp_offchain::OffchainWorkerApi - + sp_session::SessionKeys - + authority_discovery_primitives::AuthorityDiscoveryApi, - Extrinsic: RuntimeExtrinsic, - >::StateBackend: sp_api::StateBackend, -{} - -pub trait RuntimeExtrinsic: codec::Codec + Send + Sync + 'static {} - -impl RuntimeExtrinsic for E where E: codec::Codec + Send + Sync + 'static {} - /// Can be called for a `Configuration` to check if it is a configuration for the `Kusama` network. pub trait IdentifyVariant { /// Returns if this is a configuration for the `Kusama` network. @@ -130,556 +87,528 @@ impl IdentifyVariant for Box { fn is_kusama(&self) -> bool { self.id().starts_with("kusama") || self.id().starts_with("ksm") } + fn is_westend(&self) -> bool { self.id().starts_with("westend") || self.id().starts_with("wnd") } } -// If we're using prometheus, use a registry with a prefix of `polkadot`. -fn set_prometheus_registry(config: &mut Configuration) -> Result<(), ServiceError> { - if let Some(PrometheusConfig { registry, .. }) = config.prometheus_config.as_mut() { - *registry = Registry::new_custom(Some("polkadot".into()), None)?; - } - - Ok(()) -} - -/// Starts a `ServiceBuilder` for a full service. -/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -macro_rules! new_full_start { - ($config:expr, $runtime:ty, $executor:ty) => {{ - set_prometheus_registry(&mut $config)?; - - let mut import_setup = None; - let mut rpc_setup = None; - let inherent_data_providers = inherents::InherentDataProviders::new(); - let builder = service::ServiceBuilder::new_full::< - Block, $runtime, $executor - >($config)? - .with_select_chain(|_, backend| { - Ok(sc_consensus::LongestChain::new(backend.clone())) - })? - .with_transaction_pool(|builder| { - let pool_api = sc_transaction_pool::FullChainApi::new(builder.client().clone()); - let pool = sc_transaction_pool::BasicPool::new( - builder.config().transaction_pool.clone(), - std::sync::Arc::new(pool_api), - builder.prometheus_registry(), - ); - Ok(pool) - })? 
- .with_import_queue(| - config, - client, - mut select_chain, - _, - spawn_task_handle, - registry, - | { - let select_chain = select_chain.take() - .ok_or_else(|| service::Error::SelectChainRequired)?; - - let grandpa_hard_forks = if config.chain_spec.is_kusama() { - grandpa_support::kusama_hard_forks() - } else { - Vec::new() - }; - - let (grandpa_block_import, grandpa_link) = - grandpa::block_import_with_authority_set_hard_forks( - client.clone(), - &(client.clone() as Arc<_>), - select_chain, - grandpa_hard_forks, - )?; - - let justification_import = grandpa_block_import.clone(); - - let (block_import, babe_link) = babe::block_import( - babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - let import_queue = babe::import_queue( - babe_link.clone(), - block_import.clone(), - Some(Box::new(justification_import)), - None, - client, - inherent_data_providers.clone(), - spawn_task_handle, - registry, - )?; - - import_setup = Some((block_import, grandpa_link, babe_link)); - Ok(import_queue) - })? - .with_rpc_extensions_builder(|builder| { - let grandpa_link = import_setup.as_ref().map(|s| &s.1) - .expect("GRANDPA LinkHalf is present for full services or set up failed; qed."); - - let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = grandpa::SharedVoterState::empty(); - - rpc_setup = Some((shared_voter_state.clone())); - - let babe_link = import_setup.as_ref().map(|s| &s.2) - .expect("BabeLink is present for full services or set up faile; qed."); - - let babe_config = babe_link.config().clone(); - let shared_epoch_changes = babe_link.epoch_changes().clone(); - - let client = builder.client().clone(); - let pool = builder.pool().clone(); - let select_chain = builder.select_chain().cloned() - .expect("SelectChain is present for full services or set up failed; qed."); - let keystore = builder.keystore().clone(); - - Ok(move |deny_unsafe| -> polkadot_rpc::RpcExtension { - let deps = polkadot_rpc::FullDeps { - client: client.clone(), - pool: pool.clone(), - select_chain: select_chain.clone(), - deny_unsafe, - babe: polkadot_rpc::BabeDeps { - babe_config: babe_config.clone(), - shared_epoch_changes: shared_epoch_changes.clone(), - keystore: keystore.clone(), - }, - grandpa: polkadot_rpc::GrandpaDeps { - shared_voter_state: shared_voter_state.clone(), - shared_authority_set: shared_authority_set.clone(), - }, - }; - - polkadot_rpc::create_full(deps) - }) - })?; - - (builder, import_setup, inherent_data_providers, rpc_setup) - }} -} +/// Polkadot's full backend. +pub type FullBackend = service::TFullBackend; -/// Builds a new service for a full client. -#[macro_export] -macro_rules! 
new_full { - ( - $config:expr, - $collating_for:expr, - $max_block_data_size:expr, - $authority_discovery_enabled:expr, - $slot_duration:expr, - $grandpa_pause:expr, - $runtime:ty, - $dispatch:ty, - ) => {{ - use sc_network::Event; - use sc_client_api::ExecutorProvider; - use futures::stream::StreamExt; - use sp_core::traits::BareCryptoStorePtr; - - let is_collator = $collating_for.is_some(); - let role = $config.role.clone(); - let is_authority = role.is_authority() && !is_collator; - let force_authoring = $config.force_authoring; - let max_block_data_size = $max_block_data_size; - let db_path = match $config.database.path() { - Some(path) => std::path::PathBuf::from(path), - None => return Err("Starting a Polkadot service with a custom database isn't supported".to_string().into()), - }; - let disable_grandpa = $config.disable_grandpa; - let name = $config.network.node_name.clone(); - let authority_discovery_enabled = $authority_discovery_enabled; - let slot_duration = $slot_duration; - - let (builder, mut import_setup, inherent_data_providers, mut rpc_setup) = - new_full_start!($config, $runtime, $dispatch); - - let service = builder - .with_finality_proof_provider(|client, backend| { - let provider = client as Arc>; - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) - })? - .build_full()?; - - let (block_import, link_half, babe_link) = import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - - let shared_voter_state = rpc_setup.take() - .expect("The SharedVoterState is present for Full Services or setup failed before. qed"); - - let client = service.client(); - let known_oracle = client.clone(); - - let mut handles = FullNodeHandles::default(); - let select_chain = if let Some(select_chain) = service.select_chain() { - select_chain - } else { - info!("The node cannot start as an authority because it can't select chain."); - return Ok((service, client, handles)); - }; - let gossip_validator_select_chain = select_chain.clone(); - - let is_known = move |block_hash: &Hash| { - use consensus_common::BlockStatus; - - match known_oracle.block_status(&BlockId::hash(*block_hash)) { - Err(_) | Ok(BlockStatus::Unknown) | Ok(BlockStatus::Queued) => None, - Ok(BlockStatus::KnownBad) => Some(Known::Bad), - Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) => { - match gossip_validator_select_chain.leaves() { - Err(_) => None, - Ok(leaves) => if leaves.contains(block_hash) { - Some(Known::Leaf) - } else { - Some(Known::Old) - }, - } - } - } - }; +/// Polkadot's select chain. +pub type FullSelectChain = sc_consensus::LongestChain; - let polkadot_network_service = network_protocol::start( - service.network(), - network_protocol::Config { - collating_for: $collating_for, - }, - (is_known, client.clone()), - client.clone(), - service.spawn_task_handle(), - ).map_err(|e| format!("Could not spawn network worker: {:?}", e))?; - - let authority_handles = if is_collator || role.is_authority() { - let availability_store = { - use std::path::PathBuf; - - let mut path = PathBuf::from(db_path); - path.push("availability"); - - #[cfg(not(target_os = "unknown"))] - { - av_store::Store::new( - ::av_store::Config { - cache_size: None, - path, - }, - polkadot_network_service.clone(), - )? - } - - #[cfg(target_os = "unknown")] - av_store::Store::new_in_memory(gossip) - }; +/// Polkadot's full client. 
+pub type FullClient = service::TFullClient; - polkadot_network_service.register_availability_store(availability_store.clone()); +/// Polkadot's full Grandpa block import. +pub type FullGrandpaBlockImport = grandpa::GrandpaBlockImport< + FullBackend, Block, FullClient, FullSelectChain +>; - let (validation_service_handle, validation_service) = consensus::ServiceBuilder { - client: client.clone(), - network: polkadot_network_service.clone(), - collators: polkadot_network_service.clone(), - spawner: service.spawn_task_handle(), - availability_store: availability_store.clone(), - select_chain: select_chain.clone(), - keystore: service.keystore(), - max_block_data_size, - }.build(); +/// Polkadot's light backend. +pub type LightBackend = service::TLightBackendWithHash; - service.spawn_essential_task_handle().spawn("validation-service", Box::pin(validation_service)); +/// Polkadot's light client. +pub type LightClient = + service::TLightClientWithBackend; - handles.validation_service_handle = Some(validation_service_handle.clone()); +#[cfg(feature = "full-node")] +pub fn new_partial(config: &mut Configuration, test: bool) -> Result< + service::PartialComponents< + FullClient, FullBackend, FullSelectChain, + consensus_common::DefaultImportQueue>, + sc_transaction_pool::FullPool>, + ( + impl Fn(polkadot_rpc::DenyUnsafe, polkadot_rpc::SubscriptionManager) -> polkadot_rpc::RpcExtension, + ( + babe::BabeBlockImport< + Block, FullClient, FullGrandpaBlockImport + >, + grandpa::LinkHalf, FullSelectChain>, + babe::BabeLink + ), + grandpa::SharedVoterState, + ) + >, + Error +> + where + RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: + RuntimeApiCollection>, + Executor: NativeExecutionDispatch + 'static, +{ + if !test { + // If we're using prometheus, use a registry with a prefix of `polkadot`. + if let Some(PrometheusConfig { registry, .. 
}) = config.prometheus_config.as_mut() { + *registry = Registry::new_custom(Some("polkadot".into()), None)?; + } + } - Some((validation_service_handle, availability_store)) - } else { - None - }; + let inherent_data_providers = inherents::InherentDataProviders::new(); - if role.is_authority() { - let (validation_service_handle, availability_store) = authority_handles - .clone() - .expect("Authority handles are set for authority nodes; qed"); + let (client, backend, keystore, task_manager) = + service::new_full_parts::(&config)?; + let client = Arc::new(client); - let proposer = consensus::ProposerFactory::new( - client.clone(), - service.transaction_pool(), - validation_service_handle, - slot_duration, - service.prometheus_registry().as_ref(), - ); + let select_chain = sc_consensus::LongestChain::new(backend.clone()); - let select_chain = service.select_chain().ok_or(ServiceError::SelectChainRequired)?; - let can_author_with = - consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()); + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + ); - let block_import = availability_store.block_import( - block_import, - client.clone(), - service.spawn_task_handle(), - service.keystore(), - )?; + let grandpa_hard_forks = if config.chain_spec.is_kusama() && !test { + crate::grandpa_support::kusama_hard_forks() + } else { + Vec::new() + }; - let babe_config = babe::BabeParams { - keystore: service.keystore(), + let (grandpa_block_import, grandpa_link) = + grandpa::block_import_with_authority_set_hard_forks( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + grandpa_hard_forks, + )?; + + let justification_import = grandpa_block_import.clone(); + + let (block_import, babe_link) = babe::block_import( + babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let import_queue = babe::import_queue( + babe_link.clone(), + block_import.clone(), + Some(Box::new(justification_import)), + None, + client.clone(), + select_chain.clone(), + inherent_data_providers.clone(), + &task_manager.spawn_handle(), + config.prometheus_registry(), + consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()), + )?; + + let justification_stream = grandpa_link.justification_stream(); + let shared_authority_set = grandpa_link.shared_authority_set().clone(); + let shared_voter_state = grandpa::SharedVoterState::empty(); + + let import_setup = (block_import.clone(), grandpa_link, babe_link.clone()); + let rpc_setup = shared_voter_state.clone(); + + let babe_config = babe_link.config().clone(); + let shared_epoch_changes = babe_link.epoch_changes().clone(); + + let rpc_extensions_builder = { + let client = client.clone(); + let keystore = keystore.clone(); + let transaction_pool = transaction_pool.clone(); + let select_chain = select_chain.clone(); + + move |deny_unsafe, subscriptions| -> polkadot_rpc::RpcExtension { + let deps = polkadot_rpc::FullDeps { client: client.clone(), - select_chain, - block_import, - env: proposer, - sync_oracle: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - force_authoring, - babe_link, - can_author_with, + pool: transaction_pool.clone(), + select_chain: select_chain.clone(), + deny_unsafe, + babe: polkadot_rpc::BabeDeps { + babe_config: babe_config.clone(), + shared_epoch_changes: shared_epoch_changes.clone(), + keystore: keystore.clone(), + 
}, + grandpa: polkadot_rpc::GrandpaDeps { + shared_voter_state: shared_voter_state.clone(), + shared_authority_set: shared_authority_set.clone(), + justification_stream: justification_stream.clone(), + subscriptions, + }, }; - let babe = babe::start_babe(babe_config)?; - service.spawn_essential_task_handle().spawn_blocking("babe", babe); + polkadot_rpc::create_full(deps) } + }; - if matches!(role, Role::Authority{..} | Role::Sentry{..}) { - if authority_discovery_enabled { - let (sentries, authority_discovery_role) = match role { - Role::Authority { ref sentry_nodes } => ( - sentry_nodes.clone(), - authority_discovery::Role::Authority ( - service.keystore(), - ), - ), - Role::Sentry {..} => ( - vec![], - authority_discovery::Role::Sentry, - ), - _ => unreachable!("Due to outer matches! constraint; qed."), - }; - - let network = service.network(); - let network_event_stream = network.event_stream("authority-discovery"); - let dht_event_stream = network_event_stream.filter_map(|e| async move { match e { - Event::Dht(e) => Some(e), - _ => None, - }}).boxed(); - let authority_discovery = authority_discovery::AuthorityDiscovery::new( - service.client(), - network, - sentries, - dht_event_stream, - authority_discovery_role, - service.prometheus_registry(), - ); + Ok(service::PartialComponents { + client, backend, task_manager, keystore, select_chain, import_queue, transaction_pool, + inherent_data_providers, + other: (rpc_extensions_builder, import_setup, rpc_setup) + }) +} - service.spawn_task_handle().spawn("authority-discovery", authority_discovery); - } - } +#[cfg(feature = "full-node")] +pub fn new_full( + mut config: Configuration, + collating_for: Option<(CollatorId, parachain::Id)>, + authority_discovery_enabled: bool, + grandpa_pause: Option<(u32, u32)>, + test: bool, +) -> Result<( + TaskManager, + Arc>, + FullNodeHandles, + Arc::Hash>>, + RpcHandlers, +), Error> + where + RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: + RuntimeApiCollection>, + Executor: NativeExecutionDispatch + 'static, +{ + use sc_network::Event; + use futures::stream::StreamExt; + use sp_core::traits::BareCryptoStorePtr; + + let is_collator = collating_for.is_some(); + let role = config.role.clone(); + let is_authority = role.is_authority() && !is_collator; + let force_authoring = config.force_authoring; + let disable_grandpa = config.disable_grandpa; + let name = config.network.node_name.clone(); + + let service::PartialComponents { + client, backend, mut task_manager, keystore, select_chain, import_queue, transaction_pool, + inherent_data_providers, + other: (rpc_extensions_builder, import_setup, rpc_setup) + } = new_partial::(&mut config, test)?; + + let prometheus_registry = config.prometheus_registry().cloned(); + + let finality_proof_provider = + GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + service::build_network(service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + finality_proof_request_builder: None, + finality_proof_provider: Some(finality_proof_provider.clone()), + })?; + + if config.offchain_worker.enabled { + service::build_offchain_workers( + &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + ); + } - // if the node isn't 
actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. - let keystore = if is_authority { - Some(service.keystore() as BareCryptoStorePtr) - } else { - None - }; + let telemetry_connection_sinks = service::TelemetryConnectionSinks::default(); - let config = grandpa::Config { - // FIXME substrate#1578 make this available through chainspec - gossip_duration: Duration::from_millis(1000), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - is_authority: role.is_network_authority(), + let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams { + config, + backend: backend.clone(), + client: client.clone(), + keystore: keystore.clone(), + network: network.clone(), + rpc_extensions_builder: Box::new(rpc_extensions_builder), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + on_demand: None, + remote_blockchain: None, + telemetry_connection_sinks: telemetry_connection_sinks.clone(), + network_status_sinks, system_rpc_tx, + })?; + + let (block_import, link_half, babe_link) = import_setup; + + let shared_voter_state = rpc_setup; + + if role.is_authority() { + let proposer = consensus::ProposerFactory::new( + client.clone(), + transaction_pool, + prometheus_registry.as_ref(), + ); + + let can_author_with = + consensus_common::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let babe_config = babe::BabeParams { + keystore: keystore.clone(), + client: client.clone(), + select_chain, + block_import, + env: proposer, + sync_oracle: network.clone(), + inherent_data_providers: inherent_data_providers.clone(), + force_authoring, + babe_link, + can_author_with, }; - let enable_grandpa = !disable_grandpa; - if enable_grandpa { - // start the full GRANDPA voter - // NOTE: unlike in substrate we are currently running the full - // GRANDPA voter protocol for all full nodes (regardless of whether - // they're validators or not). at this point the full voter should - // provide better guarantees of block and vote data availability than - // the observer. - - // add a custom voting rule to temporarily stop voting for new blocks - // after the given pause block is finalized and restarting after the - // given delay. - let voting_rule = match $grandpa_pause { - Some((block, delay)) => { - info!("GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", - block, - delay, - ); - - grandpa::VotingRulesBuilder::default() - .add(grandpa_support::PauseAfterBlockFor(block, delay)) - .build() - }, - None => - grandpa::VotingRulesBuilder::default() - .build(), - }; + let babe = babe::start_babe(babe_config)?; + task_manager.spawn_essential_handle().spawn_blocking("babe", babe); + } - let grandpa_config = grandpa::GrandpaParams { - config, - link: link_half, - network: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - telemetry_on_connect: Some(service.telemetry_on_connect_stream()), - voting_rule, - prometheus_registry: service.prometheus_registry(), - shared_voter_state, + if matches!(role, Role::Authority{..} | Role::Sentry{..}) { + if authority_discovery_enabled { + let (sentries, authority_discovery_role) = match role { + Role::Authority { ref sentry_nodes } => ( + sentry_nodes.clone(), + authority_discovery::Role::Authority ( + keystore.clone(), + ), + ), + Role::Sentry {..} => ( + vec![], + authority_discovery::Role::Sentry, + ), + _ => unreachable!("Due to outer matches! 
constraint; qed."), }; - service.spawn_essential_task_handle().spawn_blocking( - "grandpa-voter", - grandpa::run_grandpa_voter(grandpa_config)? - ); - } else { - grandpa::setup_disabled_grandpa( + let network_event_stream = network.event_stream("authority-discovery"); + let dht_event_stream = network_event_stream.filter_map(|e| async move { match e { + Event::Dht(e) => Some(e), + _ => None, + }}).boxed(); + let (authority_discovery_worker, _service) = authority_discovery::new_worker_and_service( client.clone(), - &inherent_data_providers, - service.network(), - )?; + network.clone(), + sentries, + dht_event_stream, + authority_discovery_role, + prometheus_registry.clone(), + ); + + task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker); } + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = if is_authority { + Some(keystore as BareCryptoStorePtr) + } else { + None + }; + + let config = grandpa::Config { + // FIXME substrate#1578 make this available through chainspec + gossip_duration: Duration::from_millis(1000), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore, + is_authority: role.is_network_authority(), + }; + + let enable_grandpa = !disable_grandpa; + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: unlike in substrate we are currently running the full + // GRANDPA voter protocol for all full nodes (regardless of whether + // they're validators or not). at this point the full voter should + // provide better guarantees of block and vote data availability than + // the observer. + + // add a custom voting rule to temporarily stop voting for new blocks + // after the given pause block is finalized and restarting after the + // given delay. + let voting_rule = match grandpa_pause { + Some((block, delay)) => { + info!("GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", + block, + delay, + ); + + grandpa::VotingRulesBuilder::default() + .add(crate::grandpa_support::PauseAfterBlockFor(block, delay)) + .build() + }, + None => + grandpa::VotingRulesBuilder::default() + .build(), + }; + + let grandpa_config = grandpa::GrandpaParams { + config, + link: link_half, + network: network.clone(), + inherent_data_providers: inherent_data_providers.clone(), + telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), + voting_rule, + prometheus_registry: prometheus_registry.clone(), + shared_voter_state, + }; - handles.polkadot_network = Some(polkadot_network_service); - (service, client, handles) - }} + task_manager.spawn_essential_handle().spawn_blocking( + "grandpa-voter", + grandpa::run_grandpa_voter(grandpa_config)? + ); + } else { + grandpa::setup_disabled_grandpa( + client.clone(), + &inherent_data_providers, + network.clone(), + )?; + } + + network_starter.start_network(); + + Ok((task_manager, client, FullNodeHandles, network, rpc_handlers)) } /// Builds a new service for a light client. -#[macro_export] -macro_rules! new_light { - ($config:expr, $runtime:ty, $dispatch:ty) => {{ - crate::set_prometheus_registry(&mut $config)?; - let inherent_data_providers = inherents::InherentDataProviders::new(); - - ServiceBuilder::new_light::($config)? - .with_select_chain(|_, backend| { - Ok(sc_consensus::LongestChain::new(backend.clone())) - })? 
- .with_transaction_pool(|builder| { - let fetcher = builder.fetcher() - .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; - let pool_api = sc_transaction_pool::LightChainApi::new( - builder.client().clone(), - fetcher, - ); - let pool = sc_transaction_pool::BasicPool::with_revalidation_type( - builder.config().transaction_pool.clone(), - Arc::new(pool_api), - builder.prometheus_registry(), - sc_transaction_pool::RevalidationType::Light, - ); - Ok(pool) - })? - .with_import_queue_and_fprb(| - _config, - client, - backend, - fetcher, - _select_chain, - _, - spawn_task_handle, - registry, - | { - let fetch_checker = fetcher - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = grandpa::light_block_import( - client.clone(), backend, &(client.clone() as Arc<_>), Arc::new(fetch_checker) - )?; - - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); - - let (babe_block_import, babe_link) = babe::block_import( - babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. - let import_queue = babe::import_queue( - babe_link, - babe_block_import, - None, - Some(Box::new(finality_proof_import)), - client, - inherent_data_providers.clone(), - spawn_task_handle, - registry, - )?; - - Ok((import_queue, finality_proof_request_builder)) - })? - .with_finality_proof_provider(|client, backend| { - let provider = client as Arc>; - Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, provider)) as _) - })? - .with_rpc_extensions(|builder| { - let fetcher = builder.fetcher() - .ok_or_else(|| "Trying to start node RPC without active fetcher")?; - let remote_blockchain = builder.remote_backend() - .ok_or_else(|| "Trying to start node RPC without active remote blockchain")?; - - let light_deps = polkadot_rpc::LightDeps { - remote_blockchain, - fetcher, - client: builder.client().clone(), - pool: builder.pool(), - }; - Ok(polkadot_rpc::create_light(light_deps)) - })? - .build_light() - }} +fn new_light(mut config: Configuration) -> Result<(TaskManager, RpcHandlers), Error> + where + Runtime: 'static + Send + Sync + ConstructRuntimeApi>, + >>::RuntimeApi: + RuntimeApiCollection>, + Dispatch: NativeExecutionDispatch + 'static, +{ + use sc_client_api::backend::RemoteBackend; + + // If we're using prometheus, use a registry with a prefix of `polkadot`. + if let Some(PrometheusConfig { registry, .. 
}) = config.prometheus_config.as_mut() { + *registry = Registry::new_custom(Some("polkadot".into()), None)?; + } + + let (client, backend, keystore, mut task_manager, on_demand) = + service::new_light_parts::(&config)?; + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( + config.transaction_pool.clone(), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + on_demand.clone(), + )); + + let grandpa_block_import = grandpa::light_block_import( + client.clone(), backend.clone(), &(client.clone() as Arc<_>), + Arc::new(on_demand.checker().clone()), + )?; + + let finality_proof_import = grandpa_block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + let (babe_block_import, babe_link) = babe::block_import( + babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let inherent_data_providers = inherents::InherentDataProviders::new(); + + // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. + let import_queue = babe::import_queue( + babe_link, + babe_block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + select_chain.clone(), + inherent_data_providers.clone(), + &task_manager.spawn_handle(), + config.prometheus_registry(), + consensus_common::NeverCanAuthor, + )?; + + let finality_proof_provider = + GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + service::build_network(service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, + finality_proof_request_builder: Some(finality_proof_request_builder), + finality_proof_provider: Some(finality_proof_provider), + })?; + + if config.offchain_worker.enabled { + service::build_offchain_workers( + &config, backend.clone(), task_manager.spawn_handle(), client.clone(), network.clone(), + ); + } + + let light_deps = polkadot_rpc::LightDeps { + remote_blockchain: backend.remote_blockchain(), + fetcher: on_demand.clone(), + client: client.clone(), + pool: transaction_pool.clone(), + }; + + let rpc_extensions = polkadot_rpc::create_light(light_deps); + + let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams { + on_demand: Some(on_demand), + remote_blockchain: Some(backend.remote_blockchain()), + rpc_extensions_builder: Box::new(service::NoopRpcExtensionBuilder(rpc_extensions)), + task_manager: &mut task_manager, + telemetry_connection_sinks: service::TelemetryConnectionSinks::default(), + config, keystore, backend, transaction_pool, client, network, network_status_sinks, + system_rpc_tx, + })?; + + network_starter.start_network(); + + Ok((task_manager, rpc_handlers)) } /// Builds a new object suitable for chain operations. 
-pub fn new_chain_ops(mut config: Configuration) - -> Result, ServiceError> +#[cfg(feature = "full-node")] +pub fn new_chain_ops(mut config: Configuration) -> Result< + ( + Arc>, + Arc, + consensus_common::import_queue::BasicQueue>, + TaskManager, + ), + ServiceError +> where - Runtime: ConstructRuntimeApi> + Send + Sync + 'static, + Runtime: ConstructRuntimeApi> + Send + Sync + 'static, Runtime::RuntimeApi: - RuntimeApiCollection, Block>>, + RuntimeApiCollection>, Dispatch: NativeExecutionDispatch + 'static, - Extrinsic: RuntimeExtrinsic, - >::StateBackend: sp_api::StateBackend, { config.keystore = service::config::KeystoreConfig::InMemory; - Ok(new_full_start!(config, Runtime, Dispatch).0) + let service::PartialComponents { client, backend, import_queue, task_manager, .. } + = new_partial::(&mut config, false)?; + Ok((client, backend, import_queue, task_manager)) } /// Create a new Polkadot service for a full node. #[cfg(feature = "full-node")] pub fn polkadot_new_full( - mut config: Configuration, + config: Configuration, collating_for: Option<(CollatorId, parachain::Id)>, - max_block_data_size: Option, authority_discovery_enabled: bool, - slot_duration: u64, grandpa_pause: Option<(u32, u32)>, ) -> Result<( - impl AbstractService, - Arc, - polkadot_runtime::RuntimeApi - >>, + TaskManager, + Arc>, FullNodeHandles, ), ServiceError> { - let (service, client, handles) = new_full!( + let (service, client, handles, _, _) = new_full::( config, collating_for, - max_block_data_size, authority_discovery_enabled, - slot_duration, grandpa_pause, - polkadot_runtime::RuntimeApi, - PolkadotExecutor, - ); + false, + )?; Ok((service, client, handles)) } @@ -687,67 +616,48 @@ pub fn polkadot_new_full( /// Create a new Kusama service for a full node. #[cfg(feature = "full-node")] pub fn kusama_new_full( - mut config: Configuration, + config: Configuration, collating_for: Option<(CollatorId, parachain::Id)>, - max_block_data_size: Option, authority_discovery_enabled: bool, - slot_duration: u64, grandpa_pause: Option<(u32, u32)>, ) -> Result<( - impl AbstractService, - Arc, - kusama_runtime::RuntimeApi - > - >, + TaskManager, + Arc>, FullNodeHandles ), ServiceError> { - let (service, client, handles) = new_full!( + let (service, client, handles, _, _) = new_full::( config, collating_for, - max_block_data_size, authority_discovery_enabled, - slot_duration, grandpa_pause, - kusama_runtime::RuntimeApi, - KusamaExecutor, - ); + false, + )?; Ok((service, client, handles)) } -/// Create a new Kusama service for a full node. +/// Create a new Westend service for a full node. #[cfg(feature = "full-node")] pub fn westend_new_full( - mut config: Configuration, + config: Configuration, collating_for: Option<(CollatorId, parachain::Id)>, - max_block_data_size: Option, authority_discovery_enabled: bool, - slot_duration: u64, grandpa_pause: Option<(u32, u32)>, ) -> Result<( - impl AbstractService, - Arc, - westend_runtime::RuntimeApi - >>, + TaskManager, + Arc>, FullNodeHandles, ), ServiceError> { - let (service, client, handles) = new_full!( + let (service, client, handles, _, _) = new_full::( config, collating_for, - max_block_data_size, authority_discovery_enabled, - slot_duration, grandpa_pause, - westend_runtime::RuntimeApi, - WestendExecutor, - ); + false, + )?; Ok((service, client, handles)) } @@ -756,49 +666,50 @@ pub fn westend_new_full( /// of the node may use. #[cfg(feature = "full-node")] #[derive(Default)] -pub struct FullNodeHandles { - /// A handle to the Polkadot networking protocol. 
- pub polkadot_network: Option, - /// A handle to the validation service. - pub validation_service_handle: Option, -} - -/// Create a new Polkadot service for a light client. -pub fn polkadot_new_light(mut config: Configuration) -> Result< - impl AbstractService< - Block = Block, - RuntimeApi = polkadot_runtime::RuntimeApi, - Backend = TLightBackend, - SelectChain = LongestChain, Block>, - CallExecutor = TLightCallExecutor, - >, ServiceError> -{ - new_light!(config, polkadot_runtime::RuntimeApi, PolkadotExecutor) -} - -/// Create a new Kusama service for a light client. -pub fn kusama_new_light(mut config: Configuration) -> Result< - impl AbstractService< - Block = Block, - RuntimeApi = kusama_runtime::RuntimeApi, - Backend = TLightBackend, - SelectChain = LongestChain, Block>, - CallExecutor = TLightCallExecutor, - >, ServiceError> -{ - new_light!(config, kusama_runtime::RuntimeApi, KusamaExecutor) +pub struct FullNodeHandles; + +/// Build a new light node. +pub fn build_light(config: Configuration) -> Result<(TaskManager, RpcHandlers), ServiceError> { + if config.chain_spec.is_kusama() { + new_light::(config) + } else if config.chain_spec.is_westend() { + new_light::(config) + } else { + new_light::(config) + } } -/// Create a new Westend service for a light client. -pub fn westend_new_light(mut config: Configuration, ) -> Result< - impl AbstractService< - Block = Block, - RuntimeApi = westend_runtime::RuntimeApi, - Backend = TLightBackend, - SelectChain = LongestChain, Block>, - CallExecutor = TLightCallExecutor - >, - ServiceError> -{ - new_light!(config, westend_runtime::RuntimeApi, KusamaExecutor) +/// Build a new full node. +#[cfg(feature = "full-node")] +pub fn build_full( + config: Configuration, + collating_for: Option<(CollatorId, parachain::Id)>, + authority_discovery_enabled: bool, + grandpa_pause: Option<(u32, u32)>, +) -> Result<(TaskManager, Client, FullNodeHandles), ServiceError> { + if config.chain_spec.is_kusama() { + new_full::( + config, + collating_for, + authority_discovery_enabled, + grandpa_pause, + false, + ).map(|(task_manager, client, handles, _, _)| (task_manager, Client::Kusama(client), handles)) + } else if config.chain_spec.is_westend() { + new_full::( + config, + collating_for, + authority_discovery_enabled, + grandpa_pause, + false, + ).map(|(task_manager, client, handles, _, _)| (task_manager, Client::Westend(client), handles)) + } else { + new_full::( + config, + collating_for, + authority_discovery_enabled, + grandpa_pause, + false, + ).map(|(task_manager, client, handles, _, _)| (task_manager, Client::Polkadot(client), handles)) + } } diff --git a/statement-table/Cargo.toml b/statement-table/Cargo.toml index 209397056a4060f2fbb4fecf7d5bd0d7c465cbe8..358a9b37d7ae643ff9973969b9412107a4b56852 100644 --- a/statement-table/Cargo.toml +++ b/statement-table/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "polkadot-statement-table" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } primitives = { package = "polkadot-primitives", path = "../primitives" } diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index 
83cae9daf26b975526bd831f1fb0e8ec49d93399..cb95d74d62237f48fd38aa25d0cc8bcf3456cda8 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -28,6 +28,8 @@ use std::collections::hash_map::{HashMap, Entry}; use std::hash::Hash; use std::fmt::Debug; +use primitives::v1::{ValidityAttestation as PrimitiveValidityAttestation, ValidatorSignature}; + use codec::{Encode, Decode}; /// Context for the statement table. @@ -98,7 +100,7 @@ pub enum ValidityDoubleVote { /// Implicit vote by issuing and explicitly voting invalidity IssuedAndInvalidity((C, S), (D, S)), /// Direct votes for validity and invalidity - ValidityAndInvalidity(D, S, S), + ValidityAndInvalidity(C, S, S), } /// Misbehavior: multiple signatures on same statement. @@ -180,6 +182,15 @@ pub enum ValidityAttestation { Explicit(S), } +impl Into for ValidityAttestation { + fn into(self) -> PrimitiveValidityAttestation { + match self { + Self::Implicit(s) => PrimitiveValidityAttestation::Implicit(s), + Self::Explicit(s) => PrimitiveValidityAttestation::Explicit(s), + } + } +} + /// An attested-to candidate. #[derive(Clone, PartialEq, Decode, Encode)] pub struct AttestedCandidate { @@ -550,7 +561,7 @@ impl Table { // valid vote conflicting with invalid vote (ValidityVote::Valid(good), ValidityVote::Invalid(bad)) | (ValidityVote::Invalid(bad), ValidityVote::Valid(good)) => - make_vdv(ValidityDoubleVote::ValidityAndInvalidity(digest, good, bad)), + make_vdv(ValidityDoubleVote::ValidityAndInvalidity(votes.candidate.clone(), good, bad)), // two signatures on same candidate (ValidityVote::Issued(a), ValidityVote::Issued(b)) => @@ -817,7 +828,7 @@ mod tests { assert_eq!( table.detected_misbehavior.get(&AuthorityId(2)).unwrap(), &Misbehavior::ValidityDoubleVote(ValidityDoubleVote::ValidityAndInvalidity( - candidate_digest, + Candidate(2, 100), Signature(2), Signature(2), )) diff --git a/statement-table/src/lib.rs b/statement-table/src/lib.rs index 97d0cda76344c9a3a14321f4753f426a5252ce25..fed60ded0da2a08a60e82d37ec80c3951e1a810b 100644 --- a/statement-table/src/lib.rs +++ b/statement-table/src/lib.rs @@ -16,75 +16,87 @@ pub mod generic; -pub use generic::Table; - -use primitives::parachain::{ - Id, AbridgedCandidateReceipt, CompactStatement as PrimitiveStatement, ValidatorSignature, ValidatorIndex, -}; -use primitives::Hash; - -/// Statements about candidates on the network. -pub type Statement = generic::Statement; - -/// Signed statements about candidates. -pub type SignedStatement = generic::SignedStatement< - AbridgedCandidateReceipt, - Hash, - ValidatorIndex, - ValidatorSignature, ->; - -/// Kinds of misbehavior, along with proof. -pub type Misbehavior = generic::Misbehavior< - AbridgedCandidateReceipt, - Hash, - ValidatorIndex, - ValidatorSignature, ->; - -/// A summary of import of a statement. -pub type Summary = generic::Summary; - -/// Context necessary to construct a table. -pub trait Context { - /// Whether a authority is a member of a group. - /// Members are meant to submit candidates and vote on validity. - fn is_member_of(&self, authority: ValidatorIndex, group: &Id) -> bool; - - /// requisite number of votes for validity from a group. 
- fn requisite_votes(&self, group: &Id) -> usize; -} - -impl generic::Context for C { - type AuthorityId = ValidatorIndex; - type Digest = Hash; - type GroupId = Id; - type Signature = ValidatorSignature; - type Candidate = AbridgedCandidateReceipt; - - fn candidate_digest(candidate: &AbridgedCandidateReceipt) -> Hash { - candidate.hash() - } - - fn candidate_group(candidate: &AbridgedCandidateReceipt) -> Id { - candidate.parachain_index.clone() - } - - fn is_member_of(&self, authority: &Self::AuthorityId, group: &Id) -> bool { - Context::is_member_of(self, *authority, group) - } - - fn requisite_votes(&self, group: &Id) -> usize { - Context::requisite_votes(self, group) +pub use generic::{Table, Context}; + +/// Concrete instantiations suitable for v0 primitives. +pub mod v0 { + use crate::generic; + use primitives::v0::{ + Hash, + Id, AbridgedCandidateReceipt, CompactStatement as PrimitiveStatement, ValidatorSignature, ValidatorIndex, + }; + + /// Statements about candidates on the network. + pub type Statement = generic::Statement; + + /// Signed statements about candidates. + pub type SignedStatement = generic::SignedStatement< + AbridgedCandidateReceipt, + Hash, + ValidatorIndex, + ValidatorSignature, + >; + + /// Kinds of misbehavior, along with proof. + pub type Misbehavior = generic::Misbehavior< + AbridgedCandidateReceipt, + Hash, + ValidatorIndex, + ValidatorSignature, + >; + + /// A summary of import of a statement. + pub type Summary = generic::Summary; + + impl<'a> From<&'a Statement> for PrimitiveStatement { + fn from(s: &'a Statement) -> PrimitiveStatement { + match *s { + generic::Statement::Valid(s) => PrimitiveStatement::Valid(s), + generic::Statement::Invalid(s) => PrimitiveStatement::Invalid(s), + generic::Statement::Candidate(ref s) => PrimitiveStatement::Candidate(s.hash()), + } + } } } -impl<'a> From<&'a Statement> for PrimitiveStatement { - fn from(s: &'a Statement) -> PrimitiveStatement { - match *s { - generic::Statement::Valid(s) => PrimitiveStatement::Valid(s), - generic::Statement::Invalid(s) => PrimitiveStatement::Invalid(s), - generic::Statement::Candidate(ref s) => PrimitiveStatement::Candidate(s.hash()), +/// Concrete instantiations suitable for v1 primitives. +pub mod v1 { + use crate::generic; + use primitives::v1::{ + Hash, + Id, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, + ValidatorSignature, ValidatorIndex, + }; + + /// Statements about candidates on the network. + pub type Statement = generic::Statement; + + /// Signed statements about candidates. + pub type SignedStatement = generic::SignedStatement< + CommittedCandidateReceipt, + Hash, + ValidatorIndex, + ValidatorSignature, + >; + + /// Kinds of misbehavior, along with proof. + pub type Misbehavior = generic::Misbehavior< + CommittedCandidateReceipt, + Hash, + ValidatorIndex, + ValidatorSignature, + >; + + /// A summary of import of a statement. 
+ pub type Summary = generic::Summary; + + impl<'a> From<&'a Statement> for PrimitiveStatement { + fn from(s: &'a Statement) -> PrimitiveStatement { + match *s { + generic::Statement::Valid(s) => PrimitiveStatement::Valid(s), + generic::Statement::Invalid(s) => PrimitiveStatement::Invalid(s), + generic::Statement::Candidate(ref s) => PrimitiveStatement::Candidate(s.hash()), + } } } } diff --git a/validation/Cargo.toml b/validation/Cargo.toml index be07d4ce528df398471ba2abcdddbfb94ea56309..a9ce31214d0999c0ee069fc9b17e82625880b8be 100644 --- a/validation/Cargo.toml +++ b/validation/Cargo.toml @@ -1,41 +1,31 @@ [package] name = "polkadot-validation" -version = "0.8.12" +version = "0.8.22" authors = ["Parity Technologies "] edition = "2018" [dependencies] +polkadot-primitives = { path = "../primitives" } +parachain = { package = "polkadot-parachain", path = "../parachain" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +consensus = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" } +runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master" } futures = "0.3.4" -futures-timer = "2.0" -parking_lot = "0.9.0" -tokio = { version = "0.2.13", features = ["rt-core", "blocking"] } -derive_more = "0.14.1" log = "0.4.8" -exit-future = "0.2.0" -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } -availability_store = { package = "polkadot-availability-store", path = "../availability-store" } -parachain = { package = "polkadot-parachain", path = "../parachain" } -polkadot-primitives = { path = "../primitives" } -polkadot-erasure-coding = { path = "../erasure-coding" } -table = { package = "polkadot-statement-table", path = "../statement-table" } +derive_more = "0.14.1" +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master" } -consensus = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" } primitives = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master" } txpool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } block-builder = { package = "sc-block-builder", git = "https://github.com/paritytech/substrate", branch = "master" } trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master" } -runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master" } -bitvec = { version = "0.17.4", default-features = false, features 
= ["alloc"] } -runtime_babe = { package = "pallet-babe", git = "https://github.com/paritytech/substrate", branch = "master" } babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" } -keystore = { package = "sc-keystore", git = "https://github.com/paritytech/substrate", branch = "master" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "master" } -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } [dev-dependencies] sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/validation/src/block_production.rs b/validation/src/block_production.rs index 30b7ac3ccd46b979acb9cf91a337a729b10c0334..047a12a457bc9c7eaa5dbf493418768c90166b32 100644 --- a/validation/src/block_production.rs +++ b/validation/src/block_production.rs @@ -22,36 +22,25 @@ use std::{ pin::Pin, sync::Arc, - time::{self, Duration, Instant}, + time::Duration, }; use sp_blockchain::HeaderBackend; use block_builder::{BlockBuilderApi, BlockBuilderProvider}; use consensus::{Proposal, RecordProof}; -use polkadot_primitives::{Block, Header}; -use polkadot_primitives::parachain::{ - ParachainHost, NEW_HEADS_IDENTIFIER, -}; +use polkadot_primitives::v0::{NEW_HEADS_IDENTIFIER, Block, Header, AttestedCandidate}; use runtime_primitives::traits::{DigestFor, HashFor}; -use futures_timer::Delay; use txpool_api::TransactionPool; use futures::prelude::*; use inherents::InherentData; -use sp_timestamp::TimestampInherentData; use sp_api::{ApiExt, ProvideRuntimeApi}; use prometheus_endpoint::Registry as PrometheusRegistry; -use crate::{ - Error, - dynamic_inclusion::DynamicInclusion, - validation_service::ServiceHandle, -}; +use crate::Error; // Polkadot proposer factory. pub struct ProposerFactory { - service_handle: ServiceHandle, - babe_slot_duration: u64, factory: sc_basic_authorship::ProposerFactory, } @@ -60,8 +49,6 @@ impl ProposerFactory { pub fn new( client: Arc, transaction_pool: Arc, - service_handle: ServiceHandle, - babe_slot_duration: u64, prometheus: Option<&PrometheusRegistry>, ) -> Self { let factory = sc_basic_authorship::ProposerFactory::new( @@ -70,8 +57,6 @@ impl ProposerFactory { prometheus, ); ProposerFactory { - service_handle, - babe_slot_duration, factory, } } @@ -82,7 +67,7 @@ impl consensus::Environment where TxPool: TransactionPool + 'static, Client: BlockBuilderProvider + ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, - Client::Api: ParachainHost + BlockBuilderApi + Client::Api: BlockBuilderApi + ApiExt, Backend: sc_client_api::Backend< Block, @@ -101,37 +86,24 @@ where &mut self, parent_header: &Header, ) -> Self::CreateProposer { - let parent_hash = parent_header.hash(); - let slot_duration = self.babe_slot_duration.clone(); - let proposer = self.factory.init(parent_header).into_inner(); - - let maybe_proposer = self.service_handle - .clone() - .get_validation_instance(parent_hash) - .and_then(move |tracker| future::ready(proposer - .map_err(Into::into) - .map(|proposer| Proposer { - tracker, - slot_duration, - proposer, - }) - )); - - Box::pin(maybe_proposer) + let proposer = self.factory.init(parent_header) + .into_inner() + .map_err(Into::into) + .map(|proposer| Proposer { proposer }); + + Box::pin(future::ready(proposer)) } } /// The Polkadot proposer logic. 
pub struct Proposer, Backend> { - tracker: crate::validation_service::ValidationInstanceHandle, - slot_duration: u64, proposer: sc_basic_authorship::Proposer, } impl consensus::Proposer for Proposer where TxPool: TransactionPool + 'static, Client: BlockBuilderProvider + ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, - Client::Api: ParachainHost + BlockBuilderApi + ApiExt, + Client::Api: BlockBuilderApi + ApiExt, Backend: sc_client_api::Backend> + 'static, // Rust bug: https://github.com/rust-lang/rust/issues/24159 sp_api::StateBackendFor: sp_api::StateBackend> + Send, @@ -140,8 +112,11 @@ impl consensus::Proposer for Proposer; type Proposal = Pin< Box< - dyn Future>, Error>> - + Send + dyn Future>, + Self::Error, + >> + + Send > >; @@ -152,57 +127,17 @@ impl consensus::Proposer for Proposer Self::Proposal { - const SLOT_DURATION_DENOMINATOR: u64 = 3; // wait up to 1/3 of the slot for candidates. - - let initial_included = self.tracker.table().includable_count(); - let now = Instant::now(); - - let dynamic_inclusion = DynamicInclusion::new( - self.tracker.table().num_parachains(), - self.tracker.started(), - Duration::from_millis(self.slot_duration / SLOT_DURATION_DENOMINATOR), - ); - async move { - let enough_candidates = dynamic_inclusion.acceptable_in( - now, - initial_included, - ).unwrap_or_else(|| Duration::from_millis(1)); - - let believed_timestamp = match inherent_data.timestamp_inherent_data() { - Ok(timestamp) => timestamp, - Err(e) => return Err(Error::InherentError(e)), - }; - - let deadline_diff = max_duration - max_duration / 3; - - // set up delay until next allowed timestamp. - let current_timestamp = current_timestamp(); - if current_timestamp < believed_timestamp { - Delay::new(Duration::from_millis(current_timestamp - believed_timestamp)) - .await; - } - - Delay::new(enough_candidates).await; - - let proposed_candidates = self.tracker.table().proposed_set(); - let mut inherent_data = inherent_data; - inherent_data.put_data(NEW_HEADS_IDENTIFIER, &proposed_candidates) + inherent_data.put_data(NEW_HEADS_IDENTIFIER, &Vec::::new()) .map_err(Error::InherentError)?; self.proposer.propose( inherent_data, inherent_digests.clone(), - deadline_diff, + max_duration, record_proof ).await.map_err(Into::into) }.boxed() } } - -fn current_timestamp() -> u64 { - time::SystemTime::now().duration_since(time::UNIX_EPOCH) - .expect("now always later than unix epoch; qed") - .as_millis() as u64 -} diff --git a/validation/src/collation.rs b/validation/src/collation.rs deleted file mode 100644 index a2e682a066d68e07a369bb3aa0660decde88dae3..0000000000000000000000000000000000000000 --- a/validation/src/collation.rs +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Validator-side view of collation. -//! -//! This module contains type definitions, a trait for a batch of collators, and a trait for -//! 
attempting to fetch a collation repeatedly until a valid one is obtained. - -use std::sync::Arc; - -use polkadot_primitives::{ - BlakeTwo256, Block, Hash, HashT, - parachain::{ - CollatorId, ParachainHost, Id as ParaId, Collation, ErasureChunk, CollationInfo, - }, -}; -use polkadot_erasure_coding as erasure; -use sp_api::ProvideRuntimeApi; -use futures::prelude::*; -use log::debug; - -/// Encapsulates connections to collators and allows collation on any parachain. -/// -/// This is expected to be a lightweight, shared type like an `Arc`. -pub trait Collators: Clone { - /// Errors when producing collations. - type Error: std::fmt::Debug; - /// A full collation. - type Collation: Future>; - - /// Collate on a specific parachain, building on a given relay chain parent hash. - /// - /// The returned collation should be checked for basic validity in the signature - /// and will be checked for state-transition validity by the consumer of this trait. - /// - /// This does not have to guarantee local availability, as a valid collation - /// will be passed to the `TableRouter` instance. - /// - /// The returned future may be prematurely concluded if the `relay_parent` goes - /// out of date. - fn collate(&self, parachain: ParaId, relay_parent: Hash) -> Self::Collation; - - /// Note a bad collator. TODO: take proof (https://github.com/paritytech/polkadot/issues/217) - fn note_bad_collator(&self, collator: CollatorId); -} - -/// A future which resolves when a collation is available. -pub async fn collation_fetch( - validation_pool: Option, - parachain: ParaId, - relay_parent: Hash, - collators: C, - client: Arc

, - max_block_data_size: Option, - n_validators: usize, -) -> Result<(CollationInfo, crate::pipeline::FullOutput), C::Error> - where - P::Api: ParachainHost, - C: Collators + Unpin, - P: ProvideRuntimeApi, - ::Collation: Unpin, -{ - loop { - let collation = collators.collate(parachain, relay_parent).await?; - let Collation { info, pov } = collation; - let res = crate::pipeline::full_output_validation_with_api( - validation_pool.as_ref(), - &*client, - &info, - &pov, - &relay_parent, - max_block_data_size, - n_validators, - ); - - match res { - Ok(full_output) => { - return Ok((info, full_output)) - } - Err(e) => { - debug!("Failed to validate parachain due to API error: {}", e); - - // just continue if we got a bad collation or failed to validate - collators.note_bad_collator(info.collator) - } - } - } -} - -/// Validate an erasure chunk against an expected root. -pub fn validate_chunk( - root: &Hash, - chunk: &ErasureChunk, -) -> Result<(), ()> { - let expected = erasure::branch_hash(root, &chunk.proof, chunk.index as usize).map_err(|_| ())?; - let got = BlakeTwo256::hash(&chunk.chunk); - - if expected != got { - return Err(()) - } - - Ok(()) -} diff --git a/validation/src/dynamic_inclusion.rs b/validation/src/dynamic_inclusion.rs deleted file mode 100644 index d1091159d6a725d74f664ec8b0c84ce834fbd2b6..0000000000000000000000000000000000000000 --- a/validation/src/dynamic_inclusion.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Dynamic inclusion threshold over time. - -use std::time::{Duration, Instant}; - -fn duration_to_micros(duration: &Duration) -> u64 { - duration.as_secs() * 1_000_000 + (duration.subsec_nanos() / 1000) as u64 -} - -/// Dynamic inclusion threshold over time. -/// -/// The acceptable proportion of parachains which must have parachain candidates -/// reduces over time (eventually going to zero). -#[derive(Debug, Clone)] -pub struct DynamicInclusion { - start: Instant, - y: u64, - m: u64, -} - -impl DynamicInclusion { - /// Constructs a new dynamic inclusion threshold calculator based on the time now, - /// how many parachain candidates are required at the beginning, and when an empty - /// block will be allowed. - pub fn new(initial: usize, start: Instant, allow_empty: Duration) -> Self { - // linear function f(n_candidates) -> valid after microseconds - // f(0) = allow_empty - // f(initial) = 0 - // m is actually the negative slope to avoid using signed arithmetic. - let (y, m) = if initial != 0 { - let y = duration_to_micros(&allow_empty); - - (y, y / initial as u64) - } else { - (0, 0) - }; - - DynamicInclusion { - start, - y, - m, - } - } - - /// Returns the duration from `now` after which the amount of included parachain candidates - /// would be enough, or `None` if it is sufficient now. - /// - /// Panics if `now` is earlier than the `start`. 
- pub fn acceptable_in(&self, now: Instant, included: usize) -> Option { - let elapsed = now.duration_since(self.start); - let elapsed = duration_to_micros(&elapsed); - - let valid_after = self.y.saturating_sub(self.m * included as u64); - - if elapsed >= valid_after { - None - } else { - Some(Duration::from_millis((valid_after - elapsed) as u64 / 1000)) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn full_immediately_allowed() { - let now = Instant::now(); - - let dynamic = DynamicInclusion::new( - 10, - now, - Duration::from_millis(4000), - ); - - assert!(dynamic.acceptable_in(now, 10).is_none()); - assert!(dynamic.acceptable_in(now, 11).is_none()); - assert!(dynamic.acceptable_in(now + Duration::from_millis(2000), 10).is_none()); - } - - #[test] - fn half_allowed_halfway() { - let now = Instant::now(); - - let dynamic = DynamicInclusion::new( - 10, - now, - Duration::from_millis(4000), - ); - - assert_eq!(dynamic.acceptable_in(now, 5), Some(Duration::from_millis(2000))); - assert!(dynamic.acceptable_in(now + Duration::from_millis(2000), 5).is_none()); - assert!(dynamic.acceptable_in(now + Duration::from_millis(3000), 5).is_none()); - assert!(dynamic.acceptable_in(now + Duration::from_millis(4000), 5).is_none()); - } - - #[test] - fn zero_initial_is_flat() { - let now = Instant::now(); - - let dynamic = DynamicInclusion::new( - 0, - now, - Duration::from_secs(10_000), - ); - - for i in 0..10_001 { - let now = now + Duration::from_secs(i); - assert!(dynamic.acceptable_in(now, 0).is_none()); - assert!(dynamic.acceptable_in(now, 1).is_none()); - assert!(dynamic.acceptable_in(now, 10).is_none()); - } - } -} diff --git a/validation/src/error.rs b/validation/src/error.rs index 834270151f0be1341e7828a72b4f36e1957a5fe7..913b110e7f6718e44916271f79bec607c8c05af7 100644 --- a/validation/src/error.rs +++ b/validation/src/error.rs @@ -16,8 +16,6 @@ //! Errors that can occur during the validation process. -use polkadot_primitives::{parachain::ValidatorId, Hash}; - /// Error type for validation #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { @@ -25,63 +23,9 @@ pub enum Error { Client(sp_blockchain::Error), /// Consensus error Consensus(consensus::error::Error), - /// A wasm-validation error. - WasmValidation(parachain::wasm_executor::Error), - /// An I/O error. - Io(std::io::Error), - /// An error in the availability erasure-coding. - ErasureCoding(polkadot_erasure_coding::Error), - #[display(fmt = "Invalid duty roster length: expected {}, got {}", expected, got)] - InvalidDutyRosterLength { - /// Expected roster length - expected: usize, - /// Actual roster length - got: usize, - }, - /// Local account not a validator at this block - #[display(fmt = "Local account ID ({:?}) not a validator at this block.", _0)] - NotValidator(ValidatorId), /// Unexpected error checking inherents #[display(fmt = "Unexpected error while checking inherents: {}", _0)] InherentError(inherents::Error), - /// Proposer destroyed before finishing proposing or evaluating - #[display(fmt = "Proposer destroyed before finishing proposing or evaluating")] - PrematureDestruction, - /// Failed to build the table router. 
- #[display(fmt = "Failed to build the table router: {}", _0)] - CouldNotBuildTableRouter(String), - /// Timer failed - #[display(fmt = "Timer failed: {}", _0)] - Timer(std::io::Error), - #[display(fmt = "Failed to compute deadline of now + {:?}", _0)] - DeadlineComputeFailure(std::time::Duration), - #[display(fmt = "Validation service is down.")] - ValidationServiceDown, - /// PoV-block in collation doesn't match provided. - #[display(fmt = "PoV hash mismatch. Expected {:?}, got {:?}", _0, _1)] - PoVHashMismatch(Hash, Hash), - /// Collator signature is invalid. - #[display(fmt = "Invalid collator signature on collation")] - InvalidCollatorSignature, - /// Head-data too large. - #[display(fmt = "Head data size of {} exceeded maximum of {}", _0, _1)] - HeadDataTooLarge(usize, usize), - /// Head-data mismatch after validation. - #[display(fmt = "Validation produced a different parachain header")] - HeadDataMismatch, - /// Relay parent of candidate not allowed. - #[display(fmt = "Relay parent {} of candidate not allowed in this context.", _0)] - DisallowedRelayParent(Hash), - /// Commitments in candidate match commitments produced by validation. - #[display(fmt = "Commitments in candidate receipt do not match those produced by validation")] - CommitmentsMismatch, - /// The parachain for which validation work is being done is not active. - #[display(fmt = "Parachain {:?} is not active", _0)] - InactiveParachain(polkadot_primitives::parachain::Id), - /// Block data is too big - #[display(fmt = "Block data is too big (maximum allowed size: {}, actual size: {})", size, max_size)] - BlockDataTooBig { size: u64, max_size: u64 }, - Join(tokio::task::JoinError) } impl std::error::Error for Error { @@ -89,9 +33,6 @@ impl std::error::Error for Error { match self { Error::Client(ref err) => Some(err), Error::Consensus(ref err) => Some(err), - Error::WasmValidation(ref err) => Some(err), - Error::ErasureCoding(ref err) => Some(err), - Error::Io(ref err) => Some(err), _ => None, } } diff --git a/validation/src/lib.rs b/validation/src/lib.rs index 667f66e275a9d738213d37ef9de68ff135542563..ff86cdd11d914467c795b1741b30325b301fa0c2 100644 --- a/validation/src/lib.rs +++ b/validation/src/lib.rs @@ -29,207 +29,10 @@ //! //! Groups themselves may be compromised by malicious authorities. -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; -use codec::Encode; -use polkadot_primitives::parachain::{ - Id as ParaId, Chain, DutyRoster, AbridgedCandidateReceipt, - CompactStatement as PrimitiveStatement, - PoVBlock, ErasureChunk, ValidatorSignature, ValidatorIndex, - ValidatorPair, ValidatorId, SigningContext, -}; -use primitives::Pair; - -use futures::prelude::*; - pub use self::block_production::ProposerFactory; -pub use self::collation::Collators; pub use self::error::Error; -pub use self::shared_table::{ - SharedTable, ParachainWork, PrimedParachainWork, Validated, Statement, SignedStatement, - GenericStatement, -}; -pub use self::validation_service::{ServiceHandle, ServiceBuilder}; - pub use parachain::wasm_executor::run_worker as run_validation_worker; -mod dynamic_inclusion; mod error; -mod shared_table; pub mod block_production; -pub mod collation; -pub mod pipeline; -pub mod validation_service; - -/// A handle to a statement table router. -/// -/// This is expected to be a lightweight, shared type like an `Arc`. -/// Once all instances are dropped, consensus networking for this router -/// should be cleaned up. -pub trait TableRouter: Clone { - /// Errors when fetching data from the network. 
- type Error: std::fmt::Debug; - /// Future that drives sending of the local collation to the network. - type SendLocalCollation: Future>; - /// Future that resolves when candidate data is fetched. - type FetchValidationProof: Future>; - - /// Call with local candidate data. This will sign, import, and broadcast a statement about the candidate. - fn local_collation( - &self, - receipt: AbridgedCandidateReceipt, - pov_block: PoVBlock, - chunks: (ValidatorIndex, &[ErasureChunk]), - ) -> Self::SendLocalCollation; - - /// Fetch validation proof for a specific candidate. - /// - /// This future must conclude once all `Clone`s of this `TableRouter` have - /// been cleaned up. - fn fetch_pov_block(&self, candidate: &AbridgedCandidateReceipt) -> Self::FetchValidationProof; -} - -/// A long-lived network which can create parachain statement and BFT message routing processes on demand. -pub trait Network { - /// The error type of asynchronously building the table router. - type Error: std::fmt::Debug; - - /// The table router type. This should handle importing of any statements, - /// routing statements to peers, and driving completion of any `StatementProducers`. - type TableRouter: TableRouter; - - /// The future used for asynchronously building the table router. - /// This should not fail. - type BuildTableRouter: Future>; - - /// Instantiate a table router using the given shared table. - /// Also pass through any outgoing messages to be broadcast to peers. - #[must_use] - fn build_table_router( - &self, - table: Arc, - authorities: &[ValidatorId], - ) -> Self::BuildTableRouter; -} - -/// The local duty of a validator. -#[derive(Debug)] -pub struct LocalDuty { - validation: Chain, - index: ValidatorIndex, -} - -/// Information about a specific group. -#[derive(Debug, Clone, Default)] -pub struct GroupInfo { - /// Authorities meant to check validity of candidates. - validity_guarantors: HashSet, - /// Number of votes needed for validity. - needed_validity: usize, -} - -/// Sign a table statement against a parent hash. -/// The actual message signed is the encoded statement concatenated with the -/// parent hash. -pub fn sign_table_statement( - statement: &Statement, - key: &ValidatorPair, - signing_context: &SigningContext, -) -> ValidatorSignature { - let mut encoded = PrimitiveStatement::from(statement).encode(); - encoded.extend(signing_context.encode()); - - key.sign(&encoded) -} - -/// Check signature on table statement. -pub fn check_statement( - statement: &Statement, - signature: &ValidatorSignature, - signer: ValidatorId, - signing_context: &SigningContext, -) -> bool { - use runtime_primitives::traits::AppVerify; - - let mut encoded = PrimitiveStatement::from(statement).encode(); - encoded.extend(signing_context.encode()); - - signature.verify(&encoded[..], &signer) -} - -/// Compute group info out of a duty roster and a local authority set. 
-pub fn make_group_info( - roster: DutyRoster, - authorities: &[ValidatorId], - local_id: Option, -) -> Result<(HashMap, Option), Error> { - if roster.validator_duty.len() != authorities.len() { - return Err(Error::InvalidDutyRosterLength { - expected: authorities.len(), - got: roster.validator_duty.len() - }); - } - - let mut local_validation = None; - let mut local_index = 0; - let mut map = HashMap::new(); - - let duty_iter = authorities.iter().zip(&roster.validator_duty); - for (i, (authority, v_duty)) in duty_iter.enumerate() { - if Some(authority) == local_id.as_ref() { - local_validation = Some(v_duty.clone()); - local_index = i; - } - - match *v_duty { - Chain::Relay => {}, // does nothing for now. - Chain::Parachain(ref id) => { - map.entry(id.clone()).or_insert_with(GroupInfo::default) - .validity_guarantors - .insert(authority.clone()); - } - } - } - - for live_group in map.values_mut() { - let validity_len = live_group.validity_guarantors.len(); - live_group.needed_validity = validity_len / 2 + validity_len % 2; - } - - - let local_duty = local_validation.map(|v| LocalDuty { - validation: v, - index: local_index as u32, - }); - - Ok((map, local_duty)) -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_keyring::Sr25519Keyring; - - #[test] - fn sign_and_check_statement() { - let statement: Statement = GenericStatement::Valid([1; 32].into()); - let parent_hash = [2; 32].into(); - let signing_context = SigningContext { - session_index: Default::default(), - parent_hash, - }; - - let sig = sign_table_statement(&statement, &Sr25519Keyring::Alice.pair().into(), &signing_context); - - let wrong_signing_context = SigningContext { - session_index: Default::default(), - parent_hash: [0xff; 32].into(), - }; - assert!(check_statement(&statement, &sig, Sr25519Keyring::Alice.public().into(), &signing_context)); - assert!(!check_statement(&statement, &sig, Sr25519Keyring::Alice.public().into(), &wrong_signing_context)); - assert!(!check_statement(&statement, &sig, Sr25519Keyring::Bob.public().into(), &signing_context)); - } -} diff --git a/validation/src/pipeline.rs b/validation/src/pipeline.rs deleted file mode 100644 index 52a724fca966e3213afa37dd6286462bd755725a..0000000000000000000000000000000000000000 --- a/validation/src/pipeline.rs +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! The pipeline of validation functions a parachain block must pass through before -//! it can be voted for. 
- -use std::sync::Arc; - -use codec::Encode; -use polkadot_erasure_coding as erasure; -use polkadot_primitives::parachain::{ - CollationInfo, PoVBlock, LocalValidationData, GlobalValidationSchedule, OmittedValidationData, - AvailableData, FeeSchedule, CandidateCommitments, ErasureChunk, ParachainHost, - Id as ParaId, AbridgedCandidateReceipt, ValidationCode, -}; -use polkadot_primitives::{Block, BlockId, Balance, Hash}; -use parachain::{ - wasm_executor::{self, ExecutionMode}, - primitives::{UpwardMessage, ValidationParams}, -}; -use runtime_primitives::traits::{BlakeTwo256, Hash as HashT}; -use sp_api::ProvideRuntimeApi; -use parking_lot::Mutex; -use crate::Error; - -pub use parachain::wasm_executor::ValidationPool; - -/// Does basic checks of a collation. Provide the encoded PoV-block. -pub fn basic_checks( - collation: &CollationInfo, - expected_relay_parent: &Hash, - max_block_data_size: Option, - encoded_pov: &[u8], -) -> Result<(), Error> { - if &collation.relay_parent != expected_relay_parent { - return Err(Error::DisallowedRelayParent(collation.relay_parent)); - } - - if let Some(max_size) = max_block_data_size { - if encoded_pov.len() as u64 > max_size { - return Err(Error::BlockDataTooBig { size: encoded_pov.len() as _, max_size }); - } - } - - let hash = BlakeTwo256::hash(encoded_pov); - if hash != collation.pov_block_hash { - return Err(Error::PoVHashMismatch(collation.pov_block_hash, hash)); - } - - if let Err(()) = collation.check_signature() { - return Err(Error::InvalidCollatorSignature); - } - - Ok(()) -} - -struct ExternalitiesInner { - upward: Vec, - fees_charged: Balance, - free_balance: Balance, - fee_schedule: FeeSchedule, -} - -impl wasm_executor::Externalities for ExternalitiesInner { - fn post_upward_message(&mut self, message: UpwardMessage) -> Result<(), String> { - self.apply_message_fee(message.data.len())?; - - self.upward.push(message); - - Ok(()) - } -} - -impl ExternalitiesInner { - fn new(free_balance: Balance, fee_schedule: FeeSchedule) -> Self { - Self { - free_balance, - fee_schedule, - fees_charged: 0, - upward: Vec::new(), - } - } - - fn apply_message_fee(&mut self, message_len: usize) -> Result<(), String> { - let fee = self.fee_schedule.compute_message_fee(message_len); - let new_fees_charged = self.fees_charged.saturating_add(fee); - if new_fees_charged > self.free_balance { - Err("could not cover fee.".into()) - } else { - self.fees_charged = new_fees_charged; - Ok(()) - } - } - - // Returns the noted outputs of execution so far - upward messages and balances. - fn outputs(self) -> (Vec, Balance) { - (self.upward, self.fees_charged) - } -} - -#[derive(Clone)] -struct Externalities(Arc>); - -impl Externalities { - fn new(free_balance: Balance, fee_schedule: FeeSchedule) -> Self { - Self(Arc::new(Mutex::new( - ExternalitiesInner::new(free_balance, fee_schedule) - ))) - } -} - -impl wasm_executor::Externalities for Externalities { - fn post_upward_message(&mut self, message: UpwardMessage) -> Result<(), String> { - self.0.lock().post_upward_message(message) - } -} - -/// Data from a fully-outputted validation of a parachain candidate. This contains -/// all outputs and commitments of the validation as well as all additional data to make available. -pub struct FullOutput { - /// Data about the candidate to keep available in the network. - pub available_data: AvailableData, - /// Commitments issued alongside the candidate to be placed on-chain. - pub commitments: CandidateCommitments, - /// All erasure-chunks associated with the available data. 
Each validator - /// should keep their chunk (by index). Other chunks do not need to be - /// kept available long-term, but should be distributed to other validators. - pub erasure_chunks: Vec, - /// The number of validators that were present at this validation. - pub n_validators: usize, -} - -impl FullOutput { - /// Check consistency of the outputs produced by the validation pipeline against - /// data contained within a candidate receipt. - pub fn check_consistency(&self, receipt: &AbridgedCandidateReceipt) -> Result<(), Error> { - if self.commitments != receipt.commitments { - Err(Error::CommitmentsMismatch) - } else { - Ok(()) - } - } -} - -/// The successful result of validating a collation. If the full commitments of the -/// validation are needed, call `full_output`. Otherwise, safely drop this value. -pub struct ValidatedCandidate<'a> { - pov_block: &'a PoVBlock, - global_validation: &'a GlobalValidationSchedule, - local_validation: &'a LocalValidationData, - upward_messages: Vec, - fees: Balance, -} - -impl<'a> ValidatedCandidate<'a> { - /// Fully-compute the commitments and outputs of the candidate. Provide the number - /// of validators. This computes the erasure-coding. - pub fn full_output(self, n_validators: usize) -> Result { - let ValidatedCandidate { - pov_block, - global_validation, - local_validation, - upward_messages, - fees, - } = self; - - let omitted_validation = OmittedValidationData { - global_validation: global_validation.clone(), - local_validation: local_validation.clone(), - }; - - let available_data = AvailableData { - pov_block: pov_block.clone(), - omitted_validation, - }; - - let erasure_chunks = erasure::obtain_chunks( - n_validators, - &available_data, - )?; - - let branches = erasure::branches(erasure_chunks.as_ref()); - let erasure_root = branches.root(); - - let chunks: Vec<_> = erasure_chunks - .iter() - .zip(branches.map(|(proof, _)| proof)) - .enumerate() - .map(|(index, (chunk, proof))| ErasureChunk { - // branches borrows the original chunks, but this clone could probably be dodged. - chunk: chunk.clone(), - index: index as u32, - proof, - }) - .collect(); - - let commitments = CandidateCommitments { - upward_messages, - fees, - erasure_root, - new_validation_code: None, - }; - - Ok(FullOutput { - available_data, - commitments, - erasure_chunks: chunks, - n_validators, - }) - } -} - -/// Does full checks of a collation, with provided PoV-block and contextual data. -pub fn validate<'a>( - validation_pool: Option<&'_ ValidationPool>, - collation: &'a CollationInfo, - pov_block: &'a PoVBlock, - local_validation: &'a LocalValidationData, - global_validation: &'a GlobalValidationSchedule, - validation_code: &ValidationCode, -) -> Result, Error> { - if collation.head_data.0.len() > global_validation.max_head_data_size as _ { - return Err(Error::HeadDataTooLarge( - collation.head_data.0.len(), - global_validation.max_head_data_size as _, - )); - } - - let params = ValidationParams { - parent_head: local_validation.parent_head.clone(), - block_data: pov_block.block_data.clone(), - max_code_size: global_validation.max_code_size, - max_head_data_size: global_validation.max_head_data_size, - relay_chain_height: global_validation.block_number, - code_upgrade_allowed: local_validation.code_upgrade_allowed, - }; - - // TODO: remove when ext does not do this. 
- let fee_schedule = FeeSchedule { - base: 0, - per_byte: 0, - }; - - let execution_mode = validation_pool - .map(ExecutionMode::Remote) - .unwrap_or(ExecutionMode::Local); - - let ext = Externalities::new(local_validation.balance, fee_schedule); - match wasm_executor::validate_candidate( - &validation_code.0, - params, - ext.clone(), - execution_mode, - ) { - Ok(result) => { - if result.head_data == collation.head_data { - let (upward_messages, fees) = Arc::try_unwrap(ext.0) - .map_err(|_| "") - .expect("Wasm executor drops passed externalities on completion; \ - call has concluded; qed") - .into_inner() - .outputs(); - - Ok(ValidatedCandidate { - pov_block, - global_validation, - local_validation, - upward_messages, - fees, - }) - } else { - Err(Error::HeadDataMismatch) - } - } - Err(e) => Err(e.into()), - } -} - -/// Extracts validation parameters from a Polkadot runtime API for a specific parachain. -pub fn validation_params

(api: &P, relay_parent: Hash, para_id: ParaId) - -> Result<(LocalValidationData, GlobalValidationSchedule, ValidationCode), Error> -where - P: ProvideRuntimeApi, - P::Api: ParachainHost, -{ - let api = api.runtime_api(); - let relay_parent = BlockId::hash(relay_parent); - - // fetch all necessary data from runtime. - let local_validation = api.local_validation_data(&relay_parent, para_id)? - .ok_or_else(|| Error::InactiveParachain(para_id))?; - - let global_validation = api.global_validation_schedule(&relay_parent)?; - let validation_code = api.parachain_code(&relay_parent, para_id)? - .ok_or_else(|| Error::InactiveParachain(para_id))?; - - Ok((local_validation, global_validation, validation_code)) -} - -/// Does full-pipeline validation of a collation with provided contextual parameters. -pub fn full_output_validation_with_api

( - validation_pool: Option<&ValidationPool>, - api: &P, - collation: &CollationInfo, - pov_block: &PoVBlock, - expected_relay_parent: &Hash, - max_block_data_size: Option, - n_validators: usize, -) -> Result where - P: ProvideRuntimeApi, - P::Api: ParachainHost, -{ - let para_id = collation.parachain_index; - let (local_validation, global_validation, validation_code) - = validation_params(&*api, collation.relay_parent, para_id)?; - - // put the parameters through the validation pipeline, producing - // erasure chunks. - let encoded_pov = pov_block.encode(); - basic_checks( - &collation, - &expected_relay_parent, - max_block_data_size, - &encoded_pov, - ) - .and_then(|()| { - let res = validate( - validation_pool, - &collation, - &pov_block, - &local_validation, - &global_validation, - &validation_code, - ); - - match res { - Err(ref err) => log::debug!( - target: "validation", - "Failed to validate PoVBlock for parachain ({}): {:?}", - para_id, - err, - ), - Ok(_) => log::debug!( - target: "validation", - "Successfully validated PoVBlock for parachain ({}).", - para_id, - ), - } - - res - }) - .and_then(|validated| validated.full_output(n_validators)) -} - -#[cfg(test)] -mod tests { - use super::*; - use parachain::wasm_executor::Externalities as ExternalitiesTrait; - use parachain::primitives::ParachainDispatchOrigin; - - #[test] - fn ext_checks_fees_and_updates_correctly() { - let mut ext = ExternalitiesInner { - upward: vec![ - UpwardMessage { data: vec![42], origin: ParachainDispatchOrigin::Parachain }, - ], - fees_charged: 0, - free_balance: 1_000_000, - fee_schedule: FeeSchedule { - base: 1000, - per_byte: 10, - }, - }; - - ext.apply_message_fee(100).unwrap(); - assert_eq!(ext.fees_charged, 2000); - - ext.post_upward_message(UpwardMessage { - origin: ParachainDispatchOrigin::Signed, - data: vec![0u8; 100], - }).unwrap(); - assert_eq!(ext.fees_charged, 4000); - - - ext.apply_message_fee((1_000_000 - 4000 - 1000) / 10).unwrap(); - assert_eq!(ext.fees_charged, 1_000_000); - - // cannot pay fee. - assert!(ext.apply_message_fee(1).is_err()); - } -} diff --git a/validation/src/shared_table/includable.rs b/validation/src/shared_table/includable.rs deleted file mode 100644 index 1396f66c596724c8ba50e045816a7c55242fc2cc..0000000000000000000000000000000000000000 --- a/validation/src/shared_table/includable.rs +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Implements a future which resolves when all of the candidates referenced are includable. 
- -use std::collections::HashMap; -use futures::channel::oneshot; -use polkadot_primitives::Hash; - -/// Track includability of a set of candidates, -pub(super) fn track>(candidates: I) - -> (IncludabilitySender, oneshot::Receiver<()>) { - - let (tx, rx) = oneshot::channel(); - let tracking: HashMap<_, _> = candidates.into_iter().collect(); - let includable_count = tracking.values().filter(|x| **x).count(); - - let mut sender = IncludabilitySender { - tracking, - includable_count, - sender: Some(tx), - }; - - sender.try_complete(); - - (sender, rx) -} - -/// The sending end of the includability sender. -pub(super) struct IncludabilitySender { - tracking: HashMap, - includable_count: usize, - sender: Option>, -} - -impl IncludabilitySender { - /// update the inner candidate. wakes up the task as necessary. - /// returns `Err(Canceled)` if the other end has hung up. - /// - /// returns `true` when this is completed and should be destroyed. - pub fn update_candidate(&mut self, candidate: Hash, includable: bool) -> bool { - use std::collections::hash_map::Entry; - - match self.tracking.entry(candidate) { - Entry::Vacant(_) => {} - Entry::Occupied(mut entry) => { - let old = entry.insert(includable); - if !old && includable { - self.includable_count += 1; - } else if old && !includable { - self.includable_count -= 1; - } - } - } - - self.try_complete() - } - - /// whether the sender is completed. - pub fn is_complete(&self) -> bool { - self.sender.is_none() - } - - fn try_complete(&mut self) -> bool { - if self.includable_count == self.tracking.len() { - if let Some(sender) = self.sender.take() { - let _ = sender.send(()); - } - - true - } else { - false - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use futures::executor::block_on; - - #[test] - fn it_works() { - let hash1 = [1; 32].into(); - let hash2 = [2; 32].into(); - let hash3 = [3; 32].into(); - - let (mut sender, recv) = track([ - (hash1, true), - (hash2, true), - (hash2, false), // overwrite should favor latter. - (hash3, true), - ].iter().cloned()); - - assert!(!sender.is_complete()); - - // true -> false transition is possible and should be handled. - sender.update_candidate(hash1, false); - assert!(!sender.is_complete()); - - sender.update_candidate(hash2, true); - assert!(!sender.is_complete()); - - sender.update_candidate(hash1, true); - assert!(sender.is_complete()); - - block_on(recv).unwrap(); - } -} diff --git a/validation/src/shared_table/mod.rs b/validation/src/shared_table/mod.rs deleted file mode 100644 index fd6702a7e640c30d7028b25247963e38538ac5c9..0000000000000000000000000000000000000000 --- a/validation/src/shared_table/mod.rs +++ /dev/null @@ -1,1026 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Parachain statement table meant to be shared with a message router -//! and a consensus proposer. 
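As an editorial aside (not part of the deleted file): the `validator_indices` bitfield that `proposed_set` builds later in this module from sorted validity votes can be sketched in isolation as follows, assuming the same bitvec-0.17-era API the module uses; `indices_bitfield` is a hypothetical helper name:

use bitvec::{bitvec, order::Lsb0, vec::BitVec};

fn indices_bitfield(mut votes: Vec<usize>) -> BitVec<Lsb0, u8> {
	votes.sort();
	// Size the field to the highest voting index + 1, initially all zeroes.
	let mut bits = bitvec![Lsb0, u8; 0; votes.last().map(|i| i + 1).unwrap_or_default()];
	for i in votes {
		bits.set(i, true);
	}
	bits
}

For example, `indices_bitfield(vec![0, 2])` produces a three-bit field with bits 0 and 2 set, matching how attestations are laid out against validator indices.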
- -use std::collections::hash_map::{HashMap, Entry}; -use std::sync::Arc; - -use availability_store::{Store as AvailabilityStore}; -use table::{self, Table, Context as TableContextTrait}; -use polkadot_primitives::{Block, Hash}; -use polkadot_primitives::parachain::{ - Id as ParaId, AbridgedCandidateReceipt, ValidatorPair, ValidatorId, - AttestedCandidate, ParachainHost, PoVBlock, ValidatorIndex, SigningContext, -}; - -use parking_lot::Mutex; -use futures::prelude::*; -use futures::channel::oneshot; -use log::{warn, debug}; -use bitvec::bitvec; - -use super::GroupInfo; -use self::includable::IncludabilitySender; -use primitives::Pair; -use sp_api::ProvideRuntimeApi; - -use crate::pipeline::{FullOutput, ValidationPool}; -use crate::Error; - -mod includable; - -pub use table::{SignedStatement, Statement}; -pub use table::generic::Statement as GenericStatement; - -struct TableContext { - signing_context: SigningContext, - key: Option>, - groups: HashMap, - validators: Vec, -} - -impl table::Context for TableContext { - fn is_member_of(&self, authority: ValidatorIndex, group: &ParaId) -> bool { - let key = match self.validators.get(authority as usize) { - Some(val) => val, - None => return false, - }; - - self.groups.get(group).map_or(false, |g| g.validity_guarantors.get(&key).is_some()) - } - - fn requisite_votes(&self, group: &ParaId) -> usize { - self.groups.get(group).map_or(usize::max_value(), |g| g.needed_validity) - } -} - -impl TableContext { - fn local_id(&self) -> Option { - self.key.as_ref().map(|k| k.public()) - } - - fn local_index(&self) -> Option { - self.local_id().and_then(|id| - self.validators - .iter() - .enumerate() - .find(|(_, k)| k == &&id) - .map(|(i, _)| i as ValidatorIndex) - ) - } - - fn sign_statement(&self, statement: table::Statement) -> Option { - self.local_index().and_then(move |sender| - self.key.as_ref() - .map(|key| crate::sign_table_statement( - &statement, - key, - &self.signing_context, - ).into() - ) - .map(move |signature| table::SignedStatement { statement, signature, sender }) - ) - } -} - -pub(crate) enum Validation { - Valid(PoVBlock), - Invalid(PoVBlock), // should take proof. -} - -enum ValidationWork { - Done(Validation), - InProgress, - Error(String), -} - -#[cfg(test)] -impl ValidationWork { - fn is_in_progress(&self) -> bool { - match *self { - ValidationWork::InProgress => true, - _ => false, - } - } - - fn is_done(&self) -> bool { - match *self { - ValidationWork::Done(_) => true, - _ => false, - } - } -} - -// A shared table object. -struct SharedTableInner { - table: Table, - trackers: Vec, - availability_store: AvailabilityStore, - validated: HashMap, - validation_pool: Option, -} - -impl SharedTableInner { - // Import a single statement. Provide a handle to a table router and a function - // used to determine if a referenced candidate is valid. - // - // the statement producer, if any, will produce only statements concerning the same candidate - // as the one just imported - fn import_remote_statement( - &mut self, - context: &TableContext, - fetch_pov_block: impl Fn(&AbridgedCandidateReceipt) -> Fetch, - statement: table::SignedStatement, - max_block_data_size: Option, - ) -> Option> { - let summary = self.table.import_statement(context, statement)?; - self.update_trackers(&summary.candidate, context); - - let local_index = context.local_index()?; - let para_member = context.is_member_of(local_index, &summary.group_id); - let digest = &summary.candidate; - - // TODO: consider a strategy based on the number of candidate votes as well. 
- // https://github.com/paritytech/polkadot/issues/218 - let do_validation = para_member && match self.validated.entry(digest.clone()) { - Entry::Occupied(_) => false, - Entry::Vacant(entry) => { - entry.insert(ValidationWork::InProgress); - true - } - }; - - let work = if do_validation { - match self.table.get_candidate(&digest) { - None => { - let message = format!( - "Table inconsistency detected. Summary returned for candidate {} \ - but receipt not present in table.", - digest, - ); - - warn!(target: "validation", "{}", message); - self.validated.insert(digest.clone(), ValidationWork::Error(message)); - None - } - Some(candidate) => { - let fetch = fetch_pov_block(candidate); - - Some(Work { - candidate_receipt: candidate.clone(), - fetch, - }) - } - } - } else { - None - }; - - work.map(|work| ParachainWork { - validation_pool: self.validation_pool.clone(), - availability_store: self.availability_store.clone(), - relay_parent: context.signing_context.parent_hash.clone(), - work, - max_block_data_size, - n_validators: context.validators.len(), - }) - } - - fn update_trackers(&mut self, candidate: &Hash, context: &TableContext) { - let includable = self.table.candidate_includable(candidate, context); - for i in (0..self.trackers.len()).rev() { - if self.trackers[i].update_candidate(candidate.clone(), includable) { - self.trackers.swap_remove(i); - } - } - } -} - -/// Produced after validating a candidate. -pub struct Validated { - /// A statement about the validity of the candidate. - statement: table::Statement, - /// The result of validation. - result: Validation, -} - -impl Validated { - /// Note that we've validated a candidate with given hash and it is bad. - pub fn known_bad(hash: Hash, collation: PoVBlock) -> Self { - Validated { - statement: GenericStatement::Invalid(hash), - result: Validation::Invalid(collation), - } - } - - /// Note that we've validated a candidate with given hash and it is good. - /// outgoing message required. - pub fn known_good(hash: Hash, collation: PoVBlock) -> Self { - Validated { - statement: GenericStatement::Valid(hash), - result: Validation::Valid(collation), - } - } - - /// Note that we've collated a candidate. - /// outgoing message required. - pub fn collated_local( - receipt: AbridgedCandidateReceipt, - collation: PoVBlock, - ) -> Self { - Validated { - statement: GenericStatement::Candidate(receipt), - result: Validation::Valid(collation), - } - } - - /// Get a reference to the proof-of-validation block. - pub fn pov_block(&self) -> &PoVBlock { - match self.result { - Validation::Valid(ref b) | Validation::Invalid(ref b) => b, - } - } -} - -/// Future that performs parachain validation work. -pub struct ParachainWork { - validation_pool: Option, - work: Work, - relay_parent: Hash, - availability_store: AvailabilityStore, - max_block_data_size: Option, - n_validators: usize, -} - -impl ParachainWork { - /// Prime the parachain work with an API reference for extracting - /// chain information. - pub fn prime>(self, api: Arc
<P>
) - -> PrimedParachainWork< - Fetch, - impl Send + FnMut(&PoVBlock, &AbridgedCandidateReceipt) - -> Result + Unpin, - > - where - P: Send + Sync + 'static, - P::Api: ParachainHost, - { - let max_block_data_size = self.max_block_data_size; - let n_validators = self.n_validators; - let expected_relay_parent = self.relay_parent; - - let pool = self.validation_pool.clone(); - let validate = move |pov_block: &PoVBlock, candidate: &AbridgedCandidateReceipt| { - let collation_info = candidate.to_collation_info(); - let full_output = crate::pipeline::full_output_validation_with_api( - pool.as_ref(), - &*api, - &collation_info, - pov_block, - &expected_relay_parent, - max_block_data_size, - n_validators, - )?; - - full_output.check_consistency(candidate)?; - Ok(full_output) - }; - - PrimedParachainWork { inner: self, validate } - } - - /// Prime the parachain work with a custom validation function. - #[cfg(test)] - pub fn prime_with(self, validate: F) -> PrimedParachainWork - where F: FnMut(&PoVBlock, &AbridgedCandidateReceipt) - -> Result - { - PrimedParachainWork { inner: self, validate } - } -} - -struct Work { - candidate_receipt: AbridgedCandidateReceipt, - fetch: Fetch -} - -/// Primed statement producer. -pub struct PrimedParachainWork { - inner: ParachainWork, - validate: F, -} - -impl PrimedParachainWork - where - Fetch: Future> + Unpin, - F: FnMut(&PoVBlock, &AbridgedCandidateReceipt) -> Result + Unpin, - Err: From, -{ - pub async fn validate(self) -> Result { - let candidate = self.inner.work.candidate_receipt; - let pov_block = self.inner.work.fetch.await?; - - let mut validate = self.validate; - let relay_parent = self.inner.relay_parent; - - // create a wrapper around the custom validation function that does - // some more general pre and post checks. - let mut validate = move |pov_block: &_, candidate: &AbridgedCandidateReceipt| { - if candidate.relay_parent != relay_parent { - return Err(Error::DisallowedRelayParent(candidate.relay_parent)); - } - - let full_output = validate(pov_block, candidate)?; - - if full_output.commitments != candidate.commitments { - return Err(Error::CommitmentsMismatch); - } - - Ok(full_output) - }; - - let validation_res = (validate)(&pov_block, &candidate); - let candidate_hash = candidate.hash(); - - debug!(target: "validation", "Making validity statement about candidate {}: is_good? {:?}", - candidate_hash, validation_res.is_ok()); - - match validation_res { - Err(err) => { - debug!(target: "validation", "candidate is invalid: {}", err); - Ok(Validated { - statement: GenericStatement::Invalid(candidate_hash), - result: Validation::Invalid(pov_block), - }) - } - Ok(full_output) => { - // make data and all erasure chunks available. The chunk - // must be fully available before we add the chunks. - self.inner.availability_store.make_available( - candidate_hash, - full_output.available_data, - ).await?; - self.inner.availability_store.add_erasure_chunks( - candidate, - full_output.n_validators as _, - full_output.erasure_chunks, - ).await?; - - Ok(Validated { - statement: GenericStatement::Valid(candidate_hash), - result: Validation::Valid(pov_block), - }) - } - } - } -} - -/// A shared table object. -pub struct SharedTable { - context: Arc, - inner: Arc>, - max_block_data_size: Option, -} - -impl Clone for SharedTable { - fn clone(&self) -> Self { - Self { - context: self.context.clone(), - inner: self.inner.clone(), - max_block_data_size: self.max_block_data_size, - } - } -} - -impl SharedTable { - /// Create a new shared table. 
- /// - /// Provide the key to sign with, and the parent hash of the relay chain - /// block being built. - pub fn new( - validators: Vec, - groups: HashMap, - key: Option>, - signing_context: SigningContext, - availability_store: AvailabilityStore, - max_block_data_size: Option, - validation_pool: Option, - ) -> Self { - SharedTable { - context: Arc::new(TableContext { groups, key, signing_context, validators: validators.clone(), }), - max_block_data_size, - inner: Arc::new(Mutex::new(SharedTableInner { - table: Table::default(), - validated: HashMap::new(), - trackers: Vec::new(), - availability_store, - validation_pool, - })) - } - } - - /// Get the parent hash this table should hold statements localized to. - pub fn signing_context(&self) -> &SigningContext { - &self.context.signing_context - } - - /// Get the local validator session key. - pub fn session_key(&self) -> Option { - self.context.local_id() - } - - /// Get group info. - pub fn group_info(&self) -> &HashMap { - &self.context.groups - } - - /// Import a single statement with remote source, whose signature has already been checked. - /// - /// Validity and invalidity statements are only valid if the corresponding - /// candidate has already been imported. - /// - /// The ParachainWork, if any, will produce only statements concerning the same candidate - /// as the one just imported - pub fn import_remote_statement( - &self, - fetch_pov_block: impl Fn(&AbridgedCandidateReceipt) -> Fetch, - statement: table::SignedStatement, - ) -> Option> { - self.inner.lock().import_remote_statement( - &*self.context, - fetch_pov_block, - statement, - self.max_block_data_size, - ) - } - - /// Import many statements at once. - /// - /// Provide an iterator yielding remote, pre-checked statements. - /// Validity and invalidity statements are only valid if the corresponding - /// candidate has already been imported. - /// - /// The ParachainWork, if any, will produce only statements concerning the same candidate - /// as the one just imported - pub fn import_remote_statements( - &self, - fetch_pov_block: impl Fn(&AbridgedCandidateReceipt) -> Fetch, - iterable: I, - ) -> U - where - I: IntoIterator, - U: ::std::iter::FromIterator>>, - { - let mut inner = self.inner.lock(); - - iterable.into_iter().map(move |statement| { - inner.import_remote_statement( - &*self.context, - &fetch_pov_block, - statement, - self.max_block_data_size, - ) - }).collect() - } - - /// Sign and import the result of candidate validation. Returns `None` if the table - /// was instantiated without a local key. Otherwise, returns a copy of the signed - /// statement. - pub fn import_validated(&self, validated: Validated) - -> Option - { - let digest = match validated.statement { - GenericStatement::Candidate(ref c) => c.hash(), - GenericStatement::Valid(h) | GenericStatement::Invalid(h) => h, - }; - - let signed_statement = self.context.sign_statement(validated.statement); - - if let Some(ref signed) = signed_statement { - let mut inner = self.inner.lock(); - inner.table.import_statement(&*self.context, signed.clone()); - inner.validated.insert(digest, ValidationWork::Done(validated.result)); - } - - signed_statement - } - - /// Execute a closure using a specific candidate. - /// - /// Deadlocks if called recursively. - pub fn with_candidate(&self, digest: &Hash, f: F) -> U - where F: FnOnce(Option<&AbridgedCandidateReceipt>) -> U - { - let inner = self.inner.lock(); - f(inner.table.get_candidate(digest)) - } - - /// Get a set of candidates that can be proposed. 
- pub fn proposed_set(&self) -> Vec { - use table::generic::{ValidityAttestation as GAttestation}; - use polkadot_primitives::parachain::ValidityAttestation; - - // we transform the types of the attestations gathered from the table - // into the type expected by the runtime. This may do signature - // aggregation in the future. - let table_attestations = self.inner.lock().table.proposed_candidates(&*self.context); - table_attestations.into_iter() - .map(|attested| { - let mut validity_votes: Vec<_> = attested.validity_votes.into_iter().map(|(id, a)| { - (id as usize, match a { - GAttestation::Implicit(s) => ValidityAttestation::Implicit(s), - GAttestation::Explicit(s) => ValidityAttestation::Explicit(s), - }) - }).collect(); - validity_votes.sort_by(|(id1, _), (id2, _)| id1.cmp(id2)); - - let mut validator_indices = bitvec![ - bitvec::order::Lsb0, u8; - 0; - validity_votes.last().map(|(i, _)| i + 1).unwrap_or_default() - ]; - for (id, _) in &validity_votes { - validator_indices.set(*id, true); - } - - AttestedCandidate { - candidate: attested.candidate, - validity_votes: validity_votes.into_iter().map(|(_, a)| a).collect(), - validator_indices, - } - }).collect() - } - - /// Get the number of total parachains. - pub fn num_parachains(&self) -> usize { - self.group_info().len() - } - - /// Get the number of parachains whose candidates may be included. - pub fn includable_count(&self) -> usize { - self.inner.lock().table.includable_count() - } - - /// Get all witnessed misbehavior. - pub fn get_misbehavior(&self) -> HashMap { - self.inner.lock().table.get_misbehavior().clone() - } - - /// Track includability of a given set of candidate hashes. - pub fn track_includability(&self, iterable: I) -> oneshot::Receiver<()> - where I: IntoIterator - { - let mut inner = self.inner.lock(); - - let (tx, rx) = includable::track(iterable.into_iter().map(|x| { - let includable = inner.table.candidate_includable(&x, &*self.context); - (x, includable) - })); - - if !tx.is_complete() { - inner.trackers.push(tx); - } - - rx - } - - /// Returns id of the validator corresponding to the given index. 
- pub fn index_to_id(&self, index: ValidatorIndex) -> Option { - self.context.validators.get(index as usize).cloned() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_keyring::Sr25519Keyring; - use polkadot_primitives::parachain::{ - BlockData, ErasureChunk, AvailableData, - }; - use polkadot_erasure_coding::{self as erasure}; - use availability_store::ErasureNetworking; - use futures::future; - use futures::executor::block_on; - use std::pin::Pin; - - fn pov_block_with_data(data: Vec) -> PoVBlock { - PoVBlock { - block_data: BlockData(data), - } - } - - #[derive(Clone)] - struct DummyErasureNetworking; - - impl ErasureNetworking for DummyErasureNetworking { - type Error = String; - - fn fetch_erasure_chunk( - &self, - _candidate_hash: &Hash, - _index: u32, - ) -> Pin> + Send>> { - future::pending().boxed() - } - - fn distribute_erasure_chunk( - &self, - _candidate_hash: Hash, - _chunk: ErasureChunk, - ) {} - } - - fn lazy_fetch_pov() - -> Box< - dyn Fn(&AbridgedCandidateReceipt) -> future::Ready< - Result - > - > - { - Box::new(|_| future::ok(pov_block_with_data(vec![1, 2, 3, 4, 5]))) - } - - #[test] - fn statement_triggers_fetch_and_evaluate() { - let mut groups = HashMap::new(); - - let para_id = ParaId::from(1); - let parent_hash = Hash::default(); - let signing_context = SigningContext { - session_index: Default::default(), - parent_hash: parent_hash.clone(), - }; - - let local_key = Sr25519Keyring::Alice.pair(); - let local_id: ValidatorId = local_key.public().into(); - let local_key: Arc = Arc::new(local_key.into()); - - let validity_other_key = Sr25519Keyring::Bob.pair(); - let validity_other: ValidatorId = validity_other_key.public().into(); - let validity_other_index = 1; - - groups.insert(para_id, GroupInfo { - validity_guarantors: [local_id.clone(), validity_other.clone()].iter().cloned().collect(), - needed_validity: 2, - }); - - let shared_table = SharedTable::new( - [local_id, validity_other].to_vec(), - groups, - Some(local_key.clone()), - signing_context.clone(), - AvailabilityStore::new_in_memory(DummyErasureNetworking), - None, - None, - ); - - let mut candidate = AbridgedCandidateReceipt::default(); - candidate.parachain_index = para_id; - candidate.relay_parent = parent_hash; - - let candidate_statement = GenericStatement::Candidate(candidate); - - let signature = crate::sign_table_statement( - &candidate_statement, - &validity_other_key.into(), - &signing_context, - ); - let signed_statement = ::table::generic::SignedStatement { - statement: candidate_statement, - signature: signature.into(), - sender: validity_other_index, - }; - - shared_table.import_remote_statement( - lazy_fetch_pov(), - signed_statement, - ).expect("candidate and local validity group are same"); - } - - #[test] - fn statement_triggers_fetch_and_validity() { - let mut groups = HashMap::new(); - - let para_id = ParaId::from(1); - let parent_hash = Hash::default(); - let signing_context = SigningContext { - session_index: Default::default(), - parent_hash: parent_hash.clone(), - }; - - let local_key = Sr25519Keyring::Alice.pair(); - let local_id: ValidatorId = local_key.public().into(); - let local_key: Arc = Arc::new(local_key.into()); - - let validity_other_key = Sr25519Keyring::Bob.pair(); - let validity_other: ValidatorId = validity_other_key.public().into(); - let validity_other_index = 1; - - groups.insert(para_id, GroupInfo { - validity_guarantors: [local_id.clone(), validity_other.clone()].iter().cloned().collect(), - needed_validity: 1, - }); - - let shared_table = 
SharedTable::new( - [local_id, validity_other].to_vec(), - groups, - Some(local_key.clone()), - signing_context.clone(), - AvailabilityStore::new_in_memory(DummyErasureNetworking), - None, - None, - ); - - let mut candidate = AbridgedCandidateReceipt::default(); - candidate.parachain_index = para_id; - candidate.relay_parent = parent_hash; - - let candidate_statement = GenericStatement::Candidate(candidate); - - let signature = crate::sign_table_statement( - &candidate_statement, - &validity_other_key.into(), - &signing_context, - ); - let signed_statement = ::table::generic::SignedStatement { - statement: candidate_statement, - signature: signature.into(), - sender: validity_other_index, - }; - - shared_table.import_remote_statement( - lazy_fetch_pov(), - signed_statement, - ).expect("should produce work"); - } - - #[test] - fn evaluate_makes_block_data_available() { - let store = AvailabilityStore::new_in_memory(DummyErasureNetworking); - let relay_parent = [0; 32].into(); - let para_id = 5.into(); - let pov_block = pov_block_with_data(vec![1, 2, 3]); - let pov_block_hash = pov_block.hash(); - let local_index = 0; - let n_validators = 2; - - let mut candidate = AbridgedCandidateReceipt::default(); - candidate.parachain_index = para_id; - candidate.relay_parent = relay_parent; - candidate.pov_block_hash = pov_block_hash; - - let candidate_hash = candidate.hash(); - - store.note_validator_index_and_n_validators( - &relay_parent, - local_index as u32, - n_validators as u32, - ).unwrap(); - - let producer: ParachainWork>> = ParachainWork { - work: Work { - candidate_receipt: candidate, - fetch: future::ok(pov_block.clone()), - }, - relay_parent, - availability_store: store.clone(), - max_block_data_size: None, - n_validators, - validation_pool: None, - }; - - for i in 0..n_validators { - assert!(store.get_erasure_chunk(&candidate_hash, i).is_none()); - } - - let validated = block_on(producer.prime_with(|_, _| Ok( - FullOutput { - available_data: AvailableData { - pov_block: pov_block.clone(), - omitted_validation: Default::default(), - }, - erasure_chunks: (0..n_validators).map(|i| ErasureChunk { - chunk: vec![1, 2, 3], - index: i as u32, - proof: vec![], - }).collect(), - commitments: Default::default(), - n_validators, - } - )).validate()).unwrap(); - - assert_eq!(validated.pov_block(), &pov_block); - assert_eq!(validated.statement, GenericStatement::Valid(candidate_hash)); - - for i in 0..n_validators { - assert!(store.get_erasure_chunk(&candidate_hash, i).is_some()); - } - } - - #[test] - fn full_availability() { - let store = AvailabilityStore::new_in_memory(DummyErasureNetworking); - let relay_parent = [0; 32].into(); - let para_id = 5.into(); - let pov_block = pov_block_with_data(vec![1, 2, 3]); - let pov_block_hash = pov_block.hash(); - let local_index = 0; - let n_validators = 2; - - let mut candidate = AbridgedCandidateReceipt::default(); - candidate.parachain_index = para_id; - candidate.pov_block_hash = pov_block_hash; - candidate.relay_parent = relay_parent; - - let candidate_hash = candidate.hash(); - - let available_data = AvailableData { - pov_block: pov_block.clone(), - omitted_validation: Default::default(), - }; - - let chunks = erasure::obtain_chunks(n_validators, &available_data).unwrap(); - - store.note_validator_index_and_n_validators( - &relay_parent, - local_index as u32, - n_validators as u32, - ).unwrap(); - - let producer = ParachainWork { - work: Work { - candidate_receipt: candidate, - fetch: future::ok::<_, ::std::io::Error>(pov_block.clone()), - }, - 
relay_parent, - availability_store: store.clone(), - max_block_data_size: None, - n_validators, - validation_pool: None, - }; - - let validated = block_on(producer.prime_with(|_, _| Ok( - FullOutput { - available_data: AvailableData { - pov_block: pov_block.clone(), - omitted_validation: Default::default(), - }, - erasure_chunks: (0..n_validators).map(|i| ErasureChunk { - chunk: chunks[i].clone(), - index: i as u32, - proof: vec![], - }).collect(), - commitments: Default::default(), - n_validators, - } - )).validate()).unwrap(); - - assert_eq!(validated.pov_block(), &pov_block); - - assert_eq!(store.execution_data(&candidate_hash).unwrap().pov_block, pov_block); - } - - #[test] - fn does_not_dispatch_work_after_starting_validation() { - let mut groups = HashMap::new(); - - let para_id = ParaId::from(1); - let parent_hash = Hash::default(); - let signing_context = SigningContext { - session_index: Default::default(), - parent_hash: parent_hash.clone(), - }; - - let local_key = Sr25519Keyring::Alice.pair(); - let local_id: ValidatorId = local_key.public().into(); - let local_key: Arc = Arc::new(local_key.into()); - - let validity_other_key = Sr25519Keyring::Bob.pair(); - let validity_other: ValidatorId = validity_other_key.public().into(); - let validity_other_index = 1; - - groups.insert(para_id, GroupInfo { - validity_guarantors: [local_id.clone(), validity_other.clone()].iter().cloned().collect(), - needed_validity: 1, - }); - - let shared_table = SharedTable::new( - [local_id, validity_other].to_vec(), - groups, - Some(local_key.clone()), - signing_context.clone(), - AvailabilityStore::new_in_memory(DummyErasureNetworking), - None, - None, - ); - - let mut candidate = AbridgedCandidateReceipt::default(); - candidate.parachain_index = para_id; - candidate.relay_parent = parent_hash; - - let candidate_hash = candidate.hash(); - let candidate_statement = GenericStatement::Candidate(candidate); - - let signature = crate::sign_table_statement( - &candidate_statement, - &validity_other_key.into(), - &signing_context, - ); - let signed_statement = ::table::generic::SignedStatement { - statement: candidate_statement, - signature: signature.into(), - sender: validity_other_index, - }; - - let _a = shared_table.import_remote_statement( - lazy_fetch_pov(), - signed_statement.clone(), - ).expect("should produce work"); - - assert!(shared_table.inner.lock().validated.get(&candidate_hash) - .expect("validation has started").is_in_progress()); - - let b = shared_table.import_remote_statement( - lazy_fetch_pov(), - signed_statement.clone(), - ); - - assert!(b.is_none(), "cannot work when validation has started"); - } - - #[test] - fn does_not_dispatch_after_local_candidate() { - let mut groups = HashMap::new(); - - let para_id = ParaId::from(1); - let pov_block = pov_block_with_data(vec![1, 2, 3]); - let parent_hash = Hash::default(); - let signing_context = SigningContext { - session_index: Default::default(), - parent_hash: parent_hash.clone(), - }; - - let local_key = Sr25519Keyring::Alice.pair(); - let local_id: ValidatorId = local_key.public().into(); - let local_key: Arc = Arc::new(local_key.into()); - - let validity_other_key = Sr25519Keyring::Bob.pair(); - let validity_other: ValidatorId = validity_other_key.public().into(); - - groups.insert(para_id, GroupInfo { - validity_guarantors: [local_id.clone(), validity_other.clone()].iter().cloned().collect(), - needed_validity: 1, - }); - - let shared_table = SharedTable::new( - [local_id, validity_other].to_vec(), - groups, - 
Some(local_key.clone()), - signing_context.clone(), - AvailabilityStore::new_in_memory(DummyErasureNetworking), - None, - None, - ); - - let mut candidate = AbridgedCandidateReceipt::default(); - candidate.parachain_index = para_id; - candidate.relay_parent = parent_hash; - - let candidate_hash = candidate.hash(); - let signed_statement = shared_table.import_validated(Validated::collated_local( - candidate, - pov_block, - )).unwrap(); - - assert!(shared_table.inner.lock().validated.get(&candidate_hash) - .expect("validation has started").is_done()); - - let a = shared_table.import_remote_statement( - lazy_fetch_pov(), - signed_statement, - ); - - assert!(a.is_none()); - } -} diff --git a/validation/src/validation_service/mod.rs b/validation/src/validation_service/mod.rs deleted file mode 100644 index a19e2f0e3bbff1a933c320cd644ac7007170fee3..0000000000000000000000000000000000000000 --- a/validation/src/validation_service/mod.rs +++ /dev/null @@ -1,787 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! The validation service is a long-running future that creates and manages parachain attestation -//! instances. -//! -//! As soon as we import a new chain head, we start a parachain attestation session on top of it. -//! The block authorship service may want access to the attestation session, and for that reason -//! we expose a `ServiceHandle` which can be used to request a copy of it. -//! -//! In fact, the import notification and request from the block production pipeline may race to be -//! the first one to create the instant, but the import notification will usually win. -//! -//! These attestation sessions are kept live until they are periodically garbage-collected. - -use std::{time::{Duration, Instant}, sync::Arc, pin::Pin}; -use std::collections::HashMap; - -use crate::pipeline::FullOutput; -use sc_client_api::{BlockchainEvents, BlockBackend}; -use consensus::SelectChain; -use futures::{prelude::*, task::{Spawn, SpawnExt}}; -use polkadot_primitives::{Block, Hash, BlockId}; -use polkadot_primitives::parachain::{ - Chain, ParachainHost, Id as ParaId, ValidatorIndex, ValidatorId, ValidatorPair, - CollationInfo, SigningContext, -}; -use keystore::KeyStorePtr; -use sp_api::{ProvideRuntimeApi, ApiExt}; -use runtime_primitives::traits::HashFor; -use availability_store::Store as AvailabilityStore; - -use log::{warn, error, info, debug, trace}; - -use super::{Network, Collators, SharedTable, TableRouter}; -use crate::Error; -use crate::pipeline::ValidationPool; - -/// A handle to spawn background tasks onto. -pub type TaskExecutor = Arc; - -// Remote processes may request for a validation instance to be cloned or instantiated. -// They send a oneshot channel. 
-type ValidationInstanceRequest = ( - Hash, - futures::channel::oneshot::Sender>, -); - -/// A handle to a single instance of parachain validation, which is pinned to -/// a specific relay-chain block. This is the instance that should be used when -/// constructing any -#[derive(Clone)] -pub(crate) struct ValidationInstanceHandle { - table: Arc, - started: Instant, -} - -impl ValidationInstanceHandle { - /// Access the underlying table of attestations on parachain candidates. - pub(crate) fn table(&self) -> &Arc { - &self.table - } - - /// The moment we started this validation instance. - pub(crate) fn started(&self) -> Instant { - self.started.clone() - } -} - -/// A handle to the service. This can be used to create a block-production environment. -#[derive(Clone)] -pub struct ServiceHandle { - sender: futures::channel::mpsc::Sender, -} - -impl ServiceHandle { - /// Requests instantiation or cloning of a validation instance from the service. - /// - /// This can fail if the service task has shut down for some reason. - pub(crate) async fn get_validation_instance(self, relay_parent: Hash) - -> Result - { - let mut sender = self.sender; - let instance_rx = loop { - let (instance_tx, instance_rx) = futures::channel::oneshot::channel(); - match sender.send((relay_parent, instance_tx)).await { - Ok(()) => break instance_rx, - Err(e) => if !e.is_full() { - // Sink::send should be doing `poll_ready` before start-send, - // so this should only happen when there is a race. - return Err(Error::ValidationServiceDown) - }, - } - }; - - instance_rx.map_err(|_| Error::ValidationServiceDown).await.and_then(|x| x) - } -} - -fn interval(duration: Duration) -> impl Stream + Send + Unpin { - stream::unfold((), move |_| { - futures_timer::Delay::new(duration).map(|_| Some(((), ()))) - }).map(drop) -} - -/// A builder for the validation service. -pub struct ServiceBuilder { - /// The underlying blockchain client. - pub client: Arc
<P>
, - /// A handle to the network object used to communicate. - pub network: N, - /// A handle to the collator pool we are using. - pub collators: C, - /// A handle to a background executor. - pub spawner: SP, - /// A handle to the availability store. - pub availability_store: AvailabilityStore, - /// A chain selector for determining active leaves in the block-DAG. - pub select_chain: SC, - /// The keystore which holds the signing keys. - pub keystore: KeyStorePtr, - /// The maximum block-data size in bytes. - pub max_block_data_size: Option, -} - -impl ServiceBuilder where - C: Collators + Send + Sync + Unpin + 'static, - C::Collation: Send + Unpin + 'static, - P: BlockchainEvents + BlockBackend, - P: ProvideRuntimeApi + Send + Sync + 'static, - P::Api: ParachainHost, - N: Network + Send + Sync + 'static, - N::TableRouter: Send + 'static + Sync, - N::BuildTableRouter: Send + Unpin + 'static, - ::SendLocalCollation: Send, - SC: SelectChain + 'static, - SP: Spawn + Send + 'static, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - sp_api::StateBackendFor: sp_api::StateBackend>, -{ - /// Build the service - this consists of a handle to it, as well as a background - /// future to be run to completion. - pub fn build(self) -> (ServiceHandle, impl Future + Send + 'static) { - const TIMER_INTERVAL: Duration = Duration::from_secs(30); - const CHAN_BUFFER: usize = 10; - - enum Message { - CollectGarbage, - // relay-parent, receiver for instance. - RequestInstance(ValidationInstanceRequest), - // new chain heads - import notification. - NotifyImport(sc_client_api::BlockImportNotification), - } - - let validation_pool = Some(ValidationPool::new()); - let mut parachain_validation = ParachainValidationInstances { - client: self.client.clone(), - network: self.network, - spawner: self.spawner, - availability_store: self.availability_store, - live_instances: HashMap::new(), - validation_pool: validation_pool.clone(), - collation_fetch: DefaultCollationFetch(self.collators, validation_pool), - }; - - let client = self.client; - let select_chain = self.select_chain; - let keystore = self.keystore; - let max_block_data_size = self.max_block_data_size; - - let (tx, rx) = futures::channel::mpsc::channel(CHAN_BUFFER); - let interval = interval(TIMER_INTERVAL).map(|_| Message::CollectGarbage); - let import_notifications = client.import_notification_stream().map(Message::NotifyImport); - let instance_requests = rx.map(Message::RequestInstance); - let service = ServiceHandle { sender: tx }; - - let background_work = async move { - let message_stream = futures::stream::select(interval, instance_requests); - let mut message_stream = futures::stream::select(import_notifications, message_stream); - while let Some(message) = message_stream.next().await { - match message { - Message::CollectGarbage => { - match select_chain.leaves() { - Ok(leaves) => { - parachain_validation.retain(|h| leaves.contains(h)); - } - Err(e) => { - warn!("Error fetching leaves from client: {:?}", e); - } - } - } - Message::RequestInstance((relay_parent, sender)) => { - // Upstream will handle the failure case. 
- let _ = sender.send(parachain_validation.get_or_instantiate( - relay_parent, - &keystore, - max_block_data_size, - ).await); - } - Message::NotifyImport(notification) => { - let relay_parent = notification.hash; - if notification.is_new_best { - let res = parachain_validation.get_or_instantiate( - relay_parent, - &keystore, - max_block_data_size, - ).await; - - if let Err(e) = res { - warn!( - "Unable to start parachain validation on top of {:?}: {}", - relay_parent, e - ); - } - } - } - } - } - }; - - (service, background_work) - } -} - -/// Abstraction over `collation_fetch`. -pub(crate) trait CollationFetch { - /// Error type used by `collation_fetch`. - type Error: std::fmt::Debug; - - /// Fetch a collation for the given `parachain`. - fn collation_fetch
<P>
( - self, - parachain: ParaId, - relay_parent: Hash, - client: Arc
<P>
, - max_block_data_size: Option, - n_validators: usize, - ) -> Pin> + Send>> - where - P::Api: ParachainHost, - P: ProvideRuntimeApi + Send + Sync + 'static; -} - -#[derive(Clone)] -struct DefaultCollationFetch(C, Option); -impl CollationFetch for DefaultCollationFetch - where - C: Collators + Send + Sync + Unpin + 'static, - C::Collation: Send + Unpin + 'static, -{ - type Error = C::Error; - - fn collation_fetch
<P>
( - self, - parachain: ParaId, - relay_parent: Hash, - client: Arc
<P>
, - max_block_data_size: Option, - n_validators: usize, - ) -> Pin> + Send>> - where - P::Api: ParachainHost, - P: ProvideRuntimeApi + Send + Sync + 'static, - { - let DefaultCollationFetch(collators, validation_pool) = self; - crate::collation::collation_fetch( - validation_pool, - parachain, - relay_parent, - collators, - client, - max_block_data_size, - n_validators, - ).boxed() - } -} - -// finds the first key we are capable of signing with out of the given set of validators, -// if any. -fn signing_key(validators: &[ValidatorId], keystore: &KeyStorePtr) -> Option> { - let keystore = keystore.read(); - validators.iter() - .find_map(|v| { - keystore.key_pair::(&v).ok() - }) - .map(|pair| Arc::new(pair)) -} - -/// A live instance that is related to a relay chain validation round. -/// -/// It stores the `instance_handle` and the `_table_router`. -struct LiveInstance { - instance_handle: ValidationInstanceHandle, - /// Make sure we keep the table router alive, to respond/receive consensus messages. - _table_router: TR, -} - -/// Constructs parachain-agreement instances. -pub(crate) struct ParachainValidationInstances { - /// The client instance. - client: Arc
<P>
, - /// The backing network handle. - network: N, - /// handle to spawner - spawner: SP, - /// Store for extrinsic data. - availability_store: AvailabilityStore, - /// Live agreements. Maps relay chain parent hashes to attestation - /// instances. - live_instances: HashMap>, - /// The underlying validation pool of processes to use. - /// Only `None` in tests. - validation_pool: Option, - /// Used to fetch a collation. - collation_fetch: CF, -} - -impl ParachainValidationInstances where - N: Network, - N::Error: 'static, - P: ProvideRuntimeApi + Send + Sync + 'static, - P::Api: ParachainHost, - N::TableRouter: Send + 'static + Sync, - ::SendLocalCollation: Send, - N::BuildTableRouter: Unpin + Send + 'static, - SP: Spawn + Send + 'static, - CF: CollationFetch + Clone + Send + Sync + 'static, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - sp_api::StateBackendFor: sp_api::StateBackend>, -{ - /// Get an attestation table for given parent hash. - /// - /// This starts a parachain agreement process on top of the parent hash if - /// one has not already started. - /// - /// Additionally, this will trigger broadcast of data to the new block's duty - /// roster. - async fn get_or_instantiate( - &mut self, - parent_hash: Hash, - keystore: &KeyStorePtr, - max_block_data_size: Option, - ) -> Result { - use primitives::Pair; - - if let Some(instance) = self.live_instances.get(&parent_hash) { - return Ok(instance.instance_handle.clone()); - } - - let id = BlockId::hash(parent_hash); - - let validators = self.client.runtime_api().validators(&id)?; - let sign_with = signing_key(&validators[..], keystore); - - let duty_roster = self.client.runtime_api().duty_roster(&id)?; - - let (group_info, local_duty) = crate::make_group_info( - duty_roster, - &validators, - sign_with.as_ref().map(|k| k.public()), - )?; - - info!( - "Starting parachain attestation session on top of parent {:?}. Local parachain duty is {:?}", - parent_hash, - local_duty, - ); - - let active_parachains = self.client.runtime_api().active_parachains(&id)?; - - debug!(target: "validation", "Active parachains: {:?}", active_parachains); - - // If we are a validator, we need to store our index in this round in availability store. - // This will tell which erasure chunk we should store. - if let Some(ref local_duty) = local_duty { - if let Err(e) = self.availability_store.note_validator_index_and_n_validators( - &parent_hash, - local_duty.index, - validators.len() as u32, - ) { - warn!( - target: "validation", - "Failed to add validator index and n_validators to the availability-store: {:?}", e - ) - } - } - - let api = self.client.runtime_api(); - - let signing_context = if api.has_api_with::, _>( - &BlockId::hash(parent_hash), - |version| version >= 3, - )? { - api.signing_context(&id)? - } else { - trace!( - target: "validation", - "Expected runtime with ParachainHost version >= 3", - ); - SigningContext { - session_index: 0, - parent_hash, - } - }; - - let table = Arc::new(SharedTable::new( - validators.clone(), - group_info, - sign_with, - signing_context, - self.availability_store.clone(), - max_block_data_size, - self.validation_pool.clone(), - )); - - // The router will join the consensus gossip network. This is important - // to receive messages sent for the current round. 
- let router = match self.network.build_table_router( - table.clone(), - &validators, - ).await { - Ok(res) => res, - Err(e) => { - warn!(target: "validation", "Failed to build router: {:?}", e); - return Err(Error::CouldNotBuildTableRouter(format!("{:?}", e))) - } - }; - - if let Some((Chain::Parachain(id), index)) = local_duty.map(|d| (d.validation, d.index)) { - let n_validators = validators.len(); - let availability_store = self.availability_store.clone(); - let client = self.client.clone(); - let collation_fetch = self.collation_fetch.clone(); - let router = router.clone(); - - let res = self.spawner.spawn( - launch_work( - move || collation_fetch.collation_fetch(id, parent_hash, client, max_block_data_size, n_validators), - availability_store, - router, - n_validators, - index, - ), - ); - - if let Err(e) = res { - error!(target: "validation", "Failed to launch work: {:?}", e); - } - } - - let tracker = ValidationInstanceHandle { - table, - started: Instant::now(), - }; - - let live_instance = LiveInstance { - instance_handle: tracker.clone(), - _table_router: router, - }; - self.live_instances.insert(parent_hash, live_instance); - - Ok(tracker) - } - - /// Retain validation sessions matching predicate. - fn retain bool>(&mut self, mut pred: F) { - self.live_instances.retain(|k, _| pred(k)) - } -} - -// launch parachain work asynchronously. -async fn launch_work( - collation_fetch: impl FnOnce() -> CFF, - availability_store: AvailabilityStore, - router: impl TableRouter, - n_validators: usize, - local_id: ValidatorIndex, -) where - E: std::fmt::Debug, - CFF: Future> + Send, -{ - // fetch a local collation from connected collators. - let (collation_info, full_output) = match collation_fetch().await { - Ok(res) => res, - Err(e) => { - warn!(target: "validation", "Failed to collate candidate: {:?}", e); - return - } - }; - - let crate::pipeline::FullOutput { - commitments, - erasure_chunks, - available_data, - .. - } = full_output; - - let receipt = collation_info.into_receipt(commitments); - let pov_block = available_data.pov_block.clone(); - - if let Err(e) = availability_store.make_available( - receipt.hash(), - available_data, - ).await { - warn!( - target: "validation", - "Failed to make parachain block data available: {}", - e, - ); - } - - if let Err(e) = availability_store.clone().add_erasure_chunks( - receipt.clone(), - n_validators as _, - erasure_chunks.clone(), - ).await { - warn!(target: "validation", "Failed to add erasure chunks: {}", e); - } - - if let Err(e) = router.local_collation( - receipt, - pov_block, - (local_id, &erasure_chunks), - ).await { - warn!(target: "validation", "Failed to send local collation: {:?}", e); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use futures::{executor::{ThreadPool, self}, future::ready, channel::mpsc}; - use availability_store::ErasureNetworking; - use polkadot_primitives::parachain::{ - PoVBlock, AbridgedCandidateReceipt, ErasureChunk, ValidatorIndex, - CollationInfo, DutyRoster, GlobalValidationSchedule, LocalValidationData, - Retriable, CollatorId, BlockData, Chain, AvailableData, SigningContext, ValidationCode, - }; - use runtime_primitives::traits::Block as BlockT; - use std::pin::Pin; - use sp_keyring::sr25519::Keyring; - - /// Events fired while running mock implementations to follow execution. 
- enum Events { - BuildTableRouter, - CollationFetch, - LocalCollation, - } - - #[derive(Clone)] - struct MockNetwork(mpsc::UnboundedSender); - - impl Network for MockNetwork { - type Error = String; - type TableRouter = MockTableRouter; - type BuildTableRouter = Pin> + Send>>; - - fn build_table_router( - &self, - _: Arc, - _: &[ValidatorId], - ) -> Self::BuildTableRouter { - let event_sender = self.0.clone(); - async move { - event_sender.unbounded_send(Events::BuildTableRouter).expect("Send `BuildTableRouter`"); - - Ok(MockTableRouter(event_sender)) - }.boxed() - } - } - - #[derive(Clone)] - struct MockTableRouter(mpsc::UnboundedSender); - - impl TableRouter for MockTableRouter { - type Error = String; - type SendLocalCollation = Pin> + Send>>; - type FetchValidationProof = Box> + Unpin>; - - fn local_collation( - &self, - _: AbridgedCandidateReceipt, - _: PoVBlock, - _: (ValidatorIndex, &[ErasureChunk]), - ) -> Self::SendLocalCollation { - let sender = self.0.clone(); - - async move { - sender.unbounded_send(Events::LocalCollation).expect("Send `LocalCollation`"); - - Ok(()) - }.boxed() - } - - fn fetch_pov_block(&self, _: &AbridgedCandidateReceipt) -> Self::FetchValidationProof { - unimplemented!("Not required in tests") - } - } - - #[derive(Clone)] - struct MockErasureNetworking; - - impl ErasureNetworking for MockErasureNetworking { - type Error = String; - - fn fetch_erasure_chunk( - &self, - _: &Hash, - _: u32, - ) -> Pin> + Send>> { - ready(Err("Not required in tests".to_string())).boxed() - } - - fn distribute_erasure_chunk(&self, _: Hash, _: ErasureChunk) { - unimplemented!("Not required in tests") - } - } - - #[derive(Clone)] - struct MockCollationFetch(mpsc::UnboundedSender); - - impl CollationFetch for MockCollationFetch { - type Error = (); - - fn collation_fetch
<P>
( - self, - parachain: ParaId, - relay_parent: Hash, - _: Arc
<P>
, - _: Option, - n_validators: usize, - ) -> Pin> + Send>> { - let info = CollationInfo { - parachain_index: parachain, - relay_parent, - collator: Default::default(), - signature: Default::default(), - head_data: Default::default(), - pov_block_hash: Default::default(), - }; - - let available_data = AvailableData { - pov_block: PoVBlock { block_data: BlockData(Vec::new()) }, - omitted_validation: Default::default(), - }; - - let full_output = FullOutput { - available_data, - commitments: Default::default(), - erasure_chunks: Default::default(), - n_validators, - }; - - let sender = self.0; - - async move { - sender.unbounded_send(Events::CollationFetch).expect("`CollationFetch` event send"); - - Ok((info, full_output)) - }.boxed() - } - } - - #[derive(Clone)] - struct MockRuntimeApi { - validators: Vec, - duty_roster: DutyRoster, - } - - impl ProvideRuntimeApi for MockRuntimeApi { - type Api = Self; - - fn runtime_api<'a>(&'a self) -> sp_api::ApiRef<'a, Self::Api> { - self.clone().into() - } - } - - sp_api::mock_impl_runtime_apis! { - impl ParachainHost for MockRuntimeApi { - type Error = sp_blockchain::Error; - - fn validators(&self) -> Vec { self.validators.clone() } - fn duty_roster(&self) -> DutyRoster { self.duty_roster.clone() } - fn active_parachains() -> Vec<(ParaId, Option<(CollatorId, Retriable)>)> { vec![(ParaId::from(1), None)] } - fn global_validation_schedule() -> GlobalValidationSchedule { Default::default() } - fn local_validation_data(_: ParaId) -> Option { None } - fn parachain_code(_: ParaId) -> Option { None } - fn get_heads(_: Vec<::Extrinsic>) -> Option> { - None - } - fn signing_context() -> SigningContext { - Default::default() - } - } - } - - #[test] - fn launch_work_is_executed_properly() { - let executor = ThreadPool::new().unwrap(); - let keystore = keystore::Store::new_in_memory(); - - // Make sure `Bob` key is in the keystore, so this mocked node will be a parachain validator. 
- keystore.write().insert_ephemeral_from_seed::(&Keyring::Bob.to_seed()) - .expect("Insert key into keystore"); - - let validators = vec![ValidatorId::from(Keyring::Alice.public()), ValidatorId::from(Keyring::Bob.public())]; - let validator_duty = vec![Chain::Relay, Chain::Parachain(1.into())]; - let duty_roster = DutyRoster { validator_duty }; - - let (events_sender, events) = mpsc::unbounded(); - - let mut parachain_validation = ParachainValidationInstances { - client: Arc::new(MockRuntimeApi { validators, duty_roster }), - network: MockNetwork(events_sender.clone()), - collation_fetch: MockCollationFetch(events_sender.clone()), - spawner: executor.clone(), - availability_store: AvailabilityStore::new_in_memory(MockErasureNetworking), - live_instances: HashMap::new(), - validation_pool: None, - }; - - executor::block_on(parachain_validation.get_or_instantiate(Default::default(), &keystore, None)) - .expect("Creates new validation round"); - assert!(parachain_validation.live_instances.contains_key(&Default::default())); - - let mut events = executor::block_on_stream(events); - - assert!(matches!(events.next().unwrap(), Events::BuildTableRouter)); - assert!(matches!(events.next().unwrap(), Events::CollationFetch)); - assert!(matches!(events.next().unwrap(), Events::LocalCollation)); - - drop(events_sender); - drop(parachain_validation); - assert!(events.next().is_none()); - } - - #[test] - fn router_is_built_on_relay_chain_validator() { - let executor = ThreadPool::new().unwrap(); - let keystore = keystore::Store::new_in_memory(); - - // Make sure `Alice` key is in the keystore, so this mocked node will be a relay-chain validator. - keystore.write().insert_ephemeral_from_seed::(&Keyring::Alice.to_seed()) - .expect("Insert key into keystore"); - - let validators = vec![ValidatorId::from(Keyring::Alice.public()), ValidatorId::from(Keyring::Bob.public())]; - let validator_duty = vec![Chain::Relay, Chain::Parachain(1.into())]; - let duty_roster = DutyRoster { validator_duty }; - - let (events_sender, events) = mpsc::unbounded(); - - let mut parachain_validation = ParachainValidationInstances { - client: Arc::new(MockRuntimeApi { validators, duty_roster }), - network: MockNetwork(events_sender.clone()), - collation_fetch: MockCollationFetch(events_sender.clone()), - spawner: executor.clone(), - availability_store: AvailabilityStore::new_in_memory(MockErasureNetworking), - live_instances: HashMap::new(), - validation_pool: None, - }; - - executor::block_on(parachain_validation.get_or_instantiate(Default::default(), &keystore, None)) - .expect("Creates new validation round"); - assert!(parachain_validation.live_instances.contains_key(&Default::default())); - - let mut events = executor::block_on_stream(events); - - assert!(matches!(events.next().unwrap(), Events::BuildTableRouter)); - - drop(events_sender); - drop(parachain_validation); - assert!(events.next().is_none()); - } -}
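As a closing editorial note: the `interval` helper deleted above is small enough to reconstruct as a standalone, runnable sketch (assuming futures 0.3 and futures-timer, the crates this service used). It shows how `stream::unfold` plus a timer `Delay` yields the periodic ticks that drive the background worker's garbage collection; the `main` harness is illustrative only:

use std::time::Duration;
use futures::{stream, FutureExt, Stream, StreamExt};

fn interval(duration: Duration) -> impl Stream<Item = ()> + Send + Unpin {
	stream::unfold((), move |_| {
		// Each tick completes a fresh delay and re-arms the stream.
		futures_timer::Delay::new(duration).map(|_| Some(((), ())))
	}).map(drop)
}

fn main() {
	futures::executor::block_on(async {
		let mut ticks = interval(Duration::from_millis(10)).take(3);
		while let Some(()) = ticks.next().await {
			println!("collect garbage");
		}
	});
}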