diff --git a/.github/review-bot.yml b/.github/review-bot.yml new file mode 100644 index 0000000000000000000000000000000000000000..c9eadd6e58ba1c5d6facde097fa26816da31f2d7 --- /dev/null +++ b/.github/review-bot.yml @@ -0,0 +1,121 @@ +rules: + - name: CI files + condition: + include: + - ^\.gitlab-ci\.yml + - ^docker/.* + - ^\.github/.* + - ^\.gitlab/.* + - ^\.config/nextest.toml + - ^\.cargo/.* + exclude: + - ^\.gitlab/pipeline/zombienet.* + min_approvals: 2 + type: basic + teams: + - ci + - release-engineering + + - name: Audit rules + type: basic + condition: + include: + - ^polkadot/runtime\/(kusama|polkadot|common)\/.* + - ^polkadot/primitives/src\/.+\.rs$ + - ^substrate/primitives/.* + - ^substrate/frame/.* + exclude: + - ^polkadot/runtime\/(kusama|polkadot)\/src\/weights\/.+\.rs$ + - ^substrate\/frame\/.+\.md$ + min_approvals: 1 + allowedToSkipRule: + teams: + - core-devs + teams: + - srlabs + + - name: Core developers + countAuthor: true + condition: + include: + - .* + # exclude files covered by the 'Runtime files' and 'CI files' rules + exclude: + - ^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$ + - ^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$ + - ^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$ + - ^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$ + - ^cumulus/parachains/common/src/[^/]+\.rs$ + - ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) + - ^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$ + - ^\.gitlab-ci\.yml + - ^docker/.* + - ^\.github/.* + - ^\.gitlab/.* + - ^\.config/nextest.toml + - ^\.cargo/.* + min_approvals: 2 + type: basic + teams: + - core-devs + + # cumulus + - name: Runtime files cumulus + countAuthor: true + condition: + include: + - ^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$ + - ^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$ + - ^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$ + - ^cumulus/parachains/common/src/[^/]+\.rs$ + type: and-distinct + reviewers: + - min_approvals: 1 + teams: + - locks-review + - min_approvals: 1 + teams: + - polkadot-review + + # applies when there are changes in the bridges subtree (e.g. when backporting changes to the bridges repo) + - name: Bridges subtree files + type: basic + condition: + include: + - ^bridges/.* + min_approvals: 1 + teams: + - bridges-core + + # substrate + + - name: FRAME coders substrate + condition: + include: + - ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) + type: "and" + reviewers: + - min_approvals: 2 + teams: + - core-devs + - min_approvals: 1 + teams: + - frame-coders + + # Protection of THIS file + - name: Review Bot + condition: + include: + - review-bot\.yml + min_approvals: 2 + type: "and" + reviewers: + - min_approvals: 1 + teams: + - opstooling + - min_approvals: 1 + teams: + - locks-review + - min_approvals: 1 + teams: + - ci diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index 4d0afefc47aace08bff9af796f9fb9c30dee99a7..a5c4f2577c0019b91d4ba94da46b2dc7f15e2ec4 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -14,7 +14,7 @@ jobs: NODE_AUTH_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} steps: - name: Checkout sources - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - uses: actions/setup-node@v3.8.1 with: node-version: "18.x" diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml index f1e46ca273515d9081f9c5a7f6b8b4ff902c444a..be676d3c1e8c85afc4ee39a6427105487c6a5d53 100644 --- a/.github/workflows/check-markdown.yml +++ b/.github/workflows/check-markdown.yml @@ -13,7 +13,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - uses: actions/setup-node@v3.8.1 with: diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index d153184941ac1e05f341ae49c4c8d4e212c0d85b..23ae2d1aca9dc7e9cdaa099402332dfb543e9641 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -40,7 +40,7 @@ jobs: - name: Checkout repo if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac #v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 #v4.1.0 - name: PRdoc check for PR#${{ github.event.pull_request.number }} if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index df785404036e138c78cf1818620dd9db8c30a243..db93a3d53a76ceab86879ffa8740df380ddeb2d2 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -16,7 +16,7 @@ jobs: container: image: paritytech/ci-unified:bullseye-1.70.0-2023-05-23-v20230706 steps: - - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - name: Cargo fmt run: cargo +nightly fmt --all -- --check diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index d01d78631e19b71cef7f450808ea707b061b8d82..15631c172b9fe1fe2027d661149615435a3b975d 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -79,7 +79,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 #TODO: this step will be needed when automated triggering will work #this step runs only if the workflow is triggered automatically when new release is published @@ -117,7 +117,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - name: Get artifacts from cache uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 @@ -239,7 +239,7 @@ jobs: needs: fetch-latest-debian-package-version steps: - name: Checkout sources - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 + uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml new file mode 100644 index 0000000000000000000000000000000000000000..aeb33b5da3d3b89960b2619097bf52614b02d7c7 --- 
/dev/null +++ b/.github/workflows/review-bot.yml @@ -0,0 +1,31 @@ +name: Review PR +on: + pull_request_target: + types: + - opened + - reopened + - synchronize + - review_requested + - review_request_removed + - ready_for_review + pull_request_review: + +permissions: + contents: read + +jobs: + review-approvals: + runs-on: ubuntu-latest + steps: + - name: Generate token + id: team_token + uses: tibdex/github-app-token@v1 + with: + app_id: ${{ secrets.REVIEW_APP_ID }} + private_key: ${{ secrets.REVIEW_APP_KEY }} + - name: "Evaluates PR reviews and assigns reviewers" + uses: paritytech/review-bot@v1.1.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + team-token: ${{ steps.team_token.outputs.token }} + checks-token: ${{ steps.team_token.outputs.token }} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 10dd69f12a77c27a7d3de083f5063bd9f6a0e931..ee6b9c98733396be991c5af3305918431e267742 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,7 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.67" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.68" DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" default: diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 5b53798c403dd3ad82d908f7d7fe32956bcd6838..029c0f6a3cddd168edd83e645022f29155f9136a 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -19,12 +19,18 @@ build-linux-stable: RUN_UI_TESTS: 1 script: - time cargo build --locked --profile testnet --features pyroscope,fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker + - time ROCOCO_EPOCH_DURATION=10 ./polkadot/scripts/build-only-wasm.sh rococo-runtime $(pwd)/runtimes/rococo-runtime-10/ + - time ROCOCO_EPOCH_DURATION=100 ./polkadot/scripts/build-only-wasm.sh rococo-runtime $(pwd)/runtimes/rococo-runtime-100/ + - time ROCOCO_EPOCH_DURATION=600 ./polkadot/scripts/build-only-wasm.sh rococo-runtime $(pwd)/runtimes/rococo-runtime-600/ + - pwd + - ls -alR runtimes # pack artifacts - mkdir -p ./artifacts - VERSION="${CI_COMMIT_REF_NAME}" # will be tag or branch name - mv ./target/testnet/polkadot ./artifacts/. - mv ./target/testnet/polkadot-prepare-worker ./artifacts/. - mv ./target/testnet/polkadot-execute-worker ./artifacts/. + - mv ./runtimes/ ./artifacts/. - pushd artifacts - sha256sum polkadot | tee polkadot.sha256 - shasum -c polkadot.sha256 diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index 5b0ea276773dd28e5e86a18a6b3c10b3bd103cde..5bfe4b729e6204a9899e6b1abd0c70d5a1f400f4 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -15,6 +15,12 @@ short-benchmark-westend: &short-bench artifacts: true variables: RUNTIME: westend + # We run optimized builds for testing, + # but still want debug assertions enabled. + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: "full" + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" tags: - benchmark script: @@ -32,6 +38,12 @@ short-benchmark-westend: &short-bench artifacts: true variables: RUNTIME_CHAIN: benchmarked-runtime-chain + # We run optimized builds for testing, + # but still want debug assertions enabled. 
+ RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: "full" + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" tags: - benchmark script: diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 3a4f5e71247ca33fa1d3655d1f1a01fd0dffb3bf..ad0ef4d9b8e66233a45716fe318c792cc8acd557 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -314,8 +314,8 @@ node-bench-regression-guard: --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA" after_script: [""] -# if this fails (especially after rust version upgrade) run -# ./substrate/.maintain/update-rust-stable.sh <rust_version> +# If this fails, run `bot update-ui` in the Pull Request or "./scripts/update-ui-tests.sh" locally. +# See ./docs/CONTRIBUTING.md#ui-tests test-frame-ui: stage: test extends: diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index 3347eda1baae5ebb943ac2c2250154f69e86db56..3f2c6f64fbfe120ae15bf394982d6dbdd29405b0 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -5,7 +5,7 @@ before_script: - echo "Zombie-net Tests Config" - echo "${ZOMBIENET_IMAGE}" - - echo "${RELAY_IMAGE}" + - echo "${POLKADOT_IMAGE}" - echo "${COL_IMAGE}" - echo "${GH_DIR}" - echo "${LOCAL_DIR}" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index e420baf486aa9bd3fa22c4127f1d773e44b98d8c..0402c194134b78256aaee00999e87c7a18397bf0 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -110,7 +110,7 @@ zombienet-polkadot-smoke-0001-parachains-smoke-test: - .zombienet-polkadot-common before_script: - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - - export COL_IMAGE="docker.io/paritypr/colander:4519" # The collator image is fixed + - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} - echo "Zombienet Tests Config" - echo "gh-dir ${GH_DIR}" - echo "local-dir ${LOCAL_DIR}" @@ -127,12 +127,12 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: - .zombienet-polkadot-common before_script: - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - - export COL_IMAGE="docker.io/parity/polkadot-collator:latest" # Use cumulus lastest image + - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}" - echo "Zombienet Tests Config" - echo "gh-dir ${GH_DIR}" - echo "local-dir ${LOCAL_DIR}" - echo "polkadot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - - echo "colander image ${COL_IMAGE}" + - echo "polkadot-parachain image ${CUMULUS_IMAGE}" - echo "malus image ${MALUS_IMAGE}" script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh @@ -193,27 +193,3 @@ zombienet-polkadot-malus-0001-dispute-valid: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh --local-dir="${LOCAL_DIR}/integrationtests" --test="0001-dispute-valid-block.zndsl" - -zombienet-polkadot-async-backing-compatibility: - extends: - - .zombienet-polkadot-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/async_backing" - --test="001-async-backing-compatibility.zndsl" - -zombienet-polkadot-async-backing-runtime-upgrade: - extends: - - .zombienet-polkadot-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/async_backing" - 
--test="002-async-backing-runtime-upgrade.zndsl" - -zombienet-polkadot-async-backing-collator-mix: - extends: - - .zombienet-polkadot-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/async_backing" - --test="003-async-backing-collator-mix.zndsl" diff --git a/Cargo.lock b/Cargo.lock index eab134cdd85ac5f38838dcbde59c4bd4ae5703c1..fdbae9851682bb50282453f679b10cc128f864d2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,9 +121,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ "aead 0.5.2", "aes 0.8.3", @@ -774,6 +774,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -866,6 +867,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -961,6 +963,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -1149,7 +1152,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", ] [[package]] @@ -1190,7 +1193,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", ] [[package]] @@ -1859,6 +1862,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -1922,6 +1926,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -2021,6 +2026,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -2636,6 +2642,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -2846,6 +2853,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -4367,9 +4375,9 @@ dependencies = [ [[package]] name = "directories" -version = "4.0.1" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" dependencies = [ "dirs-sys", ] @@ -4386,13 +4394,14 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.7" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", + "option-ext", "redox_users", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -5592,7 +5601,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "waker-fn", ] @@ -5649,7 +5658,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.12", + 
"pin-project-lite 0.2.13", "pin-utils", "slab", ] @@ -5814,6 +5823,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -6057,7 +6067,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", ] [[package]] @@ -6100,7 +6110,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "socket2 0.4.9", "tokio", "tower-service", @@ -6810,6 +6820,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-grandpa", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-offchain", @@ -8447,6 +8458,7 @@ dependencies = [ "sp-consensus-aura", "sp-consensus-grandpa", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -8694,6 +8706,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "orchestra" version = "0.3.3" @@ -10912,6 +10930,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -11244,6 +11263,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -11346,9 +11366,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -12015,6 +12035,7 @@ version = "1.0.0" dependencies = [ "always-assert", "assert_matches", + "cfg-if", "futures", "futures-timer", "hex-literal", @@ -12071,6 +12092,7 @@ name = "polkadot-node-core-pvf-common" version = "1.0.0" dependencies = [ "assert_matches", + "cfg-if", "cpu-time", "futures", "landlock", @@ -12112,6 +12134,7 @@ dependencies = [ name = "polkadot-node-core-pvf-prepare-worker" version = "1.0.0" dependencies = [ + "cfg-if", "futures", "libc", "parity-scale-codec", @@ -12626,6 +12649,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -13054,6 +13078,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -13150,7 +13175,7 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "windows-sys 0.48.0", ] @@ -13596,9 +13621,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31999cfc7927c4e212e60fd50934ab40e8e8bfd2d493d6095d2d306bc0764d9" +checksum = "c956be1b23f4261676aed05a0046e204e8a6836e50203902683a718af0797989" dependencies = [ "bytes", "rand 0.8.5", @@ -13929,7 +13954,7 @@ dependencies = [ "mime", "once_cell", "percent-encoding", - "pin-project-lite 0.2.12", + 
"pin-project-lite 0.2.13", "rustls 0.21.6", "rustls-pemfile", "serde", @@ -14068,6 +14093,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -14105,6 +14131,7 @@ dependencies = [ "pallet-bounties", "pallet-child-bounties", "pallet-collective", + "pallet-conviction-voting", "pallet-democracy", "pallet-elections-phragmen", "pallet-grandpa", @@ -14119,7 +14146,9 @@ dependencies = [ "pallet-offences", "pallet-preimage", "pallet-proxy", + "pallet-ranked-collective", "pallet-recovery", + "pallet-referenda", "pallet-scheduler", "pallet-session", "pallet-society", @@ -14133,6 +14162,7 @@ dependencies = [ "pallet-treasury", "pallet-utility", "pallet-vesting", + "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "parity-scale-codec", @@ -14148,11 +14178,13 @@ dependencies = [ "serde_json", "smallvec", "sp-api", + "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -15357,6 +15389,7 @@ dependencies = [ "substrate-test-runtime-client", "thiserror", "tokio", + "tokio-stream", ] [[package]] @@ -16102,6 +16135,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -16349,6 +16383,7 @@ dependencies = [ "sp-block-builder", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-offchain", "sp-runtime", @@ -17437,7 +17472,7 @@ dependencies = [ name = "sp-statement-store" version = "4.0.0-dev" dependencies = [ - "aes-gcm 0.10.2", + "aes-gcm 0.10.3", "curve25519-dalek 4.0.0", "ed25519-dalek", "hkdf", @@ -17763,6 +17798,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", @@ -18768,7 +18804,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "signal-hook-registry", "socket2 0.5.3", "tokio-macros", @@ -18814,7 +18850,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tokio", "tokio-util", ] @@ -18854,7 +18890,7 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tokio", "tracing", ] @@ -18926,7 +18962,7 @@ dependencies = [ "http", "http-body", "http-range-header", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tower-layer", "tower-service", ] @@ -18951,7 +18987,7 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.13", "tracing-attributes", "tracing-core", ] @@ -20066,7 +20102,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a00f4242f2db33307347bd5be53263c52a0331c96c14292118c9a6bb48d267" dependencies = [ "aes 0.6.0", - "aes-gcm 0.10.2", + "aes-gcm 0.10.3", "async-trait", "bincode", "block-modes", @@ -20236,6 +20272,7 @@ dependencies = [ "pallet-beefy", "pallet-beefy-mmr", "pallet-collective", + "pallet-conviction-voting", "pallet-democracy", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", @@ -20257,6 +20294,7 @@ dependencies = [ "pallet-preimage", "pallet-proxy", 
"pallet-recovery", + "pallet-referenda", "pallet-scheduler", "pallet-session", "pallet-session-benchmarking", @@ -20272,6 +20310,7 @@ dependencies = [ "pallet-treasury", "pallet-utility", "pallet-vesting", + "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "parity-scale-codec", @@ -20287,11 +20326,13 @@ dependencies = [ "smallvec", "sp-api", "sp-application-crypto", + "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-babe", "sp-consensus-beefy", "sp-core", + "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", diff --git a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs index c1dbc6db36f644c3d123a8c3c5930fce31973146..cd281324ee55fa9a0c4bfcfe454b34ecba9bfd07 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-cumulus/src/lib.rs @@ -52,7 +52,7 @@ pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); /// This is a copy-paste from the cumulus repo's `parachains-common` crate. const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(constants::WEIGHT_REF_TIME_PER_SECOND, 0) .saturating_div(2) - .set_proof_size(polkadot_primitives::v5::MAX_POV_SIZE as u64); + .set_proof_size(polkadot_primitives::MAX_POV_SIZE as u64); /// All cumulus bridge hubs assume that about 5 percent of the block weight is consumed by /// `on_initialize` handlers. This is used to limit the maximal weight of a single extrinsic. diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 39eda5075e29e9477ced48a37db3d45e00c67705..bc8d0d430c774bb3255f9494cd1fdced5a2d944d 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -41,6 +41,3 @@ metered = { package = "prioritized-metered-channel", version = "0.5.1", default- # Cumulus cumulus-test-service = { path = "../../test/service" } - -[features] -network-protocol-staging = [ "polkadot-service/network-protocol-staging" ] diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 39056d6b6511666c4d71be224914ad3da6e30d5d..226474d3d38cd1b34c6d1304855338bf3c6f4ff9 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -41,7 +41,3 @@ tracing = "0.1.37" async-trait = "0.1.73" futures = "0.3.28" -[features] -network-protocol-staging = [ - "polkadot-node-network-protocol/network-protocol-staging", -] diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 57e16bc4283ce00eea494fb1e4c66d08008bc73d..3f4c08ecbb8341c0c758cb60c7eb3063d236e295 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -22,8 +22,8 @@ use futures::{Stream, StreamExt}; use polkadot_core_primitives::{Block, BlockNumber, Hash, Header}; use polkadot_overseer::RuntimeApiSubsystemClient; use polkadot_primitives::{ + async_backing::{AsyncBackingParams, BackingState}, slashing, - vstaging::{AsyncBackingParams, BackingState}, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sp_api::{ApiError, RuntimeApiInfo}; @@ -346,16 +346,16 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { 
Ok(self.rpc_client.parachain_host_minimum_backing_votes(at, session_index).await?) } - async fn staging_async_backing_params(&self, at: Hash) -> Result<AsyncBackingParams, ApiError> { - Ok(self.rpc_client.parachain_host_staging_async_backing_params(at).await?) + async fn async_backing_params(&self, at: Hash) -> Result<AsyncBackingParams, ApiError> { + Ok(self.rpc_client.parachain_host_async_backing_params(at).await?) } - async fn staging_para_backing_state( + async fn para_backing_state( &self, at: Hash, para_id: cumulus_primitives_core::ParaId, ) -> Result<Option<BackingState>, ApiError> { - Ok(self.rpc_client.parachain_host_staging_para_backing_state(at, para_id).await?) + Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?) } } diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index bea2fc330a24e3c9875f2359cde5f4388de0e6ae..a83a18f7cd965743cdb4490c3966b6ca8b6b7272 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -30,7 +30,7 @@ use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, request_response::{ v1::{self, AvailableDataFetchingRequest}, - vstaging, IncomingRequestReceiver, ReqProtocolNames, + v2, IncomingRequestReceiver, ReqProtocolNames, }, }; use polkadot_node_subsystem_util::metrics::{prometheus::Registry, Metrics}; @@ -63,9 +63,8 @@ pub(crate) struct CollatorOverseerGenArgs<'a> { pub authority_discovery_service: AuthorityDiscoveryService, /// Receiver for collation request protocol v1. pub collation_req_receiver_v1: IncomingRequestReceiver<v1::CollationFetchingRequest>, - /// Receiver for collation request protocol vstaging. - pub collation_req_receiver_vstaging: - IncomingRequestReceiver<vstaging::CollationFetchingRequest>, + /// Receiver for collation request protocol v2. + pub collation_req_receiver_v2: IncomingRequestReceiver<v2::CollationFetchingRequest>, /// Receiver for availability request protocol pub available_data_req_receiver: IncomingRequestReceiver<AvailableDataFetchingRequest>, /// Prometheus registry, commonly used for production systems, less so for test. 
@@ -88,7 +87,7 @@ fn build_overseer( sync_oracle, authority_discovery_service, collation_req_receiver_v1, - collation_req_receiver_vstaging, + collation_req_receiver_v2, available_data_req_receiver, registry, spawner, @@ -121,7 +120,7 @@ fn build_overseer( peer_id: network_service.local_peer_id(), collator_pair, request_receiver_v1: collation_req_receiver_v1, - request_receiver_vstaging: collation_req_receiver_vstaging, + request_receiver_v2: collation_req_receiver_v2, metrics: Metrics::register(registry)?, }; CollatorProtocolSubsystem::new(side) diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 366d428eda70036203f48e253c9d395c58b887ec..08e4e8e34aba4219382b4554bfb8ff97762c2009 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -23,7 +23,7 @@ use polkadot_network_bridge::{peer_sets_info, IsAuthority}; use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, request_response::{ - v1, vstaging, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames, + v1, v2, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames, }, }; @@ -182,7 +182,7 @@ async fn new_minimal_relay_chain( } let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - let (collation_req_receiver_v1, collation_req_receiver_vstaging, available_data_req_receiver) = + let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) = build_request_response_protocol_receivers(&request_protocol_names, &mut net_config); let best_header = relay_chain_rpc_client @@ -212,7 +212,7 @@ async fn new_minimal_relay_chain( sync_oracle, authority_discovery_service, collation_req_receiver_v1, - collation_req_receiver_vstaging, + collation_req_receiver_v2, available_data_req_receiver, registry: prometheus_registry.as_ref(), spawner: task_manager.spawn_handle(), @@ -234,13 +234,13 @@ fn build_request_response_protocol_receivers( config: &mut FullNetworkConfiguration, ) -> ( IncomingRequestReceiver<v1::CollationFetchingRequest>, - IncomingRequestReceiver<vstaging::CollationFetchingRequest>, + IncomingRequestReceiver<v2::CollationFetchingRequest>, IncomingRequestReceiver<v1::AvailableDataFetchingRequest>, ) { let (collation_req_receiver_v1, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); - let (collation_req_receiver_vstaging, cfg) = + let (collation_req_receiver_v2, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -248,5 +248,5 @@ fn build_request_response_protocol_receivers( config.add_request_response_protocol(cfg); let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config(request_protocol_names); config.add_request_response_protocol(cfg); - (collation_req_receiver_v1, collation_req_receiver_vstaging, available_data_req_receiver) + (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) } diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index c1e92b249d776126f8de30db2b021898e50219fc..b1fd7d1ab7d9cc4b9805c449d1b207d5126a8e5b 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -30,9 +30,8 @@ use parity_scale_codec::{Decode, Encode}; use 
cumulus_primitives_core::{ relay_chain::{ - slashing, - vstaging::{AsyncBackingParams, BackingState}, - BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + async_backing::{AsyncBackingParams, BackingState}, + slashing, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, @@ -599,30 +598,22 @@ impl RelayChainRpcClient { } #[allow(missing_docs)] - pub async fn parachain_host_staging_async_backing_params( + pub async fn parachain_host_async_backing_params( &self, at: RelayHash, ) -> Result<AsyncBackingParams, RelayChainError> { - self.call_remote_runtime_function( - "ParachainHost_staging_async_backing_params", - at, - None::<()>, - ) - .await + self.call_remote_runtime_function("ParachainHost_async_backing_params", at, None::<()>) + .await } #[allow(missing_docs)] - pub async fn parachain_host_staging_para_backing_state( + pub async fn parachain_host_para_backing_state( &self, at: RelayHash, para_id: ParaId, ) -> Result<Option<BackingState>, RelayChainError> { - self.call_remote_runtime_function( - "ParachainHost_staging_para_backing_state", - at, - Some(para_id), - ) - .await + self.call_remote_runtime_function("ParachainHost_para_backing_state", at, Some(para_id)) + .await } fn send_register_message_to_worker( diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index b53bdbdfc815665ea5ae193a4823c3b9f7b98874..b7c274ceecdcbb524ee57d228ef475699402ea49 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -40,8 +40,3 @@ cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } -[features] -network-protocol-staging = [ - "cumulus-relay-chain-inprocess-interface/network-protocol-staging", - "cumulus-relay-chain-minimal-node/network-protocol-staging", -] diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index a7e59a61c9be2a74025eb3014a11f1632f73ff15..a8f0a49223f53d79a58c26c881fec08874bfb2d6 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1447,7 +1447,7 @@ impl<T: Config> Pallet<T> { hrmp_max_message_num_per_candidate: 2, validation_upgrade_cooldown: 2, validation_upgrade_delay: 2, - async_backing_params: relay_chain::vstaging::AsyncBackingParams { + async_backing_params: relay_chain::AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0, }, diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 223a78dacc49f5f479c7e351a7ee53768535b4ee..114b25d12611528cce567b8b52ef1a025f596f32 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -89,7 +89,4 @@ try-runtime = [ "polkadot-cli/try-runtime", "sp-runtime/try-runtime", ] -network-protocol-staging = [ - "cumulus-client-service/network-protocol-staging", - "polkadot-cli/network-protocol-staging", -] + diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/cumulus/parachain-template/runtime/Cargo.toml index 4e51f16dca168fbcc94309a055e27908e1420ef7..68db2f041ddb3019b62cfbcb1478ee3ee13c6cea 100644 
--- a/cumulus/parachain-template/runtime/Cargo.toml +++ b/cumulus/parachain-template/runtime/Cargo.toml @@ -44,6 +44,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false} sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} @@ -111,6 +112,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs index 038597096f6a56ca7fda99b84a1d4b67e5ef4a26..b9bf97d7786f6c27736929ad5b408a57f9f64a37 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/cumulus/parachain-template/runtime/src/lib.rs @@ -28,6 +28,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything}, weights::{ @@ -742,6 +743,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml index 1038ec8dc42b314e4d46d744aeba9afde5de38a4..17a16d9ccd7da8e0d7fc53973987bb0ed117705b 100644 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml +++ b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml @@ -37,7 +37,12 @@ tests: ] events: - name: hrmp.HrmpChannelForceOpened - result: [*cp_id, *sp_id, *hrmp_proposed_max_capacity, *hrmp_proposed_max_message_size] + result: { + sender: *cp_id, + recipient: *sp_id, + proposed_max_capacity: *hrmp_proposed_max_capacity, + proposed_max_message_size: *hrmp_proposed_max_message_size + } - name: Force Open HRMP Channel From AssetHub Parachain → Collectives Parachain its: - name: Alice calls hrmp.forceOpenHrmpChannel @@ -56,4 +61,9 @@ tests: ] events: - name: hrmp.HrmpChannelForceOpened - result: [*sp_id, *cp_id, *hrmp_proposed_max_capacity, *hrmp_proposed_max_message_size] + result: { + sender: *sp_id, + recipient: *cp_id, + proposed_max_capacity: *hrmp_proposed_max_capacity, + proposed_max_message_size: *hrmp_proposed_max_message_size + } diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs index 7bb64333db52fbee59ba5d83bbf9cfab841c1220..bf583ae33f844ab6717afa09fb65d72a087a3b54 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-kusama/src/tests/hrmp_channels.rs @@ -79,9 +79,12 @@ fn open_hrmp_channel_between_paras_works() { }, // Open channel requested from Para A to Para B RuntimeEvent::Hrmp( - polkadot_runtime_parachains::hrmp::Event::OpenChannelRequested( - sender, recipient, max_capacity, max_message_size - ) + polkadot_runtime_parachains::hrmp::Event::OpenChannelRequested { + sender, + recipient, + proposed_max_capacity: max_capacity, + proposed_max_message_size: max_message_size + } ) => { sender: *sender == para_a_id.into(), recipient: *recipient == para_b_id.into(), @@ -133,9 +136,9 @@ fn open_hrmp_channel_between_paras_works() { }, // Open channel accepted for Para A to Para B RuntimeEvent::Hrmp( - polkadot_runtime_parachains::hrmp::Event::OpenChannelAccepted( + polkadot_runtime_parachains::hrmp::Event::OpenChannelAccepted { sender, recipient - ) + } ) => { sender: *sender == para_a_id.into(), recipient: *recipient == para_b_id.into(), @@ -175,9 +178,12 @@ fn force_open_hrmp_channel_for_system_para_works() { vec![ // HRMP channel forced opened RuntimeEvent::Hrmp( - polkadot_runtime_parachains::hrmp::Event::HrmpChannelForceOpened( - sender, recipient, max_capacity, max_message_size - ) + polkadot_runtime_parachains::hrmp::Event::HrmpChannelForceOpened{ + sender, + recipient, + proposed_max_capacity: max_capacity, + proposed_max_message_size: max_message_size + } ) => { sender: *sender == system_para_id.into(), recipient: *recipient == para_a_id.into(), diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs index 
a6286c619f64ced17ceddcab7d3dfc44b2e862d1..e5bce267b903f112984924c4f7b426d9c742da18 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs +++ b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/hrmp_channels.rs @@ -79,9 +79,9 @@ fn open_hrmp_channel_between_paras_works() { }, // Open channel requested from Para A to Para B RuntimeEvent::Hrmp( - polkadot_runtime_parachains::hrmp::Event::OpenChannelRequested( - sender, recipient, max_capacity, max_message_size - ) + polkadot_runtime_parachains::hrmp::Event::OpenChannelRequested { + sender, recipient, proposed_max_capacity: max_capacity, proposed_max_message_size: max_message_size + } ) => { sender: *sender == para_a_id.into(), recipient: *recipient == para_b_id.into(), @@ -133,9 +133,9 @@ fn open_hrmp_channel_between_paras_works() { }, // Open channel accepted for Para A to Para B RuntimeEvent::Hrmp( - polkadot_runtime_parachains::hrmp::Event::OpenChannelAccepted( + polkadot_runtime_parachains::hrmp::Event::OpenChannelAccepted { sender, recipient - ) + } ) => { sender: *sender == para_a_id.into(), recipient: *recipient == para_b_id.into(), @@ -175,9 +175,9 @@ fn force_open_hrmp_channel_for_system_para_works() { vec![ // HRMP channel forced opened RuntimeEvent::Hrmp( - polkadot_runtime_parachains::hrmp::Event::HrmpChannelForceOpened( - sender, recipient, max_capacity, max_message_size - ) + polkadot_runtime_parachains::hrmp::Event::HrmpChannelForceOpened{ + sender, recipient, proposed_max_capacity: max_capacity, proposed_max_message_size: max_message_size + } ) => { sender: *sender == system_para_id.into(), recipient: *recipient == para_a_id.into(), diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 7461165f2a198e8f8b871850545bf50f6e37e885..2804128ec014fbc0aef7e0e7245c1e92378ae920 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -64,7 +64,7 @@ decl_test_relay_chains! { Hrmp: kusama_runtime::Hrmp, } }, - #[api_version(6)] + #[api_version(7)] pub struct Westend { genesis = westend::genesis(), on_init = (), @@ -79,7 +79,7 @@ decl_test_relay_chains! { Balances: westend_runtime::Balances, } }, - #[api_version(5)] + #[api_version(7)] pub struct Rococo { genesis = rococo::genesis(), on_init = (), @@ -94,7 +94,7 @@ decl_test_relay_chains! 
{ Balances: rococo_runtime::Balances, } }, - #[api_version(5)] + #[api_version(7)] pub struct Wococo { genesis = rococo::genesis(), on_init = (), diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml index 1d0c7bc98563417169a3a17586de695e1ba36a3a..4853195d329aeb76210c8a3ac632bdd0565478b1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml @@ -42,6 +42,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -209,6 +210,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 828d1b4750a3e0436bc18f7926489a04a4b2bb9e..40ce122112d296d538496cb216ad33602d58e9b9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -52,6 +52,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, ord_parameter_types, parameter_types, traits::{ AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, @@ -1340,6 +1341,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml index 4a6ce4cbf593108fdc06c81853cd1dafaa7d3fde..f0eae31f53a94a76f4e28a768d8f91cb8c3021d5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml @@ -39,6 +39,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -190,6 +191,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index 0051af21f9a32b67ff87b0ca3b82373ff4778715..d4f7d6ef361674e5fa7bbd407e2eb830529a2963 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -84,6 +84,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, @@ -1218,6 +1219,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index f436ae9537f46b5266111f39bb039f841c3d4388..f7f6fdf68e46692182839bdd06b6ea7d7fde5b19 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -41,6 +41,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -198,6 +199,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 4887fce1b0a4a5d633907cae17697b7793e781c8..759cd727f1dcf0001016bfcea3bc52bb2a80a8ed 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -39,6 +39,7 @@ use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, ord_parameter_types, parameter_types, traits::{ tokens::nonfungibles_v2::Inspect, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, @@ -1353,6 +1354,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml index 67c3fa37df0480793df8c7a8a0730f34a4533a71..d54e66c42dc92cb9ba11343b099c06d9a8bcfc65 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml @@ -37,6 +37,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -118,6 +119,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index 54b15e6b327b51eefefcb22e9987aac287401192..791751e7736851e46c26fb3056bafaed36389baf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -43,6 +43,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything}, weights::{ConstantMultiplier, Weight}, @@ -779,6 +780,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml index 77923ee74f89ed495af94ac560668186f55ee414..d9dba55768149c65e23686701446c0a96ed55c1c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml @@ -37,6 +37,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -118,6 +119,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index dbfdc249a3cd7aad3cb5482f4f4a5e3a5493e0a0..928b9d091ec5bf85a69582794dd23e933637eccf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -43,6 +43,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything}, weights::{ConstantMultiplier, Weight}, @@ -779,6 +780,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 70c62d1a34fd9fb6f375f456ad5729b52af8e19d..fec00fe0794c5202900da8570b2c174b16769f39 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -37,6 +37,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} @@ -153,6 +154,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index dbdc2133ec8881deca3600ef29fc551e83f0a45b..4c850f92b8de2addf2b1c6a9986d0da008d50800 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -45,6 +45,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, Everything}, weights::{ConstantMultiplier, Weight}, @@ -1233,6 +1234,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml index 6a5806d3b90199726e0ee3e1d6d34dfa2d6578fb..8cb5519a24db5eb7ecac7a32246da79b00e334fa 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml @@ -43,6 +43,7 @@ sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", defau sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -201,6 +202,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index d833c75d470cb75467c00c59b01d9d5c1f309b4b..ff16f93d8f54b39be8a553883949165acf653521 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -68,6 +68,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, @@ -939,6 +940,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index f6fccc0435425adbcd8d9a067dc4de7db7760a81..a85aa8dcb174c5488db06e60d02ff9d60bb4ceba 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -22,6 +22,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -120,6 +121,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 399ada1be2c739c6ea4931fa99c6a38f3c039dd9..70392c5ecbcc2baf012fb9925ff405a7636fa1db 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -47,6 +47,7 @@ use sp_version::RuntimeVersion; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, Everything}, weights::{ConstantMultiplier, Weight}, @@ -694,6 +695,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml index 0ffe59b927f9d2435276472e5b5c5e6434b4ca77..63b658ca977a56b80d1ac55b1d3f9deb7898742f 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml @@ -24,6 +24,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -91,6 +92,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs index 41cb0fceebb59c42a17b1b528830f3aa58950693..d3369202aac4732f367d3eca5c8c120ea4c5769e 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs @@ -64,6 +64,7 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, IsInVec, Randomness, @@ -452,6 +453,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 1b68b720d977dd6e01902c358dee9e45ceee3571..d9711b57b3770355474b43245366cf6adb8c6e98 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -20,6 +20,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -63,6 +64,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 34e82737f82bb38409327b9b4cc96166e0ffcda0..c2bcaf8a1266702a0d5c8d6fbbd6463a5d46dc34 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -46,6 +46,7 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, weights::{ @@ -366,6 +367,16 @@ impl_runtime_apis! { ParachainSystem::collect_collation_info(header) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 46cef8d4ae083062b0c8521642ede074feb70035..675abc07b77359a65f78b21a90ced4978eafede5 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -19,6 +19,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -64,6 +65,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index 477933b5c8d25b04205331ae0dfe17c42e7b555d..4aad553e6a3baf78667cad1271965d223ca3ea2d 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -53,6 +53,7 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, IsInVec, Randomness}, weights::{ @@ -398,6 +399,16 @@ impl_runtime_apis! { ParachainSystem::collect_collation_info(header) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 547695e7621079d8bc7ca33b9fed0c2a41ac15db..6c4f8c3895a884010a6018623e98dd3f667c586d 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -44,6 +44,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -117,6 +118,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 9a758cdd97827359ac546c204f66787c36c208b2..fe0f19c306321e8c8f22d1225611ac6281f44340 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -36,6 +36,7 @@ use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, pallet_prelude::Weight, parameter_types, traits::{AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, Everything}, @@ -832,6 +833,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 54055ca732ba994aefd2a646daaf92cc3d21364e..029d5d10f98684d20f8917af3d90a4df29f3410e 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -27,6 +27,7 @@ sp-api = { path = "../../../../../substrate/primitives/api", default-features = sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} @@ -90,6 +91,7 @@ std = [ "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 362ad0383a2319d4ccbd26b826912d5974691dab..50c5a445c25fe0c2c0e09b4979539e9a5f0146c0 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -40,6 +40,7 @@ use sp_version::RuntimeVersion; pub use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, match_types, parameter_types, traits::{ AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, @@ -796,6 +797,16 @@ impl_runtime_apis! { ParachainSystem::collect_collation_info(header) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
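The same `sp_genesis_builder::GenesisBuilder` hunk is applied verbatim to each of the runtimes above. For orientation, here is a minimal sketch of what the two `genesis_builder_helper` functions reduce to; the `serde_json` round-trip is an assumption inferred from the helper names and the `sp_genesis_builder::Result` signature, not a verbatim copy of the `frame-support` implementation:

```rust
use serde::{de::DeserializeOwned, Serialize};

// Hypothetical stand-in bounds: a runtime's `RuntimeGenesisConfig` is
// `Default` and JSON-(de)serializable, which is all these helpers rely on.
fn create_default_config_sketch<GC: Default + Serialize>() -> Vec<u8> {
    // Serialize the default genesis config to JSON bytes.
    serde_json::to_vec(&GC::default()).expect("default genesis config serializes to JSON")
}

fn build_config_sketch<GC: DeserializeOwned>(json: Vec<u8>) -> Result<GC, String> {
    // Deserialize caller-provided JSON; the real helper then builds the
    // genesis storage from the resulting config instead of returning it.
    serde_json::from_slice(&json).map_err(|e| e.to_string())
}
```

The net effect is that every runtime now exposes a uniform runtime API for producing and consuming its genesis config as JSON, rather than each node hard-coding chain-spec construction.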
{ diff --git a/cumulus/test/relay-sproof-builder/src/lib.rs b/cumulus/test/relay-sproof-builder/src/lib.rs index 69a82d05d81693247f0b47adcbb98dfeaebcb068..fbd2692a36b466827376e0917adfc93aac10a739 100644 --- a/cumulus/test/relay-sproof-builder/src/lib.rs +++ b/cumulus/test/relay-sproof-builder/src/lib.rs @@ -63,7 +63,7 @@ impl Default for RelayStateSproofBuilder { hrmp_max_message_num_per_candidate: 5, validation_upgrade_cooldown: 6, validation_upgrade_delay: 6, - async_backing_params: relay_chain::vstaging::AsyncBackingParams { + async_backing_params: relay_chain::AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0, }, diff --git a/cumulus/test/service/benches/transaction_throughput.rs b/cumulus/test/service/benches/transaction_throughput.rs index 83981a91d46f0803d3a77cfb71e36ec3ee85dee3..81ecc84db7bf211d5d684f7c24bfd9a4abf10c6e 100644 --- a/cumulus/test/service/benches/transaction_throughput.rs +++ b/cumulus/test/service/benches/transaction_throughput.rs @@ -21,7 +21,7 @@ use cumulus_test_runtime::{AccountId, BalancesCall, ExistentialDeposit, SudoCall use futures::{future, StreamExt}; use sc_transaction_pool_api::{TransactionPool as _, TransactionSource, TransactionStatus}; use sp_core::{crypto::Pair, sr25519}; -use sp_runtime::{generic::BlockId, OpaqueExtrinsic}; +use sp_runtime::OpaqueExtrinsic; use cumulus_primitives_core::ParaId; use cumulus_test_service::{ @@ -117,7 +117,7 @@ async fn submit_tx_and_wait_for_inclusion( let best_hash = client.chain_info().best_hash; let mut watch = tx_pool - .submit_and_watch(&BlockId::Hash(best_hash), TransactionSource::External, tx.clone()) + .submit_and_watch(best_hash, TransactionSource::External, tx.clone()) .await .expect("Submits tx to pool") .fuse(); diff --git a/docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile b/docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile index 80ce82589873dbbb75504a8a424341fe8c9d9ba2..09fc4f764d0443167e7a28e4a885f896a86e1176 100644 --- a/docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile +++ b/docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile @@ -28,13 +28,16 @@ RUN apt-get update && \ find /var/lib/apt/lists/ -type f -not -name lock -delete; \ # add user and link ~/.local/share/polkadot to /data useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ - mkdir -p /data /polkadot/.local/share && \ + mkdir -p /data /polkadot/.local/share /polkadot/runtimes && \ chown -R polkadot:polkadot /data && \ ln -s /data /polkadot/.local/share/polkadot # add polkadot binaries to docker image COPY ./artifacts/polkadot ./artifacts/polkadot-execute-worker ./artifacts/polkadot-prepare-worker /usr/local/bin +# add runtime binaries to docker image +COPY ./artifacts/runtimes /polkadot/runtimes/ + USER polkadot # check if executable works in this container diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 245369b702012317c7fdc02d48cc792f1386b848..1e05755a9b8338c5769a48c5e8f477670eb92e25 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -143,4 +143,18 @@ UI tests are used for macros to ensure that the output of a macro doesn’t chan These UI tests are sensitive to any changes in the macro-generated code or to switching the rust stable version. The tests are only run when the `RUN_UI_TESTS` environment variable is set. So, when the CI is, for example, complaining about failing UI tests that are expected to fail, these tests need to be executed locally.
-To simplify the updating of the UI test output there is the `.maintain/update-rust-stable +To simplify updating the UI test output, there is a script: +- `./scripts/update-ui-tests.sh` to update the tests for the current rust version locally +- `./scripts/update-ui-tests.sh 1.70` to update the tests for a specific rust version locally + +Or, if you have opened a PR and are a member of `paritytech`, you can use command-bot to run the tests for you in CI: +- `bot update-ui` - will run the tests for the current rust version +- `bot update-ui latest --rust_version=1.70.0` - will run the tests for the specified rust version +- `bot update-ui latest -v CMD_IMAGE=paritytech/ci-unified:bullseye-1.70.0-2023-05-23 --rust_version=1.70.0` - +will run the tests for the specified rust version and the specified image + +## Command Bot + +If you're a member of the **paritytech** org, you can use command-bot to run various common commands in CI: + +Start with a comment in the PR: `bot help` to see the list of available commands. diff --git a/docs/PULL_REQUEST_TEMPLATE.md b/docs/PULL_REQUEST_TEMPLATE.md index 284ad15afc9e6c5b6ffe5f666d5a5bfa43da550b..c93ac90c7e32e1c6c33d4409920eaa85a1aea814 100644 --- a/docs/PULL_REQUEST_TEMPLATE.md +++ b/docs/PULL_REQUEST_TEMPLATE.md @@ -24,10 +24,6 @@ Fixes # (issue number, *if applicable*) Closes # (issue number, *if applicable*) -Polkadot companion: (*if applicable*) - -Cumulus companion: (*if applicable*) - # Checklist - [ ] My PR includes a detailed description as outlined in the "Description" section above @@ -35,8 +31,6 @@ Cumulus companion: (*if applicable*) required) - [ ] I have made corresponding changes to the documentation (if applicable) - [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) -- [ ] If this PR alters any external APIs or interfaces used by Polkadot, the corresponding Polkadot PR is ready as well - as the corresponding Cumulus PR (optional) You can remove the "Checklist" section once all have been checked. Thank you for your contribution! diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index aacc6ad405cca23ea2e1cb1bd7122137177c8f0b..6e82cb69f6ecd6c4db535c9797a4ea7fde9d67c1 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -68,7 +68,6 @@ jemalloc-allocator = [ # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load ci-only-tests = [ "polkadot-node-core-pvf/ci-only-tests" ] -network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] # Configuration for building a .deb package - for use with `cargo-deb` [package.metadata.deb] diff --git a/polkadot/README.md b/polkadot/README.md index 93e93cfba0ee54aac2b5b61d8bb0685ee10f7bb0..3c234bb8e3f4d3bed19331047f42c9afa2658053 100644 --- a/polkadot/README.md +++ b/polkadot/README.md @@ -2,29 +2,17 @@ Implementation of a <https://polkadot.network> node in Rust based on the Substrate framework. -> **NOTE:** In 2018, we split our implementation of "Polkadot" from its development framework > -"Substrate". See the [Substrate][substrate-repo] repo for git history prior to 2018. - -[substrate-repo]: https://github.com/paritytech/substrate - -This repo contains runtimes for the Polkadot, Kusama, and Westend networks. The README provides -information about installing the `polkadot` binary and developing on the codebase. For more specific -guides, like how to be a validator, see the [Polkadot -Wiki](https://wiki.polkadot.network/docs/getting-started).
+The README provides information about installing the `polkadot` binary and developing on the codebase. For more specific +guides, like how to run a validator node, see the [Polkadot Wiki](https://wiki.polkadot.network/docs/getting-started). ## Installation +### Using a pre-compiled binary + If you just wish to run a Polkadot node without compiling it yourself, you may either run the latest binary from our [releases](https://github.com/paritytech/polkadot-sdk/releases) page, or install Polkadot from one of our package repositories. -Installation from the Debian repository will create a `systemd` service that can be used to run a -Polkadot node. This is disabled by default, and can be started by running `systemctl start polkadot` -on demand (use `systemctl enable polkadot` to make it auto-start after reboot). By default, it will -run as the `polkadot` user. Command-line flags passed to the binary can be customized by editing -`/etc/default/polkadot`. This file will not be overwritten on updating Polkadot. You may also just -run the node directly from the command-line. - ### Debian-based (Debian, Ubuntu) Currently supports Debian 10 (Buster) and Ubuntu 20.04 (Focal), and derivatives. Run the following @@ -45,8 +33,18 @@ apt install polkadot ``` +Installation from the Debian repository will create a `systemd` service that can be used to run a +Polkadot node. This is disabled by default, and can be started by running `systemctl start polkadot` +on demand (use `systemctl enable polkadot` to make it auto-start after reboot). By default, it will +run as the `polkadot` user. Command-line flags passed to the binary can be customized by editing +`/etc/default/polkadot`. This file will not be overwritten on updating Polkadot. You may also just +run the node directly from the command-line. + ## Building +Since the Polkadot node is based on Substrate, first set up your build environment according to the +[Substrate installation instructions](https://docs.substrate.io/install/). + ### Install via Cargo Make sure you have the support software installed from the **Build from Source** section below this @@ -60,25 +58,6 @@ cargo install --git https://github.com/paritytech/polkadot-sdk --tag <version> p ### Build from Source -If you'd like to build from source, first install Rust. You may need to add Cargo's bin directory to -your PATH environment variable. Restarting your computer will do this for you automatically. - -```bash -curl https://sh.rustup.rs -sSf | sh -``` - -If you already have Rust installed, make sure you're using the latest version by running: - -```bash -rustup update -``` - -Once done, finish installing the support software: - -```bash -sudo apt install build-essential git clang libclang-dev pkg-config libssl-dev protobuf-compiler -``` - Build the client by cloning this repository and running the following commands from the root directory of the repo: @@ -88,9 +67,6 @@ git checkout <latest tagged release> cargo build --release ``` -**Note:** compilation is a memory intensive process. We recommend having 4 GiB of physical RAM or -swap available (keep in mind that if a build hits swap it tends to be very slow). - **Note:** if you want to move the built `polkadot` binary somewhere (e.g. into $PATH) you will also need to move `polkadot-execute-worker` and `polkadot-prepare-worker`. You can let cargo do all this for you by running: @@ -123,7 +99,7 @@ This repo supports runtimes for Polkadot, Kusama, and Westend. 
Connect to the global Polkadot Mainnet network by running: ```bash -./target/release/polkadot --chain=polkadot +../target/release/polkadot --chain=polkadot ``` You can see your node on [telemetry] (set a custom name with `--name "my custom name"`). @@ -135,7 +111,7 @@ You can see your node on [telemetry] (set a custom name with `--name "my custom Connect to the global Kusama canary network by running: ```bash -./target/release/polkadot --chain=kusama +../target/release/polkadot --chain=kusama ``` You can see your node on [telemetry] (set a custom name with `--name "my custom name"`). @@ -147,7 +123,7 @@ You can see your node on [telemetry] (set a custom name with `--name "my custom Connect to the global Westend testnet by running: ```bash -./target/release/polkadot --chain=westend +../target/release/polkadot --chain=westend ``` You can see your node on [telemetry] (set a custom name with `--name "my custom name"`). @@ -157,20 +133,14 @@ You can see your node on [telemetry] (set a custom name with `--name "my custom ### Obtaining DOTs If you want to do anything on Polkadot, Kusama, or Westend, then you'll need to get an account and -some DOT, KSM, or WND tokens, respectively. See the [claims -instructions](https://claims.polkadot.network/) for Polkadot if you have DOTs to claim. For -Westend's WND tokens, see the faucet -[instructions](https://wiki.polkadot.network/docs/learn-DOT#getting-westies) on the Wiki. +some DOT, KSM, or WND tokens, respectively. Follow the +[instructions](https://wiki.polkadot.network/docs/learn-DOT#obtaining-testnet-tokens) on the Wiki to obtain tokens for +your testnet of choice. ## Hacking on Polkadot If you'd actually like to hack on Polkadot, you can grab the source code and build it. Ensure you -have Rust and the support software installed. This script will install or update Rust and install -the required dependencies (this may take up to 30 minutes on Mac machines): - -```bash -curl https://getsubstrate.io -sSf | bash -s -- --fast -``` +have Rust and the support software installed. Then, grab the Polkadot source code: @@ -183,14 +153,15 @@ Then build the code. You will need to build in release mode (`--release`) to sta use debug mode for development (faster compile times for development and testing). ```bash -./scripts/init.sh # Install WebAssembly. Update Rust -cargo build # Builds all native code +cargo build ``` You can run the tests if you like: ```bash -cargo test --workspace --release +cargo test --workspace --profile testnet +# Or run only the tests for a specific crate +cargo test -p <crate-name> --profile testnet ``` You can start a development chain with: @@ -202,7 +173,7 @@ cargo run --bin polkadot -- --dev Detailed logs may be shown by running the node with the following environment variables set: ```bash -RUST_LOG=debug RUST_BACKTRACE=1 cargo run --bin polkadot -- --dev +RUST_LOG=debug RUST_BACKTRACE=1 cargo run --bin polkadot -- --dev ``` ### Development @@ -222,13 +193,13 @@ If you want to see the multi-node consensus algorithm in action locally, then yo testnet. You'll need two terminals open.
In one, run: ```bash -polkadot --chain=polkadot-local --alice -d /tmp/alice +polkadot --dev --alice -d /tmp/alice ``` And in the other, run: ```bash -polkadot --chain=polkadot-local --bob -d /tmp/bob --port 30334 --bootnodes '/ip4/127.0.0.1/tcp/30333/p2p/ALICE_BOOTNODE_ID_HERE' +polkadot --dev --bob -d /tmp/bob --bootnodes '/ip4/127.0.0.1/tcp/30333/p2p/ALICE_BOOTNODE_ID_HERE' ``` Ensure you replace `ALICE_BOOTNODE_ID_HERE` with the node ID from the output of the first terminal. @@ -242,7 +213,7 @@ that we currently maintain. ### Using Docker -[Using Docker](doc/docker.md) +[Using Docker](../docs/docker.md) ### Shell Completion diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 53961c90a2a817812f77e5432f25ba4cf12b4385..799a229b6ad12b24290afb78748470cb7f5f3316 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -76,4 +76,3 @@ runtime-metrics = [ "polkadot-node-metrics/runtime-metrics", "service/runtime-metrics", ] -network-protocol-staging = [ "service/network-protocol-staging" ] diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs index 27779f3d1acbb8bd05a1b3f63f1d7ebdc92bd895..4e13755deedfb98f020cdfc969e80db42df7d602 100644 --- a/polkadot/node/collation-generation/src/lib.rs +++ b/polkadot/node/collation-generation/src/lib.rs @@ -43,9 +43,8 @@ use polkadot_node_subsystem::{ SubsystemContext, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ - request_availability_cores, request_persisted_validation_data, - request_staging_async_backing_params, request_validation_code, request_validation_code_hash, - request_validators, + request_async_backing_params, request_availability_cores, request_persisted_validation_data, + request_validation_code, request_validation_code_hash, request_validators, }; use polkadot_primitives::{ collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, @@ -208,7 +207,7 @@ async fn handle_new_activations<Context>( let (availability_cores, validators, async_backing_params) = join!( request_availability_cores(relay_parent, ctx.sender()).await, request_validators(relay_parent, ctx.sender()).await, - request_staging_async_backing_params(relay_parent, ctx.sender()).await, + request_async_backing_params(relay_parent, ctx.sender()).await, ); let availability_cores = availability_cores??; diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs index da6b343e6aee4715e9bf45eb1a7b25aa056fca3a..9094f40cca8419b608d451c1dc6f09854f53f07c 100644 --- a/polkadot/node/collation-generation/src/tests.rs +++ b/polkadot/node/collation-generation/src/tests.rs @@ -153,7 +153,7 @@ fn requests_availability_per_relay_parent() { } Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams( + RuntimeApiRequest::AsyncBackingParams( tx, ), ))) => { @@ -235,7 +235,7 @@ fn requests_validation_data_for_scheduled_matches() { }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), ))) => { tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter", @@ -332,7 +332,7 @@ fn sends_distribute_collation_message() { }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), ))) => { tx.send(Err(RuntimeApiError::NotSupported { 
runtime_api_name: "doesnt_matter", @@ -494,7 +494,7 @@ fn fallback_when_no_validation_code_hash_api() { }, Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( _hash, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), ))) => { tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter", diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 4c2fd6becb424033a13d1d1f5b1653cf811c48ad..bdc8b3fa1af84ddba22afd719976b6e0dfb9f831 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -237,7 +237,7 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == test_state.relay_parent => { tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 14f720b721f2612e668fecf295232d1a34a766f3..b79515ed37a6ef590680fb834402f6ed3bef73be 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -20,12 +20,12 @@ use polkadot_node_subsystem::{ messages::{ChainApiMessage, FragmentTreeMembership}, ActivatedLeaf, TimeoutExt, }; -use polkadot_primitives::{vstaging as vstaging_primitives, BlockNumber, Header, OccupiedCore}; +use polkadot_primitives::{AsyncBackingParams, BlockNumber, Header, OccupiedCore}; use super::*; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = - vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; struct TestLeaf { activated: ActivatedLeaf, @@ -56,7 +56,7 @@ async fn activate_leaf( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == leaf_hash => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); } diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 9cd544a8c5362ea25c30d1ed1fc21c38bd24458e..e44530b3f1bbab0995f850af7e951984bd1dc55b 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -43,7 +43,7 @@ use polkadot_node_subsystem_util::runtime::{ self, key_ownership_proof, submit_report_dispute_lost, RuntimeInfo, }; use polkadot_primitives::{ - vstaging, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, + slashing, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, }; @@ -385,7 +385,7 @@ impl Initialized { &mut self, ctx: &mut Context, relay_parent: Hash, - unapplied_slashes: Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, + unapplied_slashes: Vec<(SessionIndex, 
CandidateHash, slashing::PendingSlashes)>, ) { for (session_index, candidate_hash, pending) in unapplied_slashes { gum::info!( @@ -422,11 +422,9 @@ impl Initialized { match res { Ok(Some(key_ownership_proof)) => { key_ownership_proofs.push(key_ownership_proof); - let time_slot = vstaging::slashing::DisputesTimeSlot::new( - session_index, - candidate_hash, - ); - let dispute_proof = vstaging::slashing::DisputeProof { + let time_slot = + slashing::DisputesTimeSlot::new(session_index, candidate_hash); + let dispute_proof = slashing::DisputeProof { time_slot, kind: pending.kind, validator_index: *validator_index, diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs index ed2988fcb39f2be0768b1d9c0cb927689223e955..292e4ebe5282b4344853ca5f3247caf1d9af6cc7 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs @@ -96,10 +96,10 @@ use std::{ use super::LOG_TARGET; use bitvec::prelude::*; -use polkadot_node_subsystem_util::inclusion_emulator::staging::{ +use polkadot_node_subsystem_util::inclusion_emulator::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, }; @@ -981,10 +981,8 @@ impl FragmentNode { mod tests { use super::*; use assert_matches::assert_matches; - use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations; - use polkadot_primitives::vstaging::{ - BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, - }; + use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; + use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; use polkadot_primitives_test_helpers as test_helpers; fn make_constraints( diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 6e5844a62a16b2387dc9bc7c3d515102fe0828e8..fcca0dd0b536ba69d27805ae1cf37b54d834fd02 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -22,7 +22,7 @@ //! backing phases of parachain consensus. //! //! This is primarily an implementation of "Fragment Trees", as described in -//! [`polkadot_node_subsystem_util::inclusion_emulator::staging`]. +//! [`polkadot_node_subsystem_util::inclusion_emulator`]. //! //! This subsystem also handles concerns such as the relay-chain being forkful and session changes. 
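The `StagingAsyncBackingParams` → `AsyncBackingParams` rename above is mechanical, but the `NotSupported` arms in these tests show the contract callers rely on: a runtime that does not expose the API signals "async backing disabled" rather than a hard failure. A hedged sketch of that fallback decision, with illustrative stand-in types (`resolve_async_backing_params` is hypothetical; the real logic lives in subsystem-util helpers such as `prospective_parachains_mode`):

```rust
// Illustrative stand-ins for the runtime-API plumbing.
#[derive(Clone, Copy, Debug)]
struct AsyncBackingParams {
    max_candidate_depth: u32,
    allowed_ancestry_len: u32,
}

#[derive(Debug)]
enum RuntimeApiError {
    NotSupported { runtime_api_name: &'static str },
    Execution(String),
}

/// `NotSupported` means the relay parent sits on a pre-async-backing
/// runtime: fall back to synchronous backing instead of erroring out.
fn resolve_async_backing_params(
    res: Result<AsyncBackingParams, RuntimeApiError>,
) -> Result<Option<AsyncBackingParams>, String> {
    match res {
        Ok(params) => Ok(Some(params)),
        Err(RuntimeApiError::NotSupported { .. }) => Ok(None),
        Err(RuntimeApiError::Execution(e)) => Err(e),
    }
}
```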
@@ -42,13 +42,14 @@ use polkadot_node_subsystem::{ overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ - inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}, + inclusion_emulator::{Constraints, RelayChainBlockInfo}, request_session_index_for_child, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; -use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CandidatePendingAvailability, CommittedCandidateReceipt, CoreState, - Hash, HeadData, Header, Id as ParaId, PersistedValidationData, +use polkadot_primitives::{ + async_backing::CandidatePendingAvailability, BlockNumber, CandidateHash, + CommittedCandidateReceipt, CoreState, Hash, HeadData, Header, Id as ParaId, + PersistedValidationData, }; use crate::{ @@ -792,7 +793,7 @@ async fn fetch_backing_state<Context>( let (tx, rx) = oneshot::channel(); ctx.send_message(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingParaBackingState(para_id, tx), + RuntimeApiRequest::ParaBackingState(para_id, tx), )) .await; diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index eb12ea4537f745c2241865c3d3af7a1d4150105b..d2cd23fe95fc1d8638238c8e30b5227da7e883a0 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -25,7 +25,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - vstaging::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, + async_backing::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash, }; @@ -219,7 +219,7 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == *hash => { tx.send(Ok(async_backing_params)).unwrap(); } @@ -284,7 +284,7 @@ async fn handle_leaf_activation( let para_id = match message { AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, - RuntimeApiRequest::StagingParaBackingState(p_id, _), + RuntimeApiRequest::ParaBackingState(p_id, _), )) => p_id, _ => panic!("received unexpected message {:?}", message), }; @@ -303,7 +303,7 @@ async fn handle_leaf_activation( assert_matches!( message, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingParaBackingState(p_id, tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx)) ) if parent == *hash && p_id == para_id => { tx.send(Ok(Some(backing_state))).unwrap(); } @@ -499,7 +499,7 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == hash => { tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } @@ -1569,7 +1569,7 @@ fn uses_ancestry_only_within_session() { assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + 
RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == hash => { tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len })).unwrap(); } diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 478d1952d9d9168080cdd14917b25cd051495a60..27f4df117e578ebb8f86dd5eb6570af0b247492b 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -8,6 +8,7 @@ license.workspace = true [dependencies] always-assert = "0.1" +cfg-if = "1.0" futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 621f7e24f72bef8650337a05372b1075054d4964..0f7308396d8098293ad1f3e56d3199f4064d0f9d 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -7,6 +7,7 @@ edition.workspace = true license.workspace = true [dependencies] +cfg-if = "1.0" cpu-time = "1.0.0" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index 6eb0d9b7df42f7a25414d5d834a2b3e10d1bfa3a..6fdd06057c8b021cc89b29eb0265c78e316eed02 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -44,7 +44,17 @@ pub enum PrepareError { /// The response from the worker is received, but the file cannot be renamed (moved) to the /// final destination location. This state is reported by the validation host (not by the /// worker). - RenameTmpFileErr(String), + RenameTmpFileErr { + err: String, + // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible + // conversion to `Option<String>`. + src: Option<String>, + dest: Option<String>, + }, + /// The response from the worker is received, but the worker cache could not be cleared. The + /// worker has to be killed to avoid jobs having access to data from other jobs. This state is + /// reported by the validation host (not by the worker). + ClearWorkerDir(String), } impl PrepareError { @@ -58,7 +68,11 @@ impl PrepareError { use PrepareError::*; match self { Prevalidation(_) | Preparation(_) | Panic(_) => true, - TimedOut | IoErr(_) | CreateTmpFileErr(_) | RenameTmpFileErr(_) => false, + TimedOut | + IoErr(_) | + CreateTmpFileErr(_) | + RenameTmpFileErr { .. } | + ClearWorkerDir(_) => false, // Can occur due to issues with the PVF, but also due to local errors. RuntimeConstruction(_) => false, } @@ -76,7 +90,9 @@ impl fmt::Display for PrepareError { TimedOut => write!(f, "prepare: timeout"), IoErr(err) => write!(f, "prepare: io error while receiving response: {}", err), CreateTmpFileErr(err) => write!(f, "prepare: error creating tmp file: {}", err), - RenameTmpFileErr(err) => write!(f, "prepare: error renaming tmp file: {}", err), + RenameTmpFileErr { err, src, dest } => + write!(f, "prepare: error renaming tmp file ({:?} -> {:?}): {}", src, dest, err), + ClearWorkerDir(err) => write!(f, "prepare: error clearing worker cache: {}", err), } } } @@ -89,8 +105,17 @@ impl fmt::Display for PrepareError { pub enum InternalValidationError { /// Some communication error occurred with the host. HostCommunication(String), + /// Host could not create a hard link to the artifact path. + CouldNotCreateLink(String), /// Could not find or open compiled artifact file. 
CouldNotOpenFile(String), + /// Host could not clear the worker cache after a job. + CouldNotClearWorkerDir { + err: String, + // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible + // conversion to `Option<String>`. + path: Option<String>, + }, /// An error occurred in the CPU time monitor thread. Should be totally unrelated to /// validation. CpuTimeMonitorThread(String), @@ -104,8 +129,18 @@ impl fmt::Display for InternalValidationError { match self { HostCommunication(err) => write!(f, "validation: some communication error occurred with the host: {}", err), + CouldNotCreateLink(err) => write!( + f, + "validation: host could not create a hard link to the artifact path: {}", + err + ), CouldNotOpenFile(err) => write!(f, "validation: could not find or open compiled artifact file: {}", err), + CouldNotClearWorkerDir { err, path } => write!( + f, + "validation: host could not clear the worker cache ({:?}) after a job: {}", + path, err + ), CpuTimeMonitorThread(err) => write!(f, "validation: an error occurred in the CPU time monitor thread: {}", err), NonDeterministicPrepareError(err) => write!(f, "validation: prepare: {}", err), diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index 399b847791a91e616c6ae6589dae9ce5fef2cdf9..b89ab089af1c02eba401517e8248c292dd7040f8 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -29,7 +29,7 @@ pub struct Handshake { } /// The response from an execution job on the worker. -#[derive(Encode, Decode)] +#[derive(Debug, Encode, Decode)] pub enum Response { /// The job completed successfully. Ok { diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index c358ad6e134de50367bb2941b0b12d91e8dcb21f..53c287ea970907cb8884ec47c74bd4e03e38d7e7 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -22,6 +22,7 @@ pub mod executor_intf; pub mod prepare; pub mod pvf; pub mod worker; +pub mod worker_dir; pub use cpu_time::ProcessTime; @@ -30,8 +31,11 @@ pub use sp_tracing; const LOG_TARGET: &str = "parachain::pvf-common"; -use std::mem; -use tokio::io::{self, AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _}; +use std::{ + io::{Read, Write}, + mem, +}; +use tokio::io; #[cfg(feature = "test-utils")] pub mod tests { @@ -41,20 +45,31 @@ pub mod tests { pub const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); } -/// Write some data prefixed by its length into `w`. -pub async fn framed_send(w: &mut (impl AsyncWrite + Unpin), buf: &[u8]) -> io::Result<()> { +/// Status of security features on the current system. +#[derive(Debug, Clone, Default)] +pub struct SecurityStatus { + /// Whether the landlock features we use are fully available on this system. + pub can_enable_landlock: bool, + /// Whether we are able to unshare the user namespace and change the filesystem root. + pub can_unshare_user_namespace_and_change_root: bool, +} + +/// Write some data prefixed by its length into `w`. Sync version of `framed_send` to avoid +/// dependency on tokio. +pub fn framed_send_blocking(w: &mut (impl Write + Unpin), buf: &[u8]) -> io::Result<()> { let len_buf = buf.len().to_le_bytes(); - w.write_all(&len_buf).await?; - w.write_all(buf).await?; + w.write_all(&len_buf)?; + w.write_all(buf)?; Ok(()) } -/// Read some data prefixed by its length from `r`.
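`framed_send_blocking` and `framed_recv_blocking` define the host↔worker wire format: a little-endian `usize` length prefix followed by the payload. A minimal round-trip sketch, assuming the two functions above are in scope (the worker talks over a `UnixStream`, but any `Read`/`Write` pair works):

```rust
use std::io::Cursor;

fn framing_round_trip() -> std::io::Result<()> {
    // Any `Write` sink can stand in for the worker's socket.
    let mut wire: Vec<u8> = Vec::new();
    framed_send_blocking(&mut wire, b"hello worker")?;

    // The first bytes are the payload length as a little-endian usize.
    assert_eq!(&wire[..std::mem::size_of::<usize>()], &12usize.to_le_bytes());

    // Reading the frame back yields the original payload.
    let msg = framed_recv_blocking(&mut Cursor::new(wire))?;
    assert_eq!(msg, b"hello worker");
    Ok(())
}
```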
-pub async fn framed_recv(r: &mut (impl AsyncRead + Unpin)) -> io::Result<Vec<u8>> { +/// Read some data prefixed by its length from `r`. Sync version of `framed_recv` to avoid +/// dependency on tokio. +pub fn framed_recv_blocking(r: &mut (impl Read + Unpin)) -> io::Result<Vec<u8>> { let mut len_buf = [0u8; mem::size_of::<usize>()]; - r.read_exact(&mut len_buf).await?; + r.read_exact(&mut len_buf)?; let len = usize::from_le_bytes(len_buf); let mut buf = vec![0; len]; - r.read_exact(&mut buf).await?; + r.read_exact(&mut buf)?; Ok(buf) } diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index bcdf882f300c09c14c54e6f5b3cbc131f600c3a4..59973f6cbbc64fae1562f3d93dc3c5ea2875df22 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -18,16 +18,18 @@ pub mod security; -use crate::LOG_TARGET; +use crate::{worker_dir, SecurityStatus, LOG_TARGET}; use cpu_time::ProcessTime; use futures::never::Never; use std::{ any::Any, + fmt, + os::unix::net::UnixStream, path::PathBuf, sync::mpsc::{Receiver, RecvTimeoutError}, time::Duration, }; -use tokio::{io, net::UnixStream, runtime::Runtime}; +use tokio::{io, runtime::Runtime}; /// Use this macro to declare a `fn main() {}` that will create an executable that can be used for /// spawning the desired worker. @@ -41,10 +43,15 @@ macro_rules! decl_worker_main { } fn main() { + #[cfg(target_os = "linux")] + use $crate::worker::security; + // TODO: Remove this dependency, and `pub use sp_tracing` in `lib.rs`. // See <https://github.com/paritytech/polkadot/issues/7117>. $crate::sp_tracing::try_init_simple(); + let worker_pid = std::process::id(); + let args = std::env::args().collect::<Vec<_>>(); if args.len() == 1 { print_help($expected_command); @@ -60,10 +67,43 @@ macro_rules! decl_worker_main { println!("{}", $worker_version); return }, + + "--check-can-enable-landlock" => { + #[cfg(target_os = "linux")] + let status = if security::landlock::check_is_fully_enabled() { 0 } else { -1 }; + #[cfg(not(target_os = "linux"))] + let status = -1; + std::process::exit(status) + }, + "--check-can-unshare-user-namespace-and-change-root" => { + #[cfg(target_os = "linux")] + let status = if let Err(err) = security::unshare_user_namespace_and_change_root( + $crate::worker::WorkerKind::CheckPivotRoot, + worker_pid, + // We're not accessing any files, so we can try to pivot_root in the temp + // dir without conflicts with other processes. + &std::env::temp_dir(), + ) { + // Write the error to stderr, log it on the host-side. + eprintln!("{}", err); + -1 + } else { + 0 + }; + #[cfg(not(target_os = "linux"))] + let status = { + // Write the error to stderr, log it on the host-side. + eprintln!("not available on macos"); + -1 + }; + std::process::exit(status) + }, + "test-sleep" => { std::thread::sleep(std::time::Duration::from_secs(5)); return }, + subcommand => { // Must be passed for compatibility with the single-binary test workers. if subcommand != $expected_command { @@ -75,18 +115,39 @@ macro_rules! 
decl_worker_main { }, } + let mut worker_dir_path = None; let mut node_version = None; - let mut socket_path: &str = ""; + let mut can_enable_landlock = false; + let mut can_unshare_user_namespace_and_change_root = false; - for i in (2..args.len()).step_by(2) { + let mut i = 2; + while i < args.len() { match args[i].as_ref() { - "--socket-path" => socket_path = args[i + 1].as_str(), - "--node-impl-version" => node_version = Some(args[i + 1].as_str()), + "--worker-dir-path" => { + worker_dir_path = Some(args[i + 1].as_str()); + i += 1 + }, + "--node-impl-version" => { + node_version = Some(args[i + 1].as_str()); + i += 1 + }, + "--can-enable-landlock" => can_enable_landlock = true, + "--can-unshare-user-namespace-and-change-root" => + can_unshare_user_namespace_and_change_root = true, arg => panic!("Unexpected argument found: {}", arg), } + i += 1; } + let worker_dir_path = + worker_dir_path.expect("the --worker-dir-path argument is required"); + + let worker_dir_path = std::path::Path::new(worker_dir_path).to_owned(); + let security_status = $crate::SecurityStatus { + can_enable_landlock, + can_unshare_user_namespace_and_change_root, + }; - $entrypoint(&socket_path, node_version, Some($worker_version)); + $entrypoint(worker_dir_path, node_version, Some($worker_version), security_status); } }; } @@ -95,61 +156,181 @@ macro_rules! decl_worker_main { /// child process. pub const JOB_TIMEOUT_OVERHEAD: Duration = Duration::from_millis(50); -/// Interprets the given bytes as a path. Returns `None` if the given bytes do not constitute a -/// a proper utf-8 string. -pub fn bytes_to_path(bytes: &[u8]) -> Option<PathBuf> { - std::str::from_utf8(bytes).ok().map(PathBuf::from) +#[derive(Debug, Clone, Copy)] +pub enum WorkerKind { + Prepare, + Execute, + CheckPivotRoot, +} + +impl fmt::Display for WorkerKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Prepare => write!(f, "prepare"), + Self::Execute => write!(f, "execute"), + Self::CheckPivotRoot => write!(f, "check pivot root"), + } + } } // The worker version must be passed in so that we accurately get the version of the worker, and not // the version that this crate was compiled with. pub fn worker_event_loop<F, Fut>( - debug_id: &'static str, - socket_path: &str, + worker_kind: WorkerKind, + #[cfg_attr(not(target_os = "linux"), allow(unused_mut))] mut worker_dir_path: PathBuf, node_version: Option<&str>, worker_version: Option<&str>, + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] security_status: &SecurityStatus, mut event_loop: F, ) where - F: FnMut(UnixStream) -> Fut, + F: FnMut(UnixStream, PathBuf) -> Fut, Fut: futures::Future<Output = io::Result<Never>>, { let worker_pid = std::process::id(); - gum::debug!(target: LOG_TARGET, %worker_pid, "starting pvf worker ({})", debug_id); + gum::debug!( + target: LOG_TARGET, + %worker_pid, + ?worker_dir_path, + ?security_status, + "starting pvf worker ({})", + worker_kind + ); // Check for a mismatch between the node and worker versions. 
if let (Some(node_version), Some(worker_version)) = (node_version, worker_version) { if node_version != worker_version { gum::error!( target: LOG_TARGET, + %worker_kind, %worker_pid, %node_version, %worker_version, "Node and worker version mismatch, node needs restarting, forcing shutdown", ); kill_parent_node_in_emergency(); - let err = io::Error::new(io::ErrorKind::Unsupported, "Version mismatch"); - worker_shutdown_message(debug_id, worker_pid, err); + worker_shutdown_message(worker_kind, worker_pid, "Version mismatch"); return } } - remove_env_vars(debug_id); + // Make sure that we can read the worker dir path, and log its contents. + let entries = || -> Result<Vec<_>, io::Error> { + std::fs::read_dir(&worker_dir_path)? + .map(|res| res.map(|e| e.file_name())) + .collect() + }(); + match entries { + Ok(entries) => + gum::trace!(target: LOG_TARGET, %worker_pid, ?worker_dir_path, "content of worker dir: {:?}", entries), + Err(err) => { + gum::error!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "Could not read worker dir: {}", + err.to_string() + ); + worker_shutdown_message(worker_kind, worker_pid, &err.to_string()); + return + }, + } + + // Connect to the socket. + let socket_path = worker_dir::socket(&worker_dir_path); + let stream = || -> std::io::Result<UnixStream> { + let stream = UnixStream::connect(&socket_path)?; + // Remove the socket here. We don't also need to do this on the host-side; on failed + // rendezvous, the host will delete the whole worker dir. + std::fs::remove_file(&socket_path)?; + Ok(stream) + }(); + let stream = match stream { + Ok(s) => s, + Err(err) => { + gum::error!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + "{}", + err + ); + worker_shutdown_message(worker_kind, worker_pid, &err.to_string()); + return + }, + }; + + // Enable some security features. + { + // Call based on whether we can change root. Error out if it should work but fails. + // + // NOTE: This should not be called in a multi-threaded context (i.e. inside the tokio + // runtime). `unshare(2)`: + // + // > CLONE_NEWUSER requires that the calling process is not threaded. + #[cfg(target_os = "linux")] + if security_status.can_unshare_user_namespace_and_change_root { + if let Err(err) = security::unshare_user_namespace_and_change_root( + worker_kind, + worker_pid, + &worker_dir_path, + ) { + // The filesystem may be in an inconsistent state, bail out. + gum::error!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "Could not change root to be the worker cache path: {}", + err + ); + worker_shutdown_message(worker_kind, worker_pid, &err); + return + } + worker_dir_path = std::path::Path::new("/").to_owned(); + } + + #[cfg(target_os = "linux")] + if security_status.can_enable_landlock { + let landlock_status = + security::landlock::enable_for_worker(worker_kind, worker_pid, &worker_dir_path); + if !matches!(landlock_status, Ok(landlock::RulesetStatus::FullyEnforced)) { + // We previously were able to enable, so this should never happen. + // + // TODO: Make this a real error in secure-mode. See: + // <https://github.com/paritytech/polkadot-sdk/issues/1444> + gum::error!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + "could not fully enable landlock: {:?}. 
This should not happen, please report to the Polkadot devs", + landlock_status + ); + } + } + + if !security::check_env_vars_were_cleared(worker_kind, worker_pid) { + let err = "not all env vars were cleared when spawning the process"; + gum::error!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + "{}", + err + ); + worker_shutdown_message(worker_kind, worker_pid, err); + return + } + } // Run the main worker loop. let rt = Runtime::new().expect("Creates tokio runtime. If this panics the worker will die and the host will detect that and deal with it."); let err = rt - .block_on(async move { - let stream = UnixStream::connect(socket_path).await?; - let _ = tokio::fs::remove_file(socket_path).await; - - let result = event_loop(stream).await; - - result - }) + .block_on(event_loop(stream, worker_dir_path)) // It's never `Ok` because it's `Ok(Never)`. .unwrap_err(); - worker_shutdown_message(debug_id, worker_pid, err); + worker_shutdown_message(worker_kind, worker_pid, &err.to_string()); // We don't want tokio to wait for the tasks to finish. We want to bring down the worker as fast // as possible and not wait for stalled validation to finish. This isn't strictly necessary now, @@ -157,51 +338,9 @@ pub fn worker_event_loop<F, Fut>( rt.shutdown_background(); } -/// Delete all env vars to prevent malicious code from accessing them. -fn remove_env_vars(debug_id: &'static str) { - for (key, value) in std::env::vars_os() { - // TODO: *theoretically* the value (or mere presence) of `RUST_LOG` can be a source of - // randomness for malicious code. In the future we can remove it also and log in the host; - // see <https://github.com/paritytech/polkadot/issues/7117>. - if key == "RUST_LOG" { - continue - } - - // In case of a key or value that would cause [`env::remove_var` to - // panic](https://doc.rust-lang.org/std/env/fn.remove_var.html#panics), we first log a - // warning and then proceed to attempt to remove the env var. - let mut err_reasons = vec![]; - let (key_str, value_str) = (key.to_str(), value.to_str()); - if key.is_empty() { - err_reasons.push("key is empty"); - } - if key_str.is_some_and(|s| s.contains('=')) { - err_reasons.push("key contains '='"); - } - if key_str.is_some_and(|s| s.contains('\0')) { - err_reasons.push("key contains null character"); - } - if value_str.is_some_and(|s| s.contains('\0')) { - err_reasons.push("value contains null character"); - } - if !err_reasons.is_empty() { - gum::warn!( - target: LOG_TARGET, - %debug_id, - ?key, - ?value, - "Attempting to remove badly-formatted env var, this may cause the PVF worker to crash. Please remove it yourself. Reasons: {:?}", - err_reasons - ); - } - - std::env::remove_var(key); - } -} - /// Provide a consistent message on worker shutdown. -fn worker_shutdown_message(debug_id: &'static str, worker_pid: u32, err: io::Error) { - gum::debug!(target: LOG_TARGET, %worker_pid, "quitting pvf worker ({}): {:?}", debug_id, err); +fn worker_shutdown_message(worker_kind: WorkerKind, worker_pid: u32, err: &str) { + gum::debug!(target: LOG_TARGET, %worker_pid, "quitting pvf worker ({}): {}", worker_kind, err); } /// Loop that runs in the CPU time monitor thread on prepare and execute jobs. Continuously wakes up @@ -305,7 +444,7 @@ pub mod thread { Arc::new((Mutex::new(WaitOutcome::Pending), Condvar::new())) } - /// Runs a worker thread. Will first enable security features, and afterwards notify the threads + /// Runs a worker thread. Will run the requested function, and afterwards notify the threads /// waiting on the condvar. 
Catches panics during execution and resumes the panics after /// triggering the condvar, so that the waiting thread is notified on panics. /// diff --git a/polkadot/node/core/pvf/common/src/worker/security.rs b/polkadot/node/core/pvf/common/src/worker/security.rs index 6c5f96e0b5dbb3289dc237c91cc003e3b2aa37c5..b7abf028f94108c8d36c257f140936cabc0e237f 100644 --- a/polkadot/node/core/pvf/common/src/worker/security.rs +++ b/polkadot/node/core/pvf/common/src/worker/security.rs @@ -17,30 +17,189 @@ //! Functionality for securing workers. //! //! This is needed because workers are used to compile and execute untrusted code (PVFs). +//! +//! We currently employ the following security measures: +//! +//! - Restrict filesystem +//! - Use Landlock to remove all unnecessary FS access rights. +//! - Unshare the user and mount namespaces. +//! - Change the root directory to a worker-specific temporary directory. +//! - Remove env vars + +use crate::{worker::WorkerKind, LOG_TARGET}; + +/// Unshare the user namespace and change root to be the artifact directory. +/// +/// NOTE: This should not be called in a multi-threaded context. `unshare(2)`: +/// "CLONE_NEWUSER requires that the calling process is not threaded." +#[cfg(target_os = "linux")] +pub fn unshare_user_namespace_and_change_root( + worker_kind: WorkerKind, + worker_pid: u32, + worker_dir_path: &std::path::Path, +) -> Result<(), String> { + use std::{env, ffi::CString, os::unix::ffi::OsStrExt, path::Path, ptr}; + + // The following was copied from the `cstr_core` crate. + // + // TODO: Remove this once this is stable: https://github.com/rust-lang/rust/issues/105723 + #[inline] + #[doc(hidden)] + const fn cstr_is_valid(bytes: &[u8]) -> bool { + if bytes.is_empty() || bytes[bytes.len() - 1] != 0 { + return false + } + + let mut index = 0; + while index < bytes.len() - 1 { + if bytes[index] == 0 { + return false + } + index += 1; + } + true + } -/// To what degree landlock is enabled. It's a separate struct from `RulesetStatus` because that is -/// only available on Linux, plus this has a nicer name. -pub enum LandlockStatus { - FullyEnforced, - PartiallyEnforced, - NotEnforced, - /// Thread panicked, we don't know what the status is. - Unavailable, + macro_rules! cstr { + ($e:expr) => {{ + const STR: &[u8] = concat!($e, "\0").as_bytes(); + const STR_VALID: bool = cstr_is_valid(STR); + let _ = [(); 0 - (!(STR_VALID) as usize)]; + #[allow(unused_unsafe)] + unsafe { + core::ffi::CStr::from_bytes_with_nul_unchecked(STR) + } + }} + } + + gum::debug!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "unsharing the user namespace and calling pivot_root", + ); + + let worker_dir_path_c = CString::new(worker_dir_path.as_os_str().as_bytes()) + .expect("on unix; the path will never contain 0 bytes; qed"); + + // Wrapper around all the work to prevent repetitive error handling. + // + // # Errors + // + // It's the caller's responsibility to call `Error::last_os_error`. Note that that alone does + // not give the context of which call failed, so we return a &str error. + || -> Result<(), &'static str> { + // SAFETY: We pass null-terminated C strings and use the APIs as documented. In fact, steps + // (2) and (3) are adapted from the example in pivot_root(2), with the additional + // change described in the `pivot_root(".", ".")` section. + unsafe { + // 1. `unshare` the user and the mount namespaces. + if libc::unshare(libc::CLONE_NEWUSER | libc::CLONE_NEWNS) < 0 { + return Err("unshare user and mount namespaces") + } + + // 2. 
Set up mounts. + // + // Ensure that the new root and its parent mount don't have shared propagation (which would + // cause pivot_root() to return an error), and prevent propagation of mount events to + // the initial mount namespace. + if libc::mount( + ptr::null(), + cstr!("/").as_ptr(), + ptr::null(), + libc::MS_REC | libc::MS_PRIVATE, + ptr::null(), + ) < 0 + { + return Err("mount MS_PRIVATE") + } + // Ensure that the new root is a mount point. + let additional_flags = + if let WorkerKind::Execute | WorkerKind::CheckPivotRoot = worker_kind { + libc::MS_RDONLY + } else { + 0 + }; + if libc::mount( + worker_dir_path_c.as_ptr(), + worker_dir_path_c.as_ptr(), + ptr::null(), // ignored when MS_BIND is used + libc::MS_BIND | + libc::MS_REC | libc::MS_NOEXEC | + libc::MS_NODEV | libc::MS_NOSUID | + libc::MS_NOATIME | additional_flags, + ptr::null(), // ignored when MS_BIND is used + ) < 0 + { + return Err("mount MS_BIND") + } + + // 3. `pivot_root` to the artifact directory. + if libc::chdir(worker_dir_path_c.as_ptr()) < 0 { + return Err("chdir to worker dir path") + } + if libc::syscall(libc::SYS_pivot_root, cstr!(".").as_ptr(), cstr!(".").as_ptr()) < 0 { + return Err("pivot_root") + } + if libc::umount2(cstr!(".").as_ptr(), libc::MNT_DETACH) < 0 { + return Err("umount the old root mount point") + } + } + + Ok(()) + }() + .map_err(|err_ctx| { + let err = std::io::Error::last_os_error(); + format!("{}: {}", err_ctx, err) + })?; + + // Do some assertions. + if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { + return Err("expected current dir after pivot_root to be `/`".into()) + } + env::set_current_dir("..").map_err(|err| err.to_string())?; + if env::current_dir().map_err(|err| err.to_string())? != Path::new("/") { + return Err("expected not to be able to break out of new root by doing `..`".into()) + } + + Ok(()) } -impl LandlockStatus { - #[cfg(target_os = "linux")] - pub fn from_ruleset_status(ruleset_status: ::landlock::RulesetStatus) -> Self { - use ::landlock::RulesetStatus::*; - match ruleset_status { - FullyEnforced => LandlockStatus::FullyEnforced, - PartiallyEnforced => LandlockStatus::PartiallyEnforced, - NotEnforced => LandlockStatus::NotEnforced, +/// Require env vars to have been removed when spawning the process, to prevent malicious code from +/// accessing them. +pub fn check_env_vars_were_cleared(worker_kind: WorkerKind, worker_pid: u32) -> bool { + let mut ok = true; + + for (key, value) in std::env::vars_os() { + // TODO: *theoretically* the value (or mere presence) of `RUST_LOG` can be a source of + // randomness for malicious code. In the future we can remove it also and log in the host; + // see <https://github.com/paritytech/polkadot/issues/7117>. + if key == "RUST_LOG" { + continue + } + // An exception for macOS. This is not a secure platform anyway, so we let it slide. + #[cfg(target_os = "macos")] + if key == "__CF_USER_TEXT_ENCODING" { + continue } + + gum::error!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?key, + ?value, + "env var was present that should have been removed", + ); + + ok = false; + } + + ok } -/// The [landlock] docs say it best: +/// The [landlock] docs say it best: /// /// > "Landlock is a security feature available since Linux 5.13.
The goal is to enable to restrict /// ambient rights (e.g., global filesystem access) for a set of processes by creating safe security @@ -52,14 +211,21 @@ impl LandlockStatus { /// [landlock]: https://docs.rs/landlock/latest/landlock/index.html #[cfg(target_os = "linux")] pub mod landlock { - use landlock::{Access, AccessFs, Ruleset, RulesetAttr, RulesetError, RulesetStatus, ABI}; + pub use landlock::RulesetStatus; + + use crate::{worker::WorkerKind, LOG_TARGET}; + use landlock::*; + use std::{ + fmt, + path::{Path, PathBuf}, + }; /// Landlock ABI version. We use ABI V1 because: /// /// 1. It is supported by our reference kernel version. /// 2. Later versions do not (yet) provide additional security. /// - /// # Versions (June 2023) + /// # Versions (as of June 2023) /// /// - Polkadot reference kernel version: 5.16+ /// - ABI V1: 5.13 - introduces landlock, including full restrictions on file reads @@ -83,46 +249,103 @@ pub mod landlock { /// supports it or if it introduces some new feature that is beneficial to security. pub const LANDLOCK_ABI: ABI = ABI::V1; - // TODO: <https://github.com/landlock-lsm/rust-landlock/issues/36> - /// Returns to what degree landlock is enabled with the given ABI on the current Linux - /// environment. - pub fn get_status() -> Result<RulesetStatus, Box<dyn std::error::Error>> { - match std::thread::spawn(|| try_restrict_thread()).join() { - Ok(Ok(status)) => Ok(status), - Ok(Err(ruleset_err)) => Err(ruleset_err.into()), - Err(_err) => Err("a panic occurred in try_restrict_thread".into()), + #[derive(Debug)] + pub enum TryRestrictError { + InvalidExceptionPath(PathBuf), + RulesetError(RulesetError), + } + + impl From<RulesetError> for TryRestrictError { + fn from(err: RulesetError) -> Self { + Self::RulesetError(err) } } - /// Based on the given `status`, returns a single bool indicating whether the given landlock - /// ABI is fully enabled on the current Linux environment. - pub fn status_is_fully_enabled( - status: &Result<RulesetStatus, Box<dyn std::error::Error>>, - ) -> bool { - matches!(status, Ok(RulesetStatus::FullyEnforced)) + impl fmt::Display for TryRestrictError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::InvalidExceptionPath(path) => write!(f, "invalid exception path: {:?}", path), + Self::RulesetError(err) => write!(f, "ruleset error: {}", err.to_string()), + } + } } + impl std::error::Error for TryRestrictError {} + + /// Try to enable landlock for the given kind of worker. + pub fn enable_for_worker( + worker_kind: WorkerKind, + worker_pid: u32, + worker_dir_path: &Path, + ) -> Result<RulesetStatus, Box<dyn std::error::Error>> { + let exceptions: Vec<(PathBuf, BitFlags<AccessFs>)> = match worker_kind { + WorkerKind::Prepare => { + vec![(worker_dir_path.to_owned(), AccessFs::WriteFile.into())] + }, + WorkerKind::Execute => { + vec![(worker_dir_path.to_owned(), AccessFs::ReadFile.into())] + }, + WorkerKind::CheckPivotRoot => + panic!("this should only be passed for checking pivot_root; qed"), + }; + + gum::debug!( + target: LOG_TARGET, + %worker_kind, + %worker_pid, + ?worker_dir_path, + "enabling landlock with exceptions: {:?}", + exceptions, + ); + + Ok(try_restrict(exceptions)?) + } + + // TODO: <https://github.com/landlock-lsm/rust-landlock/issues/36> /// Runs a check for landlock and returns a single bool indicating whether the given landlock /// ABI is fully enabled on the current Linux environment. 
pub fn check_is_fully_enabled() -> bool { - status_is_fully_enabled(&get_status()) + let status_from_thread: Result<RulesetStatus, Box<dyn std::error::Error>> = + match std::thread::spawn(|| try_restrict(std::iter::empty::<(PathBuf, AccessFs)>())) + .join() + { + Ok(Ok(status)) => Ok(status), + Ok(Err(ruleset_err)) => Err(ruleset_err.into()), + Err(_err) => Err("a panic occurred in try_restrict".into()), + }; + + matches!(status_from_thread, Ok(RulesetStatus::FullyEnforced)) } - /// Tries to restrict the current thread with the following landlock access controls: + /// Tries to restrict the current thread (should only be called in a process' main thread) with + /// the following landlock access controls: /// - /// 1. all global filesystem access - /// 2. ... more may be supported in the future. + /// 1. all global filesystem access restricted, with optional exceptions + /// 2. ... more sandbox types (e.g. networking) may be supported in the future. /// /// If landlock is not supported in the current environment this is simply a noop. /// /// # Returns /// /// The status of the restriction (whether it was fully, partially, or not-at-all enforced). - pub fn try_restrict_thread() -> Result<RulesetStatus, RulesetError> { - let status = Ruleset::new() - .handle_access(AccessFs::from_all(LANDLOCK_ABI))? - .create()? - .restrict_self()?; + fn try_restrict<I, P, A>(fs_exceptions: I) -> Result<RulesetStatus, TryRestrictError> + where + I: IntoIterator<Item = (P, A)>, + P: AsRef<Path>, + A: Into<BitFlags<AccessFs>>, + { + let mut ruleset = + Ruleset::new().handle_access(AccessFs::from_all(LANDLOCK_ABI))?.create()?; + for (fs_path, access_bits) in fs_exceptions { + let paths = &[fs_path.as_ref().to_owned()]; + let mut rules = path_beneath_rules(paths, access_bits).peekable(); + if rules.peek().is_none() { + // `path_beneath_rules` silently ignores missing paths, so check for it manually. + return Err(TryRestrictError::InvalidExceptionPath(fs_path.as_ref().to_owned())) + } + ruleset = ruleset.add_rules(rules)?; + } + let status = ruleset.restrict_self()?; Ok(status.ruleset) } @@ -132,55 +355,114 @@ pub mod landlock { use std::{fs, io::ErrorKind, thread}; #[test] - fn restricted_thread_cannot_access_fs() { + fn restricted_thread_cannot_read_file() { // TODO: This would be nice: <https://github.com/rust-lang/rust/issues/68007>. if !check_is_fully_enabled() { return } // Restricted thread cannot read from FS. - let handle = thread::spawn(|| { - // Write to a tmp file, this should succeed before landlock is applied. - let text = "foo"; - let tmpfile = tempfile::NamedTempFile::new().unwrap(); - let path = tmpfile.path(); - fs::write(path, text).unwrap(); - let s = fs::read_to_string(path).unwrap(); - assert_eq!(s, text); - - let status = try_restrict_thread().unwrap(); - if !matches!(status, RulesetStatus::FullyEnforced) { - panic!("Ruleset should be enforced since we checked if landlock is enabled"); - } - - // Try to read from the tmp file after landlock. - let result = fs::read_to_string(path); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - }); + let handle = + thread::spawn(|| { + // Create, write, and read two tmp files. This should succeed before any + // landlock restrictions are applied. 
+ const TEXT: &str = "foo"; + let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); + let path1 = tmpfile1.path(); + let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); + let path2 = tmpfile2.path(); + + fs::write(path1, TEXT).unwrap(); + let s = fs::read_to_string(path1).unwrap(); + assert_eq!(s, TEXT); + fs::write(path2, TEXT).unwrap(); + let s = fs::read_to_string(path2).unwrap(); + assert_eq!(s, TEXT); + + // Apply Landlock with a read exception for only one of the files. + let status = try_restrict(vec![(path1, AccessFs::ReadFile)]); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); + } + + // Try to read from both files, only tmpfile1 should succeed. + let result = fs::read_to_string(path1); + assert!(matches!( + result, + Ok(s) if s == TEXT + )); + let result = fs::read_to_string(path2); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + + // Apply Landlock for all files. + let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); + } + + // Try to read from tmpfile1 after landlock, it should fail. + let result = fs::read_to_string(path1); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + }); assert!(handle.join().is_ok()); + } + + #[test] + fn restricted_thread_cannot_write_file() { + // TODO: This would be nice: <https://github.com/rust-lang/rust/issues/68007>. + if !check_is_fully_enabled() { + return + } // Restricted thread cannot write to FS. - let handle = thread::spawn(|| { - let text = "foo"; - let tmpfile = tempfile::NamedTempFile::new().unwrap(); - let path = tmpfile.path(); - - let status = try_restrict_thread().unwrap(); - if !matches!(status, RulesetStatus::FullyEnforced) { - panic!("Ruleset should be enforced since we checked if landlock is enabled"); - } - - // Try to write to the tmp file after landlock. - let result = fs::write(path, text); - assert!(matches!( - result, - Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) - )); - }); + let handle = + thread::spawn(|| { + // Create and write two tmp files. This should succeed before any landlock + // restrictions are applied. + const TEXT: &str = "foo"; + let tmpfile1 = tempfile::NamedTempFile::new().unwrap(); + let path1 = tmpfile1.path(); + let tmpfile2 = tempfile::NamedTempFile::new().unwrap(); + let path2 = tmpfile2.path(); + + fs::write(path1, TEXT).unwrap(); + fs::write(path2, TEXT).unwrap(); + + // Apply Landlock with a write exception for only one of the files. + let status = try_restrict(vec![(path1, AccessFs::WriteFile)]); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); + } + + // Try to write to both files, only tmpfile1 should succeed. + let result = fs::write(path1, TEXT); + assert!(matches!(result, Ok(_))); + let result = fs::write(path2, TEXT); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + + // Apply Landlock for all files. 
+ let status = try_restrict(std::iter::empty::<(PathBuf, AccessFs)>()); + if !matches!(status, Ok(RulesetStatus::FullyEnforced)) { + panic!("Ruleset should be enforced since we checked if landlock is enabled: {:?}", status); + } + + // Try to write to tmpfile1 after landlock, it should fail. + let result = fs::write(path1, TEXT); + assert!(matches!( + result, + Err(err) if matches!(err.kind(), ErrorKind::PermissionDenied) + )); + }); assert!(handle.join().is_ok()); } diff --git a/polkadot/node/core/pvf/common/src/worker_dir.rs b/polkadot/node/core/pvf/common/src/worker_dir.rs new file mode 100644 index 0000000000000000000000000000000000000000..c2610a4d112856abdfee000d15776dd0a617b400 --- /dev/null +++ b/polkadot/node/core/pvf/common/src/worker_dir.rs @@ -0,0 +1,35 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Shared functions for getting the known worker files. + +use std::path::{Path, PathBuf}; + +const WORKER_EXECUTE_ARTIFACT_NAME: &str = "artifact"; +const WORKER_PREPARE_TMP_ARTIFACT_NAME: &str = "tmp-artifact"; +const WORKER_SOCKET_NAME: &str = "socket"; + +pub fn execute_artifact(worker_dir_path: &Path) -> PathBuf { + worker_dir_path.join(WORKER_EXECUTE_ARTIFACT_NAME) +} + +pub fn prepare_tmp_artifact(worker_dir_path: &Path) -> PathBuf { + worker_dir_path.join(WORKER_PREPARE_TMP_ARTIFACT_NAME) +} + +pub fn socket(worker_dir_path: &Path) -> PathBuf { + worker_dir_path.join(WORKER_SOCKET_NAME) +} diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 36793a5c71eca169c09da8565006dfbc133518ef..9d7bfdf2866998613c14fa2122f3911acd4e4ca2 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -16,7 +16,7 @@ //! Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary. -pub use polkadot_node_core_pvf_common::executor_intf::Executor; +pub use polkadot_node_core_pvf_common::{executor_intf::Executor, worker_dir, SecurityStatus}; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-execute-worker=trace`. 
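A note on the framing used across these host/worker channels: `framed_recv_blocking` above reads a little-endian `usize` length prefix and then exactly that many bytes. The sending half, `framed_send_blocking`, is referenced by the workers below but its body is not part of this diff; the sketch here only illustrates the wire format, assuming the send side simply mirrors the receive side.

use std::io::{self, Write};

// Illustrative mirror of `framed_recv_blocking` (not the actual implementation):
// write `buf` to `w`, prefixed by its length as a little-endian `usize`.
fn framed_send_blocking(w: &mut impl Write, buf: &[u8]) -> io::Result<()> {
    let len_buf = buf.len().to_le_bytes();
    w.write_all(&len_buf)?;
    w.write_all(buf)?;
    Ok(())
}

A `usize` prefix ties the format to the pointer width of the build, which is acceptable here because the host and its workers always run on the same machine, and the host enforces a matching worker version.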
@@ -28,22 +28,21 @@ use polkadot_node_core_pvf_common::{ error::InternalValidationError, execute::{Handshake, Response}, executor_intf::NATIVE_STACK_MAX, - framed_recv, framed_send, + framed_recv_blocking, framed_send_blocking, worker::{ - bytes_to_path, cpu_time_monitor_loop, - security::LandlockStatus, - stringify_panic_payload, + cpu_time_monitor_loop, stringify_panic_payload, thread::{self, WaitOutcome}, - worker_event_loop, + worker_event_loop, WorkerKind, }, }; use polkadot_parachain_primitives::primitives::ValidationResult; use std::{ + os::unix::net::UnixStream, path::PathBuf, sync::{mpsc::channel, Arc}, time::Duration, }; -use tokio::{io, net::UnixStream}; +use tokio::io; // Wasmtime powers the Substrate Executor. It compiles the wasm bytecode into native code. // That native code does not create any stacks and just reuses the stack of the thread that @@ -81,8 +80,8 @@ use tokio::{io, net::UnixStream}; /// The stack size for the execute thread. pub const EXECUTE_THREAD_STACK_SIZE: usize = 2 * 1024 * 1024 + NATIVE_STACK_MAX as usize; -async fn recv_handshake(stream: &mut UnixStream) -> io::Result<Handshake> { - let handshake_enc = framed_recv(stream).await?; +fn recv_handshake(stream: &mut UnixStream) -> io::Result<Handshake> { + let handshake_enc = framed_recv_blocking(stream)?; let handshake = Handshake::decode(&mut &handshake_enc[..]).map_err(|_| { io::Error::new( io::ErrorKind::Other, @@ -92,57 +91,58 @@ async fn recv_handshake(stream: &mut UnixStream) -> io::Result<Handshake> { Ok(handshake) } -async fn recv_request(stream: &mut UnixStream) -> io::Result<(PathBuf, Vec<u8>, Duration)> { - let artifact_path = framed_recv(stream).await?; - let artifact_path = bytes_to_path(&artifact_path).ok_or_else(|| { - io::Error::new( - io::ErrorKind::Other, - "execute pvf recv_request: non utf-8 artifact path".to_string(), - ) - })?; - let params = framed_recv(stream).await?; - let execution_timeout = framed_recv(stream).await?; +fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec<u8>, Duration)> { + let params = framed_recv_blocking(stream)?; + let execution_timeout = framed_recv_blocking(stream)?; let execution_timeout = Duration::decode(&mut &execution_timeout[..]).map_err(|_| { io::Error::new( io::ErrorKind::Other, "execute pvf recv_request: failed to decode duration".to_string(), ) })?; - Ok((artifact_path, params, execution_timeout)) + Ok((params, execution_timeout)) } -async fn send_response(stream: &mut UnixStream, response: Response) -> io::Result<()> { - framed_send(stream, &response.encode()).await +fn send_response(stream: &mut UnixStream, response: Response) -> io::Result<()> { + framed_send_blocking(stream, &response.encode()) } /// The entrypoint that the spawned execute worker should start with. /// /// # Parameters /// -/// The `socket_path` specifies the path to the socket used to communicate with the host. The -/// `node_version`, if `Some`, is checked against the worker version. A mismatch results in -/// immediate worker termination. `None` is used for tests and in other situations when version -/// check is not necessary. +/// - `worker_dir_path`: specifies the path to the worker-specific temporary directory. +/// +/// - `node_version`: if `Some`, is checked against the `worker_version`. A mismatch results in +/// immediate worker termination. `None` is used for tests and in other situations when version +/// check is not necessary. +/// +/// - `worker_version`: see above +/// +/// - `security_status`: contains the detected status of security features. 
pub fn worker_entrypoint( - socket_path: &str, + worker_dir_path: PathBuf, node_version: Option<&str>, worker_version: Option<&str>, + security_status: SecurityStatus, ) { worker_event_loop( - "execute", - socket_path, + WorkerKind::Execute, + worker_dir_path, node_version, worker_version, - |mut stream| async move { + &security_status, + |mut stream, worker_dir_path| async move { let worker_pid = std::process::id(); + let artifact_path = worker_dir::execute_artifact(&worker_dir_path); - let handshake = recv_handshake(&mut stream).await?; - let executor = Executor::new(handshake.executor_params).map_err(|e| { + let Handshake { executor_params } = recv_handshake(&mut stream)?; + let executor = Executor::new(executor_params).map_err(|e| { io::Error::new(io::ErrorKind::Other, format!("cannot create executor: {}", e)) })?; loop { - let (artifact_path, params, execution_timeout) = recv_request(&mut stream).await?; + let (params, execution_timeout) = recv_request(&mut stream)?; gum::debug!( target: LOG_TARGET, %worker_pid, @@ -151,15 +151,13 @@ pub fn worker_entrypoint( ); // Get the artifact bytes. - // - // We do this outside the thread so that we can lock down filesystem access there. - let compiled_artifact_blob = match std::fs::read(artifact_path) { + let compiled_artifact_blob = match std::fs::read(&artifact_path) { Ok(bytes) => bytes, Err(err) => { let response = Response::InternalError( InternalValidationError::CouldNotOpenFile(err.to_string()), ); - send_response(&mut stream, response).await?; + send_response(&mut stream, response)?; continue }, }; @@ -187,22 +185,11 @@ pub fn worker_entrypoint( let execute_thread = thread::spawn_worker_thread_with_stack_size( "execute thread", move || { - // Try to enable landlock. - #[cfg(target_os = "linux")] - let landlock_status = polkadot_node_core_pvf_common::worker::security::landlock::try_restrict_thread() - .map(LandlockStatus::from_ruleset_status) - .map_err(|e| e.to_string()); - #[cfg(not(target_os = "linux"))] - let landlock_status: Result<LandlockStatus, String> = Ok(LandlockStatus::NotEnforced); - - ( - validate_using_artifact( - &compiled_artifact_blob, - ¶ms, - executor_2, - cpu_time_start, - ), - landlock_status, + validate_using_artifact( + &compiled_artifact_blob, + ¶ms, + executor_2, + cpu_time_start, ) }, Arc::clone(&condvar), @@ -215,24 +202,9 @@ pub fn worker_entrypoint( let response = match outcome { WaitOutcome::Finished => { let _ = cpu_time_monitor_tx.send(()); - let (result, landlock_status) = execute_thread.join().unwrap_or_else(|e| { - ( - Response::Panic(stringify_panic_payload(e)), - Ok(LandlockStatus::Unavailable), - ) - }); - - // Log if landlock threw an error. - if let Err(err) = landlock_status { - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "error enabling landlock: {}", - err - ); - } - - result + execute_thread + .join() + .unwrap_or_else(|e| Response::Panic(stringify_panic_payload(e))) }, // If the CPU thread is not selected, we signal it to end, the join handle is // dropped and the thread will finish in the background. 
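The entrypoint above races the execute thread against the CPU-time monitor thread, with both sides reporting through a shared condvar. A minimal, self-contained sketch of that wake-up pattern, with stand-in names (this `Outcome` enum is illustrative, not the crate's `WaitOutcome` machinery):

use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

#[derive(Clone, Copy, PartialEq, Debug)]
enum Outcome {
    Pending,
    JobFinished,
    TimedOut,
}

// Spawn a thread that "works" for `dur`, then tries to claim the outcome.
// Only the first finisher gets to set it; later finishers see a non-Pending
// state and back off, mirroring the job/monitor race in the worker.
fn spawn_racer(pair: &Arc<(Mutex<Outcome>, Condvar)>, outcome: Outcome, dur: Duration) {
    let pair = Arc::clone(pair);
    thread::spawn(move || {
        thread::sleep(dur);
        let (lock, cvar) = &*pair;
        let mut state = lock.lock().unwrap();
        if *state == Outcome::Pending {
            *state = outcome;
            cvar.notify_all();
        }
    });
}

fn main() {
    let pair = Arc::new((Mutex::new(Outcome::Pending), Condvar::new()));
    spawn_racer(&pair, Outcome::JobFinished, Duration::from_millis(50)); // the "job"
    spawn_racer(&pair, Outcome::TimedOut, Duration::from_millis(200)); // the "monitor"

    let (lock, cvar) = &*pair;
    let state = cvar
        .wait_while(lock.lock().unwrap(), |s| *s == Outcome::Pending)
        .unwrap();
    println!("first outcome: {:?}", *state); // prints JobFinished
}

The first thread to flip the state away from `Pending` wins; a later finisher sees the non-`Pending` state and backs off, which is why the worker can simply signal the CPU monitor to end once the job finishes.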
@@ -267,7 +239,13 @@ pub fn worker_entrypoint( ), }; - send_response(&mut stream, response).await?; + gum::trace!( + target: LOG_TARGET, + %worker_pid, + "worker: sending response to host: {:?}", + response + ); + send_response(&mut stream, response)?; } }, ); diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index e7a12cd9a809c5ef2fafa32df9b3cf4588dc1b52..886209b78c329f062e674a96992509d0390a4220 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -7,6 +7,7 @@ edition.workspace = true license.workspace = true [dependencies] +cfg-if = "1.0" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index caa7d33df12adfc67440caf3a9d68a65257edf99..a24f5024722bb7ea239999093f4e317da58fe7f6 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -33,25 +33,24 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult}, executor_intf::Executor, - framed_recv, framed_send, + framed_recv_blocking, framed_send_blocking, prepare::{MemoryStats, PrepareJobKind, PrepareStats}, pvf::PvfPrepData, worker::{ - bytes_to_path, cpu_time_monitor_loop, - security::LandlockStatus, - stringify_panic_payload, + cpu_time_monitor_loop, stringify_panic_payload, thread::{self, WaitOutcome}, - worker_event_loop, + worker_event_loop, WorkerKind, }, - ProcessTime, + worker_dir, ProcessTime, SecurityStatus, }; use polkadot_primitives::ExecutorParams; use std::{ + os::unix::net::UnixStream, path::PathBuf, sync::{mpsc::channel, Arc}, time::Duration, }; -use tokio::{io, net::UnixStream}; +use tokio::io; /// Contains the bytes for a successfully compiled artifact. pub struct CompiledArtifact(Vec<u8>); @@ -69,36 +68,34 @@ impl AsRef<[u8]> for CompiledArtifact { } } -async fn recv_request(stream: &mut UnixStream) -> io::Result<(PvfPrepData, PathBuf)> { - let pvf = framed_recv(stream).await?; +fn recv_request(stream: &mut UnixStream) -> io::Result<PvfPrepData> { + let pvf = framed_recv_blocking(stream)?; let pvf = PvfPrepData::decode(&mut &pvf[..]).map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("prepare pvf recv_request: failed to decode PvfPrepData: {}", e), ) })?; - let tmp_file = framed_recv(stream).await?; - let tmp_file = bytes_to_path(&tmp_file).ok_or_else(|| { - io::Error::new( - io::ErrorKind::Other, - "prepare pvf recv_request: non utf-8 artifact path".to_string(), - ) - })?; - Ok((pvf, tmp_file)) + Ok(pvf) } -async fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<()> { - framed_send(stream, &result.encode()).await +fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<()> { + framed_send_blocking(stream, &result.encode()) } /// The entrypoint that the spawned prepare worker should start with. /// /// # Parameters /// -/// The `socket_path` specifies the path to the socket used to communicate with the host. The -/// `node_version`, if `Some`, is checked against the worker version. A mismatch results in -/// immediate worker termination. `None` is used for tests and in other situations when version -/// check is not necessary. +/// - `worker_dir_path`: specifies the path to the worker-specific temporary directory. 
+/// +/// - `node_version`: if `Some`, is checked against the `worker_version`. A mismatch results in +/// immediate worker termination. `None` is used for tests and in other situations when version +/// check is not necessary. +/// +/// - `worker_version`: see above +/// +/// - `security_status`: contains the detected status of security features. /// /// # Flow /// @@ -119,20 +116,23 @@ async fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Re /// 7. Send the result of preparation back to the host. If any error occurred in the above steps, we /// send that in the `PrepareResult`. pub fn worker_entrypoint( - socket_path: &str, + worker_dir_path: PathBuf, node_version: Option<&str>, worker_version: Option<&str>, + security_status: SecurityStatus, ) { worker_event_loop( - "prepare", - socket_path, + WorkerKind::Prepare, + worker_dir_path, node_version, worker_version, - |mut stream| async move { + &security_status, + |mut stream, worker_dir_path| async move { let worker_pid = std::process::id(); + let temp_artifact_dest = worker_dir::prepare_tmp_artifact(&worker_dir_path); loop { - let (pvf, temp_artifact_dest) = recv_request(&mut stream).await?; + let pvf = recv_request(&mut stream)?; gum::debug!( target: LOG_TARGET, %worker_pid, @@ -172,14 +172,6 @@ pub fn worker_entrypoint( let prepare_thread = thread::spawn_worker_thread( "prepare thread", move || { - // Try to enable landlock. - #[cfg(target_os = "linux")] - let landlock_status = polkadot_node_core_pvf_common::worker::security::landlock::try_restrict_thread() - .map(LandlockStatus::from_ruleset_status) - .map_err(|e| e.to_string()); - #[cfg(not(target_os = "linux"))] - let landlock_status: Result<LandlockStatus, String> = Ok(LandlockStatus::NotEnforced); - #[allow(unused_mut)] let mut result = prepare_artifact(pvf, cpu_time_start); @@ -200,7 +192,7 @@ pub fn worker_entrypoint( }); } - (result, landlock_status) + result }, Arc::clone(&condvar), WaitOutcome::Finished, @@ -213,20 +205,20 @@ pub fn worker_entrypoint( let _ = cpu_time_monitor_tx.send(()); match prepare_thread.join().unwrap_or_else(|err| { - ( - Err(PrepareError::Panic(stringify_panic_payload(err))), - Ok(LandlockStatus::Unavailable), - ) + Err(PrepareError::Panic(stringify_panic_payload(err))) }) { - (Err(err), _) => { + Err(err) => { // Serialized error will be written into the socket. Err(err) }, - (Ok(ok), landlock_status) => { - #[cfg(not(target_os = "linux"))] - let (artifact, cpu_time_elapsed) = ok; - #[cfg(target_os = "linux")] - let (artifact, cpu_time_elapsed, max_rss) = ok; + Ok(ok) => { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + let (artifact, cpu_time_elapsed, max_rss) = ok; + } else { + let (artifact, cpu_time_elapsed) = ok; + } + } // Stop the memory stats worker and get its observed memory stats. #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] @@ -242,16 +234,6 @@ pub fn worker_entrypoint( max_rss: extract_max_rss_stat(max_rss, worker_pid), }; - // Log if landlock threw an error. - if let Err(err) = landlock_status { - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "error enabling landlock: {}", - err - ); - } - // Write the serialized artifact into a temp file. 
// // PVF host only keeps artifacts statuses in its memory, @@ -300,7 +282,13 @@ pub fn worker_entrypoint( ), }; - send_response(&mut stream, result).await?; + gum::trace!( + target: LOG_TARGET, + %worker_pid, + "worker: sending response to host: {:?}", + result + ); + send_response(&mut stream, result)?; } }, ); diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index dc5921df96884dd8ce0bbb10b8de3c6eca90b1f6..5a1767af75b778394b31d03f226e016006f3d042 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -172,9 +172,10 @@ impl Artifacts { /// /// The recognized artifacts will be filled in the table and unrecognized will be removed. pub async fn new(cache_path: &Path) -> Self { - // Make sure that the cache path directory and all its parents are created. - // First delete the entire cache. Nodes are long-running so this should populate shortly. + // First delete the entire cache. This includes artifacts and any leftover worker dirs (see + // [`WorkerDir`]). Nodes are long-running so this should populate shortly. let _ = tokio::fs::remove_dir_all(cache_path).await; + // Make sure that the cache path directory and all its parents are created. let _ = tokio::fs::create_dir_all(cache_path).await; Self { artifacts: HashMap::new() } @@ -295,7 +296,7 @@ mod tests { #[tokio::test] async fn artifacts_removes_cache_on_startup() { - let fake_cache_path = crate::worker_intf::tmpfile("test-cache").await.unwrap(); + let fake_cache_path = crate::worker_intf::tmppath("test-cache").await.unwrap(); let fake_artifact_path = { let mut p = fake_cache_path.clone(); p.push("wasmtime_0x1234567890123456789012345678901234567890123456789012345678901234"); diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index acb260e256931686c15dadb64341e2155b56ee2e..aca604f0de21dd805e10c68c1a8d80c202010e9b 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -30,6 +30,7 @@ use futures::{ stream::{FuturesUnordered, StreamExt as _}, Future, FutureExt, }; +use polkadot_node_core_pvf_common::SecurityStatus; use polkadot_primitives::{ExecutorParams, ExecutorParamsHash}; use slotmap::HopSlotMap; use std::{ @@ -139,8 +140,10 @@ struct Queue { // Some variables related to the current session. program_path: PathBuf, + cache_path: PathBuf, spawn_timeout: Duration, node_version: Option<String>, + security_status: SecurityStatus, /// The queue of jobs that are waiting for a worker to pick up. queue: VecDeque<ExecuteJob>, @@ -152,16 +155,20 @@ impl Queue { fn new( metrics: Metrics, program_path: PathBuf, + cache_path: PathBuf, worker_capacity: usize, spawn_timeout: Duration, node_version: Option<String>, + security_status: SecurityStatus, to_queue_rx: mpsc::Receiver<ToQueue>, ) -> Self { Self { metrics, program_path, + cache_path, spawn_timeout, node_version, + security_status, to_queue_rx, queue: VecDeque::new(), mux: Mux::new(), @@ -405,9 +412,11 @@ fn spawn_extra_worker(queue: &mut Queue, job: ExecuteJob) { queue.mux.push( spawn_worker_task( queue.program_path.clone(), + queue.cache_path.clone(), job, queue.spawn_timeout, queue.node_version.clone(), + queue.security_status.clone(), ) .boxed(), ); @@ -423,18 +432,22 @@ fn spawn_extra_worker(queue: &mut Queue, job: ExecuteJob) { /// execute other jobs with a compatible execution environment. 
async fn spawn_worker_task( program_path: PathBuf, + cache_path: PathBuf, job: ExecuteJob, spawn_timeout: Duration, node_version: Option<String>, + security_status: SecurityStatus, ) -> QueueEvent { use futures_timer::Delay; loop { match super::worker_intf::spawn( &program_path, + &cache_path, job.executor_params.clone(), spawn_timeout, node_version.as_deref(), + security_status.clone(), ) .await { @@ -496,17 +509,21 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) { pub fn start( metrics: Metrics, program_path: PathBuf, + cache_path: PathBuf, worker_capacity: usize, spawn_timeout: Duration, node_version: Option<String>, + security_status: SecurityStatus, ) -> (mpsc::Sender<ToQueue>, impl Future<Output = ()>) { let (to_queue_tx, to_queue_rx) = mpsc::channel(20); let run = Queue::new( metrics, program_path, + cache_path, worker_capacity, spawn_timeout, node_version, + security_status, to_queue_rx, ) .run(); diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_intf.rs index d66444a8102d9124188a48e98e5fa8f321a0bb97..783c7c7abbc8c7526819d85e3e06bd1ae6e9209c 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_intf.rs @@ -19,8 +19,8 @@ use crate::{ artifacts::ArtifactPathId, worker_intf::{ - path_to_bytes, spawn_with_program_path, IdleWorker, SpawnErr, WorkerHandle, - JOB_TIMEOUT_WALL_CLOCK_FACTOR, + clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, + SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, LOG_TARGET, }; @@ -30,7 +30,7 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, execute::{Handshake, Response}, - framed_recv, framed_send, + worker_dir, SecurityStatus, }; use polkadot_parachain_primitives::primitives::ValidationResult; use polkadot_primitives::ExecutorParams; @@ -38,21 +38,30 @@ use std::{path::Path, time::Duration}; use tokio::{io, net::UnixStream}; /// Spawns a new worker with the given program path that acts as the worker and the spawn timeout. -/// Sends a handshake message to the worker as soon as it is spawned. /// -/// The program should be able to handle `<program-path> execute-worker <socket-path>` invocation. +/// Sends a handshake message to the worker as soon as it is spawned. 
pub async fn spawn( program_path: &Path, + cache_path: &Path, executor_params: ExecutorParams, spawn_timeout: Duration, node_version: Option<&str>, + security_status: SecurityStatus, ) -> Result<(IdleWorker, WorkerHandle), SpawnErr> { let mut extra_args = vec!["execute-worker"]; if let Some(node_version) = node_version { extra_args.extend_from_slice(&["--node-impl-version", node_version]); } - let (mut idle_worker, worker_handle) = - spawn_with_program_path("execute", program_path, &extra_args, spawn_timeout).await?; + + let (mut idle_worker, worker_handle) = spawn_with_program_path( + "execute", + program_path, + cache_path, + &extra_args, + spawn_timeout, + security_status, + ) + .await?; send_handshake(&mut idle_worker.stream, Handshake { executor_params }) .await .map_err(|error| { @@ -104,89 +113,151 @@ pub async fn start_work( execution_timeout: Duration, validation_params: Vec<u8>, ) -> Outcome { - let IdleWorker { mut stream, pid } = worker; + let IdleWorker { mut stream, pid, worker_dir } = worker; gum::debug!( target: LOG_TARGET, worker_pid = %pid, + ?worker_dir, validation_code_hash = ?artifact.id.code_hash, "starting execute for {}", artifact.path.display(), ); - if let Err(error) = - send_request(&mut stream, &artifact.path, &validation_params, execution_timeout).await - { + with_worker_dir_setup(worker_dir, pid, &artifact.path, |worker_dir| async move { + if let Err(error) = send_request(&mut stream, &validation_params, execution_timeout).await { + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + validation_code_hash = ?artifact.id.code_hash, + ?error, + "failed to send an execute request", + ); + return Outcome::IoErr + } + + // We use a generous timeout here. This is in addition to the one in the child process, in + // case the child stalls. We have a wall clock timeout here in the host, but a CPU timeout + // in the child. We want to use CPU time because it varies less than wall clock time under + // load, but the CPU resources of the child can only be measured from the parent after the + // child process terminates. + let timeout = execution_timeout * JOB_TIMEOUT_WALL_CLOCK_FACTOR; + let response = futures::select! { + response = recv_response(&mut stream).fuse() => { + match response { + Err(error) => { + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + validation_code_hash = ?artifact.id.code_hash, + ?error, + "failed to recv an execute response", + ); + return Outcome::IoErr + }, + Ok(response) => { + if let Response::Ok{duration, ..} = response { + if duration > execution_timeout { + // The job didn't complete within the timeout. + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "execute job took {}ms cpu time, exceeded execution timeout {}ms.", + duration.as_millis(), + execution_timeout.as_millis(), + ); + + // Return a timeout error.
+ return Outcome::HardTimeout; + } + } + + response + }, + } + }, + _ = Delay::new(timeout).fuse() => { + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + validation_code_hash = ?artifact.id.code_hash, + "execution worker exceeded lenient timeout for execution, child worker likely stalled", + ); + Response::TimedOut + }, + }; + + match response { + Response::Ok { result_descriptor, duration } => Outcome::Ok { + result_descriptor, + duration, + idle_worker: IdleWorker { stream, pid, worker_dir }, + }, + Response::InvalidCandidate(err) => Outcome::InvalidCandidate { + err, + idle_worker: IdleWorker { stream, pid, worker_dir }, + }, + Response::TimedOut => Outcome::HardTimeout, + Response::Panic(err) => Outcome::Panic { err }, + Response::InternalError(err) => Outcome::InternalError { err }, + } + }) + .await +} + +/// Create a hard link to the artifact inside the worker dir, execute the given future/closure +/// passing the worker dir in, and clean up the worker dir afterwards. +/// +/// Failure to clean up the worker dir results in an error; leaving any files here could be a +/// security issue, and we should shut down the worker. This should be very rare. +async fn with_worker_dir_setup<F, Fut>( + worker_dir: WorkerDir, + pid: u32, + artifact_path: &Path, + f: F, +) -> Outcome +where + Fut: futures::Future<Output = Outcome>, + F: FnOnce(WorkerDir) -> Fut, +{ + // Cheaply create a hard link to the artifact. The artifact is always at a known location in the + // worker cache, and the child can't access any other artifacts or gain any information from the + // original filename. + let link_path = worker_dir::execute_artifact(&worker_dir.path); + if let Err(err) = tokio::fs::hard_link(artifact_path, link_path).await { gum::warn!( target: LOG_TARGET, worker_pid = %pid, - validation_code_hash = ?artifact.id.code_hash, - ?error, - "failed to send an execute request", + ?worker_dir, + "failed to create a hard link to the artifact: {:?}", + err, ); - return Outcome::IoErr + return Outcome::InternalError { + err: InternalValidationError::CouldNotCreateLink(format!("{:?}", err)), + } } - // We use a generous timeout here. This is in addition to the one in the child process, in - // case the child stalls. We have a wall clock timeout here in the host, but a CPU timeout - // in the child. We want to use CPU time because it varies less than wall clock time under - // load, but the CPU resources of the child can only be measured from the parent after the - // child process terminates. - let timeout = execution_timeout * JOB_TIMEOUT_WALL_CLOCK_FACTOR; - let response = futures::select! { - response = recv_response(&mut stream).fuse() => { - match response { - Err(error) => { - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - validation_code_hash = ?artifact.id.code_hash, - ?error, - "failed to recv an execute response", - ); - return Outcome::IoErr - }, - Ok(response) => { - if let Response::Ok{duration, ..} = response { - if duration > execution_timeout { - // The job didn't complete within the timeout. - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "execute job took {}ms cpu time, exceeded execution timeout {}ms.", - duration.as_millis(), - execution_timeout.as_millis(), - ); - - // Return a timeout error.
- return Outcome::HardTimeout; - } - } + let worker_dir_path = worker_dir.path.clone(); + let outcome = f(worker_dir).await; - response - }, - } - }, - _ = Delay::new(timeout).fuse() => { - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - validation_code_hash = ?artifact.id.code_hash, - "execution worker exceeded lenient timeout for execution, child worker likely stalled", - ); - Response::TimedOut - }, - }; - - match response { - Response::Ok { result_descriptor, duration } => - Outcome::Ok { result_descriptor, duration, idle_worker: IdleWorker { stream, pid } }, - Response::InvalidCandidate(err) => - Outcome::InvalidCandidate { err, idle_worker: IdleWorker { stream, pid } }, - Response::TimedOut => Outcome::HardTimeout, - Response::Panic(err) => Outcome::Panic { err }, - Response::InternalError(err) => Outcome::InternalError { err }, + // Try to clear the worker dir. + if let Err(err) = clear_worker_dir_path(&worker_dir_path) { + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + ?worker_dir_path, + "failed to clear worker cache after the job: {:?}", + err, + ); + return Outcome::InternalError { + err: InternalValidationError::CouldNotClearWorkerDir { + err: format!("{:?}", err), + path: worker_dir_path.to_str().map(String::from), + }, + } } + + outcome } async fn send_handshake(stream: &mut UnixStream, handshake: Handshake) -> io::Result<()> { @@ -195,11 +266,9 @@ async fn send_handshake(stream: &mut UnixStream, handshake: Handshake) -> io::Re async fn send_request( stream: &mut UnixStream, - artifact_path: &Path, validation_params: &[u8], execution_timeout: Duration, ) -> io::Result<()> { - framed_send(stream, path_to_bytes(artifact_path)).await?; framed_send(stream, validation_params).await?; framed_send(stream, &execution_timeout.encode()).await } diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 5290b2760f423403d9fa9cf334f735e253952e19..81695829122b9d3ba03307f1c597ac5877dc7023 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -34,6 +34,7 @@ use futures::{ use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult}, pvf::PvfPrepData, + SecurityStatus, }; use polkadot_parachain_primitives::primitives::ValidationResult; use std::{ @@ -202,8 +203,13 @@ impl Config { pub fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future<Output = ()>) { gum::debug!(target: LOG_TARGET, ?config, "starting PVF validation host"); - // Run checks for supported security features once per host startup. - warn_if_no_landlock(); + // Run checks for supported security features once per host startup. Warn here if not enabled. 
+ let security_status = { + let can_enable_landlock = check_landlock(&config.prepare_worker_program_path); + let can_unshare_user_namespace_and_change_root = + check_can_unshare_user_namespace_and_change_root(&config.prepare_worker_program_path); + SecurityStatus { can_enable_landlock, can_unshare_user_namespace_and_change_root } + }; let (to_host_tx, to_host_rx) = mpsc::channel(10); @@ -215,6 +221,7 @@ pub fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future<O config.cache_path.clone(), config.prepare_worker_spawn_timeout, config.node_version.clone(), + security_status.clone(), ); let (to_prepare_queue_tx, from_prepare_queue_rx, run_prepare_queue) = prepare::start_queue( @@ -229,9 +236,11 @@ pub fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future<O let (to_execute_queue_tx, run_execute_queue) = execute::start( metrics, config.execute_worker_program_path.to_owned(), + config.cache_path.clone(), config.execute_workers_max_num, config.execute_worker_spawn_timeout, config.node_version, + security_status, ); let (to_sweeper_tx, to_sweeper_rx) = mpsc::channel(100); @@ -873,28 +882,103 @@ fn pulse_every(interval: std::time::Duration) -> impl futures::Stream<Item = ()> .map(|_| ()) } -/// Check if landlock is supported and emit a warning if not. -fn warn_if_no_landlock() { - #[cfg(target_os = "linux")] - { - use polkadot_node_core_pvf_common::worker::security::landlock; - let status = landlock::get_status(); - if !landlock::status_is_fully_enabled(&status) { - let abi = landlock::LANDLOCK_ABI as u8; +/// Check if we can sandbox the root and emit a warning if not. +/// +/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible +/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on +/// success and -1 on failure. +fn check_can_unshare_user_namespace_and_change_root( + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] + prepare_worker_program_path: &Path, +) -> bool { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + let output = std::process::Command::new(prepare_worker_program_path) + .arg("--check-can-unshare-user-namespace-and-change-root") + .output(); + + match output { + Ok(output) if output.status.success() => true, + Ok(output) => { + let stderr = std::str::from_utf8(&output.stderr) + .expect("child process writes a UTF-8 string to stderr; qed") + .trim(); + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + // Docs say to always print status using `Display` implementation. + status = %output.status, + %stderr, + "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running with support for unsharing user namespaces for maximum security." + ); + false + }, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + "Could not start child process: {}", + err + ); + false + }, + } + } else { gum::warn!( target: LOG_TARGET, - ?status, - %abi, - "Cannot fully enable landlock, a Linux kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." + "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. 
Consider running on Linux with support for unsharing user namespaces for maximum security." ); + false } } +} - #[cfg(not(target_os = "linux"))] - gum::warn!( - target: LOG_TARGET, - "Cannot enable landlock, a Linux kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with landlock support for maximum security." - ); +/// Check if landlock is supported and emit a warning if not. +/// +/// We do this check by spawning a new process and trying to sandbox it. To get as close as possible +/// to running the check in a worker, we try it... in a worker. The expected return status is 0 on +/// success and -1 on failure. +fn check_landlock( + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] + prepare_worker_program_path: &Path, +) -> bool { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + match std::process::Command::new(prepare_worker_program_path) + .arg("--check-can-enable-landlock") + .status() + { + Ok(status) if status.success() => true, + Ok(status) => { + let abi = + polkadot_node_core_pvf_common::worker::security::landlock::LANDLOCK_ABI as u8; + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + ?status, + %abi, + "Cannot fully enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." + ); + false + }, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?prepare_worker_program_path, + "Could not start child process: {}", + err + ); + false + }, + } + } else { + gum::warn!( + target: LOG_TARGET, + "Cannot enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with landlock support for maximum security." + ); + false + } + } } #[cfg(test)] diff --git a/polkadot/node/core/pvf/src/lib.rs b/polkadot/node/core/pvf/src/lib.rs index 0e4f2444adf72ce01c4444024f589e79653d0e86..1b8d83773813a9619321607eb49ddf83eebd28ed 100644 --- a/polkadot/node/core/pvf/src/lib.rs +++ b/polkadot/node/core/pvf/src/lib.rs @@ -111,6 +111,7 @@ pub use polkadot_node_core_pvf_common::{ error::{InternalValidationError, PrepareError}, prepare::{PrepareJobKind, PrepareStats}, pvf::PvfPrepData, + SecurityStatus, }; /// The log target for this crate. diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 92aa4896c263ce5c4e6aac507ed2eee61a3cdd3b..7933b0319a6f9fcd9fe83cad9f47950827a6ecc9 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -27,6 +27,7 @@ use futures::{ use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult}, pvf::PvfPrepData, + SecurityStatus, }; use slotmap::HopSlotMap; use std::{ @@ -110,10 +111,12 @@ enum PoolEvent { type Mux = FuturesUnordered<BoxFuture<'static, PoolEvent>>; struct Pool { + // Some variables related to the current session. 
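+	// These are fixed when the pool starts and are passed on to every worker we spawn.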
program_path: PathBuf, cache_path: PathBuf, spawn_timeout: Duration, node_version: Option<String>, + security_status: SecurityStatus, to_pool: mpsc::Receiver<ToPool>, from_pool: mpsc::UnboundedSender<FromPool>, @@ -132,6 +135,7 @@ async fn run( cache_path, spawn_timeout, node_version, + security_status, to_pool, mut from_pool, mut spawned, @@ -160,6 +164,7 @@ async fn run( &cache_path, spawn_timeout, node_version.clone(), + security_status.clone(), &mut spawned, &mut mux, to_pool, @@ -207,6 +212,7 @@ fn handle_to_pool( cache_path: &Path, spawn_timeout: Duration, node_version: Option<String>, + security_status: SecurityStatus, spawned: &mut HopSlotMap<Worker, WorkerData>, mux: &mut Mux, to_pool: ToPool, @@ -216,7 +222,14 @@ fn handle_to_pool( gum::debug!(target: LOG_TARGET, "spawning a new prepare worker"); metrics.prepare_worker().on_begin_spawn(); mux.push( - spawn_worker_task(program_path.to_owned(), spawn_timeout, node_version).boxed(), + spawn_worker_task( + program_path.to_owned(), + cache_path.to_owned(), + spawn_timeout, + node_version, + security_status, + ) + .boxed(), ); }, ToPool::StartWork { worker, pvf, artifact_path } => { @@ -229,7 +242,6 @@ fn handle_to_pool( worker, idle, pvf, - cache_path.to_owned(), artifact_path, preparation_timer, ) @@ -258,13 +270,23 @@ fn handle_to_pool( async fn spawn_worker_task( program_path: PathBuf, + cache_path: PathBuf, spawn_timeout: Duration, node_version: Option<String>, + security_status: SecurityStatus, ) -> PoolEvent { use futures_timer::Delay; loop { - match worker_intf::spawn(&program_path, spawn_timeout, node_version.as_deref()).await { + match worker_intf::spawn( + &program_path, + &cache_path, + spawn_timeout, + node_version.as_deref(), + security_status.clone(), + ) + .await + { Ok((idle, handle)) => break PoolEvent::Spawn(idle, handle), Err(err) => { gum::warn!(target: LOG_TARGET, "failed to spawn a prepare worker: {:?}", err); @@ -281,11 +303,10 @@ async fn start_work_task<Timer>( worker: Worker, idle: IdleWorker, pvf: PvfPrepData, - cache_path: PathBuf, artifact_path: PathBuf, _preparation_timer: Option<Timer>, ) -> PoolEvent { - let outcome = worker_intf::start_work(&metrics, idle, pvf, &cache_path, artifact_path).await; + let outcome = worker_intf::start_work(&metrics, idle, pvf, artifact_path).await; PoolEvent::StartWork(worker, outcome) } @@ -322,14 +343,29 @@ fn handle_mux( ), // Return `Concluded`, but do not kill the worker since the error was on the host // side. - Outcome::RenameTmpFileErr { worker: idle, result: _, err } => + Outcome::RenameTmpFileErr { worker: idle, result: _, err, src, dest } => handle_concluded_no_rip( from_pool, spawned, worker, idle, - Err(PrepareError::RenameTmpFileErr(err)), + Err(PrepareError::RenameTmpFileErr { err, src, dest }), ), + // Could not clear worker cache. Kill the worker so other jobs can't see the data. 
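+			// Leftovers from one job must not be observable by the next, so failure to
+			// clear the worker dir is treated as fatal for this worker.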
+ Outcome::ClearWorkerDir { err } => { + if attempt_retire(metrics, spawned, worker) { + reply( + from_pool, + FromPool::Concluded { + worker, + rip: true, + result: Err(PrepareError::ClearWorkerDir(err)), + }, + )?; + } + + Ok(()) + }, Outcome::Unreachable => { if attempt_retire(metrics, spawned, worker) { reply(from_pool, FromPool::Rip(worker))?; @@ -434,6 +470,7 @@ pub fn start( cache_path: PathBuf, spawn_timeout: Duration, node_version: Option<String>, + security_status: SecurityStatus, ) -> (mpsc::Sender<ToPool>, mpsc::UnboundedReceiver<FromPool>, impl Future<Output = ()>) { let (to_pool_tx, to_pool_rx) = mpsc::channel(10); let (from_pool_tx, from_pool_rx) = mpsc::unbounded(); @@ -444,6 +481,7 @@ pub fn start( cache_path, spawn_timeout, node_version, + security_status, to_pool: to_pool_rx, from_pool: from_pool_tx, spawned: HopSlotMap::with_capacity_and_key(20), diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs index 5280ab6b42a235b441fe9d9e25bc415b46ecfa2f..b66c3604434363cdffefaaddf9091397c25b49a0 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs @@ -19,17 +19,17 @@ use crate::{ metrics::Metrics, worker_intf::{ - path_to_bytes, spawn_with_program_path, tmpfile_in, IdleWorker, SpawnErr, WorkerHandle, - JOB_TIMEOUT_WALL_CLOCK_FACTOR, + clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, + SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, LOG_TARGET, }; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult}, - framed_recv, framed_send, prepare::PrepareStats, pvf::PvfPrepData, + worker_dir, SecurityStatus, }; use sp_core::hexdisplay::HexDisplay; @@ -41,19 +41,33 @@ use tokio::{io, net::UnixStream}; /// Spawns a new worker with the given program path that acts as the worker and the spawn timeout. /// -/// The program should be able to handle `<program-path> prepare-worker <socket-path>` invocation. +/// Sends a handshake message to the worker as soon as it is spawned. pub async fn spawn( program_path: &Path, + cache_path: &Path, spawn_timeout: Duration, node_version: Option<&str>, + security_status: SecurityStatus, ) -> Result<(IdleWorker, WorkerHandle), SpawnErr> { let mut extra_args = vec!["prepare-worker"]; if let Some(node_version) = node_version { extra_args.extend_from_slice(&["--node-impl-version", node_version]); } - spawn_with_program_path("prepare", program_path, &extra_args, spawn_timeout).await + + spawn_with_program_path( + "prepare", + program_path, + cache_path, + &extra_args, + spawn_timeout, + security_status, + ) + .await } +/// Outcome of PVF preparation. +/// +/// If the idle worker token is not returned, it means the worker must be terminated. pub enum Outcome { /// The worker has finished the work assigned to it. Concluded { worker: IdleWorker, result: PrepareResult }, @@ -62,9 +76,19 @@ pub enum Outcome { Unreachable, /// The temporary file for the artifact could not be created at the given cache path. CreateTmpFileErr { worker: IdleWorker, err: String }, - /// The response from the worker is received, but the file cannot be renamed (moved) to the + /// The response from the worker is received, but the tmp file cannot be renamed (moved) to the /// final destination location. 
- RenameTmpFileErr { worker: IdleWorker, result: PrepareResult, err: String }, + RenameTmpFileErr { + worker: IdleWorker, + result: PrepareResult, + err: String, + // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible + // conversion to `Option<String>`. + src: Option<String>, + dest: Option<String>, + }, + /// The worker cache could not be cleared for the given reason. + ClearWorkerDir { err: String }, /// The worker failed to finish the job until the given deadline. /// /// The worker is no longer usable and should be killed. @@ -84,83 +108,88 @@ pub async fn start_work( metrics: &Metrics, worker: IdleWorker, pvf: PvfPrepData, - cache_path: &Path, artifact_path: PathBuf, ) -> Outcome { - let IdleWorker { stream, pid } = worker; + let IdleWorker { stream, pid, worker_dir } = worker; gum::debug!( target: LOG_TARGET, worker_pid = %pid, + ?worker_dir, "starting prepare for {}", artifact_path.display(), ); - with_tmp_file(stream, pid, cache_path, |tmp_file, mut stream| async move { - let preparation_timeout = pvf.prep_timeout(); - if let Err(err) = send_request(&mut stream, pvf, &tmp_file).await { - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "failed to send a prepare request: {:?}", - err, - ); - return Outcome::Unreachable - } - - // Wait for the result from the worker, keeping in mind that there may be a timeout, the - // worker may get killed, or something along these lines. In that case we should propagate - // the error to the pool. - // - // We use a generous timeout here. This is in addition to the one in the child process, in - // case the child stalls. We have a wall clock timeout here in the host, but a CPU timeout - // in the child. We want to use CPU time because it varies less than wall clock time under - // load, but the CPU resources of the child can only be measured from the parent after the - // child process terminates. - let timeout = preparation_timeout * JOB_TIMEOUT_WALL_CLOCK_FACTOR; - let result = tokio::time::timeout(timeout, recv_response(&mut stream, pid)).await; - - match result { - // Received bytes from worker within the time limit. - Ok(Ok(prepare_result)) => - handle_response( - metrics, - IdleWorker { stream, pid }, - prepare_result, - pid, - tmp_file, - artifact_path, - preparation_timeout, - ) - .await, - Ok(Err(err)) => { - // Communication error within the time limit. + with_worker_dir_setup( + worker_dir, + stream, + pid, + |tmp_artifact_file, mut stream, worker_dir| async move { + let preparation_timeout = pvf.prep_timeout(); + if let Err(err) = send_request(&mut stream, pvf).await { gum::warn!( target: LOG_TARGET, worker_pid = %pid, - "failed to recv a prepare response: {:?}", + "failed to send a prepare request: {:?}", err, ); - Outcome::IoErr(err.to_string()) - }, - Err(_) => { - // Timed out here on the host. - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "did not recv a prepare response within the time limit", - ); - Outcome::TimedOut - }, - } - }) + return Outcome::Unreachable + } + + // Wait for the result from the worker, keeping in mind that there may be a timeout, the + // worker may get killed, or something along these lines. In that case we should + // propagate the error to the pool. + // + // We use a generous timeout here. This is in addition to the one in the child process, + // in case the child stalls. We have a wall clock timeout here in the host, but a CPU + // timeout in the child. 
We want to use CPU time because it varies less than wall clock + // time under load, but the CPU resources of the child can only be measured from the + // parent after the child process terminates. + let timeout = preparation_timeout * JOB_TIMEOUT_WALL_CLOCK_FACTOR; + let result = tokio::time::timeout(timeout, recv_response(&mut stream, pid)).await; + + match result { + // Received bytes from worker within the time limit. + Ok(Ok(prepare_result)) => + handle_response( + metrics, + IdleWorker { stream, pid, worker_dir }, + prepare_result, + pid, + tmp_artifact_file, + artifact_path, + preparation_timeout, + ) + .await, + Ok(Err(err)) => { + // Communication error within the time limit. + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "failed to recv a prepare response: {:?}", + err, + ); + Outcome::IoErr(err.to_string()) + }, + Err(_) => { + // Timed out here on the host. + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + "did not recv a prepare response within the time limit", + ); + Outcome::TimedOut + }, + } + }, + ) .await } /// Handles the case where we successfully received response bytes on the host from the child. /// -/// NOTE: Here we know the artifact exists, but is still located in a temporary file which will be -/// cleared by `with_tmp_file`. +/// Here we know the artifact exists, but is still located in a temporary file which will be cleared +/// by [`with_worker_dir_setup`]. async fn handle_response( metrics: &Metrics, worker: IdleWorker, @@ -209,7 +238,13 @@ async fn handle_response( artifact_path.display(), err, ); - Outcome::RenameTmpFileErr { worker, result, err: format!("{:?}", err) } + Outcome::RenameTmpFileErr { + worker, + result, + err: format!("{:?}", err), + src: tmp_file.to_str().map(String::from), + dest: artifact_path.to_str().map(String::from), + } }, }; @@ -220,61 +255,58 @@ async fn handle_response( outcome } -/// Create a temporary file for an artifact at the given cache path and execute the given -/// future/closure passing the file path in. +/// Create a temporary file for an artifact in the worker cache, execute the given future/closure +/// passing the file path in, and clean up the worker cache. /// -/// The function will try best effort to not leave behind the temporary file. -async fn with_tmp_file<F, Fut>(stream: UnixStream, pid: u32, cache_path: &Path, f: F) -> Outcome +/// Failure to clean up the worker cache results in an error - leaving any files here could be a +/// security issue, and we should shut down the worker. This should be very rare. +async fn with_worker_dir_setup<F, Fut>( + worker_dir: WorkerDir, + stream: UnixStream, + pid: u32, + f: F, +) -> Outcome where Fut: futures::Future<Output = Outcome>, - F: FnOnce(PathBuf, UnixStream) -> Fut, + F: FnOnce(PathBuf, UnixStream, WorkerDir) -> Fut, { - let tmp_file = match tmpfile_in("prepare-artifact-", cache_path).await { - Ok(f) => f, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "failed to create a temp file for the artifact: {:?}", - err, - ); - return Outcome::CreateTmpFileErr { - worker: IdleWorker { stream, pid }, - err: format!("{:?}", err), - } - }, + // Create the tmp file here so that the child doesn't need any file creation rights. This will + // be cleared at the end of this function. 
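+	// (Like `worker_dir::socket` used when spawning, `worker_dir::prepare_tmp_artifact`
+	// only derives a path inside the worker dir; the file itself is created just below.)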
+ let tmp_file = worker_dir::prepare_tmp_artifact(&worker_dir.path); + if let Err(err) = tokio::fs::File::create(&tmp_file).await { + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + ?worker_dir, + "failed to create a temp file for the artifact: {:?}", + err, + ); + return Outcome::CreateTmpFileErr { + worker: IdleWorker { stream, pid, worker_dir }, + err: format!("{:?}", err), + } }; - let outcome = f(tmp_file.clone(), stream).await; + let worker_dir_path = worker_dir.path.clone(); + let outcome = f(tmp_file, stream, worker_dir).await; - // The function called above is expected to move `tmp_file` to a new location upon success. - // However, the function may as well fail and in that case we should remove the tmp file here. - // - // In any case, we try to remove the file here so that there are no leftovers. We only report - // errors that are different from the `NotFound`. - match tokio::fs::remove_file(tmp_file).await { - Ok(()) => (), - Err(err) if err.kind() == std::io::ErrorKind::NotFound => (), - Err(err) => { - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "failed to remove the tmp file: {:?}", - err, - ); - }, + // Try to clear the worker dir. + if let Err(err) = clear_worker_dir_path(&worker_dir_path) { + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + ?worker_dir_path, + "failed to clear worker cache after the job: {:?}", + err, + ); + return Outcome::ClearWorkerDir { err: format!("{:?}", err) } } outcome } -async fn send_request( - stream: &mut UnixStream, - pvf: PvfPrepData, - tmp_file: &Path, -) -> io::Result<()> { +async fn send_request(stream: &mut UnixStream, pvf: PvfPrepData) -> io::Result<()> { framed_send(stream, &pvf.encode()).await?; - framed_send(stream, path_to_bytes(tmp_file)).await?; Ok(()) } diff --git a/polkadot/node/core/pvf/src/worker_intf.rs b/polkadot/node/core/pvf/src/worker_intf.rs index 795ad4524443bcf1032a40d5931520c110219055..9825506ba88f67fa611dc3d9988e7a532aadee3f 100644 --- a/polkadot/node/core/pvf/src/worker_intf.rs +++ b/polkadot/node/core/pvf/src/worker_intf.rs @@ -20,6 +20,7 @@ use crate::LOG_TARGET; use futures::FutureExt as _; use futures_timer::Delay; use pin_project::pin_project; +use polkadot_node_core_pvf_common::{worker_dir, SecurityStatus}; use rand::Rng; use std::{ fmt, mem, @@ -39,99 +40,106 @@ use tokio::{ pub const JOB_TIMEOUT_WALL_CLOCK_FACTOR: u32 = 4; /// This is publicly exposed only for integration tests. +/// +/// # Parameters +/// +/// - `debug_id`: An identifier for the process (e.g. "execute" or "prepare"). +/// +/// - `program_path`: The path to the program. +/// +/// - `cache_path`: The path to the artifact cache. +/// +/// - `extra_args`: Optional extra CLI arguments to the program. NOTE: Should only contain data +/// required before the handshake, like node/worker versions for the version check. Other data +/// should go through the handshake. +/// +/// - `spawn_timeout`: The amount of time to wait for the child process to spawn. +/// +/// - `security_status`: contains the detected status of security features. 
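+///
+/// # Example
+///
+/// A minimal sketch of how the integration tests call this function. `worker_path` is a
+/// test helper that locates the built worker binary, so the exact arguments here are
+/// illustrative:
+///
+/// ```nocompile
+/// let (idle_worker, worker_handle) = spawn_with_program_path(
+///     "integration-test",
+///     worker_path("polkadot-prepare-worker"),
+///     &std::env::temp_dir(),
+///     &["prepare-worker"],
+///     Duration::from_secs(2),
+///     SecurityStatus::default(),
+/// )
+/// .await
+/// .unwrap();
+/// ```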
#[doc(hidden)] pub async fn spawn_with_program_path( debug_id: &'static str, program_path: impl Into<PathBuf>, + cache_path: &Path, extra_args: &[&str], spawn_timeout: Duration, + security_status: SecurityStatus, ) -> Result<(IdleWorker, WorkerHandle), SpawnErr> { let program_path = program_path.into(); - with_transient_socket_path(debug_id, |socket_path| { - let socket_path = socket_path.to_owned(); - let extra_args: Vec<String> = extra_args.iter().map(|arg| arg.to_string()).collect(); - - async move { - let listener = UnixListener::bind(&socket_path).map_err(|err| { + let worker_dir = WorkerDir::new(debug_id, cache_path).await?; + let socket_path = worker_dir::socket(&worker_dir.path); + + let extra_args: Vec<String> = extra_args.iter().map(|arg| arg.to_string()).collect(); + + let listener = UnixListener::bind(&socket_path).map_err(|err| { + gum::warn!( + target: LOG_TARGET, + %debug_id, + ?program_path, + ?extra_args, + ?worker_dir, + ?socket_path, + "cannot bind unix socket: {:?}", + err, + ); + SpawnErr::Bind + })?; + + let handle = WorkerHandle::spawn(&program_path, &extra_args, &worker_dir.path, security_status) + .map_err(|err| { + gum::warn!( + target: LOG_TARGET, + %debug_id, + ?program_path, + ?extra_args, + ?worker_dir.path, + ?socket_path, + "cannot spawn a worker: {:?}", + err, + ); + SpawnErr::ProcessSpawn + })?; + + let worker_dir_path = worker_dir.path.clone(); + futures::select! { + accept_result = listener.accept().fuse() => { + let (stream, _) = accept_result.map_err(|err| { gum::warn!( target: LOG_TARGET, %debug_id, ?program_path, ?extra_args, - "cannot bind unix socket: {:?}", + ?worker_dir_path, + ?socket_path, + "cannot accept a worker: {:?}", err, ); - SpawnErr::Bind + SpawnErr::Accept })?; - - let handle = - WorkerHandle::spawn(&program_path, &extra_args, socket_path).map_err(|err| { - gum::warn!( - target: LOG_TARGET, - %debug_id, - ?program_path, - ?extra_args, - "cannot spawn a worker: {:?}", - err, - ); - SpawnErr::ProcessSpawn - })?; - - futures::select! { - accept_result = listener.accept().fuse() => { - let (stream, _) = accept_result.map_err(|err| { - gum::warn!( - target: LOG_TARGET, - %debug_id, - ?program_path, - ?extra_args, - "cannot accept a worker: {:?}", - err, - ); - SpawnErr::Accept - })?; - Ok((IdleWorker { stream, pid: handle.id() }, handle)) - } - _ = Delay::new(spawn_timeout).fuse() => { - gum::warn!( - target: LOG_TARGET, - %debug_id, - ?program_path, - ?extra_args, - ?spawn_timeout, - "spawning and connecting to socket timed out", - ); - Err(SpawnErr::AcceptTimeout) - } - } + Ok((IdleWorker { stream, pid: handle.id(), worker_dir }, handle)) } - }) - .await -} - -async fn with_transient_socket_path<T, F, Fut>(debug_id: &'static str, f: F) -> Result<T, SpawnErr> -where - F: FnOnce(&Path) -> Fut, - Fut: futures::Future<Output = Result<T, SpawnErr>> + 'static, -{ - let socket_path = tmpfile(&format!("pvf-host-{}", debug_id)) - .await - .map_err(|_| SpawnErr::TmpFile)?; - let result = f(&socket_path).await; - - // Best effort to remove the socket file. Under normal circumstances the socket will be removed - // by the worker. We make sure that it is removed here, just in case a failed rendezvous. 
- let _ = tokio::fs::remove_file(socket_path).await; - - result + _ = Delay::new(spawn_timeout).fuse() => { + gum::warn!( + target: LOG_TARGET, + %debug_id, + ?program_path, + ?extra_args, + ?worker_dir_path, + ?socket_path, + ?spawn_timeout, + "spawning and connecting to socket timed out", + ); + Err(SpawnErr::AcceptTimeout) + } + } } -/// Returns a path under the given `dir`. The file name will start with the given prefix. +/// Returns a path under the given `dir`. The path name will start with the given prefix. /// /// There is only a certain number of retries. If exceeded this function will give up and return an /// error. -pub async fn tmpfile_in(prefix: &str, dir: &Path) -> io::Result<PathBuf> { - fn tmppath(prefix: &str, dir: &Path) -> PathBuf { +pub async fn tmppath_in(prefix: &str, dir: &Path) -> io::Result<PathBuf> { + fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf { use rand::distributions::Alphanumeric; const DESCRIMINATOR_LEN: usize = 10; @@ -143,27 +151,28 @@ pub async fn tmpfile_in(prefix: &str, dir: &Path) -> io::Result<PathBuf> { let s = std::str::from_utf8(&buf) .expect("the string is collected from a valid utf-8 sequence; qed"); - let mut file = dir.to_owned(); - file.push(s); - file + let mut path = dir.to_owned(); + path.push(s); + path } const NUM_RETRIES: usize = 50; for _ in 0..NUM_RETRIES { - let candidate_path = tmppath(prefix, dir); - if !candidate_path.exists() { - return Ok(candidate_path) + let tmp_path = make_tmppath(prefix, dir); + if !tmp_path.exists() { + return Ok(tmp_path) } } - Err(io::Error::new(io::ErrorKind::Other, "failed to create a temporary file")) + Err(io::Error::new(io::ErrorKind::Other, "failed to create a temporary path")) } -/// The same as [`tmpfile_in`], but uses [`std::env::temp_dir`] as the directory. -pub async fn tmpfile(prefix: &str) -> io::Result<PathBuf> { +/// The same as [`tmppath_in`], but uses [`std::env::temp_dir`] as the directory. +#[cfg(test)] +pub async fn tmppath(prefix: &str) -> io::Result<PathBuf> { let temp_dir = PathBuf::from(std::env::temp_dir()); - tmpfile_in(prefix, &temp_dir).await + tmppath_in(prefix, &temp_dir).await } /// A struct that represents an idle worker. @@ -177,13 +186,19 @@ pub struct IdleWorker { /// The identifier of this process. Used to reset the niceness. pub pid: u32, + + /// The temporary per-worker path. We clean up the worker dir between jobs and delete it when + /// the worker dies. + pub worker_dir: WorkerDir, } /// An error happened during spawning a worker process. #[derive(Clone, Debug)] pub enum SpawnErr { - /// Cannot obtain a temporary file location. - TmpFile, + /// Cannot obtain a temporary path location. + TmpPath, + /// An FS error occurred. + Fs(String), /// Cannot bind the socket to the given path. Bind, /// An error happened during accepting a connection to the socket. @@ -219,12 +234,32 @@ impl WorkerHandle { fn spawn( program: impl AsRef<Path>, extra_args: &[String], - socket_path: impl AsRef<Path>, + worker_dir_path: impl AsRef<Path>, + security_status: SecurityStatus, ) -> io::Result<Self> { - let mut child = process::Command::new(program.as_ref()) + let security_args = { + let mut args = vec![]; + if security_status.can_enable_landlock { + args.push("--can-enable-landlock".to_string()); + } + if security_status.can_unshare_user_namespace_and_change_root { + args.push("--can-unshare-user-namespace-and-change-root".to_string()); + } + args + }; + + // Clear all env vars from the spawned process. 
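+		// Rationale: the worker should not observe or depend on anything from the node's
+		// environment except the variables we explicitly add back below.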
+ let mut command = process::Command::new(program.as_ref()); + command.env_clear(); + // Add back any env vars we want to keep. + if let Ok(value) = std::env::var("RUST_LOG") { + command.env("RUST_LOG", value); + } + let mut child = command .args(extra_args) - .arg("--socket-path") - .arg(socket_path.as_ref().as_os_str()) + .arg("--worker-dir-path") + .arg(worker_dir_path.as_ref().as_os_str()) + .args(&security_args) .stdout(std::process::Stdio::piped()) .kill_on_drop(true) .spawn()?; @@ -306,16 +341,6 @@ impl fmt::Debug for WorkerHandle { } } -/// Convert the given path into a byte buffer. -pub fn path_to_bytes(path: &Path) -> &[u8] { - // Ideally, we take the `OsStr` of the path, send that and reconstruct this on the other side. - // However, libstd doesn't provide us with such an option. There are crates out there that - // allow for extraction of a path, but TBH it doesn't seem to be a real issue. - // - // However, should be there reports we can incorporate such a crate here. - path.to_str().expect("non-UTF-8 path").as_bytes() -} - /// Write some data prefixed by its length into `w`. pub async fn framed_send(w: &mut (impl AsyncWrite + Unpin), buf: &[u8]) -> io::Result<()> { let len_buf = buf.len().to_le_bytes(); @@ -333,3 +358,84 @@ pub async fn framed_recv(r: &mut (impl AsyncRead + Unpin)) -> io::Result<Vec<u8> r.read_exact(&mut buf).await?; Ok(buf) } + +/// A temporary worker dir that contains only files needed by the worker. The worker will change its +/// root (the `/` directory) to this directory; it should have access to no other paths on its +/// filesystem. +/// +/// NOTE: This struct cleans up its associated directory when it is dropped. Therefore it should not +/// implement `Clone`. +/// +/// # File structure +/// +/// The overall file structure for the PVF system is as follows. The `worker-dir-X`s are managed by +/// this struct. +/// +/// ```nocompile +/// + /<cache_path>/ +/// - artifact-1 +/// - artifact-2 +/// - [...] +/// - worker-dir-1/ (new `/` for worker-1) +/// + socket (created by host) +/// + tmp-artifact (created by host) (prepare-only) +/// + artifact (link -> artifact-1) (created by host) (execute-only) +/// - worker-dir-2/ (new `/` for worker-2) +/// + [...] +/// ``` +#[derive(Debug)] +pub struct WorkerDir { + pub path: PathBuf, +} + +impl WorkerDir { + /// Creates a new, empty worker dir with a random name in the given cache dir. + pub async fn new(debug_id: &'static str, cache_dir: &Path) -> Result<Self, SpawnErr> { + let prefix = format!("worker-dir-{}-", debug_id); + let path = tmppath_in(&prefix, cache_dir).await.map_err(|_| SpawnErr::TmpPath)?; + tokio::fs::create_dir(&path) + .await + .map_err(|err| SpawnErr::Fs(err.to_string()))?; + Ok(Self { path }) + } +} + +// Try to clean up the temporary worker dir at the end of the worker's lifetime. It should be wiped +// on startup, but we make a best effort not to leave it around. +impl Drop for WorkerDir { + fn drop(&mut self) { + let _ = std::fs::remove_dir_all(&self.path); + } +} + +// Not async since Rust has trouble with async recursion. There should be few files here anyway. +// +// TODO: A lingering malicious job can still access future files in this dir. See +// <https://github.com/paritytech/polkadot-sdk/issues/574> for how to fully secure this. +/// Clear the temporary worker dir without deleting it. Not deleting is important because the worker +/// has mounted its own separate filesystem here. +/// +/// Should be called right after a job has finished. 
We don't want jobs to have access to +/// artifacts from previous jobs. +pub fn clear_worker_dir_path(worker_dir_path: &Path) -> io::Result<()> { + fn remove_dir_contents(path: &Path) -> io::Result<()> { + for entry in std::fs::read_dir(&path)? { + let entry = entry?; + let path = entry.path(); + + if entry.file_type()?.is_dir() { + remove_dir_contents(&path)?; + std::fs::remove_dir(path)?; + } else { + std::fs::remove_file(path)?; + } + } + Ok(()) + } + + // Note the worker dir may not exist anymore because of the worker dying and being cleaned up. + match remove_dir_contents(worker_dir_path) { + Err(err) if matches!(err.kind(), io::ErrorKind::NotFound) => Ok(()), + result => result, + } +} diff --git a/polkadot/node/core/pvf/tests/it/adder.rs b/polkadot/node/core/pvf/tests/it/adder.rs index bad7a66054c9e885c6693f2652e1d6235b7324f4..8bdd09db208a92f5510f0c60856bab6a1c3103e0 100644 --- a/polkadot/node/core/pvf/tests/it/adder.rs +++ b/polkadot/node/core/pvf/tests/it/adder.rs @@ -100,7 +100,7 @@ async fn execute_bad_block_on_parent() { let host = TestHost::new(); - let _ret = host + let _err = host .validate_candidate( adder::wasm_binary_unwrap(), ValidationParams { @@ -145,3 +145,37 @@ async fn stress_spawn() { futures::future::join_all((0..100).map(|_| execute(host.clone()))).await; } + +// With one worker, run multiple execution jobs serially. They should not conflict. +#[tokio::test] +async fn execute_can_run_serially() { + let host = std::sync::Arc::new(TestHost::new_with_config(|cfg| { + cfg.execute_workers_max_num = 1; + })); + + async fn execute(host: std::sync::Arc<TestHost>) { + let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; + let block_data = BlockData { state: 0, add: 512 }; + let ret = host + .validate_candidate( + adder::wasm_binary_unwrap(), + ValidationParams { + parent_head: GenericHeadData(parent_head.encode()), + block_data: GenericBlockData(block_data.encode()), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ) + .await + .unwrap(); + + let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); + + assert_eq!(new_head.number, 1); + assert_eq!(new_head.parent_hash, parent_head.hash()); + assert_eq!(new_head.post_state, hash_state(512)); + } + + futures::future::join_all((0..5).map(|_| execute(host.clone()))).await; +} diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index dc8f00098ec5ebe55fa319116d23b308c094ffd9..f699b5840d8faa7e0a51e859b735caed02f3e621 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -18,8 +18,8 @@ use assert_matches::assert_matches; use parity_scale_codec::Encode as _; use polkadot_node_core_pvf::{ - start, Config, InvalidCandidate, Metrics, PrepareJobKind, PvfPrepData, ValidationError, - ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, + start, Config, InvalidCandidate, Metrics, PrepareError, PrepareJobKind, PrepareStats, + PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; use polkadot_primitives::ExecutorParams; @@ -70,6 +70,33 @@ impl TestHost { Self { cache_dir, host: Mutex::new(host) } } + async fn precheck_pvf( + &self, + code: &[u8], + executor_params: ExecutorParams, + ) -> Result<PrepareStats, PrepareError> { + let (result_tx, result_rx) = futures::channel::oneshot::channel(); + + let code = 
sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024) + .expect("Compression works"); + + self.host + .lock() + .await + .precheck_pvf( + PvfPrepData::from_code( + code.into(), + executor_params, + TEST_PREPARATION_TIMEOUT, + PrepareJobKind::Prechecking, + ), + result_tx, + ) + .await + .unwrap(); + result_rx.await.unwrap() + } + async fn validate_candidate( &self, code: &[u8], @@ -291,8 +318,12 @@ async fn deleting_prepared_artifact_does_not_dispute() { { // Get the artifact path (asserting it exists). let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); - assert_eq!(cache_dir.len(), 1); - let artifact_path = cache_dir.pop().unwrap().unwrap(); + // Should contain the artifact and the worker dir. + assert_eq!(cache_dir.len(), 2); + let mut artifact_path = cache_dir.pop().unwrap().unwrap(); + if artifact_path.path().is_dir() { + artifact_path = cache_dir.pop().unwrap().unwrap(); + } // Delete the artifact. std::fs::remove_file(artifact_path.path()).unwrap(); @@ -317,3 +348,19 @@ async fn deleting_prepared_artifact_does_not_dispute() { r => panic!("{:?}", r), } } + +// With one worker, run multiple preparation jobs serially. They should not conflict. +#[tokio::test] +async fn prepare_can_run_serially() { + let host = TestHost::new_with_config(|cfg| { + cfg.prepare_workers_hard_max_num = 1; + }); + + let _stats = host + .precheck_pvf(::adder::wasm_binary_unwrap(), Default::default()) + .await + .unwrap(); + + // Prepare a different wasm blob to prevent skipping work. + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); +} diff --git a/polkadot/node/core/pvf/tests/it/worker_common.rs b/polkadot/node/core/pvf/tests/it/worker_common.rs index 875ae79af09732fd14ebabbc1c711549c43f46fa..5379d29556c76aff3a489d7d0a967d4b6a503637 100644 --- a/polkadot/node/core/pvf/tests/it/worker_common.rs +++ b/polkadot/node/core/pvf/tests/it/worker_common.rs @@ -14,8 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. 
-use polkadot_node_core_pvf::testing::{spawn_with_program_path, SpawnErr}; -use std::time::Duration; +use polkadot_node_core_pvf::{ + testing::{spawn_with_program_path, SpawnErr}, + SecurityStatus, +}; +use std::{env, time::Duration}; fn worker_path(name: &str) -> std::path::PathBuf { let mut worker_path = std::env::current_exe().unwrap(); @@ -33,8 +36,10 @@ async fn spawn_immediate_exit() { let result = spawn_with_program_path( "integration-test", worker_path("polkadot-prepare-worker"), + &env::temp_dir(), &["exit"], Duration::from_secs(2), + SecurityStatus::default(), ) .await; assert!(matches!(result, Err(SpawnErr::AcceptTimeout))); @@ -45,8 +50,10 @@ async fn spawn_timeout() { let result = spawn_with_program_path( "integration-test", worker_path("polkadot-execute-worker"), + &env::temp_dir(), &["test-sleep"], Duration::from_secs(2), + SecurityStatus::default(), ) .await; assert!(matches!(result, Err(SpawnErr::AcceptTimeout))); @@ -57,8 +64,10 @@ async fn should_connect() { let _ = spawn_with_program_path( "integration-test", worker_path("polkadot-prepare-worker"), + &env::temp_dir(), &["prepare-worker"], Duration::from_secs(2), + SecurityStatus::default(), ) .await .unwrap(); diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 7f41d74e616c6152038f6ac58d8189c5d722fab3..6cf7fa744d3f917ac999545725b0e4f67aa68589 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,12 +20,12 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; /// For consistency we have the same capacity for all caches. 
We use 128 as we'll only need that @@ -61,14 +61,11 @@ pub(crate) struct RequestResultCache { LruMap<(Hash, ParaId, OccupiedCoreAssumption), Option<ValidationCodeHash>>, version: LruMap<Hash, u32>, disputes: LruMap<Hash, Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)>>, - unapplied_slashes: - LruMap<Hash, Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>>, - key_ownership_proof: - LruMap<(Hash, ValidatorId), Option<vstaging::slashing::OpaqueKeyOwnershipProof>>, + unapplied_slashes: LruMap<Hash, Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>>, + key_ownership_proof: LruMap<(Hash, ValidatorId), Option<slashing::OpaqueKeyOwnershipProof>>, minimum_backing_votes: LruMap<SessionIndex, u32>, - - staging_para_backing_state: LruMap<(Hash, ParaId), Option<vstaging::BackingState>>, - staging_async_backing_params: LruMap<Hash, vstaging::AsyncBackingParams>, + para_backing_state: LruMap<(Hash, ParaId), Option<async_backing::BackingState>>, + async_backing_params: LruMap<Hash, async_backing::AsyncBackingParams>, } impl Default for RequestResultCache { @@ -99,9 +96,8 @@ impl Default for RequestResultCache { unapplied_slashes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), key_ownership_proof: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), minimum_backing_votes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), - - staging_para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), - staging_async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -401,14 +397,14 @@ impl RequestResultCache { pub(crate) fn unapplied_slashes( &mut self, relay_parent: &Hash, - ) -> Option<&Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>> { + ) -> Option<&Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>> { self.unapplied_slashes.get(relay_parent).map(|v| &*v) } pub(crate) fn cache_unapplied_slashes( &mut self, relay_parent: Hash, - value: Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, + value: Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ) { self.unapplied_slashes.insert(relay_parent, value); } @@ -416,14 +412,14 @@ impl RequestResultCache { pub(crate) fn key_ownership_proof( &mut self, key: (Hash, ValidatorId), - ) -> Option<&Option<vstaging::slashing::OpaqueKeyOwnershipProof>> { + ) -> Option<&Option<slashing::OpaqueKeyOwnershipProof>> { self.key_ownership_proof.get(&key).map(|v| &*v) } pub(crate) fn cache_key_ownership_proof( &mut self, key: (Hash, ValidatorId), - value: Option<vstaging::slashing::OpaqueKeyOwnershipProof>, + value: Option<slashing::OpaqueKeyOwnershipProof>, ) { self.key_ownership_proof.insert(key, value); } @@ -431,7 +427,7 @@ impl RequestResultCache { // This request is never cached, hence always returns `None`. 
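+	// Caching would be wrong here: submitting the report is a side-effect, and a cached
+	// result could wrongly suppress a resubmission.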
pub(crate) fn submit_report_dispute_lost( &mut self, - _key: (Hash, vstaging::slashing::DisputeProof, vstaging::slashing::OpaqueKeyOwnershipProof), + _key: (Hash, slashing::DisputeProof, slashing::OpaqueKeyOwnershipProof), ) -> Option<&Option<()>> { None } @@ -448,34 +444,34 @@ impl RequestResultCache { self.minimum_backing_votes.insert(session_index, minimum_backing_votes); } - pub(crate) fn staging_para_backing_state( + pub(crate) fn para_backing_state( &mut self, key: (Hash, ParaId), - ) -> Option<&Option<vstaging::BackingState>> { - self.staging_para_backing_state.get(&key).map(|v| &*v) + ) -> Option<&Option<async_backing::BackingState>> { + self.para_backing_state.get(&key).map(|v| &*v) } - pub(crate) fn cache_staging_para_backing_state( + pub(crate) fn cache_para_backing_state( &mut self, key: (Hash, ParaId), - value: Option<vstaging::BackingState>, + value: Option<async_backing::BackingState>, ) { - self.staging_para_backing_state.insert(key, value); + self.para_backing_state.insert(key, value); } - pub(crate) fn staging_async_backing_params( + pub(crate) fn async_backing_params( &mut self, key: &Hash, - ) -> Option<&vstaging::AsyncBackingParams> { - self.staging_async_backing_params.get(key).map(|v| &*v) + ) -> Option<&async_backing::AsyncBackingParams> { + self.async_backing_params.get(key).map(|v| &*v) } - pub(crate) fn cache_staging_async_backing_params( + pub(crate) fn cache_async_backing_params( &mut self, key: Hash, - value: vstaging::AsyncBackingParams, + value: async_backing::AsyncBackingParams, ) { - self.staging_async_backing_params.insert(key, value); + self.async_backing_params.insert(key, value); } } @@ -515,16 +511,15 @@ pub(crate) enum RequestResult { ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option<ValidationCodeHash>), Version(Hash, u32), Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)>), - UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>), - KeyOwnershipProof(Hash, ValidatorId, Option<vstaging::slashing::OpaqueKeyOwnershipProof>), + UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>), + KeyOwnershipProof(Hash, ValidatorId, Option<slashing::OpaqueKeyOwnershipProof>), // This is a request with side-effects. 
SubmitReportDisputeLost( Hash, - vstaging::slashing::DisputeProof, - vstaging::slashing::OpaqueKeyOwnershipProof, + slashing::DisputeProof, + slashing::OpaqueKeyOwnershipProof, Option<()>, ), - - StagingParaBackingState(Hash, ParaId, Option<vstaging::BackingState>), - StagingAsyncBackingParams(Hash, vstaging::AsyncBackingParams), + ParaBackingState(Hash, ParaId, Option<async_backing::BackingState>), + AsyncBackingParams(Hash, async_backing::AsyncBackingParams), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index ec9bf10fa6e3add92ca37a274966a1e84277f07e..1b18941e54645d57d51af049888037f8d6fd994f 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -166,12 +166,11 @@ where .requests_cache .cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof), SubmitReportDisputeLost(_, _, _, _) => {}, - - StagingParaBackingState(relay_parent, para_id, constraints) => self + ParaBackingState(relay_parent, para_id, constraints) => self .requests_cache - .cache_staging_para_backing_state((relay_parent, para_id), constraints), - StagingAsyncBackingParams(relay_parent, params) => - self.requests_cache.cache_staging_async_backing_params(relay_parent, params), + .cache_para_backing_state((relay_parent, para_id), constraints), + AsyncBackingParams(relay_parent, params) => + self.requests_cache.cache_async_backing_params(relay_parent, params), } } @@ -297,13 +296,10 @@ where Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) }, ), - - Request::StagingParaBackingState(para, sender) => - query!(staging_para_backing_state(para), sender) - .map(|sender| Request::StagingParaBackingState(para, sender)), - Request::StagingAsyncBackingParams(sender) => - query!(staging_async_backing_params(), sender) - .map(|sender| Request::StagingAsyncBackingParams(sender)), + Request::ParaBackingState(para, sender) => query!(para_backing_state(para), sender) + .map(|sender| Request::ParaBackingState(para, sender)), + Request::AsyncBackingParams(sender) => query!(async_backing_params(), sender) + .map(|sender| Request::AsyncBackingParams(sender)), Request::MinimumBackingVotes(index, sender) => { if let Some(value) = self.requests_cache.minimum_backing_votes(index) { self.metrics.on_cached_request(); @@ -569,20 +565,19 @@ where ver = Request::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT, sender ), - - Request::StagingParaBackingState(para, sender) => { + Request::ParaBackingState(para, sender) => { query!( - StagingParaBackingState, - staging_para_backing_state(para), - ver = Request::STAGING_BACKING_STATE, + ParaBackingState, + para_backing_state(para), + ver = Request::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT, sender ) }, - Request::StagingAsyncBackingParams(sender) => { + Request::AsyncBackingParams(sender) => { query!( - StagingAsyncBackingParams, - staging_async_backing_params(), - ver = Request::STAGING_BACKING_STATE, + AsyncBackingParams, + async_backing_params(), + ver = Request::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT, sender ) }, diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index bb7c2968961171151e5690503b61d0950bd6a56a..fb97139a802873aa928df21263e5f946f9080acd 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,9 +20,9 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use 
polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, @@ -213,7 +213,7 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { async fn unapplied_slashes( &self, _: Hash, - ) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, ApiError> { + ) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ApiError> { todo!("Not required for tests") } @@ -221,15 +221,15 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { &self, _: Hash, _: ValidatorId, - ) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>, ApiError> { + ) -> Result<Option<slashing::OpaqueKeyOwnershipProof>, ApiError> { todo!("Not required for tests") } async fn submit_report_dispute_lost( &self, _: Hash, - _: vstaging::slashing::DisputeProof, - _: vstaging::slashing::OpaqueKeyOwnershipProof, + _: slashing::DisputeProof, + _: slashing::OpaqueKeyOwnershipProof, ) -> Result<Option<()>, ApiError> { todo!("Not required for tests") } @@ -250,18 +250,18 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { Ok(self.authorities.clone()) } - async fn staging_async_backing_params( + async fn async_backing_params( &self, _: Hash, - ) -> Result<vstaging::AsyncBackingParams, ApiError> { + ) -> Result<async_backing::AsyncBackingParams, ApiError> { todo!("Not required for tests") } - async fn staging_para_backing_state( + async fn para_backing_state( &self, _: Hash, _: ParaId, - ) -> Result<Option<vstaging::BackingState>, ApiError> { + ) -> Result<Option<async_backing::BackingState>, ApiError> { todo!("Not required for tests") } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 70c20437d125d234b47b0953fbf019931c72c33d..f76826d7fdf42a5366898cbdba11e221256c8c0e 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -26,8 +26,8 @@ use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, peer_set::{ValidationVersion, MAX_NOTIFICATION_SIZE}, - v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, - Versioned, VersionedValidationProtocol, View, + v1 as protocol_v1, v2 as protocol_v2, PeerId, UnifiedReputationChange as Rep, Versioned, + VersionedValidationProtocol, View, }; use polkadot_node_primitives::approval::{ AssignmentCert, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, @@ -602,9 +602,7 @@ impl State { { match msg { Versioned::V1(protocol_v1::ApprovalDistributionMessage::Assignments(assignments)) | - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Assignments( - assignments, - )) => { + 
Versioned::V2(protocol_v2::ApprovalDistributionMessage::Assignments(assignments)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -644,9 +642,7 @@ impl State { } }, Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) | - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Approvals( - approvals, - )) => { + Versioned::V2(protocol_v2::ApprovalDistributionMessage::Approvals(approvals)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -1060,7 +1056,7 @@ impl State { route_random }; - let (v1_peers, vstaging_peers) = { + let (v1_peers, v2_peers) = { let peer_data = &self.peer_data; let peers = entry .known_by @@ -1090,9 +1086,9 @@ impl State { } let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1); - let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging); + let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2); - (v1_peers, vstaging_peers) + (v1_peers, v2_peers) }; if !v1_peers.is_empty() { @@ -1103,10 +1099,10 @@ impl State { .await; } - if !vstaging_peers.is_empty() { + if !v2_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers, - versioned_assignments_packet(ValidationVersion::VStaging, assignments.clone()), + v2_peers, + versioned_assignments_packet(ValidationVersion::V2, assignments.clone()), )) .await; } @@ -1395,7 +1391,7 @@ impl State { in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment) }; - let (v1_peers, vstaging_peers) = { + let (v1_peers, v2_peers) = { let peer_data = &self.peer_data; let peers = entry .known_by @@ -1425,9 +1421,9 @@ impl State { } let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1); - let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging); + let v2_peers = filter_peers_by_version(&peers, ValidationVersion::V2); - (v1_peers, vstaging_peers) + (v1_peers, v2_peers) }; let approvals = vec![vote]; @@ -1440,10 +1436,10 @@ impl State { .await; } - if !vstaging_peers.is_empty() { + if !v2_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers, - versioned_approvals_packet(ValidationVersion::VStaging, approvals), + v2_peers, + versioned_approvals_packet(ValidationVersion::V2, approvals), )) .await; } @@ -2017,9 +2013,9 @@ fn versioned_approvals_packet( Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Approvals(approvals), )), - ValidationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals), + ValidationVersion::V2 => + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Approvals(approvals), )), } } @@ -2033,9 +2029,9 @@ fn versioned_assignments_packet( Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Assignments(assignments), )), - ValidationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments), + ValidationVersion::V2 => + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Assignments(assignments), )), } } diff --git a/polkadot/node/network/approval-distribution/src/tests.rs 
b/polkadot/node/network/approval-distribution/src/tests.rs index 1e9ae7b620072939b81a13949fbc7299333210ad..29c7d8aa45daa1f88ec6737c448e9e2d8a493bb5 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -2388,9 +2388,9 @@ fn import_versioned_approval() { let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // All peers are aware of relay parent. - setup_peer_with_view(overseer, &peer_a, ValidationVersion::VStaging, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, ValidationVersion::V2, view![hash]).await; setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, ValidationVersion::VStaging, view![hash]).await; + setup_peer_with_view(overseer, &peer_c, ValidationVersion::V2, view![hash]).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -2431,8 +2431,8 @@ fn import_versioned_approval() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Assignments(assignments) )) )) => { assert_eq!(peers.len(), 2); @@ -2450,8 +2450,8 @@ fn import_versioned_approval() { validator: validator_index, signature: dummy_signature(), }; - let msg = protocol_vstaging::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_a, Versioned::VStaging(msg)).await; + let msg = protocol_v2::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer(overseer, &peer_a, Versioned::V2(msg)).await; assert_matches!( overseer_recv(overseer).await, @@ -2483,8 +2483,8 @@ fn import_versioned_approval() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals) + Versioned::V2(protocol_v2::ValidationProtocol::ApprovalDistribution( + protocol_v2::ApprovalDistributionMessage::Approvals(approvals) )) )) => { assert_eq!(peers, vec![peer_c]); diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs index c85d874bc4db6edad95d15cdfcdd01820948b0bc..68e381ab6be51d65e4ed0a45eb971b84089cb62e 100644 --- a/polkadot/node/network/bitfield-distribution/src/lib.rs +++ b/polkadot/node/network/bitfield-distribution/src/lib.rs @@ -31,8 +31,8 @@ use polkadot_node_network_protocol::{ GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, }, peer_set::{ProtocolVersion, ValidationVersion}, - v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_subsystem::{ jaeger, messages::*, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, @@ -96,8 +96,8 @@ impl BitfieldGossipMessage { self.relay_parent, self.signed_availability.into(), )), - Some(ValidationVersion::VStaging) => - 
Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Some(ValidationVersion::V2) => + Versioned::V2(protocol_v2::BitfieldDistributionMessage::Bitfield( self.relay_parent, self.signed_availability.into(), )), @@ -502,8 +502,7 @@ async fn relay_message<Context>( }; let v1_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V1); - let vstaging_interested_peers = - filter_by_version(&interested_peers, ValidationVersion::VStaging); + let v2_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V2); if !v1_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( @@ -513,10 +512,10 @@ async fn relay_message<Context>( .await; } - if !vstaging_interested_peers.is_empty() { + if !v2_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_interested_peers, - message.into_validation_protocol(ValidationVersion::VStaging.into()), + v2_interested_peers, + message.into_validation_protocol(ValidationVersion::V2.into()), )) .await } @@ -538,7 +537,7 @@ async fn process_incoming_peer_message<Context>( relay_parent, bitfield, )) => (relay_parent, bitfield), - Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Versioned::V2(protocol_v2::BitfieldDistributionMessage::Bitfield( relay_parent, bitfield, )) => (relay_parent, bitfield), diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs index d6795247e7866f4bb160d941161f114316c3633d..ba2434ea47d69469e812d6274b17b43c3b6b5ee3 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -1111,9 +1111,9 @@ fn network_protocol_versioning() { let peer_c = PeerId::random(); let peers = [ - (peer_a, ValidationVersion::VStaging), + (peer_a, ValidationVersion::V2), (peer_b, ValidationVersion::V1), - (peer_c, ValidationVersion::VStaging), + (peer_c, ValidationVersion::V2), ]; // validator 0 key pair @@ -1173,7 +1173,7 @@ fn network_protocol_versioning() { &Default::default(), NetworkBridgeEvent::PeerMessage( peer_a, - msg.clone().into_network_message(ValidationVersion::VStaging.into()), + msg.clone().into_network_message(ValidationVersion::V2.into()), ), &mut rng, )); @@ -1201,14 +1201,14 @@ fn network_protocol_versioning() { } ); - // vstaging gossip + // v2 gossip assert_matches!( handle.recv().await, AllMessages::NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage(peers, send_msg), ) => { assert_eq!(peers, vec![peer_c]); - assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::VStaging.into())); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V2.into())); } ); diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 82c67061d9a5053e6c531c82c560f0c124470647..7e86b46a7e0397ed78e5796afd9a9979688350fa 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -33,7 +33,7 @@ use polkadot_node_network_protocol::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, }, - v1 as protocol_v1, vstaging as protocol_vstaging, ObservedRole, OurView, PeerId, + v1 as protocol_v1, v2 as protocol_v2, ObservedRole, OurView, PeerId, UnifiedReputationChange as Rep, View, }; @@ -262,13 +262,13 @@ where ), &metrics, ), - ValidationVersion::VStaging => 
send_message( + ValidationVersion::V2 => send_message( &mut network_service, vec![peer], PeerSet::Validation, version, &peerset_protocol_names, - WireMessage::<protocol_vstaging::ValidationProtocol>::ViewUpdate( + WireMessage::<protocol_v2::ValidationProtocol>::ViewUpdate( local_view, ), &metrics, @@ -304,13 +304,13 @@ where ), &metrics, ), - CollationVersion::VStaging => send_message( + CollationVersion::V2 => send_message( &mut network_service, vec![peer], PeerSet::Collation, version, &peerset_protocol_names, - WireMessage::<protocol_vstaging::CollationProtocol>::ViewUpdate( + WireMessage::<protocol_v2::CollationProtocol>::ViewUpdate( local_view, ), &metrics, @@ -465,9 +465,9 @@ where &metrics, ) } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::VStaging.into()) + Some(ValidationVersion::V2.into()) { - handle_peer_messages::<protocol_vstaging::ValidationProtocol, _>( + handle_peer_messages::<protocol_v2::ValidationProtocol, _>( remote, PeerSet::Validation, &mut shared.0.lock().validation_peers, @@ -507,9 +507,9 @@ where &metrics, ) } else if expected_versions[PeerSet::Collation] == - Some(CollationVersion::VStaging.into()) + Some(CollationVersion::V2.into()) { - handle_peer_messages::<protocol_vstaging::CollationProtocol, _>( + handle_peer_messages::<protocol_v2::CollationProtocol, _>( remote, PeerSet::Collation, &mut shared.0.lock().collation_peers, @@ -813,10 +813,8 @@ fn update_our_view<Net, Context>( let v1_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V1.into()); let v1_collation_peers = filter_by_version(&collation_peers, CollationVersion::V1.into()); - let vstaging_validation_peers = - filter_by_version(&validation_peers, ValidationVersion::VStaging.into()); - let vstaging_collation_peers = - filter_by_version(&collation_peers, ValidationVersion::VStaging.into()); + let v2_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V2.into()); + let v2_collation_peers = filter_by_version(&collation_peers, ValidationVersion::V2.into()); send_validation_message_v1( net, @@ -834,17 +832,17 @@ fn update_our_view<Net, Context>( metrics, ); - send_validation_message_vstaging( + send_validation_message_v2( net, - vstaging_validation_peers, + v2_validation_peers, peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, ); - send_collation_message_vstaging( + send_collation_message_v2( net, - vstaging_collation_peers, + v2_collation_peers, peerset_protocol_names, WireMessage::ViewUpdate(new_view), metrics, @@ -955,36 +953,36 @@ fn send_collation_message_v1( ); } -fn send_validation_message_vstaging( +fn send_validation_message_v2( net: &mut impl Network, peers: Vec<PeerId>, protocol_names: &PeerSetProtocolNames, - message: WireMessage<protocol_vstaging::ValidationProtocol>, + message: WireMessage<protocol_v2::ValidationProtocol>, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Validation, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), protocol_names, message, metrics, ); } -fn send_collation_message_vstaging( +fn send_collation_message_v2( net: &mut impl Network, peers: Vec<PeerId>, protocol_names: &PeerSetProtocolNames, - message: WireMessage<protocol_vstaging::CollationProtocol>, + message: WireMessage<protocol_v2::CollationProtocol>, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Collation, - CollationVersion::VStaging.into(), + CollationVersion::V2.into(), protocol_names, message, metrics, diff --git a/polkadot/node/network/bridge/src/rx/tests.rs 
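
The `update_our_view` hunk above partitions connected peers by their negotiated protocol version and then sends one `ViewUpdate`, encoded once per wire format, to each group. A self-contained sketch of that partitioning, assuming simplified `PeerId` and version types:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct PeerId(u64);

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Version {
    V1,
    V2,
}

// Collect the peers that negotiated a given version.
fn filter_by_version(peers: &[(PeerId, Version)], version: Version) -> Vec<PeerId> {
    peers.iter().filter(|(_, v)| *v == version).map(|(p, _)| *p).collect()
}

fn main() {
    let peers = [(PeerId(1), Version::V1), (PeerId(2), Version::V2), (PeerId(3), Version::V2)];

    let v1_peers = filter_by_version(&peers, Version::V1);
    let v2_peers = filter_by_version(&peers, Version::V2);

    // The bridge would now encode the ViewUpdate once per wire format and
    // hand each batch to the network service.
    assert_eq!(v1_peers, vec![PeerId(1)]);
    assert_eq!(v2_peers, vec![PeerId(2), PeerId(3)]);
}
```
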
b/polkadot/node/network/bridge/src/rx/tests.rs index 127f46e0fa37390a440125f8d567c6ff47e53aef..7c69cce48391b65294970232b1a277d7c261ba05 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -1216,10 +1216,10 @@ fn network_protocol_versioning_view_update() { let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); let peers = [ - (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[0], PeerSet::Validation, ValidationVersion::V2), (peer_ids[1], PeerSet::Collation, ValidationVersion::V1), (peer_ids[2], PeerSet::Validation, ValidationVersion::V1), - (peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging), + (peer_ids[3], PeerSet::Collation, ValidationVersion::V2), ]; let head = Hash::repeat_byte(1); @@ -1245,8 +1245,8 @@ fn network_protocol_versioning_view_update() { ValidationVersion::V1 => WireMessage::<protocol_v1::ValidationProtocol>::ViewUpdate(view.clone()) .encode(), - ValidationVersion::VStaging => - WireMessage::<protocol_vstaging::ValidationProtocol>::ViewUpdate(view.clone()) + ValidationVersion::V2 => + WireMessage::<protocol_v2::ValidationProtocol>::ViewUpdate(view.clone()) .encode(), }; assert_network_actions_contains( @@ -1268,12 +1268,7 @@ fn network_protocol_versioning_subsystem_msg() { let peer = PeerId::random(); network_handle - .connect_peer( - peer, - ValidationVersion::VStaging, - PeerSet::Validation, - ObservedRole::Full, - ) + .connect_peer(peer, ValidationVersion::V2, PeerSet::Validation, ObservedRole::Full) .await; // bridge will inform about all connected peers. @@ -1282,7 +1277,7 @@ fn network_protocol_versioning_subsystem_msg() { NetworkBridgeEvent::PeerConnected( peer, ObservedRole::Full, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), None, ), &mut virtual_overseer, @@ -1297,9 +1292,9 @@ fn network_protocol_versioning_subsystem_msg() { } let approval_distribution_message = - protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new()); - let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + let msg = protocol_v2::ValidationProtocol::ApprovalDistribution( approval_distribution_message.clone(), ); @@ -1315,7 +1310,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::ApprovalDistribution( ApprovalDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) ) ) => { assert_eq!(p, peer); @@ -1330,10 +1325,10 @@ fn network_protocol_versioning_subsystem_msg() { signature: sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]), }; let statement_distribution_message = - protocol_vstaging::StatementDistributionMessage::V1Compatibility( + protocol_v2::StatementDistributionMessage::V1Compatibility( protocol_v1::StatementDistributionMessage::LargeStatement(metadata), ); - let msg = protocol_vstaging::ValidationProtocol::StatementDistribution( + let msg = protocol_v2::ValidationProtocol::StatementDistribution( statement_distribution_message.clone(), ); @@ -1349,7 +1344,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::StatementDistribution( StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) ) ) => { assert_eq!(p, peer); diff --git 
a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs index 7fa1149593cab977edbb321a96a5e8268859476c..f15635f1f41c528f6b15a07870f1f348a6b777a3 100644 --- a/polkadot/node/network/bridge/src/tx/mod.rs +++ b/polkadot/node/network/bridge/src/tx/mod.rs @@ -20,7 +20,7 @@ use super::*; use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet, PeerSetProtocolNames, ValidationVersion}, request_response::ReqProtocolNames, - v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, Versioned, + v1 as protocol_v1, v2 as protocol_v2, PeerId, Versioned, }; use polkadot_node_subsystem::{ @@ -198,7 +198,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::V2(msg) => send_validation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -223,7 +223,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::V2(msg) => send_validation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -248,7 +248,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_collation_message_vstaging( + Versioned::V2(msg) => send_collation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -273,7 +273,7 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), - Versioned::VStaging(msg) => send_collation_message_vstaging( + Versioned::V2(msg) => send_collation_message_v2( &mut network_service, peers, peerset_protocol_names, @@ -296,13 +296,11 @@ where Requests::AvailableDataFetchingV1(_) => metrics.on_message("available_data_fetching_v1"), Requests::CollationFetchingV1(_) => metrics.on_message("collation_fetching_v1"), - Requests::CollationFetchingVStaging(_) => - metrics.on_message("collation_fetching_vstaging"), + Requests::CollationFetchingV2(_) => metrics.on_message("collation_fetching_v2"), Requests::PoVFetchingV1(_) => metrics.on_message("pov_fetching_v1"), Requests::DisputeSendingV1(_) => metrics.on_message("dispute_sending_v1"), Requests::StatementFetchingV1(_) => metrics.on_message("statement_fetching_v1"), - Requests::AttestedCandidateVStaging(_) => - metrics.on_message("attested_candidate_vstaging"), + Requests::AttestedCandidateV2(_) => metrics.on_message("attested_candidate_v2"), } network_service @@ -425,36 +423,36 @@ fn send_collation_message_v1( ); } -fn send_validation_message_vstaging( +fn send_validation_message_v2( net: &mut impl Network, peers: Vec<PeerId>, protocol_names: &PeerSetProtocolNames, - message: WireMessage<protocol_vstaging::ValidationProtocol>, + message: WireMessage<protocol_v2::ValidationProtocol>, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Validation, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), protocol_names, message, metrics, ); } -fn send_collation_message_vstaging( +fn send_collation_message_v2( net: &mut impl Network, peers: Vec<PeerId>, protocol_names: &PeerSetProtocolNames, - message: WireMessage<protocol_vstaging::CollationProtocol>, + message: WireMessage<protocol_v2::CollationProtocol>, metrics: &Metrics, ) { send_message( net, peers, PeerSet::Collation, - CollationVersion::VStaging.into(), + CollationVersion::V2.into(), protocol_names, message, metrics, diff --git a/polkadot/node/network/bridge/src/tx/tests.rs b/polkadot/node/network/bridge/src/tx/tests.rs index 21cd134c54f2141af111c5dd41ed5cf6038b58fe..48287f8b74c91a526263fed1d176f916753ce985 100644 
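
On the tx side of the bridge, each `Versioned` message is routed to the helper that encodes it for the matching wire protocol (`send_validation_message_v1` vs `send_validation_message_v2` above). A minimal sketch of that dispatch, with strings standing in for the real protocol payloads and `println!` standing in for the network send:

```rust
enum Versioned<V1, V2> {
    V1(V1),
    V2(V2),
}

fn send_validation_message_v1(msg: String) {
    // Placeholder for encoding as protocol_v1 and handing to the network.
    println!("sending over validation/1: {msg}");
}

fn send_validation_message_v2(msg: String) {
    // Placeholder for encoding as protocol_v2 and handing to the network.
    println!("sending over validation/2: {msg}");
}

// The version tag on the message, not on the peer, decides the helper.
fn dispatch(msg: Versioned<String, String>) {
    match msg {
        Versioned::V1(m) => send_validation_message_v1(m),
        Versioned::V2(m) => send_validation_message_v2(m),
    }
}

fn main() {
    dispatch(Versioned::V1("view update".to_owned()));
    dispatch(Versioned::V2("approvals".to_owned()));
}
```
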
--- a/polkadot/node/network/bridge/src/tx/tests.rs +++ b/polkadot/node/network/bridge/src/tx/tests.rs @@ -341,10 +341,10 @@ fn network_protocol_versioning_send() { let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); let peers = [ - (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[0], PeerSet::Validation, ValidationVersion::V2), (peer_ids[1], PeerSet::Collation, ValidationVersion::V1), (peer_ids[2], PeerSet::Validation, ValidationVersion::V1), - (peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging), + (peer_ids[3], PeerSet::Collation, ValidationVersion::V2), ]; for &(peer_id, peer_set, version) in &peers { @@ -359,9 +359,9 @@ fn network_protocol_versioning_send() { { let approval_distribution_message = - protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new()); - let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + let msg = protocol_v2::ValidationProtocol::ApprovalDistribution( approval_distribution_message.clone(), ); @@ -372,7 +372,7 @@ fn network_protocol_versioning_send() { .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendValidationMessage( receivers.clone(), - Versioned::VStaging(msg.clone()), + Versioned::V2(msg.clone()), ), }) .timeout(TIMEOUT) @@ -398,15 +398,14 @@ fn network_protocol_versioning_send() { // send a collation protocol message. { - let collator_protocol_message = protocol_vstaging::CollatorProtocolMessage::Declare( + let collator_protocol_message = protocol_v2::CollatorProtocolMessage::Declare( Sr25519Keyring::Alice.public().into(), 0_u32.into(), dummy_collator_signature(), ); - let msg = protocol_vstaging::CollationProtocol::CollatorProtocol( - collator_protocol_message.clone(), - ); + let msg = + protocol_v2::CollationProtocol::CollatorProtocol(collator_protocol_message.clone()); let receivers = vec![peer_ids[1], peer_ids[2]]; @@ -414,7 +413,7 @@ fn network_protocol_versioning_send() { .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendCollationMessages(vec![( receivers.clone(), - Versioned::VStaging(msg.clone()), + Versioned::V2(msg.clone()), )]), }) .await; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs index 627c38b776f7c6cffd4ba5ebd848d55ab02f32b4..53f947142d10d615496b184b22ce7d69d815c977 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs @@ -22,8 +22,7 @@ use futures::{future::BoxFuture, stream::FuturesUnordered}; use polkadot_node_network_protocol::{ request_response::{ - incoming::OutgoingResponse, v1 as protocol_v1, vstaging as protocol_vstaging, - IncomingRequest, + incoming::OutgoingResponse, v1 as protocol_v1, v2 as protocol_v2, IncomingRequest, }, PeerId, }; @@ -89,7 +88,7 @@ pub struct WaitingCollationFetches { /// Backwards-compatible wrapper for incoming collations requests. 
pub enum VersionedCollationRequest { V1(IncomingRequest<protocol_v1::CollationFetchingRequest>), - VStaging(IncomingRequest<protocol_vstaging::CollationFetchingRequest>), + V2(IncomingRequest<protocol_v2::CollationFetchingRequest>), } impl From<IncomingRequest<protocol_v1::CollationFetchingRequest>> for VersionedCollationRequest { @@ -98,11 +97,9 @@ impl From<IncomingRequest<protocol_v1::CollationFetchingRequest>> for VersionedC } } -impl From<IncomingRequest<protocol_vstaging::CollationFetchingRequest>> - for VersionedCollationRequest -{ - fn from(req: IncomingRequest<protocol_vstaging::CollationFetchingRequest>) -> Self { - Self::VStaging(req) +impl From<IncomingRequest<protocol_v2::CollationFetchingRequest>> for VersionedCollationRequest { + fn from(req: IncomingRequest<protocol_v2::CollationFetchingRequest>) -> Self { + Self::V2(req) } } @@ -111,7 +108,7 @@ impl VersionedCollationRequest { pub fn para_id(&self) -> ParaId { match self { VersionedCollationRequest::V1(req) => req.payload.para_id, - VersionedCollationRequest::VStaging(req) => req.payload.para_id, + VersionedCollationRequest::V2(req) => req.payload.para_id, } } @@ -119,7 +116,7 @@ impl VersionedCollationRequest { pub fn relay_parent(&self) -> Hash { match self { VersionedCollationRequest::V1(req) => req.payload.relay_parent, - VersionedCollationRequest::VStaging(req) => req.payload.relay_parent, + VersionedCollationRequest::V2(req) => req.payload.relay_parent, } } @@ -127,7 +124,7 @@ impl VersionedCollationRequest { pub fn peer_id(&self) -> PeerId { match self { VersionedCollationRequest::V1(req) => req.peer, - VersionedCollationRequest::VStaging(req) => req.peer, + VersionedCollationRequest::V2(req) => req.peer, } } @@ -138,7 +135,7 @@ impl VersionedCollationRequest { ) -> Result<(), ()> { match self { VersionedCollationRequest::V1(req) => req.send_outgoing_response(response), - VersionedCollationRequest::VStaging(req) => req.send_outgoing_response(response), + VersionedCollationRequest::V2(req) => req.send_outgoing_response(response), } } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index ad2ab99568c8cdc3676984ae3fad43e87ccf4374..304cabbaac80c1a540a352da9ed74faa5f4dbf32 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -31,10 +31,10 @@ use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet}, request_response::{ incoming::{self, OutgoingResponse}, - v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver, + v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, }, - v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement}; use polkadot_node_subsystem::{ @@ -577,7 +577,7 @@ async fn determine_our_validators<Context>( fn declare_message( state: &mut State, version: CollationVersion, -) -> Option<Versioned<protocol_v1::CollationProtocol, protocol_vstaging::CollationProtocol>> { +) -> Option<Versioned<protocol_v1::CollationProtocol, protocol_v2::CollationProtocol>> { let para_id = state.collating_on?; Some(match version { CollationVersion::V1 => { @@ -590,17 +590,15 @@ fn declare_message( ); 
Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)) }, - CollationVersion::VStaging => { + CollationVersion::V2 => { let declare_signature_payload = - protocol_vstaging::declare_signature_payload(&state.local_peer_id); - let wire_message = protocol_vstaging::CollatorProtocolMessage::Declare( + protocol_v2::declare_signature_payload(&state.local_peer_id); + let wire_message = protocol_v2::CollatorProtocolMessage::Declare( state.collator_pair.public(), para_id, state.collator_pair.sign(&declare_signature_payload), ); - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - wire_message, - )) + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message)) }, }) } @@ -706,15 +704,13 @@ async fn advertise_collation<Context>( collation.status.advance_to_advertised(); let collation_message = match protocol_version { - CollationVersion::VStaging => { - let wire_message = protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + CollationVersion::V2 => { + let wire_message = protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash: *candidate_hash, parent_head_data_hash: collation.parent_head_data_hash, }; - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - wire_message, - )) + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message)) }, CollationVersion::V1 => { let wire_message = @@ -837,7 +833,7 @@ async fn send_collation( let candidate_hash = receipt.hash(); // The response payload is the same for both versions of protocol - // and doesn't have vstaging alias for simplicity. + // and doesn't have v2 alias for simplicity. let response = OutgoingResponse { result: Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)), reputation_changes: Vec::new(), @@ -868,16 +864,13 @@ async fn handle_incoming_peer_message<Context>( runtime: &mut RuntimeInfo, state: &mut State, origin: PeerId, - msg: Versioned< - protocol_v1::CollatorProtocolMessage, - protocol_vstaging::CollatorProtocolMessage, - >, + msg: Versioned<protocol_v1::CollatorProtocolMessage, protocol_v2::CollatorProtocolMessage>, ) -> Result<()> { use protocol_v1::CollatorProtocolMessage as V1; - use protocol_vstaging::CollatorProtocolMessage as VStaging; + use protocol_v2::CollatorProtocolMessage as V2; match msg { - Versioned::V1(V1::Declare(..)) | Versioned::VStaging(VStaging::Declare(..)) => { + Versioned::V1(V1::Declare(..)) | Versioned::V2(V2::Declare(..)) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -888,8 +881,7 @@ async fn handle_incoming_peer_message<Context>( ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation)) .await; }, - Versioned::V1(V1::AdvertiseCollation(_)) | - Versioned::VStaging(VStaging::AdvertiseCollation { .. }) => { + Versioned::V1(V1::AdvertiseCollation(_)) | Versioned::V2(V2::AdvertiseCollation { .. 
}) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -904,7 +896,7 @@ async fn handle_incoming_peer_message<Context>( .await; }, Versioned::V1(V1::CollationSeconded(relay_parent, statement)) | - Versioned::VStaging(VStaging::CollationSeconded(relay_parent, statement)) => { + Versioned::V2(V2::CollationSeconded(relay_parent, statement)) => { if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) { gum::warn!( target: LOG_TARGET, @@ -1006,7 +998,7 @@ async fn handle_incoming_request<Context>( let collation = match &req { VersionedCollationRequest::V1(_) if !mode.is_enabled() => per_relay_parent.collations.values_mut().next(), - VersionedCollationRequest::VStaging(req) => + VersionedCollationRequest::V2(req) => per_relay_parent.collations.get_mut(&req.payload.candidate_hash), _ => { gum::warn!( @@ -1322,7 +1314,7 @@ pub(crate) async fn run<Context>( local_peer_id: PeerId, collator_pair: CollatorPair, req_v1_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>, - req_v2_receiver: IncomingRequestReceiver<request_vstaging::CollationFetchingRequest>, + req_v2_receiver: IncomingRequestReceiver<request_v2::CollationFetchingRequest>, metrics: Metrics, ) -> std::result::Result<(), FatalError> { run_inner( @@ -1344,7 +1336,7 @@ async fn run_inner<Context>( local_peer_id: PeerId, collator_pair: CollatorPair, mut req_v1_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>, - mut req_v2_receiver: IncomingRequestReceiver<request_vstaging::CollationFetchingRequest>, + mut req_v2_receiver: IncomingRequestReceiver<request_v2::CollationFetchingRequest>, metrics: Metrics, reputation: ReputationAggregator, reputation_interval: Duration, @@ -1425,7 +1417,7 @@ async fn run_inner<Context>( (ProspectiveParachainsMode::Disabled, VersionedCollationRequest::V1(_)) => { per_relay_parent.collations.values().next() }, - (ProspectiveParachainsMode::Enabled { .. }, VersionedCollationRequest::VStaging(req)) => { + (ProspectiveParachainsMode::Enabled { .. 
}, VersionedCollationRequest::V2(req)) => { per_relay_parent.collations.get(&req.payload.candidate_hash) }, _ => { @@ -1476,7 +1468,7 @@ async fn run_inner<Context>( log_error( handle_incoming_request(&mut ctx, &mut state, request).await, - "Handling incoming collation fetch request VStaging" + "Handling incoming collation fetch request V2" )?; } } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index b452c84c2cd8f1091a3c5f0e2c34f3e40f4b2a0b..7dd2287dab684debb39f68f94098c1b225024285 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -198,7 +198,7 @@ impl TestState { overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx) + RuntimeApiRequest::AsyncBackingParams(tx) )) => { assert_eq!(relay_parent, self.relay_parent); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); @@ -212,7 +212,7 @@ type VirtualOverseer = test_helpers::TestSubsystemContextHandle<CollatorProtocol struct TestHarness { virtual_overseer: VirtualOverseer, req_v1_cfg: sc_network::config::RequestResponseConfig, - req_vstaging_cfg: sc_network::config::RequestResponseConfig, + req_v2_cfg: sc_network::config::RequestResponseConfig, } fn test_harness<T: Future<Output = TestHarness>>( @@ -236,7 +236,7 @@ fn test_harness<T: Future<Output = TestHarness>>( let (collation_req_receiver, req_v1_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); - let (collation_req_vstaging_receiver, req_vstaging_cfg) = + let (collation_req_v2_receiver, req_v2_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); let subsystem = async { run_inner( @@ -244,7 +244,7 @@ fn test_harness<T: Future<Output = TestHarness>>( local_peer_id, collator_pair, collation_req_receiver, - collation_req_vstaging_receiver, + collation_req_v2_receiver, Default::default(), reputation, REPUTATION_CHANGE_TEST_INTERVAL, @@ -253,7 +253,7 @@ fn test_harness<T: Future<Output = TestHarness>>( .unwrap(); }; - let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg }); + let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); @@ -330,7 +330,7 @@ async fn setup_system(virtual_overseer: &mut VirtualOverseer, test_state: &TestS overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx) + RuntimeApiRequest::AsyncBackingParams(tx) )) => { assert_eq!(relay_parent, test_state.relay_parent); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); @@ -545,7 +545,7 @@ async fn expect_declare_msg( /// Check that the next received message is a collation advertisement message. /// -/// Expects vstaging message if `expected_candidate_hashes` is `Some`, v1 otherwise. +/// Expects v2 message if `expected_candidate_hashes` is `Some`, v1 otherwise. 
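
For context on the v1/v2 split that `expect_advertise_collation_msg` checks below: a v1 advertisement identifies a collation only by relay parent, while a v2 one pins an exact candidate, which is what async backing needs when several candidates can exist per relay parent. A sketch under that assumption, with `Hash` as a plain byte-array stand-in:

```rust
type Hash = [u8; 32];

enum AdvertiseCollation {
    // v1: at most one collation per relay parent, so this is enough.
    V1 { relay_parent: Hash },
    // v2 (async backing): the advertisement names the exact candidate.
    V2 { relay_parent: Hash, candidate_hash: Hash, parent_head_data_hash: Hash },
}

// Only a v2 advertisement can tell us which candidate is on offer.
fn candidate_of(ad: &AdvertiseCollation) -> Option<Hash> {
    match ad {
        AdvertiseCollation::V1 { .. } => None,
        AdvertiseCollation::V2 { candidate_hash, .. } => Some(*candidate_hash),
    }
}

fn main() {
    let ad = AdvertiseCollation::V2 {
        relay_parent: [1; 32],
        candidate_hash: [2; 32],
        parent_head_data_hash: [3; 32],
    };
    assert_eq!(candidate_of(&ad), Some([2; 32]));

    let legacy = AdvertiseCollation::V1 { relay_parent: [1; 32] };
    assert_eq!(candidate_of(&legacy), None);
}
```
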
async fn expect_advertise_collation_msg( virtual_overseer: &mut VirtualOverseer, peer: &PeerId, @@ -579,13 +579,13 @@ async fn expect_advertise_collation_msg( }, ( Some(candidate_hashes), - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( wire_message, )), ) => { assert_matches!( wire_message, - protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash, .. @@ -634,7 +634,7 @@ fn advertise_and_send_collation() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_v1_cfg = test_harness.req_v1_cfg; - let req_vstaging_cfg = test_harness.req_vstaging_cfg; + let req_v2_cfg = test_harness.req_v2_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -789,7 +789,7 @@ fn advertise_and_send_collation() { None, ) .await; - TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } }, ); } @@ -807,7 +807,7 @@ fn delay_reputation_change() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_v1_cfg = test_harness.req_v1_cfg; - let req_vstaging_cfg = test_harness.req_vstaging_cfg; + let req_v2_cfg = test_harness.req_v2_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -903,15 +903,15 @@ fn delay_reputation_change() { ); } - TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } }, ); } -/// Tests that collator side works with vstaging network protocol +/// Tests that collator side works with v2 network protocol /// before async backing is enabled. #[test] -fn advertise_collation_vstaging_protocol() { +fn advertise_collation_v2_protocol() { let test_state = TestState::default(); let local_peer_id = test_state.local_peer_id; let collator_pair = test_state.collator_pair.clone(); @@ -941,21 +941,16 @@ fn advertise_collation_vstaging_protocol() { Some(validators[0].clone()), ) .await; - // The rest with vstaging. + // The rest with v2. for (val, peer) in validators.iter().zip(peer_ids.iter()).skip(1) { - connect_peer( - virtual_overseer, - *peer, - CollationVersion::VStaging, - Some(val.clone()), - ) - .await; + connect_peer(virtual_overseer, *peer, CollationVersion::V2, Some(val.clone())) + .await; } // Declare messages. expect_declare_msg(virtual_overseer, &test_state, &peer_ids[0]).await; for peer_id in peer_ids.iter().skip(1) { - prospective_parachains::expect_declare_msg_vstaging( + prospective_parachains::expect_declare_msg_v2( virtual_overseer, &test_state, &peer_id, @@ -981,7 +976,7 @@ fn advertise_collation_vstaging_protocol() { virtual_overseer, peer_id, test_state.relay_parent, - Some(vec![candidate.hash()]), // This is `Some`, advertisement is vstaging. + Some(vec![candidate.hash()]), // This is `Some`, advertisement is v2. 
) .await; } @@ -1405,7 +1400,7 @@ fn connect_to_buffered_groups() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let mut req_cfg = test_harness.req_v1_cfg; - let req_vstaging_cfg = test_harness.req_vstaging_cfg; + let req_v2_cfg = test_harness.req_v2_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -1510,7 +1505,7 @@ fn connect_to_buffered_groups() { } ); - TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_v2_cfg } }, ); } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index bd55c35852fa96bf7a8f5747f2cb904bdf038fba..fd9d7a746ebe49623ff0feebd48793e8e603807b 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -19,10 +19,10 @@ use super::*; use polkadot_node_subsystem::messages::{ChainApiMessage, ProspectiveParachainsMessage}; -use polkadot_primitives::{vstaging as vstaging_primitives, Header, OccupiedCore}; +use polkadot_primitives::{AsyncBackingParams, Header, OccupiedCore}; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = - vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) @@ -52,7 +52,7 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), )) => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) @@ -124,7 +124,7 @@ async fn update_view( } /// Check that the next received message is a `Declare` message. -pub(super) async fn expect_declare_msg_vstaging( +pub(super) async fn expect_declare_msg_v2( virtual_overseer: &mut VirtualOverseer, test_state: &TestState, peer: &PeerId, @@ -133,20 +133,20 @@ pub(super) async fn expect_declare_msg_vstaging( overseer_recv(virtual_overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( to, - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( wire_message, )), )) => { assert_eq!(to[0], *peer); assert_matches!( wire_message, - protocol_vstaging::CollatorProtocolMessage::Declare( + protocol_v2::CollatorProtocolMessage::Declare( collator_id, para_id, signature, ) => { assert!(signature.verify( - &*protocol_vstaging::declare_signature_payload(&test_state.local_peer_id), + &*protocol_v2::declare_signature_payload(&test_state.local_peer_id), &collator_id), ); assert_eq!(collator_id, test_state.collator_pair.public()); @@ -203,13 +203,12 @@ fn distribute_collation_from_implicit_view() { .into_iter() .zip(validator_peer_ids.clone()) { - connect_peer(virtual_overseer, peer, CollationVersion::VStaging, Some(val.clone())) - .await; + connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(val.clone())).await; } // Collator declared itself to each peer. 
for peer_id in &validator_peer_ids { - expect_declare_msg_vstaging(virtual_overseer, &test_state, peer_id).await; + expect_declare_msg_v2(virtual_overseer, &test_state, peer_id).await; } let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; @@ -386,7 +385,7 @@ fn advertise_and_send_collation_by_hash() { |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; let req_v1_cfg = test_harness.req_v1_cfg; - let mut req_vstaging_cfg = test_harness.req_vstaging_cfg; + let mut req_v2_cfg = test_harness.req_v2_cfg; let head_a = Hash::from_low_u64_be(128); let head_a_num: u32 = 64; @@ -435,11 +434,11 @@ fn advertise_and_send_collation_by_hash() { connect_peer( &mut virtual_overseer, peer, - CollationVersion::VStaging, + CollationVersion::V2, Some(validator_id.clone()), ) .await; - expect_declare_msg_vstaging(&mut virtual_overseer, &test_state, &peer).await; + expect_declare_msg_v2(&mut virtual_overseer, &test_state, &peer).await; // Head `b` is not a leaf, but both advertisements are still relevant. send_peer_view_change(&mut virtual_overseer, &peer, vec![head_b]).await; @@ -449,13 +448,13 @@ fn advertise_and_send_collation_by_hash() { for (candidate, pov_block) in candidates { let (pending_response, rx) = oneshot::channel(); - req_vstaging_cfg + req_v2_cfg .inbound_queue .as_mut() .unwrap() .send(RawIncomingRequest { peer, - payload: request_vstaging::CollationFetchingRequest { + payload: request_v2::CollationFetchingRequest { relay_parent: head_b, para_id: test_state.para_id, candidate_hash: candidate.hash(), @@ -469,7 +468,7 @@ fn advertise_and_send_collation_by_hash() { assert_matches!( rx.await, Ok(full_response) => { - // Response is the same for vstaging. + // Response is the same for v2. let request_v1::CollationFetchingResponse::Collation(receipt, pov): request_v1::CollationFetchingResponse = request_v1::CollationFetchingResponse::decode( &mut full_response.result @@ -482,7 +481,7 @@ fn advertise_and_send_collation_by_hash() { ); } - TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg } }, ) } @@ -552,11 +551,11 @@ fn advertise_core_occupied() { connect_peer( virtual_overseer, peer_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, Some(validators[0].clone()), ) .await; - expect_declare_msg_vstaging(virtual_overseer, &test_state, &peer_ids[0]).await; + expect_declare_msg_v2(virtual_overseer, &test_state, &peer_ids[0]).await; // Peer is aware of the leaf. send_peer_view_change(virtual_overseer, &peer_ids[0], vec![head_a]).await; diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs index 62c033954f75a00fb2c1b50a3afb0b858b977c5f..1edc67664172400503158f8a451756ca3674a3d9 100644 --- a/polkadot/node/network/collator-protocol/src/lib.rs +++ b/polkadot/node/network/collator-protocol/src/lib.rs @@ -32,7 +32,7 @@ use polkadot_node_subsystem_util::reputation::ReputationAggregator; use sp_keystore::KeystorePtr; use polkadot_node_network_protocol::{ - request_response::{v1 as request_v1, vstaging as protocol_vstaging, IncomingRequestReceiver}, + request_response::{v1 as request_v1, v2 as protocol_v2, IncomingRequestReceiver}, PeerId, UnifiedReputationChange as Rep, }; use polkadot_primitives::CollatorPair; @@ -83,9 +83,8 @@ pub enum ProtocolSide { collator_pair: CollatorPair, /// Receiver for v1 collation fetching requests. 
request_receiver_v1: IncomingRequestReceiver<request_v1::CollationFetchingRequest>, - /// Receiver for vstaging collation fetching requests. - request_receiver_vstaging: - IncomingRequestReceiver<protocol_vstaging::CollationFetchingRequest>, + /// Receiver for v2 collation fetching requests. + request_receiver_v2: IncomingRequestReceiver<protocol_v2::CollationFetchingRequest>, /// Metrics. metrics: collator_side::Metrics, }, @@ -121,14 +120,14 @@ impl<Context> CollatorProtocolSubsystem { peer_id, collator_pair, request_receiver_v1, - request_receiver_vstaging, + request_receiver_v2, metrics, } => collator_side::run( ctx, peer_id, collator_pair, request_receiver_v1, - request_receiver_vstaging, + request_receiver_v2, metrics, ) .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 4c92780f2da9448f3b3a0992a3cc16c9b3676c30..a53e0028b9e70455f3dd918bad58fc5ec472c173 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -119,7 +119,7 @@ impl PendingCollation { } } -/// vstaging advertisement that was rejected by the backing +/// v2 advertisement that was rejected by the backing /// subsystem. Validator may fetch it later if its fragment /// membership gets recognized before relay parent goes out of view. #[derive(Debug, Clone)] diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index e8cf769d2e5f336f17053cc6471366ae18d694a9..fcb408d54b1b72b856b2439bd2eb1823020fb513 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -34,10 +34,10 @@ use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet}, request_response::{ outgoing::{Recipient, RequestError}, - v1 as request_v1, vstaging as request_vstaging, OutgoingRequest, Requests, + v1 as request_v1, v2 as request_v2, OutgoingRequest, Requests, }, - v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{SignedFullStatement, Statement}; use polkadot_node_subsystem::{ @@ -624,13 +624,9 @@ async fn notify_collation_seconded( CollationVersion::V1 => Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol( protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement), )), - CollationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - protocol_vstaging::CollatorProtocolMessage::CollationSeconded( - relay_parent, - statement, - ), - )), + CollationVersion::V2 => Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( + protocol_v2::CollatorProtocolMessage::CollationSeconded(relay_parent, statement), + )), }; sender .send_message(NetworkBridgeTxMessage::SendCollationMessage(vec![peer_id], wire_message)) @@ -694,16 +690,12 @@ async fn request_collation( let requests = Requests::CollationFetchingV1(req); (requests, response_recv.boxed()) }, - (CollationVersion::VStaging, Some(ProspectiveCandidate { candidate_hash, .. 
})) => { + (CollationVersion::V2, Some(ProspectiveCandidate { candidate_hash, .. })) => { let (req, response_recv) = OutgoingRequest::new( Recipient::Peer(peer_id), - request_vstaging::CollationFetchingRequest { - relay_parent, - para_id, - candidate_hash, - }, + request_v2::CollationFetchingRequest { relay_parent, para_id, candidate_hash }, ); - let requests = Requests::CollationFetchingVStaging(req); + let requests = Requests::CollationFetchingV2(req); (requests, response_recv.boxed()) }, _ => return Err(FetchError::ProtocolMismatch), @@ -758,18 +750,15 @@ async fn process_incoming_peer_message<Context>( ctx: &mut Context, state: &mut State, origin: PeerId, - msg: Versioned< - protocol_v1::CollatorProtocolMessage, - protocol_vstaging::CollatorProtocolMessage, - >, + msg: Versioned<protocol_v1::CollatorProtocolMessage, protocol_v2::CollatorProtocolMessage>, ) { use protocol_v1::CollatorProtocolMessage as V1; - use protocol_vstaging::CollatorProtocolMessage as VStaging; + use protocol_v2::CollatorProtocolMessage as V2; use sp_runtime::traits::AppVerify; match msg { Versioned::V1(V1::Declare(collator_id, para_id, signature)) | - Versioned::VStaging(VStaging::Declare(collator_id, para_id, signature)) => { + Versioned::V2(V2::Declare(collator_id, para_id, signature)) => { if collator_peer_id(&state.peer_data, &collator_id).is_some() { modify_reputation( &mut state.reputation, @@ -881,7 +870,7 @@ async fn process_incoming_peer_message<Context>( modify_reputation(&mut state.reputation, ctx.sender(), origin, rep).await; } }, - Versioned::VStaging(VStaging::AdvertiseCollation { + Versioned::V2(V2::AdvertiseCollation { relay_parent, candidate_hash, parent_head_data_hash, @@ -901,15 +890,14 @@ async fn process_incoming_peer_message<Context>( ?relay_parent, ?candidate_hash, error = ?err, - "Rejected vstaging advertisement", + "Rejected v2 advertisement", ); if let Some(rep) = err.reputation_changes() { modify_reputation(&mut state.reputation, ctx.sender(), origin, rep).await; } }, - Versioned::V1(V1::CollationSeconded(..)) | - Versioned::VStaging(VStaging::CollationSeconded(..)) => { + Versioned::V1(V1::CollationSeconded(..)) | Versioned::V2(V2::CollationSeconded(..)) => { gum::warn!( target: LOG_TARGET, peer_id = ?origin, @@ -1074,7 +1062,7 @@ where }; if relay_parent_mode.is_enabled() && prospective_candidate.is_none() { - // Expected vstaging advertisement. + // Expected v2 advertisement. 
return Err(AdvertisementError::ProtocolMismatch) } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs index 1cb656e325d325e6e824a3ea9ec5669f78d35424..9812998aab763cee54e1dac5232c1c9bddb4651b 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -357,7 +357,7 @@ async fn assert_fetch_collation_request( ), Some(candidate_hash) => assert_matches!( req, - Requests::CollationFetchingVStaging(req) => { + Requests::CollationFetchingV2(req) => { let payload = req.payload; assert_eq!(payload.relay_parent, relay_parent); assert_eq!(payload.para_id, para_id); @@ -394,12 +394,11 @@ async fn connect_and_declare_collator( para_id, collator.sign(&protocol_v1::declare_signature_payload(&peer)), )), - CollationVersion::VStaging => - Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::Declare( - collator.public(), - para_id, - collator.sign(&protocol_v1::declare_signature_payload(&peer)), - )), + CollationVersion::V2 => Versioned::V2(protocol_v2::CollatorProtocolMessage::Declare( + collator.public(), + para_id, + collator.sign(&protocol_v1::declare_signature_payload(&peer)), + )), }; overseer_send( @@ -421,7 +420,7 @@ async fn advertise_collation( ) { let wire_message = match candidate { Some((candidate_hash, parent_head_data_hash)) => - Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + Versioned::V2(protocol_v2::CollatorProtocolMessage::AdvertiseCollation { relay_parent, candidate_hash, parent_head_data_hash, @@ -444,7 +443,7 @@ async fn assert_async_backing_params_request(virtual_overseer: &mut VirtualOvers overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx) + RuntimeApiRequest::AsyncBackingParams(tx) )) => { assert_eq!(relay_parent, hash); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); @@ -499,10 +498,10 @@ fn act_on_advertisement() { }); } -/// Tests that validator side works with vstaging network protocol +/// Tests that validator side works with v2 network protocol /// before async backing is enabled. #[test] -fn act_on_advertisement_vstaging() { +fn act_on_advertisement_v2() { let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { @@ -529,13 +528,13 @@ fn act_on_advertisement_vstaging() { peer_b, pair.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; let candidate_hash = CandidateHash::default(); let parent_head_data_hash = Hash::zero(); - // vstaging advertisement. + // v2 advertisement. 
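
The `ProtocolMismatch` rejection in the validator-side hunk above fires when async backing is enabled but the advertisement carries no prospective-candidate info, i.e. a v1 message arrived where a v2 one was expected. A simplified sketch of that guard, with a unit struct standing in for the real `ProspectiveCandidate`:

```rust
#[derive(Debug, PartialEq)]
enum AdvertisementError {
    ProtocolMismatch,
}

// Stand-in for the real struct carrying `candidate_hash` and friends.
struct ProspectiveCandidate;

fn check_advertisement(
    async_backing_enabled: bool,
    prospective_candidate: Option<&ProspectiveCandidate>,
) -> Result<(), AdvertisementError> {
    // With async backing enabled, only v2 advertisements (which carry
    // prospective-candidate info) are acceptable.
    if async_backing_enabled && prospective_candidate.is_none() {
        return Err(AdvertisementError::ProtocolMismatch)
    }
    Ok(())
}

fn main() {
    assert_eq!(check_advertisement(true, None), Err(AdvertisementError::ProtocolMismatch));
    assert!(check_advertisement(true, Some(&ProspectiveCandidate)).is_ok());
    assert!(check_advertisement(false, None).is_ok());
}
```
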
advertise_collation( &mut virtual_overseer, peer_b, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index e2a007b308e54a9e65af18646f5353363e45ef3d..4da0f11da39002fe035c618608e44ffca2939e94 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -20,12 +20,12 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; use polkadot_primitives::{ - vstaging as vstaging_primitives, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, - Header, SigningContext, ValidatorId, + AsyncBackingParams, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header, + SigningContext, ValidatorId, }; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = - vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) @@ -97,7 +97,7 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::StagingAsyncBackingParams(tx), + RuntimeApiRequest::AsyncBackingParams(tx), )) => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) @@ -226,8 +226,8 @@ async fn assert_collation_seconded( overseer_recv(virtual_overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( peers, - Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( - protocol_vstaging::CollatorProtocolMessage::CollationSeconded( + Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol( + protocol_v2::CollatorProtocolMessage::CollationSeconded( _relay_parent, .., ), @@ -306,7 +306,7 @@ fn accept_advertisements_from_implicit_view() { peer_a, pair_a.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; connect_and_declare_collator( @@ -314,7 +314,7 @@ fn accept_advertisements_from_implicit_view() { peer_b, pair_b.clone(), test_state.chain_ids[1], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -406,7 +406,7 @@ fn second_multiple_candidates_per_relay_parent() { peer_a, pair.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -457,7 +457,7 @@ fn second_multiple_candidates_per_relay_parent() { let pov = PoV { block_data: BlockData(vec![1]) }; response_channel - .send(Ok(request_vstaging::CollationFetchingResponse::Collation( + .send(Ok(request_v2::CollationFetchingResponse::Collation( candidate.clone(), pov.clone(), ) @@ -514,7 +514,7 @@ fn second_multiple_candidates_per_relay_parent() { peer_b, pair_b.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -562,7 +562,7 @@ fn fetched_collation_sanity_check() { peer_a, pair.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -611,7 +611,7 @@ fn fetched_collation_sanity_check() { let pov = PoV { block_data: BlockData(vec![1]) }; response_channel - 
.send(Ok(request_vstaging::CollationFetchingResponse::Collation( + .send(Ok(request_v2::CollationFetchingResponse::Collation( candidate.clone(), pov.clone(), ) @@ -668,7 +668,7 @@ fn advertisement_spam_protection() { peer_a, pair_a.clone(), test_state.chain_ids[1], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -748,7 +748,7 @@ fn backed_candidate_unblocks_advertisements() { peer_a, pair_a.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; connect_and_declare_collator( @@ -756,7 +756,7 @@ fn backed_candidate_unblocks_advertisements() { peer_b, pair_b.clone(), test_state.chain_ids[1], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; @@ -856,7 +856,7 @@ fn active_leave_unblocks_advertisements() { *peer_id, peer.clone(), test_state.chain_ids[0], - CollationVersion::VStaging, + CollationVersion::V2, ) .await; } diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index c5dc1ba14bd35b5127ff27c0c3d0732e0e56178c..4fa23507e86bd145782d068b0aa84d0cf2c98ba6 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -452,7 +452,7 @@ where // match void -> LLVM unreachable match message { Versioned::V1(m) => match m {}, - Versioned::VStaging(m) => match m {}, + Versioned::V2(m) => match m {}, } }, } diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index c33b9eae3252606e8d2fcbd954a0e180f4a47acb..379334ded24acde67100de5b61069d5cb5909ce4 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -27,6 +27,3 @@ bitvec = "1" [dev-dependencies] rand_chacha = "0.3.1" - -[features] -network-protocol-staging = [] diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index 1bed2c12fe202ce51ae07d50b0d07ca6282b669d..901ac99b66933a77b95ccb1c55fd22930fc5a34a 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -253,26 +253,25 @@ impl View { /// A protocol-versioned type. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum Versioned<V1, VStaging> { +pub enum Versioned<V1, V2> { /// V1 type. V1(V1), - /// VStaging type. - VStaging(VStaging), + /// V2 type. + V2(V2), } -impl<V1: Clone, VStaging: Clone> Versioned<&'_ V1, &'_ VStaging> { +impl<V1: Clone, V2: Clone> Versioned<&'_ V1, &'_ V2> { /// Convert to a fully-owned version of the message. - pub fn clone_inner(&self) -> Versioned<V1, VStaging> { + pub fn clone_inner(&self) -> Versioned<V1, V2> { match *self { Versioned::V1(inner) => Versioned::V1(inner.clone()), - Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()), + Versioned::V2(inner) => Versioned::V2(inner.clone()), } } } /// All supported versions of the validation protocol message. 
-pub type VersionedValidationProtocol = - Versioned<v1::ValidationProtocol, vstaging::ValidationProtocol>; +pub type VersionedValidationProtocol = Versioned<v1::ValidationProtocol, v2::ValidationProtocol>; impl From<v1::ValidationProtocol> for VersionedValidationProtocol { fn from(v1: v1::ValidationProtocol) -> Self { @@ -280,14 +279,14 @@ impl From<v1::ValidationProtocol> for VersionedValidationProtocol { } } -impl From<vstaging::ValidationProtocol> for VersionedValidationProtocol { - fn from(vstaging: vstaging::ValidationProtocol) -> Self { - VersionedValidationProtocol::VStaging(vstaging) +impl From<v2::ValidationProtocol> for VersionedValidationProtocol { + fn from(v2: v2::ValidationProtocol) -> Self { + VersionedValidationProtocol::V2(v2) } } /// All supported versions of the collation protocol message. -pub type VersionedCollationProtocol = Versioned<v1::CollationProtocol, vstaging::CollationProtocol>; +pub type VersionedCollationProtocol = Versioned<v1::CollationProtocol, v2::CollationProtocol>; impl From<v1::CollationProtocol> for VersionedCollationProtocol { fn from(v1: v1::CollationProtocol) -> Self { @@ -295,9 +294,9 @@ impl From<v1::CollationProtocol> for VersionedCollationProtocol { } } -impl From<vstaging::CollationProtocol> for VersionedCollationProtocol { - fn from(vstaging: vstaging::CollationProtocol) -> Self { - VersionedCollationProtocol::VStaging(vstaging) +impl From<v2::CollationProtocol> for VersionedCollationProtocol { + fn from(v2: v2::CollationProtocol) -> Self { + VersionedCollationProtocol::V2(v2) } } @@ -307,7 +306,7 @@ macro_rules! impl_versioned_full_protocol_from { fn from(versioned_from: $from) -> $out { match versioned_from { Versioned::V1(x) => Versioned::V1(x.into()), - Versioned::VStaging(x) => Versioned::VStaging(x.into()), + Versioned::V2(x) => Versioned::V2(x.into()), } } } @@ -321,7 +320,7 @@ macro_rules! impl_versioned_try_from { $from:ty, $out:ty, $v1_pat:pat => $v1_out:expr, - $vstaging_pat:pat => $vstaging_out:expr + $v2_pat:pat => $v2_out:expr ) => { impl TryFrom<$from> for $out { type Error = crate::WrongVariant; @@ -330,7 +329,7 @@ macro_rules! impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)), - Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)), + Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out)), _ => Err(crate::WrongVariant), } } @@ -343,8 +342,7 @@ macro_rules! impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())), - Versioned::VStaging($vstaging_pat) => - Ok(Versioned::VStaging($vstaging_out.clone())), + Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out.clone())), _ => Err(crate::WrongVariant), } } @@ -354,7 +352,7 @@ macro_rules! impl_versioned_try_from { /// Version-annotated messages used by the bitfield distribution subsystem. 
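
The `impl_versioned_try_from!` macro above generates `TryFrom` conversions that pull one subsystem's messages out of the full versioned protocol enum. A hand-expanded sketch for the bitfield-distribution case, with toy enums and `String` payloads in place of the real protocol types:

```rust
#[derive(Debug, PartialEq)]
enum Versioned<V1, V2> {
    V1(V1),
    V2(V2),
}

#[derive(Debug, PartialEq)]
enum ValidationProtocolV1 {
    BitfieldDistribution(String),
}

#[derive(Debug, PartialEq)]
enum ValidationProtocolV2 {
    BitfieldDistribution(String),
}

type VersionedValidationProtocol = Versioned<ValidationProtocolV1, ValidationProtocolV2>;
type BitfieldDistributionMessage = Versioned<String, String>;

#[derive(Debug)]
struct WrongVariant;

impl TryFrom<VersionedValidationProtocol> for BitfieldDistributionMessage {
    type Error = WrongVariant;

    // Extract the subsystem message while preserving the version tag; any
    // other protocol variant would map to `WrongVariant`.
    fn try_from(x: VersionedValidationProtocol) -> Result<Self, Self::Error> {
        match x {
            Versioned::V1(ValidationProtocolV1::BitfieldDistribution(m)) => Ok(Versioned::V1(m)),
            Versioned::V2(ValidationProtocolV2::BitfieldDistribution(m)) => Ok(Versioned::V2(m)),
        }
    }
}

fn main() {
    let full = Versioned::V2(ValidationProtocolV2::BitfieldDistribution("bitfield".into()));
    let msg: BitfieldDistributionMessage = full.try_into().unwrap();
    assert_eq!(msg, Versioned::V2("bitfield".into()));

    let legacy = Versioned::V1(ValidationProtocolV1::BitfieldDistribution("old".into()));
    assert!(BitfieldDistributionMessage::try_from(legacy).is_ok());
}
```
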
pub type BitfieldDistributionMessage = - Versioned<v1::BitfieldDistributionMessage, vstaging::BitfieldDistributionMessage>; + Versioned<v1::BitfieldDistributionMessage, v2::BitfieldDistributionMessage>; impl_versioned_full_protocol_from!( BitfieldDistributionMessage, VersionedValidationProtocol, @@ -364,12 +362,12 @@ impl_versioned_try_from!( VersionedValidationProtocol, BitfieldDistributionMessage, v1::ValidationProtocol::BitfieldDistribution(x) => x, - vstaging::ValidationProtocol::BitfieldDistribution(x) => x + v2::ValidationProtocol::BitfieldDistribution(x) => x ); /// Version-annotated messages used by the statement distribution subsystem. pub type StatementDistributionMessage = - Versioned<v1::StatementDistributionMessage, vstaging::StatementDistributionMessage>; + Versioned<v1::StatementDistributionMessage, v2::StatementDistributionMessage>; impl_versioned_full_protocol_from!( StatementDistributionMessage, VersionedValidationProtocol, @@ -379,12 +377,12 @@ impl_versioned_try_from!( VersionedValidationProtocol, StatementDistributionMessage, v1::ValidationProtocol::StatementDistribution(x) => x, - vstaging::ValidationProtocol::StatementDistribution(x) => x + v2::ValidationProtocol::StatementDistribution(x) => x ); /// Version-annotated messages used by the approval distribution subsystem. pub type ApprovalDistributionMessage = - Versioned<v1::ApprovalDistributionMessage, vstaging::ApprovalDistributionMessage>; + Versioned<v1::ApprovalDistributionMessage, v2::ApprovalDistributionMessage>; impl_versioned_full_protocol_from!( ApprovalDistributionMessage, VersionedValidationProtocol, @@ -394,13 +392,13 @@ impl_versioned_try_from!( VersionedValidationProtocol, ApprovalDistributionMessage, v1::ValidationProtocol::ApprovalDistribution(x) => x, - vstaging::ValidationProtocol::ApprovalDistribution(x) => x + v2::ValidationProtocol::ApprovalDistribution(x) => x ); /// Version-annotated messages used by the gossip-support subsystem (this is void). pub type GossipSupportNetworkMessage = - Versioned<v1::GossipSupportNetworkMessage, vstaging::GossipSupportNetworkMessage>; + Versioned<v1::GossipSupportNetworkMessage, v2::GossipSupportNetworkMessage>; // This is a void enum placeholder, so never gets sent over the wire. impl TryFrom<VersionedValidationProtocol> for GossipSupportNetworkMessage { type Error = WrongVariant; @@ -418,7 +416,7 @@ impl<'a> TryFrom<&'a VersionedValidationProtocol> for GossipSupportNetworkMessag /// Version-annotated messages used by the bitfield distribution subsystem. pub type CollatorProtocolMessage = - Versioned<v1::CollatorProtocolMessage, vstaging::CollatorProtocolMessage>; + Versioned<v1::CollatorProtocolMessage, v2::CollatorProtocolMessage>; impl_versioned_full_protocol_from!( CollatorProtocolMessage, VersionedCollationProtocol, @@ -428,7 +426,7 @@ impl_versioned_try_from!( VersionedCollationProtocol, CollatorProtocolMessage, v1::CollationProtocol::CollatorProtocol(x) => x, - vstaging::CollationProtocol::CollatorProtocol(x) => x + v2::CollationProtocol::CollatorProtocol(x) => x ); /// v1 notification protocol types. @@ -589,12 +587,12 @@ pub mod v1 { } } -/// vstaging network protocol types. -pub mod vstaging { +/// v2 network protocol types. 
+pub mod v2 { use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; - use polkadot_primitives::vstaging::{ + use polkadot_primitives::{ CandidateHash, CandidateIndex, CollatorId, CollatorSignature, GroupIndex, Hash, Id as ParaId, UncheckedSignedAvailabilityBitfield, UncheckedSignedStatement, }; diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index c2163783c2ce05d9eaac9b3718745107baaeeb08..8dd68b297e3000ec4fc0847b047a1323aae97353 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -118,16 +118,9 @@ impl PeerSet { /// Networking layer relies on `get_main_version()` being the version /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. pub fn get_main_version(self) -> ProtocolVersion { - #[cfg(not(feature = "network-protocol-staging"))] match self { - PeerSet::Validation => ValidationVersion::V1.into(), - PeerSet::Collation => CollationVersion::V1.into(), - } - - #[cfg(feature = "network-protocol-staging")] - match self { - PeerSet::Validation => ValidationVersion::VStaging.into(), - PeerSet::Collation => CollationVersion::VStaging.into(), + PeerSet::Validation => ValidationVersion::V2.into(), + PeerSet::Collation => CollationVersion::V2.into(), } } @@ -152,7 +145,7 @@ impl PeerSet { PeerSet::Validation => if version == ValidationVersion::V1.into() { Some("validation/1") - } else if version == ValidationVersion::VStaging.into() { + } else if version == ValidationVersion::V2.into() { Some("validation/2") } else { None @@ -160,7 +153,7 @@ impl PeerSet { PeerSet::Collation => if version == CollationVersion::V1.into() { Some("collation/1") - } else if version == CollationVersion::VStaging.into() { + } else if version == CollationVersion::V2.into() { Some("collation/2") } else { None @@ -223,8 +216,8 @@ impl From<ProtocolVersion> for u32 { pub enum ValidationVersion { /// The first version. V1 = 1, - /// The staging version. - VStaging = 2, + /// The second version. + V2 = 2, } /// Supported collation protocol versions. Only versions defined here must be used in the codebase. @@ -232,8 +225,8 @@ pub enum ValidationVersion { pub enum CollationVersion { /// The first version. V1 = 1, - /// The staging version. - VStaging = 2, + /// The second version. + V2 = 2, } /// Marker indicating the version is unknown. diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index baed4b8463160451231d3cbfc11e32337dbd9fec..96f7adeb29ba02987857fbac1eeed7b6770d9a35 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -55,7 +55,7 @@ pub use outgoing::{OutgoingRequest, OutgoingResult, Recipient, Requests, Respons pub mod v1; /// Actual versioned requests and responses that are sent over the wire. -pub mod vstaging; +pub mod v2; /// A protocol per subsystem seems to make the most sense, this way we don't need any dispatching /// within protocols. @@ -66,7 +66,7 @@ pub enum Protocol { /// Protocol for fetching collations from collators. CollationFetchingV1, /// Protocol for fetching collations from collators when async backing is enabled. - CollationFetchingVStaging, + CollationFetchingV2, /// Protocol for fetching seconded PoVs from validators of the same group. PoVFetchingV1, /// Protocol for fetching available data. 
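// --- Illustrative aside (not from the patch): the hunks in this file all
// follow one pattern, because each request type is bound to its `Protocol`
// variant through the `IsRequest` trait that appears later in this diff.
// A minimal, self-contained sketch of that coupling; the trimmed `Protocol`
// enum and the empty request/response structs below are placeholders, not
// the real definitions.

#[derive(Debug, PartialEq)]
enum Protocol {
    CollationFetchingV1,
    CollationFetchingV2,
    AttestedCandidateV2,
}

/// Couples a request type to its response type and its wire protocol.
trait IsRequest {
    type Response;
    const PROTOCOL: Protocol;
}

struct CollationFetchingRequest; // request fields elided in this sketch
struct CollationFetchingResponse; // the response shape is shared with v1

impl IsRequest for CollationFetchingRequest {
    type Response = CollationFetchingResponse;
    const PROTOCOL: Protocol = Protocol::CollationFetchingV2;
}

fn main() {
    // Generic code dispatches on `R::PROTOCOL`, which is why renaming the
    // `*VStaging` variants also touches every `IsRequest` impl in v2.rs.
    assert_eq!(
        <CollationFetchingRequest as IsRequest>::PROTOCOL,
        Protocol::CollationFetchingV2,
    );
}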
@@ -78,7 +78,7 @@ pub enum Protocol { /// Protocol for requesting candidates with attestations in statement distribution /// when async backing is enabled. - AttestedCandidateVStaging, + AttestedCandidateV2, } /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately @@ -147,7 +147,7 @@ const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000; /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead. const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000; -/// Maximum response sizes for `AttestedCandidateVStaging`. +/// Maximum response sizes for `AttestedCandidateV2`. /// /// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead and /// additional backing statements. @@ -199,7 +199,7 @@ impl Protocol { request_timeout: CHUNK_REQUEST_TIMEOUT, inbound_queue: tx, }, - Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => + Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 => RequestResponseConfig { name, fallback_names, @@ -254,7 +254,7 @@ impl Protocol { request_timeout: DISPUTE_REQUEST_TIMEOUT, inbound_queue: tx, }, - Protocol::AttestedCandidateVStaging => RequestResponseConfig { + Protocol::AttestedCandidateV2 => RequestResponseConfig { name, fallback_names, max_request_size: 1_000, @@ -275,7 +275,7 @@ impl Protocol { // as well. Protocol::ChunkFetchingV1 => 100, // 10 seems reasonable, considering group sizes of max 10 validators. - Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => 10, + Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 => 10, // 10 seems reasonable, considering group sizes of max 10 validators. Protocol::PoVFetchingV1 => 10, // Validators are constantly self-selecting to request available data which may lead @@ -307,7 +307,7 @@ impl Protocol { // failure, so having a good value here is mostly about performance tuning. Protocol::DisputeSendingV1 => 100, - Protocol::AttestedCandidateVStaging => { + Protocol::AttestedCandidateV2 => { // We assume we can utilize up to 70% of the available bandwidth for statements. // This is just a guess/estimate, with the following considerations: If we are // faster than that, queue size will stay low anyway, even if not - requesters will @@ -344,8 +344,8 @@ impl Protocol { Protocol::DisputeSendingV1 => Some("/polkadot/send_dispute/1"), // Introduced after legacy names became legacy. 
- Protocol::AttestedCandidateVStaging => None, - Protocol::CollationFetchingVStaging => None, + Protocol::AttestedCandidateV2 => None, + Protocol::CollationFetchingV2 => None, } } } @@ -402,8 +402,8 @@ impl ReqProtocolNames { Protocol::StatementFetchingV1 => "/req_statement/1", Protocol::DisputeSendingV1 => "/send_dispute/1", - Protocol::CollationFetchingVStaging => "/req_collation/2", - Protocol::AttestedCandidateVStaging => "/req_attested_candidate/2", + Protocol::CollationFetchingV2 => "/req_collation/2", + Protocol::AttestedCandidateV2 => "/req_attested_candidate/2", }; format!("{}{}", prefix, short_name).into() diff --git a/polkadot/node/network/protocol/src/request_response/outgoing.rs b/polkadot/node/network/protocol/src/request_response/outgoing.rs index ddc6b85645bbb1dd457731a0b43f6628d4bf4c29..c613d5778f5eb5d21bbdaecfd8e4493ccac14117 100644 --- a/polkadot/node/network/protocol/src/request_response/outgoing.rs +++ b/polkadot/node/network/protocol/src/request_response/outgoing.rs @@ -23,7 +23,7 @@ use sc_network::PeerId; use polkadot_primitives::AuthorityDiscoveryId; -use super::{v1, vstaging, IsRequest, Protocol}; +use super::{v1, v2, IsRequest, Protocol}; /// All requests that can be sent to the network bridge via `NetworkBridgeTxMessage::SendRequest`. #[derive(Debug)] @@ -42,10 +42,10 @@ pub enum Requests { DisputeSendingV1(OutgoingRequest<v1::DisputeRequest>), /// Request a candidate and attestations. - AttestedCandidateVStaging(OutgoingRequest<vstaging::AttestedCandidateRequest>), + AttestedCandidateV2(OutgoingRequest<v2::AttestedCandidateRequest>), /// Fetch a collation from a collator which previously announced it. /// Compared to V1 it requires specifying which candidate is requested by its hash. - CollationFetchingVStaging(OutgoingRequest<vstaging::CollationFetchingRequest>), + CollationFetchingV2(OutgoingRequest<v2::CollationFetchingRequest>), } impl Requests { @@ -54,12 +54,12 @@ impl Requests { match self { Self::ChunkFetchingV1(_) => Protocol::ChunkFetchingV1, Self::CollationFetchingV1(_) => Protocol::CollationFetchingV1, - Self::CollationFetchingVStaging(_) => Protocol::CollationFetchingVStaging, + Self::CollationFetchingV2(_) => Protocol::CollationFetchingV2, Self::PoVFetchingV1(_) => Protocol::PoVFetchingV1, Self::AvailableDataFetchingV1(_) => Protocol::AvailableDataFetchingV1, Self::StatementFetchingV1(_) => Protocol::StatementFetchingV1, Self::DisputeSendingV1(_) => Protocol::DisputeSendingV1, - Self::AttestedCandidateVStaging(_) => Protocol::AttestedCandidateVStaging, + Self::AttestedCandidateV2(_) => Protocol::AttestedCandidateV2, } } @@ -74,12 +74,12 @@ impl Requests { match self { Self::ChunkFetchingV1(r) => r.encode_request(), Self::CollationFetchingV1(r) => r.encode_request(), - Self::CollationFetchingVStaging(r) => r.encode_request(), + Self::CollationFetchingV2(r) => r.encode_request(), Self::PoVFetchingV1(r) => r.encode_request(), Self::AvailableDataFetchingV1(r) => r.encode_request(), Self::StatementFetchingV1(r) => r.encode_request(), Self::DisputeSendingV1(r) => r.encode_request(), - Self::AttestedCandidateVStaging(r) => r.encode_request(), + Self::AttestedCandidateV2(r) => r.encode_request(), } } } diff --git a/polkadot/node/network/protocol/src/request_response/vstaging.rs b/polkadot/node/network/protocol/src/request_response/v2.rs similarity index 93% rename from polkadot/node/network/protocol/src/request_response/vstaging.rs rename to polkadot/node/network/protocol/src/request_response/v2.rs index 
34a17b4baaa64ab6eda18382cfa4c9c3ccd36d3a..6b90c579237fbff95496035f461c5cc4c5202984 100644 --- a/polkadot/node/network/protocol/src/request_response/vstaging.rs +++ b/polkadot/node/network/protocol/src/request_response/v2.rs @@ -18,13 +18,13 @@ use parity_scale_codec::{Decode, Encode}; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, UncheckedSignedStatement, }; use super::{IsRequest, Protocol}; -use crate::vstaging::StatementFilter; +use crate::v2::StatementFilter; /// Request a candidate with statements. #[derive(Debug, Clone, Encode, Decode)] @@ -56,7 +56,7 @@ pub struct AttestedCandidateResponse { impl IsRequest for AttestedCandidateRequest { type Response = AttestedCandidateResponse; - const PROTOCOL: Protocol = Protocol::AttestedCandidateVStaging; + const PROTOCOL: Protocol = Protocol::AttestedCandidateV2; } /// Responses as sent by collators. @@ -76,5 +76,5 @@ pub struct CollationFetchingRequest { impl IsRequest for CollationFetchingRequest { // The response is the same as for V1. type Response = CollationFetchingResponse; - const PROTOCOL: Protocol = Protocol::CollationFetchingVStaging; + const PROTOCOL: Protocol = Protocol::CollationFetchingV2; } diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs index 9ae76047383cde679b4a9393ecdaef38fbcf8930..fc2aff0da305edbb7f6f39834ffd017e2701a550 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -21,8 +21,7 @@ use polkadot_node_network_protocol::{ grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, peer_set::{IsAuthority, PeerSet, ValidationVersion}, v1::{self as protocol_v1, StatementMetadata}, - vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, - Versioned, View, + v2 as protocol_v2, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, @@ -1062,7 +1061,7 @@ async fn circulate_statement<'a, Context>( "We filter out duplicates above. 
qed.", ); - let (v1_peers_to_send, vstaging_peers_to_send) = peers_to_send + let (v1_peers_to_send, v2_peers_to_send) = peers_to_send .into_iter() .map(|peer_id| { let peer_data = @@ -1074,7 +1073,7 @@ async fn circulate_statement<'a, Context>( }) .partition::<Vec<_>, _>(|(_, _, version)| match version { ValidationVersion::V1 => true, - ValidationVersion::VStaging => false, + ValidationVersion::V2 => false, }); // partition is handy here but not if we add more protocol versions let payload = v1_statement_message(relay_parent, stored.statement.clone(), metrics); @@ -1094,24 +1093,24 @@ async fn circulate_statement<'a, Context>( )) .await; } - if !vstaging_peers_to_send.is_empty() { + if !v2_peers_to_send.is_empty() { gum::trace!( target: LOG_TARGET, - ?vstaging_peers_to_send, + ?v2_peers_to_send, ?relay_parent, statement = ?stored.statement, - "Sending statement to vstaging peers", + "Sending statement to v2 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers_to_send.iter().map(|(p, _, _)| *p).collect(), - compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(), + v2_peers_to_send.iter().map(|(p, _, _)| *p).collect(), + compatible_v1_message(ValidationVersion::V2, payload.clone()).into(), )) .await; } v1_peers_to_send .into_iter() - .chain(vstaging_peers_to_send) + .chain(v2_peers_to_send) .filter_map(|(peer, needs_dependent, _)| if needs_dependent { Some(peer) } else { None }) .collect() } @@ -1443,10 +1442,8 @@ async fn handle_incoming_message<'a, Context>( let message = match message { Versioned::V1(m) => m, - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility( - m, - )) => m, - Versioned::VStaging(_) => { + Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(m)) => m, + Versioned::V2(_) => { // The higher-level subsystem code is supposed to filter out // all non v1 messages. 
gum::debug!( @@ -2170,8 +2167,7 @@ fn compatible_v1_message( ) -> net_protocol::StatementDistributionMessage { match version { ValidationVersion::V1 => Versioned::V1(message), - ValidationVersion::VStaging => Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(message), - ), + ValidationVersion::V2 => + Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(message)), } } diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 17a66a9ff792ffddca4c5e8a750b5f7bdcf205f0..ca3038f9b3f3a4250b5337e70f9e009699afbc3b 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -793,7 +793,7 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -1033,7 +1033,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -1563,7 +1563,7 @@ fn delay_reputation_changes() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -2043,7 +2043,7 @@ fn share_prioritizes_backing_group() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -2365,7 +2365,7 @@ fn peer_cant_flood_with_large_statements() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == hash_a => { @@ -2590,7 +2590,7 @@ fn handle_multiple_seconded_statements() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx)) ) if r == relay_parent_hash => { diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs index b2eb9cccced4de0b607d4a4dcd9142d98c80f7ab..eead7df5224d57ddf92bc90a3ba0b18e70a72c9e 100644 --- a/polkadot/node/network/statement-distribution/src/lib.rs +++ b/polkadot/node/network/statement-distribution/src/lib.rs @@ -26,10 +26,8 @@ use error::{log_error, FatalResult}; use std::time::Duration; use polkadot_node_network_protocol::{ - request_response::{ - v1 as request_v1, vstaging::AttestedCandidateRequest, IncomingRequestReceiver, - }, - vstaging as protocol_vstaging, Versioned, + request_response::{v1 as request_v1, v2::AttestedCandidateRequest, IncomingRequestReceiver}, + v2 as protocol_v2, Versioned, }; use polkadot_node_primitives::StatementWithPVD; use polkadot_node_subsystem::{ @@ 
-60,7 +58,7 @@ use legacy_v1::{ ResponderMessage as V1ResponderMessage, }; -mod vstaging; +mod v2; const LOG_TARGET: &str = "parachain::statement-distribution"; @@ -104,9 +102,9 @@ enum MuxedMessage { /// Messages from spawned v1 (legacy) responder background task. V1Responder(Option<V1ResponderMessage>), /// Messages from candidate responder background task. - Responder(Option<vstaging::ResponderMessage>), + Responder(Option<v2::ResponderMessage>), /// Messages from answered requests. - Response(vstaging::UnhandledResponse), + Response(v2::UnhandledResponse), /// Message that a request is ready to be retried. This just acts as a signal that we should /// dispatch all pending requests again. RetryRequest(()), @@ -116,10 +114,10 @@ enum MuxedMessage { impl MuxedMessage { async fn receive<Context>( ctx: &mut Context, - state: &mut vstaging::State, + state: &mut v2::State, from_v1_requester: &mut mpsc::Receiver<V1RequesterMessage>, from_v1_responder: &mut mpsc::Receiver<V1ResponderMessage>, - from_responder: &mut mpsc::Receiver<vstaging::ResponderMessage>, + from_responder: &mut mpsc::Receiver<v2::ResponderMessage>, ) -> MuxedMessage { let (request_manager, response_manager) = state.request_and_response_managers(); // We are only fusing here to make `select` happy, in reality we will quit if one of those @@ -128,8 +126,8 @@ impl MuxedMessage { let from_v1_requester = from_v1_requester.next(); let from_v1_responder = from_v1_responder.next(); let from_responder = from_responder.next(); - let receive_response = vstaging::receive_response(response_manager).fuse(); - let retry_request = vstaging::next_retry(request_manager).fuse(); + let receive_response = v2::receive_response(response_manager).fuse(); + let retry_request = v2::next_retry(request_manager).fuse(); futures::pin_mut!( from_orchestra, from_v1_requester, @@ -182,7 +180,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { let mut reputation_delay = new_reputation_delay(); let mut legacy_v1_state = crate::legacy_v1::State::new(self.keystore.clone()); - let mut state = crate::vstaging::State::new(self.keystore.clone()); + let mut state = crate::v2::State::new(self.keystore.clone()); // Sender/Receiver for getting news from our statement fetching tasks. let (v1_req_sender, mut v1_req_receiver) = mpsc::channel(1); @@ -206,7 +204,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { ctx.spawn( "candidate-responder", - vstaging::respond_task( + v2::respond_task( self.req_receiver.take().expect("Mandatory argument to new. qed"), res_sender.clone(), ) @@ -280,14 +278,13 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { )?; }, MuxedMessage::Responder(result) => { - vstaging::answer_request( + v2::answer_request( &mut state, result.ok_or(FatalError::RequesterReceiverFinished)?, ); }, MuxedMessage::Response(result) => { - vstaging::handle_response(&mut ctx, &mut state, result, &mut self.reputation) - .await; + v2::handle_response(&mut ctx, &mut state, result, &mut self.reputation).await; }, MuxedMessage::RetryRequest(()) => { // A pending request is ready to retry. 
This is only a signal to call @@ -296,7 +293,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { }, }; - vstaging::dispatch_requests(&mut ctx, &mut state).await; + v2::dispatch_requests(&mut ctx, &mut state).await; } Ok(()) } @@ -304,7 +301,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { async fn handle_subsystem_message<Context>( &mut self, ctx: &mut Context, - state: &mut vstaging::State, + state: &mut v2::State, legacy_v1_state: &mut legacy_v1::State, v1_req_sender: &mpsc::Sender<V1RequesterMessage>, message: FromOrchestra<StatementDistributionMessage>, @@ -318,11 +315,11 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { })) => { let _timer = metrics.time_active_leaves_update(); - // vstaging should handle activated first because of implicit view. + // v2 should handle activated first because of implicit view. if let Some(ref activated) = activated { let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; if let ProspectiveParachainsMode::Enabled { .. } = mode { - vstaging::handle_active_leaves_update(ctx, state, activated, mode).await?; + v2::handle_active_leaves_update(ctx, state, activated, mode).await?; } else if let ProspectiveParachainsMode::Disabled = mode { for deactivated in &deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); @@ -339,7 +336,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { for deactivated in &deactivated { crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); } - vstaging::handle_deactivate_leaves(state, &deactivated); + v2::handle_deactivate_leaves(state, &deactivated); } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => { @@ -362,7 +359,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { ) .await?; } else { - vstaging::share_local_statement( + v2::share_local_statement( ctx, state, relay_parent, @@ -399,11 +396,11 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { let target = match &event { NetworkBridgeEvent::PeerMessage(_, message) => match message { - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + Versioned::V2( + protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) => VersionTarget::Legacy, Versioned::V1(_) => VersionTarget::Legacy, - Versioned::VStaging(_) => VersionTarget::Current, + Versioned::V2(_) => VersionTarget::Current, }, _ => VersionTarget::Both, }; @@ -422,14 +419,12 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> { } if target.targets_current() { - // pass to vstaging. - vstaging::handle_network_update(ctx, state, event, &mut self.reputation) - .await; + // pass to v2. 
+ v2::handle_network_update(ctx, state, event, &mut self.reputation).await; } }, StatementDistributionMessage::Backed(candidate_hash) => { - crate::vstaging::handle_backed_candidate_message(ctx, state, candidate_hash) - .await; + crate::v2::handle_backed_candidate_message(ctx, state, candidate_hash).await; }, }, } diff --git a/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/candidates.rs rename to polkadot/node/network/statement-distribution/src/v2/candidates.rs index d6b68510f1c100e5dfd7c4ff7898399b2fd2eacc..e660df5da173e24dce8be7afdbe3ea55c17f94c9 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/candidates.rs +++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs @@ -27,7 +27,7 @@ use polkadot_node_network_protocol::PeerId; use polkadot_node_subsystem::messages::HypotheticalCandidate; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, Id as ParaId, PersistedValidationData, }; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/cluster.rs rename to polkadot/node/network/statement-distribution/src/v2/cluster.rs index 55d847f831574eeef44d8214682c52c5370081b4..8adb8353ca929d75219252c2ea33e894dea41002 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs @@ -55,7 +55,7 @@ //! and to keep track of what we have sent to other validators in the group and what we may //! continue to send them. -use polkadot_primitives::vstaging::{CandidateHash, CompactStatement, ValidatorIndex}; +use polkadot_primitives::{CandidateHash, CompactStatement, ValidatorIndex}; use std::collections::{HashMap, HashSet}; @@ -459,7 +459,7 @@ pub enum RejectOutgoing { #[cfg(test)] mod tests { use super::*; - use polkadot_primitives::vstaging::Hash; + use polkadot_primitives::Hash; #[test] fn rejects_incoming_outside_of_group() { diff --git a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs b/polkadot/node/network/statement-distribution/src/v2/grid.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/grid.rs rename to polkadot/node/network/statement-distribution/src/v2/grid.rs index 4fd77d0ced1cef3dcba81a088c1030c444a54d04..3d53ff6d321e2321c7b559aaaf7adc5504e18f49 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/grid.rs @@ -60,12 +60,8 @@ //! - which has sent a `BackedCandidateAcknowledgement` //! 
- 1st-hop nodes do the same thing -use polkadot_node_network_protocol::{ - grid_topology::SessionGridTopology, vstaging::StatementFilter, -}; -use polkadot_primitives::vstaging::{ - CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex, -}; +use polkadot_node_network_protocol::{grid_topology::SessionGridTopology, v2::StatementFilter}; +use polkadot_primitives::{CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex}; use std::collections::{ hash_map::{Entry, HashMap}, diff --git a/polkadot/node/network/statement-distribution/src/vstaging/groups.rs b/polkadot/node/network/statement-distribution/src/v2/groups.rs similarity index 96% rename from polkadot/node/network/statement-distribution/src/vstaging/groups.rs rename to polkadot/node/network/statement-distribution/src/v2/groups.rs index b2daa1c0ac7caf3181c12dfc74fcff0f2f0cc6ed..d917b20905296eed7501c568d9bcd3057963f683 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/groups.rs +++ b/polkadot/node/network/statement-distribution/src/v2/groups.rs @@ -17,8 +17,7 @@ //! A utility for tracking groups and their members within a session. use polkadot_primitives::{ - effective_minimum_backing_votes, - vstaging::{GroupIndex, IndexedVec, ValidatorIndex}, + effective_minimum_backing_votes, GroupIndex, IndexedVec, ValidatorIndex, }; use std::collections::HashMap; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs similarity index 97% rename from polkadot/node/network/statement-distribution/src/vstaging/mod.rs rename to polkadot/node/network/statement-distribution/src/v2/mod.rs index 4639720b3221add0d61eb040f44844e07cd2a502..e11d66c41a04b62377008bb7c0e2b5f9f44cde5b 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -23,11 +23,11 @@ use polkadot_node_network_protocol::{ peer_set::ValidationVersion, request_response::{ incoming::OutgoingResponse, - vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, + v2::{AttestedCandidateRequest, AttestedCandidateResponse}, IncomingRequest, IncomingRequestReceiver, Requests, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, - vstaging::{self as protocol_vstaging, StatementFilter}, + v2::{self as protocol_v2, StatementFilter}, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ @@ -45,7 +45,7 @@ use polkadot_node_subsystem_util::{ reputation::ReputationAggregator, runtime::{request_min_backing_votes, ProspectiveParachainsMode}, }; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, SessionIndex, SessionInfo, SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, @@ -323,7 +323,7 @@ pub(crate) async fn handle_network_update<Context>( NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, mut authority_ids) => { gum::trace!(target: LOG_TARGET, ?peer_id, ?role, ?protocol_version, "Peer connected"); - if protocol_version != ValidationVersion::VStaging.into() { + if protocol_version != ValidationVersion::V2.into() { return } @@ -381,19 +381,19 @@ pub(crate) async fn handle_network_update<Context>( }, NetworkBridgeEvent::PeerMessage(peer_id, message) => match message { net_protocol::StatementDistributionMessage::V1(_) => return, - 
net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) => return, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) => handle_incoming_statement(ctx, state, peer_id, relay_parent, statement, reputation) .await, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(inner), ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation).await, - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), + net_protocol::StatementDistributionMessage::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(inner), ) => handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation).await, }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => @@ -727,10 +727,8 @@ fn pending_statement_network_message( statement_store .validator_statement(originator, compact) .map(|s| s.as_unchecked().clone()) - .map(|signed| { - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) - }) - .map(|msg| (vec![*peer], Versioned::VStaging(msg).into())) + .map(|signed| protocol_v2::StatementDistributionMessage::Statement(relay_parent, signed)) + .map(|msg| (vec![*peer], Versioned::V2(msg).into())) } /// Send a peer all pending cluster statements for a relay parent. 
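// --- Illustrative aside (not from the patch): every send path renamed in
// this file performs the same two-step wrap: build the v2 subsystem message,
// then lift it into the `Versioned` envelope (the real code finishes with an
// `.into()` up to `ValidationProtocol`). A condensed sketch; the placeholder
// payload types are assumptions standing in for the real primitives.

struct Hash; // stands in for polkadot_primitives::Hash
struct UncheckedSignedStatement; // stands in for the real signed statement

enum StatementDistributionMessage {
    Statement(Hash, UncheckedSignedStatement),
    // BackedCandidateManifest(..) and BackedCandidateKnown(..) elided here
}

enum Versioned<V1, V2> {
    V1(V1),
    V2(V2),
}

fn statement_network_message(
    relay_parent: Hash,
    signed: UncheckedSignedStatement,
) -> Versioned<(), StatementDistributionMessage> {
    // Step 1: the subsystem-level v2 message.
    let msg = StatementDistributionMessage::Statement(relay_parent, signed);
    // Step 2: version-annotate it; after this rename the variant is `V2`
    // rather than `VStaging`.
    Versioned::V2(msg)
}

fn main() {
    let _ = statement_network_message(Hash, UncheckedSignedStatement);
}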
@@ -823,7 +821,7 @@ async fn send_pending_grid_messages<Context>( match kind { grid::ManifestKind::Full => { - let manifest = protocol_vstaging::BackedCandidateManifest { + let manifest = protocol_v2::BackedCandidateManifest { relay_parent, candidate_hash, group_index, @@ -847,8 +845,8 @@ async fn send_pending_grid_messages<Context>( messages.push(( vec![*peer_id], - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + Versioned::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest, ), ) @@ -1192,7 +1190,7 @@ async fn circulate_statement<Context>( ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( statement_to, - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), )) @@ -1672,7 +1670,7 @@ async fn provide_candidate_to_grid<Context>( filter.clone(), ); - let manifest = protocol_vstaging::BackedCandidateManifest { + let manifest = protocol_v2::BackedCandidateManifest { relay_parent, candidate_hash, group_index, @@ -1680,16 +1678,15 @@ async fn provide_candidate_to_grid<Context>( parent_head_data_hash: confirmed_candidate.parent_head_data_hash(), statement_knowledge: filter.clone(), }; - let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + let acknowledgement = protocol_v2::BackedCandidateAcknowledgement { candidate_hash, statement_knowledge: filter.clone(), }; - let manifest_message = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), - ); - let ack_message = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), + let manifest_message = + Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest)); + let ack_message = Versioned::V2( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), ); let mut manifest_peers = Vec::new(); @@ -2062,8 +2059,8 @@ fn post_acknowledgement_statement_messages( statement.payload(), ); - messages.push(Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement( + messages.push(Versioned::V2( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), ) @@ -2079,7 +2076,7 @@ async fn handle_incoming_manifest<Context>( ctx: &mut Context, state: &mut State, peer: PeerId, - manifest: net_protocol::vstaging::BackedCandidateManifest, + manifest: net_protocol::v2::BackedCandidateManifest, reputation: &mut ReputationAggregator, ) { gum::debug!( @@ -2183,14 +2180,14 @@ fn acknowledgement_and_statement_messages( Some(l) => l, }; - let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + let acknowledgement = protocol_v2::BackedCandidateAcknowledgement { candidate_hash, statement_knowledge: local_knowledge.clone(), }; - let msg = Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), - ); + let msg = Versioned::V2(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( + acknowledgement, + )); let mut messages = vec![(vec![peer], msg.into())]; @@ -2221,7 +2218,7 @@ async fn handle_incoming_acknowledgement<Context>( ctx: &mut Context, state: &mut State, peer: PeerId, - acknowledgement: net_protocol::vstaging::BackedCandidateAcknowledgement, + acknowledgement: 
net_protocol::v2::BackedCandidateAcknowledgement, reputation: &mut ReputationAggregator, ) { // The key difference between acknowledgments and full manifests is that only @@ -2521,7 +2518,7 @@ pub(crate) async fn dispatch_requests<Context>(ctx: &mut Context, state: &mut St ) { // Peer is supposedly connected. ctx.send_message(NetworkBridgeTxMessage::SendRequests( - vec![Requests::AttestedCandidateVStaging(request)], + vec![Requests::AttestedCandidateV2(request)], IfDisconnected::ImmediateError, )) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs similarity index 99% rename from polkadot/node/network/statement-distribution/src/vstaging/requests.rs rename to polkadot/node/network/statement-distribution/src/v2/requests.rs index 79925f2115d430e61232e600b1db13a0886d3e88..f13496024fcfee52f48a3cdb2377c2e24eadda79 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -39,14 +39,14 @@ use crate::LOG_TARGET; use polkadot_node_network_protocol::{ request_response::{ outgoing::{Recipient as RequestRecipient, RequestError}, - vstaging::{AttestedCandidateRequest, AttestedCandidateResponse}, + v2::{AttestedCandidateRequest, AttestedCandidateResponse}, OutgoingRequest, OutgoingResult, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, - vstaging::StatementFilter, + v2::StatementFilter, PeerId, UnifiedReputationChange as Rep, }; -use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, ParaId, +use polkadot_primitives::{ + CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SignedStatement, SigningContext, ValidatorId, ValidatorIndex, }; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs similarity index 98% rename from polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs rename to polkadot/node/network/statement-distribution/src/v2/statement_store.rs index 9ea926f24aa833177230aa9bf1c8962517acd49a..74db431eda1d214e0031e98ab304cb0fd0fabd96 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/statement_store.rs +++ b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs @@ -24,8 +24,8 @@ //! groups, and views based on the validators themselves. 
use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; -use polkadot_node_network_protocol::vstaging::StatementFilter; -use polkadot_primitives::vstaging::{ +use polkadot_node_network_protocol::v2::StatementFilter; +use polkadot_primitives::{ CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex, }; use std::collections::hash_map::{Entry as HEntry, HashMap}; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs similarity index 95% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index 50d0477eb5167cb2d8a179813452373061aedd9e..80dec1d75ab98be43b820bf7382b4436ef87c3b2 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -103,8 +103,8 @@ fn share_seconded_circulated_to_cluster() { overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -173,7 +173,7 @@ fn cluster_valid_statement_before_seconded_ignored() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, signed_valid.as_unchecked().clone(), ), @@ -252,7 +252,7 @@ fn cluster_statement_bad_signature() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement.clone(), ), @@ -327,7 +327,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -388,7 +388,7 @@ fn statement_from_non_cluster_originator_unexpected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -465,7 +465,7 @@ fn seconded_statement_leads_to_request() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -593,8 +593,8 @@ fn cluster_statements_shared_seconded_first() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -604,8 +604,8 @@ fn cluster_statements_shared_seconded_first() { assert_matches!( &messages[1].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + 
Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -699,8 +699,8 @@ fn cluster_accounts_for_implicit_view() { overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -749,8 +749,8 @@ fn cluster_accounts_for_implicit_view() { &messages[0], ( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -836,10 +836,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -971,10 +968,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -1191,7 +1185,7 @@ fn ensure_seconding_limit_is_respected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1216,7 +1210,7 @@ fn ensure_seconding_limit_is_respected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1241,7 +1235,7 @@ fn ensure_seconding_limit_is_respected() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs similarity index 95% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 0739f30194378e1ec7f5e6540e37f24d60130d89..a0af9579823597514383f57c9cbe691a7282bb20 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -17,9 +17,7 @@ use super::*; use bitvec::order::Lsb0; -use polkadot_node_network_protocol::vstaging::{ - BackedCandidateAcknowledgement, BackedCandidateManifest, -}; +use polkadot_node_network_protocol::v2::{BackedCandidateAcknowledgement, BackedCandidateManifest}; use polkadot_node_subsystem::messages::CandidateBackingMessage; use polkadot_primitives_test_helpers::make_candidate; @@ -156,7 +154,7 @@ fn backed_candidate_leads_to_advertisement() { send_peer_message( &mut 
overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -181,7 +179,7 @@ fn backed_candidate_leads_to_advertisement() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -210,9 +208,9 @@ fn backed_candidate_leads_to_advertisement() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ), ), ) @@ -349,7 +347,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; @@ -534,7 +532,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -603,9 +601,9 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack), ), ), ) @@ -629,7 +627,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { send_peer_message( &mut overseer, peer_d.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -654,8 +652,8 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack) )) if *ack == expected_ack ); } @@ -782,7 +780,7 @@ fn received_advertisement_after_confirmation_before_backing() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -842,7 +840,7 @@ fn received_advertisement_after_confirmation_before_backing() { send_peer_message( &mut overseer, peer_d.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -951,7 +949,7 @@ fn 
additional_statements_are_shared_after_manifest_exchange() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1066,9 +1064,9 @@ fn additional_statements_are_shared_after_manifest_exchange() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack), ), ), ) @@ -1104,7 +1102,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { send_peer_message( &mut overseer, peer_d.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1130,15 +1128,15 @@ fn additional_statements_are_shared_after_manifest_exchange() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateKnown(ack) )) if *ack == expected_ack ); assert_matches!( &messages[1].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement(r, s) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement(r, s) )) if *r == relay_parent && s.unchecked_payload() == &CompactStatement::Seconded(candidate_hash) && s.unchecked_validator_index() == v_e ); } @@ -1281,7 +1279,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1306,7 +1304,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1357,8 +1355,8 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { assert_matches!( &messages[0].1, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest) + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest) )) => { assert_eq!(*manifest, expected_manifest); } @@ -1504,7 +1502,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1529,7 +1527,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { send_peer_message( &mut overseer, peer_b.clone(), - 
protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1558,9 +1556,9 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ), ), ) @@ -1692,7 +1690,7 @@ fn grid_statements_imported_to_backing() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1907,7 +1905,7 @@ fn advertisements_rejected_from_incorrect_peers() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1925,7 +1923,7 @@ fn advertisements_rejected_from_incorrect_peers() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; @@ -2029,7 +2027,7 @@ fn manifest_rejected_with_unknown_relay_parent() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2131,7 +2129,7 @@ fn manifest_rejected_when_not_a_validator() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2238,7 +2236,7 @@ fn manifest_rejected_when_group_does_not_match_para() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2370,7 +2368,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -2439,7 +2437,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs similarity index 98% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 48ceebb1949bfa2889986554b9583cbf33cd72aa..4150377a0c6c219832104024f6d6c046dccec709 100644 --- 
a/polkadot/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -31,7 +31,7 @@ use polkadot_node_subsystem::messages::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; -use polkadot_primitives::vstaging::{ +use polkadot_primitives::{ AssignmentPair, AsyncBackingParams, BlockNumber, CommittedCandidateReceipt, CoreState, GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore, SessionIndex, SessionInfo, ValidatorPair, @@ -380,7 +380,7 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) ) if parent == *hash => { tx.send(Ok(test_state.config.async_backing_params.unwrap_or(DEFAULT_ASYNC_BACKING_PARAMETERS))).unwrap(); } @@ -479,7 +479,7 @@ async fn handle_sent_request( assert_eq!(requests.len(), 1); assert_matches!( requests.pop().unwrap(), - Requests::AttestedCandidateVStaging(outgoing) => { + Requests::AttestedCandidateV2(outgoing) => { assert_eq!(outgoing.peer, Recipient::Peer(peer)); assert_eq!(outgoing.payload.candidate_hash, candidate_hash); assert_eq!(outgoing.payload.mask, mask); @@ -537,7 +537,7 @@ async fn connect_peer( NetworkBridgeEvent::PeerConnected( peer, ObservedRole::Authority, - ValidationVersion::VStaging.into(), + ValidationVersion::V2.into(), authority_ids, ), ), @@ -570,12 +570,12 @@ async fn send_peer_view_change(virtual_overseer: &mut VirtualOverseer, peer: Pee async fn send_peer_message( virtual_overseer: &mut VirtualOverseer, peer: PeerId, - message: protocol_vstaging::StatementDistributionMessage, + message: protocol_v2::StatementDistributionMessage, ) { virtual_overseer .send(FromOrchestra::Communication { msg: StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(peer, Versioned::VStaging(message)), + NetworkBridgeEvent::PeerMessage(peer, Versioned::V2(message)), ), }) .await; diff --git a/polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs similarity index 95% rename from polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs rename to polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 5eef5809b4d497e9334572ffa4e44f5d13ba48c5..0734b75c9712d50dc8be0f78bde52001ae26b8f0 100644 --- a/polkadot/node/network/statement-distribution/src/vstaging/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -19,7 +19,7 @@ use super::*; use bitvec::order::Lsb0; use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ - request_response::vstaging as request_vstaging, vstaging::BackedCandidateManifest, + request_response::v2 as request_v2, v2::BackedCandidateManifest, }; use polkadot_primitives_test_helpers::make_candidate; use sc_network::config::{ @@ -109,10 +109,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -164,9 +161,9 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { AllMessages:: 
NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement(hash, statement), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement(hash, statement), ), ), ) @@ -304,7 +301,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -376,7 +373,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -453,7 +450,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -568,9 +565,7 @@ fn peer_reported_for_not_enough_statements() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( - manifest.clone(), - ), + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest.clone()), ) .await; @@ -752,10 +747,7 @@ fn peer_reported_for_duplicate_statements() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -812,9 +804,9 @@ fn peer_reported_for_duplicate_statements() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement(hash, statement), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement(hash, statement), ), ), ) @@ -916,10 +908,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -1058,10 +1047,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( - relay_parent, - a_seconded, - ), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded), ) .await; @@ -1191,7 +1177,7 @@ fn local_node_sanity_checks_incoming_requests() { .send(RawIncomingRequest { // Request from peer that received manifest. 
peer: peer_c, - payload: request_vstaging::AttestedCandidateRequest { + payload: request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask: mask.clone(), } @@ -1225,8 +1211,8 @@ fn local_node_sanity_checks_incoming_requests() { overseer.recv().await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::Statement( + Versioned::V2(protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::Statement( r, s, ) @@ -1250,7 +1236,7 @@ fn local_node_sanity_checks_incoming_requests() { .send(RawIncomingRequest { // Request from peer that received manifest. peer: peer_d, - payload: request_vstaging::AttestedCandidateRequest { + payload: request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask: mask.clone(), } @@ -1269,10 +1255,7 @@ fn local_node_sanity_checks_incoming_requests() { let response = state .send_request( peer_c, - request_vstaging::AttestedCandidateRequest { - candidate_hash: candidate.hash(), - mask, - }, + request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask }, ) .await .await; @@ -1296,7 +1279,7 @@ fn local_node_sanity_checks_incoming_requests() { let response = state .send_request( peer_c, - request_vstaging::AttestedCandidateRequest { + request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask: mask.clone(), }, @@ -1455,7 +1438,7 @@ fn local_node_respects_statement_mask() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) .await; @@ -1479,7 +1462,7 @@ fn local_node_respects_statement_mask() { send_peer_message( &mut overseer, peer_b.clone(), - protocol_vstaging::StatementDistributionMessage::Statement( + protocol_v2::StatementDistributionMessage::Statement( relay_parent, statement_b.clone(), ), @@ -1511,9 +1494,9 @@ fn local_node_respects_statement_mask() { AllMessages:: NetworkBridgeTx( NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::StatementDistribution( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + Versioned::V2( + protocol_v2::ValidationProtocol::StatementDistribution( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest(manifest), ), ), ) @@ -1547,19 +1530,16 @@ fn local_node_respects_statement_mask() { let response = state .send_request( peer_c, - request_vstaging::AttestedCandidateRequest { - candidate_hash: candidate.hash(), - mask, - }, + request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask }, ) .await .await; let expected_statements = vec![statement_b]; assert_matches!(response, full_response => { - // Response is the same for vstaging. - let request_vstaging::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } = - request_vstaging::AttestedCandidateResponse::decode( + // Response is the same for v2. 
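+ // (The vstaging -> v2 change is a pure rename: the SCALE encoding of
+ // `AttestedCandidateResponse` is unchanged, so the decode below is
+ // exactly what a v1-era test would have done.)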
+ let request_v2::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } = + request_v2::AttestedCandidateResponse::decode( &mut full_response.result.expect("We should have a proper answer").as_ref(), ).expect("Decoding should work"); assert_eq!(candidate_receipt, candidate); @@ -1683,7 +1663,7 @@ fn should_delay_before_retrying_dropped_requests() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) @@ -1696,7 +1676,7 @@ fn should_delay_before_retrying_dropped_requests() { assert_eq!(requests.len(), 1); assert_matches!( requests.pop().unwrap(), - Requests::AttestedCandidateVStaging(outgoing) => { + Requests::AttestedCandidateV2(outgoing) => { assert_eq!(outgoing.peer, Recipient::Peer(peer_c)); assert_eq!(outgoing.payload.candidate_hash, candidate_hash_1); assert_eq!(outgoing.payload.mask, mask); @@ -1729,7 +1709,7 @@ fn should_delay_before_retrying_dropped_requests() { send_peer_message( &mut overseer, peer_c.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + protocol_v2::StatementDistributionMessage::BackedCandidateManifest( manifest.clone(), ), ) diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index ee092e27733267419a47ee1553d6998be3228ca8..ba5976fdceef89683150d68ec38e2d90ff803c6c 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -223,7 +223,3 @@ runtime-metrics = [ "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] - -network-protocol-staging = [ - "polkadot-node-network-protocol/network-protocol-staging", -] diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 7fd9ce61c95eeafb51dcec4dfaadcede46e3843c..ce25c08b8773059497cdc9624f9fb1cd65d20031 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -84,7 +84,7 @@ pub type WestendChainSpec = GenericChainSpec; /// The `ChainSpec` parameterized for the rococo runtime. #[cfg(feature = "rococo-native")] -pub type RococoChainSpec = service::GenericChainSpec<RococoGenesisExt, Extensions>; +pub type RococoChainSpec = service::GenericChainSpec<rococo::RuntimeGenesisConfig, Extensions>; /// The `ChainSpec` parameterized for the `versi` runtime. /// @@ -96,30 +96,6 @@ pub type VersiChainSpec = RococoChainSpec; #[cfg(not(feature = "rococo-native"))] pub type RococoChainSpec = GenericChainSpec; -/// Extension for the Rococo genesis config to support a custom changes to the genesis state. -#[derive(serde::Serialize, serde::Deserialize)] -#[cfg(feature = "rococo-native")] -pub struct RococoGenesisExt { - /// The runtime genesis config. - runtime_genesis_config: rococo::RuntimeGenesisConfig, - /// The session length in blocks. - /// - /// If `None` is supplied, the default value is used. 
- session_length_in_blocks: Option<u32>, -} - -#[cfg(feature = "rococo-native")] -impl sp_runtime::BuildStorage for RococoGenesisExt { - fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String> { - sp_state_machine::BasicExternalities::execute_with_storage(storage, || { - if let Some(length) = self.session_length_in_blocks.as_ref() { - rococo_runtime_constants::time::EpochDurationInBlocks::set(length); - } - }); - self.runtime_genesis_config.assimilate_storage(storage) - } -} - pub fn polkadot_config() -> Result<GenericChainSpec, String> { GenericChainSpec::from_json_bytes(&include_bytes!("../chain-specs/polkadot.json")[..]) } @@ -425,6 +401,7 @@ fn westend_staging_testnet_config_genesis(wasm_binary: &[u8]) -> westend::Runtim vesting: westend::VestingConfig { vesting: vec![] }, sudo: westend::SudoConfig { key: Some(endowed_accounts[0].clone()) }, hrmp: Default::default(), + treasury: Default::default(), configuration: westend::ConfigurationConfig { config: default_parachains_host_configuration(), }, @@ -716,7 +693,6 @@ fn rococo_staging_testnet_config_genesis( }) .collect::<Vec<_>>(), }, - phragmen_election: Default::default(), babe: rococo_runtime::BabeConfig { authorities: Default::default(), epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), @@ -724,13 +700,6 @@ fn rococo_staging_testnet_config_genesis( }, grandpa: Default::default(), im_online: Default::default(), - democracy: rococo_runtime::DemocracyConfig::default(), - council: rococo::CouncilConfig { members: vec![], phantom: Default::default() }, - technical_committee: rococo::TechnicalCommitteeConfig { - members: vec![], - phantom: Default::default(), - }, - technical_membership: Default::default(), treasury: Default::default(), authority_discovery: rococo_runtime::AuthorityDiscoveryConfig { keys: vec![], @@ -787,10 +756,7 @@ pub fn rococo_staging_testnet_config() -> Result<RococoChainSpec, String> { "Rococo Staging Testnet", "rococo_staging_testnet", ChainType::Live, - move || RococoGenesisExt { - runtime_genesis_config: rococo_staging_testnet_config_genesis(wasm_binary), - session_length_in_blocks: None, - }, + move || rococo_staging_testnet_config_genesis(wasm_binary), boot_nodes, Some( TelemetryEndpoints::new(vec![(ROCOCO_STAGING_TELEMETRY_URL.to_string(), 0)]) @@ -824,10 +790,7 @@ pub fn versi_staging_testnet_config() -> Result<RococoChainSpec, String> { "Versi Staging Testnet", "versi_staging_testnet", ChainType::Live, - move || RococoGenesisExt { - runtime_genesis_config: rococo_staging_testnet_config_genesis(wasm_binary), - session_length_in_blocks: Some(100), - }, + move || rococo_staging_testnet_config_genesis(wasm_binary), boot_nodes, Some( TelemetryEndpoints::new(vec![(VERSI_STAGING_TELEMETRY_URL.to_string(), 0)]) @@ -992,6 +955,7 @@ pub fn westend_testnet_genesis( vesting: westend::VestingConfig { vesting: vec![] }, sudo: westend::SudoConfig { key: Some(root_key) }, hrmp: Default::default(), + treasury: Default::default(), configuration: westend::ConfigurationConfig { config: default_parachains_host_configuration(), }, @@ -1062,14 +1026,6 @@ pub fn rococo_testnet_genesis( }, grandpa: Default::default(), im_online: Default::default(), - phragmen_election: Default::default(), - democracy: rococo::DemocracyConfig::default(), - council: rococo::CouncilConfig { members: vec![], phantom: Default::default() }, - technical_committee: rococo::TechnicalCommitteeConfig { - members: vec![], - phantom: Default::default(), - }, - technical_membership: Default::default(), treasury: 
Default::default(), claims: rococo::ClaimsConfig { claims: vec![], vesting: vec![] }, vesting: rococo::VestingConfig { vesting: vec![] }, @@ -1144,11 +1100,7 @@ pub fn rococo_development_config() -> Result<RococoChainSpec, String> { "Development", "rococo_dev", ChainType::Development, - move || RococoGenesisExt { - runtime_genesis_config: rococo_development_config_genesis(wasm_binary), - // Use 1 minute session length. - session_length_in_blocks: Some(10), - }, + move || rococo_development_config_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), @@ -1167,11 +1119,7 @@ pub fn versi_development_config() -> Result<RococoChainSpec, String> { "Development", "versi_dev", ChainType::Development, - move || RococoGenesisExt { - runtime_genesis_config: rococo_development_config_genesis(wasm_binary), - // Use 1 minute session length. - session_length_in_blocks: Some(10), - }, + move || rococo_development_config_genesis(wasm_binary), vec![], None, Some("versi"), @@ -1191,11 +1139,7 @@ pub fn wococo_development_config() -> Result<RococoChainSpec, String> { "Development", "wococo_dev", ChainType::Development, - move || RococoGenesisExt { - runtime_genesis_config: rococo_development_config_genesis(wasm_binary), - // Use 1 minute session length. - session_length_in_blocks: Some(10), - }, + move || rococo_development_config_genesis(wasm_binary), vec![], None, Some(WOCOCO_DEV_PROTOCOL_ID), @@ -1253,11 +1197,7 @@ pub fn rococo_local_testnet_config() -> Result<RococoChainSpec, String> { "Rococo Local Testnet", "rococo_local_testnet", ChainType::Local, - move || RococoGenesisExt { - runtime_genesis_config: rococo_local_testnet_genesis(wasm_binary), - // Use 1 minute session length. - session_length_in_blocks: Some(10), - }, + move || rococo_local_testnet_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), @@ -1292,11 +1232,7 @@ pub fn wococo_local_testnet_config() -> Result<RococoChainSpec, String> { "Wococo Local Testnet", "wococo_local_testnet", ChainType::Local, - move || RococoGenesisExt { - runtime_genesis_config: wococo_local_testnet_genesis(wasm_binary), - // Use 1 minute session length. - session_length_in_blocks: Some(10), - }, + move || wococo_local_testnet_genesis(wasm_binary), vec![], None, Some(DEFAULT_PROTOCOL_ID), @@ -1331,11 +1267,7 @@ pub fn versi_local_testnet_config() -> Result<RococoChainSpec, String> { "Versi Local Testnet", "versi_local_testnet", ChainType::Local, - move || RococoGenesisExt { - runtime_genesis_config: versi_local_testnet_genesis(wasm_binary), - // Use 1 minute session length. 
- session_length_in_blocks: Some(10), - }, + move || versi_local_testnet_genesis(wasm_binary), vec![], None, Some("versi"), diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 5286631fbbb5c495ba66cbaa80ad6a583ac9f852..5991744dc3afce5cbc63af412d4c00c6417f8e93 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -854,7 +854,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>( let (collation_req_v1_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); - let (collation_req_vstaging_receiver, cfg) = + let (collation_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -862,7 +862,7 @@ pub fn new_full<OverseerGenerator: OverseerGen>( net_config.add_request_response_protocol(cfg); let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); - let (candidate_req_vstaging_receiver, cfg) = + let (candidate_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); @@ -1051,10 +1051,10 @@ pub fn new_full<OverseerGenerator: OverseerGen>( pov_req_receiver, chunk_req_receiver, collation_req_v1_receiver, - collation_req_vstaging_receiver, + collation_req_v2_receiver, available_data_req_receiver, statement_req_receiver, - candidate_req_vstaging_receiver, + candidate_req_v2_receiver, dispute_req_receiver, registry: prometheus_registry.as_ref(), spawner, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 33127b638e5a51800d3c5f88af1ac420a941b48c..7d1add118241b123ad122b305264f95b46b8ee78 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -28,7 +28,7 @@ use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig; use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, request_response::{ - v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver, ReqProtocolNames, + v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, ReqProtocolNames, }, }; #[cfg(any(feature = "malus", test))] @@ -104,17 +104,15 @@ where pub chunk_req_receiver: IncomingRequestReceiver<request_v1::ChunkFetchingRequest>, /// Collations request receiver for network protocol v1. pub collation_req_v1_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>, - /// Collations request receiver for network protocol vstaging. - pub collation_req_vstaging_receiver: - IncomingRequestReceiver<request_vstaging::CollationFetchingRequest>, + /// Collations request receiver for network protocol v2. + pub collation_req_v2_receiver: IncomingRequestReceiver<request_v2::CollationFetchingRequest>, /// Receiver for available data requests. pub available_data_req_receiver: IncomingRequestReceiver<request_v1::AvailableDataFetchingRequest>, /// Receiver for incoming large statement requests. pub statement_req_receiver: IncomingRequestReceiver<request_v1::StatementFetchingRequest>, /// Receiver for incoming candidate requests. 
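+ /// These use the v2 attested-candidate request/response protocol and are consumed by the
+ /// statement-distribution subsystem (see `prepared_overseer_builder` below).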
- pub candidate_req_vstaging_receiver: - IncomingRequestReceiver<request_vstaging::AttestedCandidateRequest>, + pub candidate_req_v2_receiver: IncomingRequestReceiver<request_v2::AttestedCandidateRequest>, /// Receiver for incoming disputes. pub dispute_req_receiver: IncomingRequestReceiver<request_v1::DisputeRequest>, /// Prometheus registry, commonly used for production systems, less so for test. @@ -158,10 +156,10 @@ pub fn prepared_overseer_builder<Spawner, RuntimeClient>( pov_req_receiver, chunk_req_receiver, collation_req_v1_receiver, - collation_req_vstaging_receiver, + collation_req_v2_receiver, available_data_req_receiver, statement_req_receiver, - candidate_req_vstaging_receiver, + candidate_req_v2_receiver, dispute_req_receiver, registry, spawner, @@ -288,7 +286,7 @@ where peer_id: network_service.local_peer_id(), collator_pair, request_receiver_v1: collation_req_v1_receiver, - request_receiver_vstaging: collation_req_vstaging_receiver, + request_receiver_v2: collation_req_v2_receiver, metrics: Metrics::register(registry)?, }, IsParachainNode::FullNode => ProtocolSide::None, @@ -309,7 +307,7 @@ where .statement_distribution(StatementDistributionSubsystem::new( keystore.clone(), statement_req_receiver, - candidate_req_vstaging_receiver, + candidate_req_v2_receiver, Metrics::register(registry)?, rand::rngs::StdRng::from_entropy(), )) diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index a53908d3c2cb384c1eb004fb0bde00b5d35b6a56..01ccee3add904b4b6179b99322f650646dbde152 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -39,12 +39,12 @@ use polkadot_node_primitives::{ ValidationResult, }; use polkadot_primitives::{ - slashing, vstaging as vstaging_primitives, AuthorityDiscoveryId, BackedCandidate, BlockNumber, - CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, - PvfCheckStatement, PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, + async_backing, slashing, AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, + CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, + CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, + Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -695,14 +695,12 @@ pub enum RuntimeApiRequest { ), /// Get the minimum required backing votes. MinimumBackingVotes(SessionIndex, RuntimeApiSender<u32>), - /// Get the backing state of the given para. - /// This is a staging API that will not be available on production runtimes. - StagingParaBackingState(ParaId, RuntimeApiSender<Option<vstaging_primitives::BackingState>>), + ParaBackingState(ParaId, RuntimeApiSender<Option<async_backing::BackingState>>), /// Get candidate's acceptance limitations for asynchronous backing for a relay parent. 
/// /// If it's not supported by the Runtime, the async backing is said to be disabled. - StagingAsyncBackingParams(RuntimeApiSender<vstaging_primitives::AsyncBackingParams>), + AsyncBackingParams(RuntimeApiSender<async_backing::AsyncBackingParams>), } impl RuntimeApiRequest { @@ -726,10 +724,8 @@ impl RuntimeApiRequest { /// `MinimumBackingVotes` pub const MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT: u32 = 6; - /// Minimum version for backing state, required for async backing. - /// - /// 99 for now, should be adjusted to VSTAGING/actual runtime version once released. - pub const STAGING_BACKING_STATE: u32 = 99; + /// Minimum version to enable asynchronous backing: `AsyncBackingParams` and `ParaBackingState`. + pub const ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT: u32 = 7; } /// A message to the Runtime API subsystem. diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 06aa351efb4bce18868893b9bbb2163f17e2cbe5..3007e985b4f7b2ec9a561d65ae7435a06959c478 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,9 +16,9 @@ use async_trait::async_trait; use polkadot_primitives::{ - runtime_api::ParachainHost, vstaging, Block, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, + async_backing, runtime_api::ParachainHost, slashing, Block, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, @@ -190,7 +190,7 @@ pub trait RuntimeApiSubsystemClient { async fn unapplied_slashes( &self, at: Hash, - ) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, ApiError>; + ) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ApiError>; /// Returns a merkle proof of a validator session key in a past session. /// @@ -199,7 +199,7 @@ pub trait RuntimeApiSubsystemClient { &self, at: Hash, validator_id: ValidatorId, - ) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>, ApiError>; + ) -> Result<Option<slashing::OpaqueKeyOwnershipProof>, ApiError>; /// Submits an unsigned extrinsic to slash validators who lost a dispute about /// a candidate of a past session. @@ -208,8 +208,8 @@ pub trait RuntimeApiSubsystemClient { async fn submit_report_dispute_lost( &self, at: Hash, - dispute_proof: vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Result<Option<()>, ApiError>; // === BABE API === @@ -232,7 +232,7 @@ pub trait RuntimeApiSubsystemClient { session_index: SessionIndex, ) -> Result<Option<ExecutorParams>, ApiError>; - // === STAGING v6 === + // === v6 === /// Get the minimum number of backing votes. 
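+ /// Available from runtime API v6 onward; callers typically gate on
+ /// `RuntimeApiRequest::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT` in `messages.rs`.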
async fn minimum_backing_votes( &self, @@ -240,21 +240,21 @@ pub trait RuntimeApiSubsystemClient { session_index: SessionIndex, ) -> Result<u32, ApiError>; - // === Asynchronous backing API === + // === v7: Asynchronous backing API === /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. - async fn staging_async_backing_params( + async fn async_backing_params( &self, at: Hash, - ) -> Result<polkadot_primitives::vstaging::AsyncBackingParams, ApiError>; + ) -> Result<polkadot_primitives::AsyncBackingParams, ApiError>; /// Returns the state of parachain backing for a given para. /// This is a staging method! Do not use on production runtimes! - async fn staging_para_backing_state( + async fn para_backing_state( &self, at: Hash, para_id: Id, - ) -> Result<Option<polkadot_primitives::vstaging::BackingState>, ApiError>; + ) -> Result<Option<async_backing::BackingState>, ApiError>; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. @@ -454,7 +454,7 @@ where async fn unapplied_slashes( &self, at: Hash, - ) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>, ApiError> { + ) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ApiError> { self.client.runtime_api().unapplied_slashes(at) } @@ -462,15 +462,15 @@ where &self, at: Hash, validator_id: ValidatorId, - ) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>, ApiError> { + ) -> Result<Option<slashing::OpaqueKeyOwnershipProof>, ApiError> { self.client.runtime_api().key_ownership_proof(at, validator_id) } async fn submit_report_dispute_lost( &self, at: Hash, - dispute_proof: vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Result<Option<()>, ApiError> { let mut runtime_api = self.client.runtime_api(); @@ -489,19 +489,19 @@ where self.client.runtime_api().minimum_backing_votes(at) } - async fn staging_para_backing_state( + async fn para_backing_state( &self, at: Hash, para_id: Id, - ) -> Result<Option<polkadot_primitives::vstaging::BackingState>, ApiError> { - self.client.runtime_api().staging_para_backing_state(at, para_id) + ) -> Result<Option<async_backing::BackingState>, ApiError> { + self.client.runtime_api().para_backing_state(at, para_id) } /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. 
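+ /// Available from runtime API v7 onward; see
+ /// `RuntimeApiRequest::ASYNC_BACKING_STATE_RUNTIME_REQUIREMENT` in `messages.rs`.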
- async fn staging_async_backing_params( + async fn async_backing_params( &self, at: Hash, - ) -> Result<polkadot_primitives::vstaging::AsyncBackingParams, ApiError> { - self.client.runtime_api().staging_async_backing_params(at) + ) -> Result<async_backing::AsyncBackingParams, ApiError> { + self.client.runtime_api().async_backing_params(at) } } diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs index 83c15fdef9593a3268b6c14f4ef67d2f4363dadb..a14536a17666c8e2d9183755e3579990f6185113 100644 --- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs +++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs @@ -20,7 +20,7 @@ use polkadot_node_subsystem::{ messages::{ChainApiMessage, ProspectiveParachainsMessage}, SubsystemSender, }; -use polkadot_primitives::vstaging::{BlockNumber, Hash, Id as ParaId}; +use polkadot_primitives::{BlockNumber, Hash, Id as ParaId}; use std::collections::HashMap; diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index 1487077d9edaba66a6cbcfa97cb5adae067284cc..c7b91bffb3d705b27ee4c611985328dd1b1b7e77 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -11,4 +11,1437 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -pub mod staging; +/// # Overview +/// +/// A set of utilities for node-side code to emulate the logic the runtime uses for checking +/// parachain blocks in order to build prospective parachains that are produced ahead of the +/// relay chain. These utilities allow the node-side to predict, with high accuracy, what +/// the relay-chain will accept in the near future. +/// +/// This module has 2 key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`] +/// exhaustively define the set of valid inputs and outputs to parachain execution. A +/// [`Fragment`] indicates a parachain block, anchored to the relay-chain at a particular +/// relay-chain block, known as the relay-parent. +/// +/// ## Fragment Validity +/// +/// Every relay-parent is implicitly associated with a unique set of [`Constraints`] that +/// describe the properties that must be true for a block to be included in a direct child of +/// that block, assuming there is no intermediate parachain block pending availability. +/// +/// However, the key factor that makes asynchronously-grown prospective chains +/// possible is the fact that the relay-chain accepts candidate blocks based on whether they +/// are valid under the constraints of the present moment, not based on whether they were +/// valid at the time of construction. +/// +/// As such, [`Fragment`]s are often, but not always constructed in such a way that they are +/// invalid at first and become valid later on, as the relay chain grows. +/// +/// # Usage +/// +/// It's expected that the users of this module will be building up trees of +/// [`Fragment`]s and consistently pruning and adding to the tree. +/// +/// ## Operating Constraints +/// +/// The *operating constraints* of a `Fragment` are the constraints with which that fragment +/// was intended to comply. The operating constraints are defined as the base constraints +/// of the relay-parent of the fragment modified by the cumulative modifications of all +/// fragments between the relay-parent and the current fragment. 
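+/// +/// For example, the operating constraints of the third fragment in a chain are the base +/// constraints of its relay-parent with the modifications of the first two fragments stacked +/// on top.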
+/// +/// What the operating constraints are, in practice, is a prediction about the state of the +/// relay-chain in the future. The relay-chain is aware of some current state, and we want to +/// make an intelligent prediction about what might be accepted in the future based on +/// prior fragments that also exist off-chain. +/// +/// ## Fragment Trees +/// +/// As the relay-chain grows, some predictions come true and others prove false. +/// And new predictions get made. These three changes correspond to the +/// three primary operations on fragment trees. +/// +/// A fragment tree is a mental model for thinking about a forking series of predictions +/// about a single parachain. There may be one or more fragment trees per parachain. +/// +/// In expectation, most parachains will have a plausibly-unique authorship method, which means +/// that they should really be much closer to fragment-chains, maybe with an occasional fork. +/// +/// Avoiding fragment-tree blowup is beyond the scope of this module. +/// +/// ### Pruning Fragment Trees +/// +/// When the relay-chain advances, we want to compare the new constraints of that relay-parent +/// to the roots of the fragment trees we have. There are three cases: +/// +/// 1. The root fragment is still valid under the new constraints. In this case, we do nothing. +/// This is the "prediction still uncertain" case. +/// +/// 2. The root fragment is invalid under the new constraints because it has been subsumed by +/// the relay-chain. In this case, we can discard the root and split & re-root the fragment +/// tree under its descendants and compare to the new constraints again. This is the +/// "prediction came true" case. +/// +/// 3. The root fragment is invalid under the new constraints because a competing parachain +/// block has been included or it would never be accepted for some other reason. In this +/// case, we can discard the entire fragment tree. This is the "prediction came false" case. +/// +/// This is all a bit of a simplification because it assumes that the relay-chain advances +/// without forks and is finalized instantly. In practice, the set of fragment-trees needs to +/// be observable from the perspective of a few different possible forks of the relay-chain and +/// not pruned too eagerly. +/// +/// Note that the fragments themselves don't need to change, and the only thing we care about +/// is whether the predictions they represent are still valid. +/// +/// ### Extending Fragment Trees +/// +/// As predictions fade into the past, new ones should be stacked on top. +/// +/// Every new relay-chain block is an opportunity to make a new prediction about the future. +/// Higher-level logic should select the leaves of the fragment-trees to build upon, or decide +/// whether to create a new fragment-tree. +/// +/// ### Code Upgrades +/// +/// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade +/// scheduling logic is very path-dependent and intricate, so we just assume that code upgrades +/// can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep +/// in practice, and code upgrades are fairly rare. So what's likely to happen around code +/// upgrades is that the entire fragment-tree has to get discarded at some point. +/// +/// That means a few blocks of execution time lost, which is not a big deal for code upgrades +/// that happen, in practice, at most once every few weeks.
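+///
+/// ## Example
+///
+/// A minimal, self-contained sketch of the constraint flow, kept deliberately close to the
+/// unit tests at the bottom of this module. All field values are invented for illustration,
+/// and the import assumes this module keeps its
+/// `polkadot_node_subsystem_util::inclusion_emulator` path:
+///
+/// ```
+/// use polkadot_node_subsystem_util::inclusion_emulator::{
+///     ConstraintModifications, Constraints, HrmpWatermarkUpdate, InboundHrmpLimitations,
+/// };
+/// use polkadot_primitives::{HeadData, ValidationCode};
+/// use std::collections::HashMap;
+///
+/// // Base constraints as they might be fetched from the runtime for some relay-parent.
+/// let constraints = Constraints {
+///     min_relay_parent_number: 5,
+///     max_pov_size: 1024 * 1024,
+///     max_code_size: 1024 * 1024,
+///     ump_remaining: 10,
+///     ump_remaining_bytes: 4096,
+///     max_ump_num_per_candidate: 5,
+///     dmp_remaining_messages: Vec::new(),
+///     hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] },
+///     hrmp_channels_out: HashMap::new(),
+///     max_hrmp_num_per_candidate: 5,
+///     required_parent: HeadData::from(vec![1, 2, 3]),
+///     validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(),
+///     upgrade_restriction: None,
+///     future_validation_code: None,
+/// };
+///
+/// // The effects of some prospective candidate: it sends two upward messages
+/// // and advances the HRMP watermark to block 6, where inbound messages were queued.
+/// let mut modifications = ConstraintModifications::identity();
+/// modifications.ump_messages_sent = 2;
+/// modifications.ump_bytes_sent = 64;
+/// modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(6));
+///
+/// // `check_modifications` only validates; `apply_modifications` also yields
+/// // the constraints a *child* of this candidate would operate under.
+/// assert!(constraints.check_modifications(&modifications).is_ok());
+/// let child = constraints.apply_modifications(&modifications).expect("checked above");
+/// assert_eq!(child.ump_remaining, 8);
+/// assert_eq!(child.hrmp_inbound.valid_watermarks, vec![8]);
+/// ```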
+use polkadot_primitives::{ + async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments, + CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData, + UpgradeRestriction, ValidationCodeHash, +}; +use std::{ + borrow::{Borrow, Cow}, + collections::HashMap, +}; + +/// Constraints on inbound HRMP channels. +#[derive(Debug, Clone, PartialEq)] +pub struct InboundHrmpLimitations { + /// An exhaustive set of all valid watermarks, sorted ascending + pub valid_watermarks: Vec<BlockNumber>, +} + +/// Constraints on outbound HRMP channels. +#[derive(Debug, Clone, PartialEq)] +pub struct OutboundHrmpChannelLimitations { + /// The maximum bytes that can be written to the channel. + pub bytes_remaining: usize, + /// The maximum messages that can be written to the channel. + pub messages_remaining: usize, +} + +/// Constraints on the actions that can be taken by a new parachain +/// block. These limitations are implicitly associated with some particular +/// parachain, which should be apparent from usage. +#[derive(Debug, Clone, PartialEq)] +pub struct Constraints { + /// The minimum relay-parent number accepted under these constraints. + pub min_relay_parent_number: BlockNumber, + /// The maximum Proof-of-Validity size allowed, in bytes. + pub max_pov_size: usize, + /// The maximum new validation code size allowed, in bytes. + pub max_code_size: usize, + /// The amount of UMP messages remaining. + pub ump_remaining: usize, + /// The amount of UMP bytes remaining. + pub ump_remaining_bytes: usize, + /// The maximum number of UMP messages allowed per candidate. + pub max_ump_num_per_candidate: usize, + /// Remaining DMP queue. Only includes sent-at block numbers. + pub dmp_remaining_messages: Vec<BlockNumber>, + /// The limitations of all registered inbound HRMP channels. + pub hrmp_inbound: InboundHrmpLimitations, + /// The limitations of all registered outbound HRMP channels. + pub hrmp_channels_out: HashMap<ParaId, OutboundHrmpChannelLimitations>, + /// The maximum number of HRMP messages allowed per candidate. + pub max_hrmp_num_per_candidate: usize, + /// The required parent head-data of the parachain. + pub required_parent: HeadData, + /// The expected validation-code-hash of this parachain. + pub validation_code_hash: ValidationCodeHash, + /// The code upgrade restriction signal as-of this parachain. + pub upgrade_restriction: Option<UpgradeRestriction>, + /// The future validation code hash, if any, and at what relay-parent + /// number the upgrade would be minimally applied. 
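+ /// A candidate whose relay-parent number is at or past that number counts as
+ /// applying the upgrade; see `code_upgrade_applied` in `Fragment::new`.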
+ pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, +} + +impl From<PrimitiveConstraints> for Constraints { + fn from(c: PrimitiveConstraints) -> Self { + Constraints { + min_relay_parent_number: c.min_relay_parent_number, + max_pov_size: c.max_pov_size as _, + max_code_size: c.max_code_size as _, + ump_remaining: c.ump_remaining as _, + ump_remaining_bytes: c.ump_remaining_bytes as _, + max_ump_num_per_candidate: c.max_ump_num_per_candidate as _, + dmp_remaining_messages: c.dmp_remaining_messages, + hrmp_inbound: InboundHrmpLimitations { + valid_watermarks: c.hrmp_inbound.valid_watermarks, + }, + hrmp_channels_out: c + .hrmp_channels_out + .into_iter() + .map(|(para_id, limits)| { + ( + para_id, + OutboundHrmpChannelLimitations { + bytes_remaining: limits.bytes_remaining as _, + messages_remaining: limits.messages_remaining as _, + }, + ) + }) + .collect(), + max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _, + required_parent: c.required_parent, + validation_code_hash: c.validation_code_hash, + upgrade_restriction: c.upgrade_restriction, + future_validation_code: c.future_validation_code, + } + } +} + +/// Kinds of errors that can occur when modifying constraints. +#[derive(Debug, Clone, PartialEq)] +pub enum ModificationError { + /// The HRMP watermark is not allowed. + DisallowedHrmpWatermark(BlockNumber), + /// No such HRMP outbound channel. + NoSuchHrmpChannel(ParaId), + /// Too many messages submitted to HRMP channel. + HrmpMessagesOverflow { + /// The ID of the recipient. + para_id: ParaId, + /// The amount of remaining messages in the capacity of the channel. + messages_remaining: usize, + /// The amount of messages submitted to the channel. + messages_submitted: usize, + }, + /// Too many bytes submitted to HRMP channel. + HrmpBytesOverflow { + /// The ID of the recipient. + para_id: ParaId, + /// The amount of remaining bytes in the capacity of the channel. + bytes_remaining: usize, + /// The amount of bytes submitted to the channel. + bytes_submitted: usize, + }, + /// Too many messages submitted to UMP. + UmpMessagesOverflow { + /// The amount of remaining messages in the capacity of UMP. + messages_remaining: usize, + /// The amount of messages submitted to UMP. + messages_submitted: usize, + }, + /// Too many bytes submitted to UMP. + UmpBytesOverflow { + /// The amount of remaining bytes in the capacity of UMP. + bytes_remaining: usize, + /// The amount of bytes submitted to UMP. + bytes_submitted: usize, + }, + /// Too many messages processed from DMP. + DmpMessagesUnderflow { + /// The amount of messages waiting to be processed from DMP. + messages_remaining: usize, + /// The amount of messages processed. + messages_processed: usize, + }, + /// No validation code upgrade to apply. + AppliedNonexistentCodeUpgrade, +} + +impl Constraints { + /// Check modifications against constraints. + pub fn check_modifications( + &self, + modifications: &ConstraintModifications, + ) -> Result<(), ModificationError> { + if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark { + // head updates are always valid. 
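+ // A `Trunk` update, by contrast, must land exactly on one of the entries in
+ // `valid_watermarks` (sorted ascending), i.e. on a block where inbound
+ // messages were queued.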
+ if self.hrmp_inbound.valid_watermarks.iter().all(|w| w != &hrmp_watermark) { + return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)) + } + } + + for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { + if let Some(outbound) = self.hrmp_channels_out.get(&id) { + outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or( + ModificationError::HrmpBytesOverflow { + para_id: *id, + bytes_remaining: outbound.bytes_remaining, + bytes_submitted: outbound_hrmp_mod.bytes_submitted, + }, + )?; + + outbound + .messages_remaining + .checked_sub(outbound_hrmp_mod.messages_submitted) + .ok_or(ModificationError::HrmpMessagesOverflow { + para_id: *id, + messages_remaining: outbound.messages_remaining, + messages_submitted: outbound_hrmp_mod.messages_submitted, + })?; + } else { + return Err(ModificationError::NoSuchHrmpChannel(*id)) + } + } + + self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( + ModificationError::UmpMessagesOverflow { + messages_remaining: self.ump_remaining, + messages_submitted: modifications.ump_messages_sent, + }, + )?; + + self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or( + ModificationError::UmpBytesOverflow { + bytes_remaining: self.ump_remaining_bytes, + bytes_submitted: modifications.ump_bytes_sent, + }, + )?; + + self.dmp_remaining_messages + .len() + .checked_sub(modifications.dmp_messages_processed) + .ok_or(ModificationError::DmpMessagesUnderflow { + messages_remaining: self.dmp_remaining_messages.len(), + messages_processed: modifications.dmp_messages_processed, + })?; + + if self.future_validation_code.is_none() && modifications.code_upgrade_applied { + return Err(ModificationError::AppliedNonexistentCodeUpgrade) + } + + Ok(()) + } + + /// Apply modifications to these constraints. If this succeeds, it passes + /// all sanity-checks. + pub fn apply_modifications( + &self, + modifications: &ConstraintModifications, + ) -> Result<Self, ModificationError> { + let mut new = self.clone(); + + if let Some(required_parent) = modifications.required_parent.as_ref() { + new.required_parent = required_parent.clone(); + } + + if let Some(ref hrmp_watermark) = modifications.hrmp_watermark { + match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) { + Ok(pos) => { + // Exact match, so this is OK in all cases. + let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); + }, + Err(pos) => match hrmp_watermark { + HrmpWatermarkUpdate::Head(_) => { + // Updates to Head are always OK. + let _ = new.hrmp_inbound.valid_watermarks.drain(..pos); + }, + HrmpWatermarkUpdate::Trunk(n) => { + // Trunk update landing on disallowed watermark is not OK. 
+ return Err(ModificationError::DisallowedHrmpWatermark(*n)) + }, + }, + } + } + + for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { + if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) { + outbound.bytes_remaining = outbound + .bytes_remaining + .checked_sub(outbound_hrmp_mod.bytes_submitted) + .ok_or(ModificationError::HrmpBytesOverflow { + para_id: *id, + bytes_remaining: outbound.bytes_remaining, + bytes_submitted: outbound_hrmp_mod.bytes_submitted, + })?; + + outbound.messages_remaining = outbound + .messages_remaining + .checked_sub(outbound_hrmp_mod.messages_submitted) + .ok_or(ModificationError::HrmpMessagesOverflow { + para_id: *id, + messages_remaining: outbound.messages_remaining, + messages_submitted: outbound_hrmp_mod.messages_submitted, + })?; + } else { + return Err(ModificationError::NoSuchHrmpChannel(*id)) + } + } + + new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( + ModificationError::UmpMessagesOverflow { + messages_remaining: new.ump_remaining, + messages_submitted: modifications.ump_messages_sent, + }, + )?; + + new.ump_remaining_bytes = new + .ump_remaining_bytes + .checked_sub(modifications.ump_bytes_sent) + .ok_or(ModificationError::UmpBytesOverflow { + bytes_remaining: new.ump_remaining_bytes, + bytes_submitted: modifications.ump_bytes_sent, + })?; + + if modifications.dmp_messages_processed > new.dmp_remaining_messages.len() { + return Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: new.dmp_remaining_messages.len(), + messages_processed: modifications.dmp_messages_processed, + }) + } else { + new.dmp_remaining_messages = + new.dmp_remaining_messages[modifications.dmp_messages_processed..].to_vec(); + } + + if modifications.code_upgrade_applied { + new.validation_code_hash = new + .future_validation_code + .take() + .ok_or(ModificationError::AppliedNonexistentCodeUpgrade)? + .1; + } + + Ok(new) + } +} + +/// Information about a relay-chain block. +#[derive(Debug, Clone, PartialEq)] +pub struct RelayChainBlockInfo { + /// The hash of the relay-chain block. + pub hash: Hash, + /// The number of the relay-chain block. + pub number: BlockNumber, + /// The storage-root of the relay-chain block. + pub storage_root: Hash, +} + +/// An update to outbound HRMP channels. +#[derive(Debug, Clone, PartialEq, Default)] +pub struct OutboundHrmpChannelModification { + /// The number of bytes submitted to the channel. + pub bytes_submitted: usize, + /// The number of messages submitted to the channel. + pub messages_submitted: usize, +} + +/// An update to the HRMP Watermark. +#[derive(Debug, Clone, PartialEq)] +pub enum HrmpWatermarkUpdate { + /// This is an update placing the watermark at the head of the chain, + /// which is always legal. + Head(BlockNumber), + /// This is an update placing the watermark behind the head of the + /// chain, which is only legal if it lands on a block where messages + /// were queued. + Trunk(BlockNumber), +} + +impl HrmpWatermarkUpdate { + fn watermark(&self) -> BlockNumber { + match *self { + HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n, + } + } +} + +/// Modifications to constraints as a result of prospective candidates. +#[derive(Debug, Clone, PartialEq)] +pub struct ConstraintModifications { + /// The required parent head to build upon. + pub required_parent: Option<HeadData>, + /// The new HRMP watermark + pub hrmp_watermark: Option<HrmpWatermarkUpdate>, + /// Outbound HRMP channel modifications. 
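+ /// Keyed by recipient para; entries combine additively under [`Self::stack`].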
+ pub outbound_hrmp: HashMap<ParaId, OutboundHrmpChannelModification>, + /// The amount of UMP messages sent. + pub ump_messages_sent: usize, + /// The amount of UMP bytes sent. + pub ump_bytes_sent: usize, + /// The amount of DMP messages processed. + pub dmp_messages_processed: usize, + /// Whether a pending code upgrade has been applied. + pub code_upgrade_applied: bool, +} + +impl ConstraintModifications { + /// The 'identity' modifications: these can be applied to + /// any constraints and yield the exact same result. + pub fn identity() -> Self { + ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: HashMap::new(), + ump_messages_sent: 0, + ump_bytes_sent: 0, + dmp_messages_processed: 0, + code_upgrade_applied: false, + } + } + + /// Stack other modifications on top of these. + /// + /// This does no sanity-checking, so if `other` is garbage relative + /// to `self`, then the new value will be garbage as well. + /// + /// This is an addition which is not commutative. + pub fn stack(&mut self, other: &Self) { + if let Some(ref new_parent) = other.required_parent { + self.required_parent = Some(new_parent.clone()); + } + if let Some(ref new_hrmp_watermark) = other.hrmp_watermark { + self.hrmp_watermark = Some(new_hrmp_watermark.clone()); + } + + for (id, mods) in &other.outbound_hrmp { + let record = self.outbound_hrmp.entry(*id).or_default(); + record.messages_submitted += mods.messages_submitted; + record.bytes_submitted += mods.bytes_submitted; + } + + self.ump_messages_sent += other.ump_messages_sent; + self.ump_bytes_sent += other.ump_bytes_sent; + self.dmp_messages_processed += other.dmp_messages_processed; + self.code_upgrade_applied |= other.code_upgrade_applied; + } +} + +/// The prospective candidate. +/// +/// This comprises the key information that represents a candidate +/// without pinning it to a particular session. For example, everything +/// to do with the collator's signature and commitments is represented +/// here. But the erasure-root is not. This means that prospective candidates +/// are not correlated to any session in particular. +#[derive(Debug, Clone, PartialEq)] +pub struct ProspectiveCandidate<'a> { + /// The commitments to the output of the execution. + pub commitments: Cow<'a, CandidateCommitments>, + /// The collator that created the candidate. + pub collator: CollatorId, + /// The signature of the collator on the payload. + pub collator_signature: CollatorSignature, + /// The persisted validation data used to create the candidate. + pub persisted_validation_data: PersistedValidationData, + /// The hash of the PoV. + pub pov_hash: Hash, + /// The validation code hash used by the candidate. + pub validation_code_hash: ValidationCodeHash, +} + +impl<'a> ProspectiveCandidate<'a> { + fn into_owned(self) -> ProspectiveCandidate<'static> { + ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self } + } + + /// Partially clone the prospective candidate, but borrow the + /// parts which are potentially heavy.
+ pub fn partial_clone(&self) -> ProspectiveCandidate { + ProspectiveCandidate { + commitments: Cow::Borrowed(self.commitments.borrow()), + collator: self.collator.clone(), + collator_signature: self.collator_signature.clone(), + persisted_validation_data: self.persisted_validation_data.clone(), + pov_hash: self.pov_hash, + validation_code_hash: self.validation_code_hash, + } + } +} + +#[cfg(test)] +impl ProspectiveCandidate<'static> { + fn commitments_mut(&mut self) -> &mut CandidateCommitments { + self.commitments.to_mut() + } +} + +/// Kinds of errors with the validity of a fragment. +#[derive(Debug, Clone, PartialEq)] +pub enum FragmentValidityError { + /// The validation code of the candidate doesn't match the + /// operating constraints. + /// + /// Expected, Got + ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash), + /// The persisted-validation-data doesn't match. + /// + /// Expected, Got + PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData), + /// The outputs of the candidate are invalid under the operating + /// constraints. + OutputsInvalid(ModificationError), + /// New validation code size too big. + /// + /// Max allowed, new. + CodeSizeTooLarge(usize, usize), + /// Relay parent too old. + /// + /// Min allowed, current. + RelayParentTooOld(BlockNumber, BlockNumber), + /// Para is required to process at least one DMP message from the queue. + DmpAdvancementRule, + /// Too many upward messages submitted. + UmpMessagesPerCandidateOverflow { + /// The amount of messages a single candidate can submit. + messages_allowed: usize, + /// The amount of upward messages submitted. + messages_submitted: usize, + }, + /// Too many messages submitted to all HRMP channels. + HrmpMessagesPerCandidateOverflow { + /// The amount of messages a single candidate can submit. + messages_allowed: usize, + /// The amount of messages sent to all HRMP channels. + messages_submitted: usize, + }, + /// Code upgrade not allowed. + CodeUpgradeRestricted, + /// HRMP messages are not in ascending order of recipient or contain duplicates. + /// + /// The `usize` is the index into the outbound HRMP messages of + /// the candidate. + HrmpMessagesDescendingOrDuplicate(usize), +} + +/// A parachain fragment, representing another prospective parachain block. +/// +/// This is a type which guarantees that the candidate is valid under the +/// operating constraints. +#[derive(Debug, Clone, PartialEq)] +pub struct Fragment<'a> { + /// The new relay-parent. + relay_parent: RelayChainBlockInfo, + /// The constraints this fragment is operating under. + operating_constraints: Constraints, + /// The core information about the prospective candidate. + candidate: ProspectiveCandidate<'a>, + /// Modifications to the constraints based on the outputs of + /// the candidate. + modifications: ConstraintModifications, +} + +impl<'a> Fragment<'a> { + /// Create a new fragment. + /// + /// This fails if the fragment isn't in line with the operating + /// constraints. That is, either its inputs or its outputs fail + /// checks against the constraints. + /// + /// This doesn't check that the collator signature is valid or + /// whether the PoV is small enough.
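+ ///
+ /// A rough usage sketch (identifiers are placeholders, not items from this crate):
+ ///
+ /// ```ignore
+ /// let fragment = Fragment::new(relay_parent_info, operating_constraints, candidate)?;
+ /// // On success, the fragment exposes the modifications its candidate makes,
+ /// // ready to be stacked when building a child fragment.
+ /// let mods = fragment.constraint_modifications();
+ /// ```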
+ pub fn new( + relay_parent: RelayChainBlockInfo, + operating_constraints: Constraints, + candidate: ProspectiveCandidate<'a>, + ) -> Result<Self, FragmentValidityError> { + let modifications = { + let commitments = &candidate.commitments; + ConstraintModifications { + required_parent: Some(commitments.head_data.clone()), + hrmp_watermark: Some({ + if commitments.hrmp_watermark == relay_parent.number { + HrmpWatermarkUpdate::Head(commitments.hrmp_watermark) + } else { + HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark) + } + }), + outbound_hrmp: { + let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); + + let mut last_recipient = None::<ParaId>; + for (i, message) in commitments.horizontal_messages.iter().enumerate() { + if let Some(last) = last_recipient { + if last >= message.recipient { + return Err( + FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i), + ) + } + } + + last_recipient = Some(message.recipient); + let record = outbound_hrmp.entry(message.recipient).or_default(); + + record.bytes_submitted += message.data.len(); + record.messages_submitted += 1; + } + + outbound_hrmp + }, + ump_messages_sent: commitments.upward_messages.len(), + ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), + dmp_messages_processed: commitments.processed_downward_messages as _, + code_upgrade_applied: operating_constraints + .future_validation_code + .map_or(false, |(at, _)| relay_parent.number >= at), + } + }; + + validate_against_constraints( + &operating_constraints, + &relay_parent, + &candidate, + &modifications, + )?; + + Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) + } + + /// Access the relay parent information. + pub fn relay_parent(&self) -> &RelayChainBlockInfo { + &self.relay_parent + } + + /// Access the operating constraints + pub fn operating_constraints(&self) -> &Constraints { + &self.operating_constraints + } + + /// Access the underlying prospective candidate. + pub fn candidate(&self) -> &ProspectiveCandidate<'a> { + &self.candidate + } + + /// Modifications to constraints based on the outputs of the candidate. + pub fn constraint_modifications(&self) -> &ConstraintModifications { + &self.modifications + } + + /// Convert the fragment into an owned variant. + pub fn into_owned(self) -> Fragment<'static> { + Fragment { candidate: self.candidate.into_owned(), ..self } + } + + /// Validate this fragment against some set of constraints + /// instead of the operating constraints. 
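+ ///
+ /// This is useful when the relay-chain has advanced and a fragment must be
+ /// re-checked against fresh base constraints; cf. "Pruning Fragment Trees"
+ /// in the module docs.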
+ pub fn validate_against_constraints( + &self, + constraints: &Constraints, + ) -> Result<(), FragmentValidityError> { + validate_against_constraints( + constraints, + &self.relay_parent, + &self.candidate, + &self.modifications, + ) + } +} + +fn validate_against_constraints( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + candidate: &ProspectiveCandidate, + modifications: &ConstraintModifications, +) -> Result<(), FragmentValidityError> { + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + if expected_pvd != candidate.persisted_validation_data { + return Err(FragmentValidityError::PersistedValidationDataMismatch( + expected_pvd, + candidate.persisted_validation_data.clone(), + )) + } + + if constraints.validation_code_hash != candidate.validation_code_hash { + return Err(FragmentValidityError::ValidationCodeMismatch( + constraints.validation_code_hash, + candidate.validation_code_hash, + )) + } + + if relay_parent.number < constraints.min_relay_parent_number { + return Err(FragmentValidityError::RelayParentTooOld( + constraints.min_relay_parent_number, + relay_parent.number, + )) + } + + if candidate.commitments.new_validation_code.is_some() { + match constraints.upgrade_restriction { + None => {}, + Some(UpgradeRestriction::Present) => + return Err(FragmentValidityError::CodeUpgradeRestricted), + } + } + + let announced_code_size = candidate + .commitments + .new_validation_code + .as_ref() + .map_or(0, |code| code.0.len()); + + if announced_code_size > constraints.max_code_size { + return Err(FragmentValidityError::CodeSizeTooLarge( + constraints.max_code_size, + announced_code_size, + )) + } + + if modifications.dmp_messages_processed == 0 { + if constraints + .dmp_remaining_messages + .get(0) + .map_or(false, |&msg_sent_at| msg_sent_at <= relay_parent.number) + { + return Err(FragmentValidityError::DmpAdvancementRule) + } + } + + if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { + return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: constraints.max_hrmp_num_per_candidate, + messages_submitted: candidate.commitments.horizontal_messages.len(), + }) + } + + if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { + return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { + messages_allowed: constraints.max_ump_num_per_candidate, + messages_submitted: candidate.commitments.upward_messages.len(), + }) + } + + constraints + .check_modifications(&modifications) + .map_err(FragmentValidityError::OutputsInvalid) +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_primitives::{ + CollatorPair, HorizontalMessages, OutboundHrmpMessage, ValidationCode, + }; + use sp_application_crypto::Pair; + + #[test] + fn stack_modifications() { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + let a = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map + }, + ump_messages_sent: 6, + 
ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: true, + }; + + let b = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map.insert( + para_c, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map + }, + ump_messages_sent: 6, + ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: true, + }; + + let mut c = a.clone(); + c.stack(&b); + + assert_eq!( + c, + ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_a, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); + + map.insert( + para_b, + OutboundHrmpChannelModification { + bytes_submitted: 200, + messages_submitted: 10, + }, + ); + + map.insert( + para_c, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); + + map + }, + ump_messages_sent: 12, + ump_bytes_sent: 2000, + dmp_messages_processed: 10, + code_upgrade_applied: true, + }, + ); + + let mut d = ConstraintModifications::identity(); + d.stack(&a); + d.stack(&b); + + assert_eq!(c, d); + } + + fn make_constraints() -> Constraints { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + Constraints { + min_relay_parent_number: 5, + max_pov_size: 1000, + max_code_size: 1000, + ump_remaining: 10, + ump_remaining_bytes: 1024, + max_ump_num_per_candidate: 5, + dmp_remaining_messages: Vec::new(), + hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] }, + hrmp_channels_out: { + let mut map = HashMap::new(); + + map.insert( + para_a, + OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 }, + ); + + map.insert( + para_b, + OutboundHrmpChannelLimitations { + messages_remaining: 10, + bytes_remaining: 1024, + }, + ); + + map.insert( + para_c, + OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 }, + ); + + map + }, + max_hrmp_num_per_candidate: 5, + required_parent: HeadData::from(vec![1, 2, 3]), + validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(), + upgrade_restriction: None, + future_validation_code: None, + } + } + + #[test] + fn constraints_disallowed_trunk_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7)); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + } + + #[test] + fn constraints_always_allow_head_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7)); + + assert!(constraints.check_modifications(&modifications).is_ok()); + + let new_constraints = constraints.apply_modifications(&modifications).unwrap(); + assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]); + } + + #[test] + fn constraints_no_such_hrmp_channel() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let bad_para 
= ParaId::from(100u32); + modifications.outbound_hrmp.insert( + bad_para, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + } + + #[test] + fn constraints_hrmp_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, + messages_submitted: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, + messages_submitted: 6, + }), + ); + } + + #[test] + fn constraints_hrmp_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + } + + #[test] + fn constraints_ump_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_messages_sent = 11; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + } + + #[test] + fn constraints_ump_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_bytes_sent = 1025; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + } + + #[test] + fn constraints_dmp_messages() { + let mut constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + assert!(constraints.check_modifications(&modifications).is_ok()); + assert!(constraints.apply_modifications(&modifications).is_ok()); + + modifications.dmp_messages_processed = 6; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 0, + messages_processed: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + 
Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 0, + messages_processed: 6, + }), + ); + + constraints.dmp_remaining_messages = vec![1, 4, 8, 10]; + modifications.dmp_messages_processed = 2; + assert!(constraints.check_modifications(&modifications).is_ok()); + let constraints = constraints + .apply_modifications(&modifications) + .expect("modifications are valid"); + + assert_eq!(&constraints.dmp_remaining_messages, &[8, 10]); + } + + #[test] + fn constraints_nonexistent_code_upgrade() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.code_upgrade_applied = true; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + } + + fn make_candidate( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + ) -> ProspectiveCandidate<'static> { + let collator_pair = CollatorPair::generate().0; + let collator = collator_pair.public(); + + let sig = collator_pair.sign(b"blabla".as_slice()); + + ProspectiveCandidate { + commitments: Cow::Owned(CandidateCommitments { + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + head_data: HeadData::from(vec![1, 2, 3, 4, 5]), + processed_downward_messages: 0, + hrmp_watermark: relay_parent.number, + }), + collator, + collator_signature: sig, + persisted_validation_data: PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }, + pov_hash: Hash::repeat_byte(1), + validation_code_hash: constraints.validation_code_hash, + } + } + + #[test] + fn fragment_validation_code_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let expected_code = constraints.validation_code_hash; + let got_code = ValidationCode(vec![9, 9, 9]).hash(); + + candidate.validation_code_hash = got_code; + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), + ) + } + + #[test] + fn fragment_pvd_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let relay_parent_b = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0b), + storage_root: Hash::repeat_byte(0xee), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent_b.number, + relay_parent_storage_root: relay_parent_b.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + let got_pvd = candidate.persisted_validation_data.clone(); + + assert_eq!( + Fragment::new(relay_parent_b, constraints, candidate), + Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), + ); + } + + #[test] + fn fragment_code_size_too_large() { + let relay_parent = RelayChainBlockInfo { + number: 6, + 
hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_code_size = constraints.max_code_size; + candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), + ); + } + + #[test] + fn fragment_relay_parent_too_old() { + let relay_parent = RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::RelayParentTooOld(5, 3,)), + ); + } + + #[test] + fn fragment_hrmp_messages_overflow() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_hrmp = constraints.max_hrmp_num_per_candidate; + + candidate + .commitments_mut() + .horizontal_messages + .try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { + recipient: ParaId::from(i as u32), + data: vec![1, 2, 3], + })) + .unwrap(); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: max_hrmp, + messages_submitted: max_hrmp + 1, + }), + ); + } + + #[test] + fn fragment_dmp_advancement_rule() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let mut constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + // Empty dmp queue is ok. + assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + // Unprocessed message that was sent later is ok. 
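+		// (This is the advancement rule enforced by `validate_against_constraints`:
+		// a candidate processing zero DMP messages is only valid when the queue is
+		// empty or its oldest message was sent after the relay-parent, as the
+		// cases below exercise.)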
+ constraints.dmp_remaining_messages = vec![relay_parent.number + 1]; + assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + + for block_number in 0..=relay_parent.number { + constraints.dmp_remaining_messages = vec![block_number]; + + assert_eq!( + Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Err(FragmentValidityError::DmpAdvancementRule), + ); + } + + candidate.commitments.to_mut().processed_downward_messages = 1; + assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); + } + + #[test] + fn fragment_ump_messages_overflow() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_ump = constraints.max_ump_num_per_candidate; + + candidate + .commitments + .to_mut() + .upward_messages + .try_extend((0..max_ump + 1).map(|i| vec![i as u8])) + .unwrap(); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { + messages_allowed: max_ump, + messages_submitted: max_ump + 1, + }), + ); + } + + #[test] + fn fragment_code_upgrade_restricted() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let mut constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + constraints.upgrade_restriction = Some(UpgradeRestriction::Present); + candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeUpgradeRestricted), + ); + } + + #[test] + fn fragment_hrmp_messages_descending_or_duplicate() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, + ]); + + assert_eq!( + Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + + candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, + ]); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + } +} diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs deleted file mode 100644 index eb06322975282cd7ee7db14eec80b2d1556f57fd..0000000000000000000000000000000000000000 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/staging.rs +++ /dev/null @@ -1,1450 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-//! The implementation of the inclusion emulator for the 'staging' runtime version.
-//!
-//! # Overview
-//!
-//! A set of utilities for node-side code to emulate the logic the runtime uses for checking
-//! parachain blocks in order to build prospective parachains that are produced ahead of the
-//! relay chain. These utilities allow the node-side to predict, with high accuracy, what
-//! the relay-chain will accept in the near future.
-//!
-//! This module has two key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`]
-//! exhaustively define the set of valid inputs and outputs to parachain execution. A [`Fragment`]
-//! indicates a parachain block, anchored to the relay-chain at a particular relay-chain block,
-//! known as the relay-parent.
-//!
-//! ## Fragment Validity
-//!
-//! Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe
-//! the properties that must be true for a block to be included in a direct child of that block,
-//! assuming there is no intermediate parachain block pending availability.
-//!
-//! However, the key factor that makes asynchronously-grown prospective chains
-//! possible is the fact that the relay-chain accepts candidate blocks based on whether they
-//! are valid under the constraints of the present moment, not based on whether they were
-//! valid at the time of construction.
-//!
-//! As such, [`Fragment`]s are often, but not always, constructed in such a way that they are
-//! invalid at first and become valid later on, as the relay chain grows.
-//!
-//! # Usage
-//!
-//! It's expected that the users of this module will be building up trees of
-//! [`Fragment`]s and consistently pruning and adding to them.
-//!
-//! ## Operating Constraints
-//!
-//! The *operating constraints* of a `Fragment` are the constraints with which that fragment
-//! was intended to comply. The operating constraints are defined as the base constraints
-//! of the relay-parent of the fragment modified by the cumulative modifications of all
-//! fragments between the relay-parent and the current fragment.
-//!
-//! What the operating constraints are, in practice, is a prediction about the state of the
-//! relay-chain in the future. The relay-chain is aware of some current state, and we want to
-//! make an intelligent prediction about what might be accepted in the future based on
-//! prior fragments that also exist off-chain.
-//!
-//! ## Fragment Trees
-//!
-//! As the relay-chain grows, some predictions come true and others come false.
-//! And new predictions get made. These three changes correspond distinctly to the
-//! three primary operations on fragment trees.
-//!
-//! A fragment tree is a mental model for thinking about a forking series of predictions
-//! about a single parachain. There may be one or more fragment trees per parachain.
-//!
-//! In expectation, most parachains will have a plausibly-unique authorship method which means that
-//!
they should really be much closer to fragment-chains, maybe with an occasional fork.
-//!
-//! Avoiding fragment-tree blowup is beyond the scope of this module.
-//!
-//! ### Pruning Fragment Trees
-//!
-//! When the relay-chain advances, we want to compare the new constraints of that relay-parent to
-//! the roots of the fragment trees we have. There are three cases:
-//!
-//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing. This
-//! is the "prediction still uncertain" case.
-//!
-//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the
-//! relay-chain. In this case, we can discard the root and split & re-root the fragment tree under
-//! its descendants and compare to the new constraints again. This is the "prediction came true"
-//! case.
-//!
-//! 3. The root fragment is invalid under the new constraints because a competing parachain block
-//! has been included or it would never be accepted for some other reason. In this case, we can
-//! discard the entire fragment tree. This is the "prediction came false" case.
-//!
-//! This is all a bit of a simplification because it assumes that the relay-chain advances without
-//! forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable
-//! from the perspective of a few different possible forks of the relay-chain and not pruned
-//! too eagerly.
-//!
-//! Note that the fragments themselves don't need to change and the only thing we care about
-//! is whether the predictions they represent are still valid.
-//!
-//! ### Extending Fragment Trees
-//!
-//! As predictions fade into the past, new ones should be stacked on top.
-//!
-//! Every new relay-chain block is an opportunity to make a new prediction about the future.
-//! Higher-level logic should select the leaves of the fragment-trees to build upon, or decide
-//! whether to create a new fragment-tree.
-//!
-//! ### Code Upgrades
-//!
-//! Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade scheduling
-//! logic is very path-dependent and intricate, so we just assume that code upgrades
-//! can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep
-//! in practice, and code upgrades are fairly rare. So what's likely to happen around code
-//! upgrades is that the entire fragment-tree has to get discarded at some point.
-//!
-//! That means a few blocks of execution time lost, which is not a big deal for code upgrades
-//! that in practice happen at most once every few weeks.
-
-use polkadot_primitives::vstaging::{
-	BlockNumber, CandidateCommitments, CollatorId, CollatorSignature,
-	Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData,
-	UpgradeRestriction, ValidationCodeHash,
-};
-use std::{
-	borrow::{Borrow, Cow},
-	collections::HashMap,
-};
-
-/// Constraints on inbound HRMP channels.
-#[derive(Debug, Clone, PartialEq)]
-pub struct InboundHrmpLimitations {
-	/// An exhaustive set of all valid watermarks, sorted ascending.
-	pub valid_watermarks: Vec<BlockNumber>,
-}
-
-/// Constraints on outbound HRMP channels.
-#[derive(Debug, Clone, PartialEq)]
-pub struct OutboundHrmpChannelLimitations {
-	/// The maximum bytes that can be written to the channel.
-	pub bytes_remaining: usize,
-	/// The maximum messages that can be written to the channel.
-	pub messages_remaining: usize,
-}
-
-/// Constraints on the actions that can be taken by a new parachain
-/// block.
These limitations are implicitly associated with some particular -/// parachain, which should be apparent from usage. -#[derive(Debug, Clone, PartialEq)] -pub struct Constraints { - /// The minimum relay-parent number accepted under these constraints. - pub min_relay_parent_number: BlockNumber, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: usize, - /// The maximum new validation code size allowed, in bytes. - pub max_code_size: usize, - /// The amount of UMP messages remaining. - pub ump_remaining: usize, - /// The amount of UMP bytes remaining. - pub ump_remaining_bytes: usize, - /// The maximum number of UMP messages allowed per candidate. - pub max_ump_num_per_candidate: usize, - /// Remaining DMP queue. Only includes sent-at block numbers. - pub dmp_remaining_messages: Vec<BlockNumber>, - /// The limitations of all registered inbound HRMP channels. - pub hrmp_inbound: InboundHrmpLimitations, - /// The limitations of all registered outbound HRMP channels. - pub hrmp_channels_out: HashMap<ParaId, OutboundHrmpChannelLimitations>, - /// The maximum number of HRMP messages allowed per candidate. - pub max_hrmp_num_per_candidate: usize, - /// The required parent head-data of the parachain. - pub required_parent: HeadData, - /// The expected validation-code-hash of this parachain. - pub validation_code_hash: ValidationCodeHash, - /// The code upgrade restriction signal as-of this parachain. - pub upgrade_restriction: Option<UpgradeRestriction>, - /// The future validation code hash, if any, and at what relay-parent - /// number the upgrade would be minimally applied. - pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, -} - -impl From<PrimitiveConstraints> for Constraints { - fn from(c: PrimitiveConstraints) -> Self { - Constraints { - min_relay_parent_number: c.min_relay_parent_number, - max_pov_size: c.max_pov_size as _, - max_code_size: c.max_code_size as _, - ump_remaining: c.ump_remaining as _, - ump_remaining_bytes: c.ump_remaining_bytes as _, - max_ump_num_per_candidate: c.max_ump_num_per_candidate as _, - dmp_remaining_messages: c.dmp_remaining_messages, - hrmp_inbound: InboundHrmpLimitations { - valid_watermarks: c.hrmp_inbound.valid_watermarks, - }, - hrmp_channels_out: c - .hrmp_channels_out - .into_iter() - .map(|(para_id, limits)| { - ( - para_id, - OutboundHrmpChannelLimitations { - bytes_remaining: limits.bytes_remaining as _, - messages_remaining: limits.messages_remaining as _, - }, - ) - }) - .collect(), - max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _, - required_parent: c.required_parent, - validation_code_hash: c.validation_code_hash, - upgrade_restriction: c.upgrade_restriction, - future_validation_code: c.future_validation_code, - } - } -} - -/// Kinds of errors that can occur when modifying constraints. -#[derive(Debug, Clone, PartialEq)] -pub enum ModificationError { - /// The HRMP watermark is not allowed. - DisallowedHrmpWatermark(BlockNumber), - /// No such HRMP outbound channel. - NoSuchHrmpChannel(ParaId), - /// Too many messages submitted to HRMP channel. - HrmpMessagesOverflow { - /// The ID of the recipient. - para_id: ParaId, - /// The amount of remaining messages in the capacity of the channel. - messages_remaining: usize, - /// The amount of messages submitted to the channel. - messages_submitted: usize, - }, - /// Too many bytes submitted to HRMP channel. - HrmpBytesOverflow { - /// The ID of the recipient. 
- para_id: ParaId, - /// The amount of remaining bytes in the capacity of the channel. - bytes_remaining: usize, - /// The amount of bytes submitted to the channel. - bytes_submitted: usize, - }, - /// Too many messages submitted to UMP. - UmpMessagesOverflow { - /// The amount of remaining messages in the capacity of UMP. - messages_remaining: usize, - /// The amount of messages submitted to UMP. - messages_submitted: usize, - }, - /// Too many bytes submitted to UMP. - UmpBytesOverflow { - /// The amount of remaining bytes in the capacity of UMP. - bytes_remaining: usize, - /// The amount of bytes submitted to UMP. - bytes_submitted: usize, - }, - /// Too many messages processed from DMP. - DmpMessagesUnderflow { - /// The amount of messages waiting to be processed from DMP. - messages_remaining: usize, - /// The amount of messages processed. - messages_processed: usize, - }, - /// No validation code upgrade to apply. - AppliedNonexistentCodeUpgrade, -} - -impl Constraints { - /// Check modifications against constraints. - pub fn check_modifications( - &self, - modifications: &ConstraintModifications, - ) -> Result<(), ModificationError> { - if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark { - // head updates are always valid. - if self.hrmp_inbound.valid_watermarks.iter().all(|w| w != &hrmp_watermark) { - return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)) - } - } - - for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { - if let Some(outbound) = self.hrmp_channels_out.get(&id) { - outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or( - ModificationError::HrmpBytesOverflow { - para_id: *id, - bytes_remaining: outbound.bytes_remaining, - bytes_submitted: outbound_hrmp_mod.bytes_submitted, - }, - )?; - - outbound - .messages_remaining - .checked_sub(outbound_hrmp_mod.messages_submitted) - .ok_or(ModificationError::HrmpMessagesOverflow { - para_id: *id, - messages_remaining: outbound.messages_remaining, - messages_submitted: outbound_hrmp_mod.messages_submitted, - })?; - } else { - return Err(ModificationError::NoSuchHrmpChannel(*id)) - } - } - - self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( - ModificationError::UmpMessagesOverflow { - messages_remaining: self.ump_remaining, - messages_submitted: modifications.ump_messages_sent, - }, - )?; - - self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or( - ModificationError::UmpBytesOverflow { - bytes_remaining: self.ump_remaining_bytes, - bytes_submitted: modifications.ump_bytes_sent, - }, - )?; - - self.dmp_remaining_messages - .len() - .checked_sub(modifications.dmp_messages_processed) - .ok_or(ModificationError::DmpMessagesUnderflow { - messages_remaining: self.dmp_remaining_messages.len(), - messages_processed: modifications.dmp_messages_processed, - })?; - - if self.future_validation_code.is_none() && modifications.code_upgrade_applied { - return Err(ModificationError::AppliedNonexistentCodeUpgrade) - } - - Ok(()) - } - - /// Apply modifications to these constraints. If this succeeds, it passes - /// all sanity-checks. 
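-	///
-	/// A hypothetical sketch (`base_constraints`, `fragment_a` and `fragment_b`
-	/// are illustrative bindings, not part of this module): modifications from
-	/// a chain of fragments can be stacked and applied in one step to predict
-	/// the constraints for the next block.
-	///
-	/// ```ignore
-	/// let mut cumulative = ConstraintModifications::identity();
-	/// cumulative.stack(fragment_a.constraint_modifications());
-	/// cumulative.stack(fragment_b.constraint_modifications());
-	/// let predicted = base_constraints.apply_modifications(&cumulative)?;
-	/// ```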
- pub fn apply_modifications( - &self, - modifications: &ConstraintModifications, - ) -> Result<Self, ModificationError> { - let mut new = self.clone(); - - if let Some(required_parent) = modifications.required_parent.as_ref() { - new.required_parent = required_parent.clone(); - } - - if let Some(ref hrmp_watermark) = modifications.hrmp_watermark { - match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) { - Ok(pos) => { - // Exact match, so this is OK in all cases. - let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); - }, - Err(pos) => match hrmp_watermark { - HrmpWatermarkUpdate::Head(_) => { - // Updates to Head are always OK. - let _ = new.hrmp_inbound.valid_watermarks.drain(..pos); - }, - HrmpWatermarkUpdate::Trunk(n) => { - // Trunk update landing on disallowed watermark is not OK. - return Err(ModificationError::DisallowedHrmpWatermark(*n)) - }, - }, - } - } - - for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { - if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) { - outbound.bytes_remaining = outbound - .bytes_remaining - .checked_sub(outbound_hrmp_mod.bytes_submitted) - .ok_or(ModificationError::HrmpBytesOverflow { - para_id: *id, - bytes_remaining: outbound.bytes_remaining, - bytes_submitted: outbound_hrmp_mod.bytes_submitted, - })?; - - outbound.messages_remaining = outbound - .messages_remaining - .checked_sub(outbound_hrmp_mod.messages_submitted) - .ok_or(ModificationError::HrmpMessagesOverflow { - para_id: *id, - messages_remaining: outbound.messages_remaining, - messages_submitted: outbound_hrmp_mod.messages_submitted, - })?; - } else { - return Err(ModificationError::NoSuchHrmpChannel(*id)) - } - } - - new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( - ModificationError::UmpMessagesOverflow { - messages_remaining: new.ump_remaining, - messages_submitted: modifications.ump_messages_sent, - }, - )?; - - new.ump_remaining_bytes = new - .ump_remaining_bytes - .checked_sub(modifications.ump_bytes_sent) - .ok_or(ModificationError::UmpBytesOverflow { - bytes_remaining: new.ump_remaining_bytes, - bytes_submitted: modifications.ump_bytes_sent, - })?; - - if modifications.dmp_messages_processed > new.dmp_remaining_messages.len() { - return Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: new.dmp_remaining_messages.len(), - messages_processed: modifications.dmp_messages_processed, - }) - } else { - new.dmp_remaining_messages = - new.dmp_remaining_messages[modifications.dmp_messages_processed..].to_vec(); - } - - if modifications.code_upgrade_applied { - new.validation_code_hash = new - .future_validation_code - .take() - .ok_or(ModificationError::AppliedNonexistentCodeUpgrade)? - .1; - } - - Ok(new) - } -} - -/// Information about a relay-chain block. -#[derive(Debug, Clone, PartialEq)] -pub struct RelayChainBlockInfo { - /// The hash of the relay-chain block. - pub hash: Hash, - /// The number of the relay-chain block. - pub number: BlockNumber, - /// The storage-root of the relay-chain block. - pub storage_root: Hash, -} - -/// An update to outbound HRMP channels. -#[derive(Debug, Clone, PartialEq, Default)] -pub struct OutboundHrmpChannelModification { - /// The number of bytes submitted to the channel. - pub bytes_submitted: usize, - /// The number of messages submitted to the channel. - pub messages_submitted: usize, -} - -/// An update to the HRMP Watermark. 
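-///
-/// For example (numbers are illustrative, mirroring the tests below): with
-/// `valid_watermarks: vec![6, 8]`, `Trunk(6)` and `Trunk(8)` are legal,
-/// `Trunk(7)` is not, and `Head(n)` is always legal.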
-#[derive(Debug, Clone, PartialEq)]
-pub enum HrmpWatermarkUpdate {
-	/// This is an update placing the watermark at the head of the chain,
-	/// which is always legal.
-	Head(BlockNumber),
-	/// This is an update placing the watermark behind the head of the
-	/// chain, which is only legal if it lands on a block where messages
-	/// were queued.
-	Trunk(BlockNumber),
-}
-
-impl HrmpWatermarkUpdate {
-	fn watermark(&self) -> BlockNumber {
-		match *self {
-			HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n,
-		}
-	}
-}
-
-/// Modifications to constraints as a result of prospective candidates.
-#[derive(Debug, Clone, PartialEq)]
-pub struct ConstraintModifications {
-	/// The required parent head to build upon.
-	pub required_parent: Option<HeadData>,
-	/// The new HRMP watermark.
-	pub hrmp_watermark: Option<HrmpWatermarkUpdate>,
-	/// Outbound HRMP channel modifications.
-	pub outbound_hrmp: HashMap<ParaId, OutboundHrmpChannelModification>,
-	/// The amount of UMP messages sent.
-	pub ump_messages_sent: usize,
-	/// The amount of UMP bytes sent.
-	pub ump_bytes_sent: usize,
-	/// The amount of DMP messages processed.
-	pub dmp_messages_processed: usize,
-	/// Whether a pending code upgrade has been applied.
-	pub code_upgrade_applied: bool,
-}
-
-impl ConstraintModifications {
-	/// The 'identity' modifications: these can be applied to
-	/// any constraints and yield the exact same result.
-	pub fn identity() -> Self {
-		ConstraintModifications {
-			required_parent: None,
-			hrmp_watermark: None,
-			outbound_hrmp: HashMap::new(),
-			ump_messages_sent: 0,
-			ump_bytes_sent: 0,
-			dmp_messages_processed: 0,
-			code_upgrade_applied: false,
-		}
-	}
-
-	/// Stack other modifications on top of these.
-	///
-	/// This does no sanity-checking, so if `other` is garbage relative
-	/// to `self`, then the new value will be garbage as well.
-	///
-	/// This is an addition which is not commutative.
-	pub fn stack(&mut self, other: &Self) {
-		if let Some(ref new_parent) = other.required_parent {
-			self.required_parent = Some(new_parent.clone());
-		}
-		if let Some(ref new_hrmp_watermark) = other.hrmp_watermark {
-			self.hrmp_watermark = Some(new_hrmp_watermark.clone());
-		}
-
-		for (id, mods) in &other.outbound_hrmp {
-			let record = self.outbound_hrmp.entry(*id).or_default();
-			record.messages_submitted += mods.messages_submitted;
-			record.bytes_submitted += mods.bytes_submitted;
-		}
-
-		self.ump_messages_sent += other.ump_messages_sent;
-		self.ump_bytes_sent += other.ump_bytes_sent;
-		self.dmp_messages_processed += other.dmp_messages_processed;
-		self.code_upgrade_applied |= other.code_upgrade_applied;
-	}
-}
-
-/// The prospective candidate.
-///
-/// This comprises the key information that represents a candidate
-/// without pinning it to a particular session. For example, everything
-/// to do with the collator's signature and commitments is represented
-/// here. But the erasure-root is not. This means that prospective candidates
-/// are not correlated to any session in particular.
-#[derive(Debug, Clone, PartialEq)]
-pub struct ProspectiveCandidate<'a> {
-	/// The commitments to the output of the execution.
-	pub commitments: Cow<'a, CandidateCommitments>,
-	/// The collator that created the candidate.
-	pub collator: CollatorId,
-	/// The signature of the collator on the payload.
-	pub collator_signature: CollatorSignature,
-	/// The persisted validation data used to create the candidate.
-	pub persisted_validation_data: PersistedValidationData,
-	/// The hash of the PoV.
-	pub pov_hash: Hash,
-	/// The validation code hash used by the candidate.
-	pub validation_code_hash: ValidationCodeHash,
-}
-
-impl<'a> ProspectiveCandidate<'a> {
-	fn into_owned(self) -> ProspectiveCandidate<'static> {
-		ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self }
-	}
-
-	/// Partially clone the prospective candidate, but borrow the
-	/// parts which are potentially heavy.
-	pub fn partial_clone(&self) -> ProspectiveCandidate {
-		ProspectiveCandidate {
-			commitments: Cow::Borrowed(self.commitments.borrow()),
-			collator: self.collator.clone(),
-			collator_signature: self.collator_signature.clone(),
-			persisted_validation_data: self.persisted_validation_data.clone(),
-			pov_hash: self.pov_hash,
-			validation_code_hash: self.validation_code_hash,
-		}
-	}
-}
-
-#[cfg(test)]
-impl ProspectiveCandidate<'static> {
-	fn commitments_mut(&mut self) -> &mut CandidateCommitments {
-		self.commitments.to_mut()
-	}
-}
-
-/// Kinds of errors with the validity of a fragment.
-#[derive(Debug, Clone, PartialEq)]
-pub enum FragmentValidityError {
-	/// The validation code of the candidate doesn't match the
-	/// operating constraints.
-	///
-	/// Expected, Got
-	ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash),
-	/// The persisted-validation-data doesn't match.
-	///
-	/// Expected, Got
-	PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData),
-	/// The outputs of the candidate are invalid under the operating
-	/// constraints.
-	OutputsInvalid(ModificationError),
-	/// New validation code size too big.
-	///
-	/// Max allowed, new.
-	CodeSizeTooLarge(usize, usize),
-	/// Relay parent too old.
-	///
-	/// Min allowed, current.
-	RelayParentTooOld(BlockNumber, BlockNumber),
-	/// Para is required to process at least one DMP message from the queue.
-	DmpAdvancementRule,
-	/// Too many upward messages submitted.
-	UmpMessagesPerCandidateOverflow {
-		/// The amount of messages a single candidate can submit.
-		messages_allowed: usize,
-		/// The amount of messages submitted to UMP.
-		messages_submitted: usize,
-	},
-	/// Too many messages submitted to all HRMP channels.
-	HrmpMessagesPerCandidateOverflow {
-		/// The amount of messages a single candidate can submit.
-		messages_allowed: usize,
-		/// The amount of messages sent to all HRMP channels.
-		messages_submitted: usize,
-	},
-	/// Code upgrade not allowed.
-	CodeUpgradeRestricted,
-	/// HRMP messages are not ascending or contain duplicates.
-	///
-	/// The `usize` is the index into the outbound HRMP messages of
-	/// the candidate.
-	HrmpMessagesDescendingOrDuplicate(usize),
-}
-
-/// A parachain fragment, representing another prospective parachain block.
-///
-/// This is a type which guarantees that the candidate is valid under the
-/// operating constraints.
-#[derive(Debug, Clone, PartialEq)]
-pub struct Fragment<'a> {
-	/// The new relay-parent.
-	relay_parent: RelayChainBlockInfo,
-	/// The constraints this fragment is operating under.
-	operating_constraints: Constraints,
-	/// The core information about the prospective candidate.
-	candidate: ProspectiveCandidate<'a>,
-	/// Modifications to the constraints based on the outputs of
-	/// the candidate.
-	modifications: ConstraintModifications,
-}
-
-impl<'a> Fragment<'a> {
-	/// Create a new fragment.
-	///
-	/// This fails if the fragment isn't in line with the operating
-	/// constraints. That is, either its inputs or its outputs fail
-	/// checks against the constraints.
- /// - /// This doesn't check that the collator signature is valid or - /// whether the PoV is small enough. - pub fn new( - relay_parent: RelayChainBlockInfo, - operating_constraints: Constraints, - candidate: ProspectiveCandidate<'a>, - ) -> Result<Self, FragmentValidityError> { - let modifications = { - let commitments = &candidate.commitments; - ConstraintModifications { - required_parent: Some(commitments.head_data.clone()), - hrmp_watermark: Some({ - if commitments.hrmp_watermark == relay_parent.number { - HrmpWatermarkUpdate::Head(commitments.hrmp_watermark) - } else { - HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark) - } - }), - outbound_hrmp: { - let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); - - let mut last_recipient = None::<ParaId>; - for (i, message) in commitments.horizontal_messages.iter().enumerate() { - if let Some(last) = last_recipient { - if last >= message.recipient { - return Err( - FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i), - ) - } - } - - last_recipient = Some(message.recipient); - let record = outbound_hrmp.entry(message.recipient).or_default(); - - record.bytes_submitted += message.data.len(); - record.messages_submitted += 1; - } - - outbound_hrmp - }, - ump_messages_sent: commitments.upward_messages.len(), - ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(), - dmp_messages_processed: commitments.processed_downward_messages as _, - code_upgrade_applied: operating_constraints - .future_validation_code - .map_or(false, |(at, _)| relay_parent.number >= at), - } - }; - - validate_against_constraints( - &operating_constraints, - &relay_parent, - &candidate, - &modifications, - )?; - - Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) - } - - /// Access the relay parent information. - pub fn relay_parent(&self) -> &RelayChainBlockInfo { - &self.relay_parent - } - - /// Access the operating constraints - pub fn operating_constraints(&self) -> &Constraints { - &self.operating_constraints - } - - /// Access the underlying prospective candidate. - pub fn candidate(&self) -> &ProspectiveCandidate<'a> { - &self.candidate - } - - /// Modifications to constraints based on the outputs of the candidate. - pub fn constraint_modifications(&self) -> &ConstraintModifications { - &self.modifications - } - - /// Convert the fragment into an owned variant. - pub fn into_owned(self) -> Fragment<'static> { - Fragment { candidate: self.candidate.into_owned(), ..self } - } - - /// Validate this fragment against some set of constraints - /// instead of the operating constraints. 
- pub fn validate_against_constraints( - &self, - constraints: &Constraints, - ) -> Result<(), FragmentValidityError> { - validate_against_constraints( - constraints, - &self.relay_parent, - &self.candidate, - &self.modifications, - ) - } -} - -fn validate_against_constraints( - constraints: &Constraints, - relay_parent: &RelayChainBlockInfo, - candidate: &ProspectiveCandidate, - modifications: &ConstraintModifications, -) -> Result<(), FragmentValidityError> { - let expected_pvd = PersistedValidationData { - parent_head: constraints.required_parent.clone(), - relay_parent_number: relay_parent.number, - relay_parent_storage_root: relay_parent.storage_root, - max_pov_size: constraints.max_pov_size as u32, - }; - - if expected_pvd != candidate.persisted_validation_data { - return Err(FragmentValidityError::PersistedValidationDataMismatch( - expected_pvd, - candidate.persisted_validation_data.clone(), - )) - } - - if constraints.validation_code_hash != candidate.validation_code_hash { - return Err(FragmentValidityError::ValidationCodeMismatch( - constraints.validation_code_hash, - candidate.validation_code_hash, - )) - } - - if relay_parent.number < constraints.min_relay_parent_number { - return Err(FragmentValidityError::RelayParentTooOld( - constraints.min_relay_parent_number, - relay_parent.number, - )) - } - - if candidate.commitments.new_validation_code.is_some() { - match constraints.upgrade_restriction { - None => {}, - Some(UpgradeRestriction::Present) => - return Err(FragmentValidityError::CodeUpgradeRestricted), - } - } - - let announced_code_size = candidate - .commitments - .new_validation_code - .as_ref() - .map_or(0, |code| code.0.len()); - - if announced_code_size > constraints.max_code_size { - return Err(FragmentValidityError::CodeSizeTooLarge( - constraints.max_code_size, - announced_code_size, - )) - } - - if modifications.dmp_messages_processed == 0 { - if constraints - .dmp_remaining_messages - .get(0) - .map_or(false, |&msg_sent_at| msg_sent_at <= relay_parent.number) - { - return Err(FragmentValidityError::DmpAdvancementRule) - } - } - - if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { - return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { - messages_allowed: constraints.max_hrmp_num_per_candidate, - messages_submitted: candidate.commitments.horizontal_messages.len(), - }) - } - - if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { - return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { - messages_allowed: constraints.max_ump_num_per_candidate, - messages_submitted: candidate.commitments.upward_messages.len(), - }) - } - - constraints - .check_modifications(&modifications) - .map_err(FragmentValidityError::OutputsInvalid) -} - -#[cfg(test)] -mod tests { - use super::*; - use polkadot_primitives::vstaging::{ - CollatorPair, HorizontalMessages, OutboundHrmpMessage, ValidationCode, - }; - use sp_application_crypto::Pair; - - #[test] - fn stack_modifications() { - let para_a = ParaId::from(1u32); - let para_b = ParaId::from(2u32); - let para_c = ParaId::from(3u32); - - let a = ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: { - let mut map = HashMap::new(); - map.insert( - para_a, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map.insert( - para_b, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map - }, - ump_messages_sent: 6, - 
ump_bytes_sent: 1000, - dmp_messages_processed: 5, - code_upgrade_applied: true, - }; - - let b = ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: { - let mut map = HashMap::new(); - map.insert( - para_b, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map.insert( - para_c, - OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, - ); - - map - }, - ump_messages_sent: 6, - ump_bytes_sent: 1000, - dmp_messages_processed: 5, - code_upgrade_applied: true, - }; - - let mut c = a.clone(); - c.stack(&b); - - assert_eq!( - c, - ConstraintModifications { - required_parent: None, - hrmp_watermark: None, - outbound_hrmp: { - let mut map = HashMap::new(); - map.insert( - para_a, - OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }, - ); - - map.insert( - para_b, - OutboundHrmpChannelModification { - bytes_submitted: 200, - messages_submitted: 10, - }, - ); - - map.insert( - para_c, - OutboundHrmpChannelModification { - bytes_submitted: 100, - messages_submitted: 5, - }, - ); - - map - }, - ump_messages_sent: 12, - ump_bytes_sent: 2000, - dmp_messages_processed: 10, - code_upgrade_applied: true, - }, - ); - - let mut d = ConstraintModifications::identity(); - d.stack(&a); - d.stack(&b); - - assert_eq!(c, d); - } - - fn make_constraints() -> Constraints { - let para_a = ParaId::from(1u32); - let para_b = ParaId::from(2u32); - let para_c = ParaId::from(3u32); - - Constraints { - min_relay_parent_number: 5, - max_pov_size: 1000, - max_code_size: 1000, - ump_remaining: 10, - ump_remaining_bytes: 1024, - max_ump_num_per_candidate: 5, - dmp_remaining_messages: Vec::new(), - hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] }, - hrmp_channels_out: { - let mut map = HashMap::new(); - - map.insert( - para_a, - OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 }, - ); - - map.insert( - para_b, - OutboundHrmpChannelLimitations { - messages_remaining: 10, - bytes_remaining: 1024, - }, - ); - - map.insert( - para_c, - OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 }, - ); - - map - }, - max_hrmp_num_per_candidate: 5, - required_parent: HeadData::from(vec![1, 2, 3]), - validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(), - upgrade_restriction: None, - future_validation_code: None, - } - } - - #[test] - fn constraints_disallowed_trunk_watermark() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7)); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::DisallowedHrmpWatermark(7)), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::DisallowedHrmpWatermark(7)), - ); - } - - #[test] - fn constraints_always_allow_head_watermark() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7)); - - assert!(constraints.check_modifications(&modifications).is_ok()); - - let new_constraints = constraints.apply_modifications(&modifications).unwrap(); - assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]); - } - - #[test] - fn constraints_no_such_hrmp_channel() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - let bad_para 
= ParaId::from(100u32); - modifications.outbound_hrmp.insert( - bad_para, - OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 }, - ); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::NoSuchHrmpChannel(bad_para)), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::NoSuchHrmpChannel(bad_para)), - ); - } - - #[test] - fn constraints_hrmp_messages_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - let para_a = ParaId::from(1u32); - modifications.outbound_hrmp.insert( - para_a, - OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 }, - ); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::HrmpMessagesOverflow { - para_id: para_a, - messages_remaining: 5, - messages_submitted: 6, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::HrmpMessagesOverflow { - para_id: para_a, - messages_remaining: 5, - messages_submitted: 6, - }), - ); - } - - #[test] - fn constraints_hrmp_bytes_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - let para_a = ParaId::from(1u32); - modifications.outbound_hrmp.insert( - para_a, - OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 }, - ); - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::HrmpBytesOverflow { - para_id: para_a, - bytes_remaining: 512, - bytes_submitted: 513, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::HrmpBytesOverflow { - para_id: para_a, - bytes_remaining: 512, - bytes_submitted: 513, - }), - ); - } - - #[test] - fn constraints_ump_messages_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.ump_messages_sent = 11; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::UmpMessagesOverflow { - messages_remaining: 10, - messages_submitted: 11, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::UmpMessagesOverflow { - messages_remaining: 10, - messages_submitted: 11, - }), - ); - } - - #[test] - fn constraints_ump_bytes_overflow() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.ump_bytes_sent = 1025; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::UmpBytesOverflow { - bytes_remaining: 1024, - bytes_submitted: 1025, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::UmpBytesOverflow { - bytes_remaining: 1024, - bytes_submitted: 1025, - }), - ); - } - - #[test] - fn constraints_dmp_messages() { - let mut constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - assert!(constraints.check_modifications(&modifications).is_ok()); - assert!(constraints.apply_modifications(&modifications).is_ok()); - - modifications.dmp_messages_processed = 6; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: 0, - messages_processed: 6, - }), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - 
Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: 0, - messages_processed: 6, - }), - ); - - constraints.dmp_remaining_messages = vec![1, 4, 8, 10]; - modifications.dmp_messages_processed = 2; - assert!(constraints.check_modifications(&modifications).is_ok()); - let constraints = constraints - .apply_modifications(&modifications) - .expect("modifications are valid"); - - assert_eq!(&constraints.dmp_remaining_messages, &[8, 10]); - } - - #[test] - fn constraints_nonexistent_code_upgrade() { - let constraints = make_constraints(); - let mut modifications = ConstraintModifications::identity(); - modifications.code_upgrade_applied = true; - - assert_eq!( - constraints.check_modifications(&modifications), - Err(ModificationError::AppliedNonexistentCodeUpgrade), - ); - - assert_eq!( - constraints.apply_modifications(&modifications), - Err(ModificationError::AppliedNonexistentCodeUpgrade), - ); - } - - fn make_candidate( - constraints: &Constraints, - relay_parent: &RelayChainBlockInfo, - ) -> ProspectiveCandidate<'static> { - let collator_pair = CollatorPair::generate().0; - let collator = collator_pair.public(); - - let sig = collator_pair.sign(b"blabla".as_slice()); - - ProspectiveCandidate { - commitments: Cow::Owned(CandidateCommitments { - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - head_data: HeadData::from(vec![1, 2, 3, 4, 5]), - processed_downward_messages: 0, - hrmp_watermark: relay_parent.number, - }), - collator, - collator_signature: sig, - persisted_validation_data: PersistedValidationData { - parent_head: constraints.required_parent.clone(), - relay_parent_number: relay_parent.number, - relay_parent_storage_root: relay_parent.storage_root, - max_pov_size: constraints.max_pov_size as u32, - }, - pov_hash: Hash::repeat_byte(1), - validation_code_hash: constraints.validation_code_hash, - } - } - - #[test] - fn fragment_validation_code_mismatch() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let expected_code = constraints.validation_code_hash; - let got_code = ValidationCode(vec![9, 9, 9]).hash(); - - candidate.validation_code_hash = got_code; - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), - ) - } - - #[test] - fn fragment_pvd_mismatch() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let relay_parent_b = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0b), - storage_root: Hash::repeat_byte(0xee), - }; - - let constraints = make_constraints(); - let candidate = make_candidate(&constraints, &relay_parent); - - let expected_pvd = PersistedValidationData { - parent_head: constraints.required_parent.clone(), - relay_parent_number: relay_parent_b.number, - relay_parent_storage_root: relay_parent_b.storage_root, - max_pov_size: constraints.max_pov_size as u32, - }; - - let got_pvd = candidate.persisted_validation_data.clone(); - - assert_eq!( - Fragment::new(relay_parent_b, constraints, candidate), - Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), - ); - } - - #[test] - fn fragment_code_size_too_large() { - let relay_parent = RelayChainBlockInfo { - number: 6, - 
hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let max_code_size = constraints.max_code_size; - candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), - ); - } - - #[test] - fn fragment_relay_parent_too_old() { - let relay_parent = RelayChainBlockInfo { - number: 3, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let candidate = make_candidate(&constraints, &relay_parent); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::RelayParentTooOld(5, 3,)), - ); - } - - #[test] - fn fragment_hrmp_messages_overflow() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let max_hrmp = constraints.max_hrmp_num_per_candidate; - - candidate - .commitments_mut() - .horizontal_messages - .try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { - recipient: ParaId::from(i as u32), - data: vec![1, 2, 3], - })) - .unwrap(); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { - messages_allowed: max_hrmp, - messages_submitted: max_hrmp + 1, - }), - ); - } - - #[test] - fn fragment_dmp_advancement_rule() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let mut constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - // Empty dmp queue is ok. - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); - // Unprocessed message that was sent later is ok. 
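
> Reviewer note: the removed tests above travel with the fragment emulator as it leaves staging. For readers skimming the diff, here is a hedged, free-standing sketch of two of the validity checks they exercise; the types and function are simplified stand-ins, not the emulator's real API, which lives in `Fragment::new`.

```rust
// Illustrative only: simplified stand-ins for two emulator checks.
fn check_fragment_validity(
    min_relay_parent_number: u32, // from `Constraints`
    relay_parent_number: u32,     // from `RelayChainBlockInfo`
    new_code_len: Option<usize>,  // from `CandidateCommitments::new_validation_code`
    max_code_size: usize,         // from `Constraints`
) -> Result<(), String> {
    // `fragment_relay_parent_too_old`: the relay parent must not precede the
    // minimum accepted by the constraints (5 vs. 3 in the test above).
    if relay_parent_number < min_relay_parent_number {
        return Err(format!(
            "RelayParentTooOld({min_relay_parent_number}, {relay_parent_number})"
        ));
    }
    // `fragment_code_size_too_large`: a proposed code upgrade must fit the limit.
    if let Some(len) = new_code_len {
        if len > max_code_size {
            return Err(format!("CodeSizeTooLarge({max_code_size}, {len})"));
        }
    }
    Ok(())
}
```
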
- constraints.dmp_remaining_messages = vec![relay_parent.number + 1]; - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); - - for block_number in 0..=relay_parent.number { - constraints.dmp_remaining_messages = vec![block_number]; - - assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), - Err(FragmentValidityError::DmpAdvancementRule), - ); - } - - candidate.commitments.to_mut().processed_downward_messages = 1; - assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); - } - - #[test] - fn fragment_ump_messages_overflow() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - let max_ump = constraints.max_ump_num_per_candidate; - - candidate - .commitments - .to_mut() - .upward_messages - .try_extend((0..max_ump + 1).map(|i| vec![i as u8])) - .unwrap(); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { - messages_allowed: max_ump, - messages_submitted: max_ump + 1, - }), - ); - } - - #[test] - fn fragment_code_upgrade_restricted() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let mut constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - constraints.upgrade_restriction = Some(UpgradeRestriction::Present); - candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::CodeUpgradeRestricted), - ); - } - - #[test] - fn fragment_hrmp_messages_descending_or_duplicate() { - let relay_parent = RelayChainBlockInfo { - number: 6, - hash: Hash::repeat_byte(0x0a), - storage_root: Hash::repeat_byte(0xff), - }; - - let constraints = make_constraints(); - let mut candidate = make_candidate(&constraints, &relay_parent); - - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ - OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, - OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, - ]); - - assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), - Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), - ); - - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ - OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, - OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, - ]); - - assert_eq!( - Fragment::new(relay_parent, constraints, candidate), - Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), - ); - } -} diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index daee4a8350e5af321f9a040c2c943879588037bf..57e4f9cde09af09374214a01b570be5e25cdfa4d 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -43,7 +43,7 @@ use futures::channel::{mpsc, oneshot}; use parity_scale_codec::Encode; use polkadot_primitives::{ - vstaging as vstaging_primitives, AuthorityDiscoveryId, CandidateEvent, CandidateHash, + AsyncBackingParams, AuthorityDiscoveryId, 
CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, ValidationCode, ValidationCodeHash, @@ -226,8 +226,7 @@ specialize_requests! { fn request_unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>; UnappliedSlashes; fn request_key_ownership_proof(validator_id: ValidatorId) -> Option<slashing::OpaqueKeyOwnershipProof>; KeyOwnershipProof; fn request_submit_report_dispute_lost(dp: slashing::DisputeProof, okop: slashing::OpaqueKeyOwnershipProof) -> Option<()>; SubmitReportDisputeLost; - - fn request_staging_async_backing_params() -> vstaging_primitives::AsyncBackingParams; StagingAsyncBackingParams; + fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams; } /// Requests executor parameters from the runtime effective at given relay-parent. First obtains diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index c078b17d217517c3cdb7fcccbfd590adf128bb3f..8d7cef88a70e082779cd6e0892502f27e486c7c6 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,16 +30,16 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - vstaging, CandidateEvent, CandidateHash, CoreState, EncodeAs, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, + slashing, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, + ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, + ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; use crate::{ - request_availability_cores, request_candidate_events, request_from_runtime, - request_key_ownership_proof, request_on_chain_votes, request_session_executor_params, - request_session_index_for_child, request_session_info, request_staging_async_backing_params, + request_async_backing_params, request_availability_cores, request_candidate_events, + request_from_runtime, request_key_ownership_proof, request_on_chain_votes, + request_session_executor_params, request_session_index_for_child, request_session_info, request_submit_report_dispute_lost, request_unapplied_slashes, request_validation_code_by_hash, request_validator_groups, }; @@ -377,7 +377,7 @@ where pub async fn get_unapplied_slashes<Sender>( sender: &mut Sender, relay_parent: Hash, -) -> Result<Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>> +) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>> where Sender: SubsystemSender<RuntimeApiMessage>, { @@ -392,7 +392,7 @@ pub async fn key_ownership_proof<Sender>( sender: &mut Sender, relay_parent: Hash, validator_id: ValidatorId, -) -> Result<Option<vstaging::slashing::OpaqueKeyOwnershipProof>> +) -> Result<Option<slashing::OpaqueKeyOwnershipProof>> where Sender: SubsystemSender<RuntimeApiMessage>, { @@ -403,8 +403,8 @@ where pub async fn submit_report_dispute_lost<Sender>( sender: &mut Sender, relay_parent: Hash, - dispute_proof: 
vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Result<Option<()>> where Sender: SubsystemSender<RuntimeApiMessage>, @@ -429,7 +429,7 @@ where pub enum ProspectiveParachainsMode { /// Runtime API without support of `async_backing_params`: no prospective parachains. Disabled, - /// vstaging runtime API: prospective parachains. + /// v6 runtime API: prospective parachains. Enabled { /// The maximum number of para blocks between the para head in a relay parent /// and a new candidate. Restricts nodes from building arbitrary long chains @@ -457,8 +457,7 @@ pub async fn prospective_parachains_mode<Sender>( where Sender: SubsystemSender<RuntimeApiMessage>, { - let result = - recv_runtime(request_staging_async_backing_params(relay_parent, sender).await).await; + let result = recv_runtime(request_async_backing_params(relay_parent, sender).await).await; if let Err(error::Error::RuntimeRequest(RuntimeApiError::NotSupported { runtime_api_name })) = &result @@ -472,7 +471,7 @@ where Ok(ProspectiveParachainsMode::Disabled) } else { - let vstaging::AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = result?; + let AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = result?; Ok(ProspectiveParachainsMode::Enabled { max_candidate_depth: max_candidate_depth as _, allowed_ancestry_len: allowed_ancestry_len as _, diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index fcbba9bbe21220362d8428f3d4e988b298003112..73b1fab529ef4996626bc485269eddd076f991eb 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -39,6 +39,3 @@ sc-service = { path = "../../../../../substrate/client/service" } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } tokio = { version = "1.24.2", features = ["macros"] } - -[features] -network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 9121b3790858339352d04c7a4f83eb2800929940..5adb6d25313475cb7ca389d80ba510edecdc58c4 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -19,8 +19,8 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -// `v5` is currently the latest stable version of the runtime API. -pub mod v5; +// `v6` is currently the latest stable version of the runtime API. +pub mod v6; // The 'staging' version is special - it contains primitives which are // still in development. Once they are considered stable, they will be @@ -33,20 +33,21 @@ pub mod runtime_api; // Current primitives not requiring versioning are exported here. // Primitives requiring versioning must not be exported and must be referred by an exact version. 
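
> Reviewer note: the `prospective_parachains_mode` helper above degrades gracefully: a `NotSupported` error from the renamed `request_async_backing_params` maps to `Disabled` rather than failing. A minimal, self-contained sketch of branching on the resulting mode; the enum shape mirrors this diff, while the `describe` function and its wording are invented for illustration.

```rust
// Local mirror of the enum from the diff above; illustrative only.
enum ProspectiveParachainsMode {
    /// Runtime lacks `async_backing_params`: no prospective parachains.
    Disabled,
    /// Async backing supported by the runtime.
    Enabled { max_candidate_depth: usize, allowed_ancestry_len: usize },
}

// Hypothetical caller: pick a collation strategy based on the mode.
fn describe(mode: &ProspectiveParachainsMode) -> String {
    match mode {
        ProspectiveParachainsMode::Disabled =>
            "sync backing: build on the latest relay parent only".to_string(),
        ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len } =>
            format!(
                "async backing: up to {max_candidate_depth} unincluded candidates, \
                 relay parents among the {allowed_ancestry_len} most recent ancestors"
            ),
    }
}
```
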
-pub use v5::{ - byzantine_threshold, check_candidate_backing, collator_signature_payload, +pub use v6::{ + async_backing, byzantine_threshold, check_candidate_backing, collator_signature_payload, effective_minimum_backing_votes, metric_definitions, slashing, supermajority_threshold, well_known_keys, AbridgedHostConfiguration, AbridgedHrmpChannel, AccountId, AccountIndex, - AccountPublic, ApprovalVote, AssignmentId, AuthorityDiscoveryId, AvailabilityBitfield, - BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, CandidateCommitments, - CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, - CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, CollatorSignature, - CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, CoreState, DisputeState, - DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, ExecutorParam, - ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, - Hash, HashT, HeadData, Header, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, - IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, - OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, + AccountPublic, ApprovalVote, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, + AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, + CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, + CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, + CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, + CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, + ExecutorParam, ExecutorParams, ExecutorParamsHash, ExplicitDisputeStatement, GroupIndex, + GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id, + InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, + InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, OccupiedCore, + OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, PersistedValidationData, PvfCheckStatement, PvfExecTimeoutKind, PvfPrepTimeoutKind, RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, @@ -61,4 +62,4 @@ pub use v5::{ }; #[cfg(feature = "std")] -pub use v5::{AssignmentPair, CollatorPair, ValidatorPair}; +pub use v6::{AssignmentPair, CollatorPair, ValidatorPair}; diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index e5f1aa4276ef5ab993daf02cf778ecf652f4c712..6cb66d40204d124da3f51c966dbe7c65348ecb31 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,10 +114,11 @@ //! separated from the stable primitives. 
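
> Reviewer note: with the flat re-export above, downstream crates now pull the formerly-staging items from the crate root and `vstaging` paths disappear. A hypothetical downstream snippet, assuming a dependency on `polkadot-primitives` as re-exported in this diff:

```rust
// Hypothetical downstream usage; `AsyncBackingParams` now comes from the
// crate root (backed by `v6`) rather than `vstaging`. The same holds for the
// `slashing` module used elsewhere in this diff.
use polkadot_primitives::AsyncBackingParams;

fn async_backing_disabled_params() -> AsyncBackingParams {
    // Per the doc comments on the struct later in this diff, 0 is the only
    // valid value for both fields while async backing is disabled.
    AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: 0 }
}
```

The runtime-API surface follows suit in the hunk that resumes below: `staging_para_backing_state` and `staging_async_backing_params` lose their prefixes and move from the placeholder `#[api_version(99)]` to `#[api_version(7)]`.
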
use crate::{ - vstaging, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing, slashing, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, + CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, + GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, + ValidatorSignature, }; use parity_scale_codec::{Decode, Encode}; use polkadot_core_primitives as pcp; @@ -224,38 +225,37 @@ sp_api::decl_runtime_apis! { /// Returns a list of validators that lost a past session dispute and need to be slashed. /// NOTE: This function is only available since parachain host version 5. - fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, vstaging::slashing::PendingSlashes)>; + fn unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>; /// Returns a merkle proof of a validator session key. /// NOTE: This function is only available since parachain host version 5. fn key_ownership_proof( validator_id: ValidatorId, - ) -> Option<vstaging::slashing::OpaqueKeyOwnershipProof>; + ) -> Option<slashing::OpaqueKeyOwnershipProof>; /// Submit an unsigned extrinsic to slash validators who lost a dispute about /// a candidate of a past session. /// NOTE: This function is only available since parachain host version 5. fn submit_report_dispute_lost( - dispute_proof: vstaging::slashing::DisputeProof, - key_ownership_proof: vstaging::slashing::OpaqueKeyOwnershipProof, + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, ) -> Option<()>; - /***** Staging *****/ + /***** Added in v6 *****/ /// Get the minimum number of backing votes for a parachain candidate. /// This is a staging method! Do not use on production runtimes! #[api_version(6)] fn minimum_backing_votes() -> u32; - /***** Asynchronous backing *****/ + /***** Added in v7: Asynchronous backing *****/ /// Returns the state of parachain backing for a given para. - /// This is a staging method! Do not use on production runtimes! - #[api_version(99)] - fn staging_para_backing_state(_: ppp::Id) -> Option<vstaging::BackingState<H, N>>; + #[api_version(7)] + fn para_backing_state(_: ppp::Id) -> Option<async_backing::BackingState<H, N>>; /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. - #[api_version(99)] - fn staging_async_backing_params() -> vstaging::AsyncBackingParams; + #[api_version(7)] + fn async_backing_params() -> AsyncBackingParams; } } diff --git a/polkadot/primitives/src/v6/async_backing.rs b/polkadot/primitives/src/v6/async_backing.rs new file mode 100644 index 0000000000000000000000000000000000000000..1abe87b6dec433122a6918f2f834a4a8e57817e8 --- /dev/null +++ b/polkadot/primitives/src/v6/async_backing.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Asynchronous backing primitives. + +use super::*; + +use parity_scale_codec::{Decode, Encode}; +use primitives::RuntimeDebug; +use scale_info::TypeInfo; + +/// Candidate's acceptance limitations for asynchronous backing per relay parent. +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + TypeInfo, + serde::Serialize, + serde::Deserialize, +)] + +pub struct AsyncBackingParams { + /// The maximum number of para blocks between the para head in a relay parent + /// and a new candidate. Restricts nodes from building arbitrary long chains + /// and spamming other validators. + /// + /// When async backing is disabled, the only valid value is 0. + pub max_candidate_depth: u32, + /// How many ancestors of a relay parent are allowed to build candidates on top + /// of. + /// + /// When async backing is disabled, the only valid value is 0. + pub allowed_ancestry_len: u32, +} + +/// Constraints on inbound HRMP channels. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct InboundHrmpLimitations<N = BlockNumber> { + /// An exhaustive set of all valid watermarks, sorted ascending. + /// + /// It's only expected to contain block numbers at which messages were + /// previously sent to a para, excluding most recent head. + pub valid_watermarks: Vec<N>, +} + +/// Constraints on outbound HRMP channels. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct OutboundHrmpChannelLimitations { + /// The maximum bytes that can be written to the channel. + pub bytes_remaining: u32, + /// The maximum messages that can be written to the channel. + pub messages_remaining: u32, +} + +/// Constraints on the actions that can be taken by a new parachain +/// block. These limitations are implicitly associated with some particular +/// parachain, which should be apparent from usage. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct Constraints<N = BlockNumber> { + /// The minimum relay-parent number accepted under these constraints. + pub min_relay_parent_number: N, + /// The maximum Proof-of-Validity size allowed, in bytes. + pub max_pov_size: u32, + /// The maximum new validation code size allowed, in bytes. + pub max_code_size: u32, + /// The amount of UMP messages remaining. + pub ump_remaining: u32, + /// The amount of UMP bytes remaining. + pub ump_remaining_bytes: u32, + /// The maximum number of UMP messages allowed per candidate. + pub max_ump_num_per_candidate: u32, + /// Remaining DMP queue. Only includes sent-at block numbers. + pub dmp_remaining_messages: Vec<N>, + /// The limitations of all registered inbound HRMP channels. + pub hrmp_inbound: InboundHrmpLimitations<N>, + /// The limitations of all registered outbound HRMP channels. + pub hrmp_channels_out: Vec<(Id, OutboundHrmpChannelLimitations)>, + /// The maximum number of HRMP messages allowed per candidate. + pub max_hrmp_num_per_candidate: u32, + /// The required parent head-data of the parachain. + pub required_parent: HeadData, + /// The expected validation-code-hash of this parachain. 
+ pub validation_code_hash: ValidationCodeHash, + /// The code upgrade restriction signal as-of this parachain. + pub upgrade_restriction: Option<UpgradeRestriction>, + /// The future validation code hash, if any, and at what relay-parent + /// number the upgrade would be minimally applied. + pub future_validation_code: Option<(N, ValidationCodeHash)>, +} + +/// A candidate pending availability. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct CandidatePendingAvailability<H = Hash, N = BlockNumber> { + /// The hash of the candidate. + pub candidate_hash: CandidateHash, + /// The candidate's descriptor. + pub descriptor: CandidateDescriptor<H>, + /// The commitments of the candidate. + pub commitments: CandidateCommitments, + /// The candidate's relay parent's number. + pub relay_parent_number: N, + /// The maximum Proof-of-Validity size allowed, in bytes. + pub max_pov_size: u32, +} + +/// The per-parachain state of the backing system, including +/// state-machine constraints and candidates pending availability. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct BackingState<H = Hash, N = BlockNumber> { + /// The state-machine constraints of the parachain. + pub constraints: Constraints<N>, + /// The candidates pending availability. These should be ordered, i.e. they should form + /// a sub-chain, where the first candidate builds on top of the required parent of the + /// constraints and each subsequent builds on top of the previous head-data. + pub pending_availability: Vec<CandidatePendingAvailability<H, N>>, +} diff --git a/polkadot/primitives/src/v5/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs similarity index 100% rename from polkadot/primitives/src/v5/executor_params.rs rename to polkadot/primitives/src/v6/executor_params.rs diff --git a/polkadot/primitives/src/v5/metrics.rs b/polkadot/primitives/src/v6/metrics.rs similarity index 100% rename from polkadot/primitives/src/v5/metrics.rs rename to polkadot/primitives/src/v6/metrics.rs diff --git a/polkadot/primitives/src/v5/mod.rs b/polkadot/primitives/src/v6/mod.rs similarity index 99% rename from polkadot/primitives/src/v5/mod.rs rename to polkadot/primitives/src/v6/mod.rs index 81743225403d6159fd86a48e5d8e7fa38d84d06a..cf90083551788c6877bd44bd509bf137f5b9bf38 100644 --- a/polkadot/primitives/src/v5/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. -//! `V2` Primitives. +//! `V6` Primitives. use bitvec::vec::BitVec; use parity_scale_codec::{Decode, Encode}; @@ -57,8 +57,13 @@ pub use sp_staking::SessionIndex; mod signed; pub use signed::{EncodeAs, Signed, UncheckedSigned}; +pub mod async_backing; +pub mod executor_params; pub mod slashing; +pub use async_backing::AsyncBackingParams; +pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash}; + mod metrics; pub use metrics::{ metric_definitions, RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, @@ -1116,7 +1121,7 @@ pub struct AbridgedHostConfiguration { /// The delay, in blocks, before a validation upgrade is applied. pub validation_upgrade_delay: BlockNumber, /// Asynchronous backing parameters. 
- pub async_backing_params: super::vstaging::AsyncBackingParams, + pub async_backing_params: AsyncBackingParams, } /// Abridged version of `HrmpChannel` (from the `Hrmp` parachains host runtime module) meant to be @@ -1803,9 +1808,6 @@ pub enum PvfExecTimeoutKind { Approval, } -pub mod executor_params; -pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash}; - #[cfg(test)] mod tests { use super::*; diff --git a/polkadot/primitives/src/v5/signed.rs b/polkadot/primitives/src/v6/signed.rs similarity index 100% rename from polkadot/primitives/src/v5/signed.rs rename to polkadot/primitives/src/v6/signed.rs diff --git a/polkadot/primitives/src/v5/slashing.rs b/polkadot/primitives/src/v6/slashing.rs similarity index 100% rename from polkadot/primitives/src/v5/slashing.rs rename to polkadot/primitives/src/v6/slashing.rs diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index ea341ee5b4fc973b587ecd0c23a415b5807c6350..1429b0c326aceef4b9088bd4ddef6828f8dcfbd8 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,121 +17,3 @@ //! Staging Primitives. // Put any primitives used by staging APIs functions here -pub use crate::v5::*; -use sp_std::prelude::*; - -use parity_scale_codec::{Decode, Encode}; -use primitives::RuntimeDebug; -use scale_info::TypeInfo; - -/// Useful type alias for Para IDs. -pub type ParaId = Id; - -/// Candidate's acceptance limitations for asynchronous backing per relay parent. -#[derive( - RuntimeDebug, - Copy, - Clone, - PartialEq, - Encode, - Decode, - TypeInfo, - serde::Serialize, - serde::Deserialize, -)] - -pub struct AsyncBackingParams { - /// The maximum number of para blocks between the para head in a relay parent - /// and a new candidate. Restricts nodes from building arbitrary long chains - /// and spamming other validators. - /// - /// When async backing is disabled, the only valid value is 0. - pub max_candidate_depth: u32, - /// How many ancestors of a relay parent are allowed to build candidates on top - /// of. - /// - /// When async backing is disabled, the only valid value is 0. - pub allowed_ancestry_len: u32, -} - -/// Constraints on inbound HRMP channels. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct InboundHrmpLimitations<N = BlockNumber> { - /// An exhaustive set of all valid watermarks, sorted ascending. - /// - /// It's only expected to contain block numbers at which messages were - /// previously sent to a para, excluding most recent head. - pub valid_watermarks: Vec<N>, -} - -/// Constraints on outbound HRMP channels. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct OutboundHrmpChannelLimitations { - /// The maximum bytes that can be written to the channel. - pub bytes_remaining: u32, - /// The maximum messages that can be written to the channel. - pub messages_remaining: u32, -} - -/// Constraints on the actions that can be taken by a new parachain -/// block. These limitations are implicitly associated with some particular -/// parachain, which should be apparent from usage. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct Constraints<N = BlockNumber> { - /// The minimum relay-parent number accepted under these constraints. - pub min_relay_parent_number: N, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: u32, - /// The maximum new validation code size allowed, in bytes. 
- pub max_code_size: u32, - /// The amount of UMP messages remaining. - pub ump_remaining: u32, - /// The amount of UMP bytes remaining. - pub ump_remaining_bytes: u32, - /// The maximum number of UMP messages allowed per candidate. - pub max_ump_num_per_candidate: u32, - /// Remaining DMP queue. Only includes sent-at block numbers. - pub dmp_remaining_messages: Vec<N>, - /// The limitations of all registered inbound HRMP channels. - pub hrmp_inbound: InboundHrmpLimitations<N>, - /// The limitations of all registered outbound HRMP channels. - pub hrmp_channels_out: Vec<(ParaId, OutboundHrmpChannelLimitations)>, - /// The maximum number of HRMP messages allowed per candidate. - pub max_hrmp_num_per_candidate: u32, - /// The required parent head-data of the parachain. - pub required_parent: HeadData, - /// The expected validation-code-hash of this parachain. - pub validation_code_hash: ValidationCodeHash, - /// The code upgrade restriction signal as-of this parachain. - pub upgrade_restriction: Option<UpgradeRestriction>, - /// The future validation code hash, if any, and at what relay-parent - /// number the upgrade would be minimally applied. - pub future_validation_code: Option<(N, ValidationCodeHash)>, -} - -/// A candidate pending availability. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct CandidatePendingAvailability<H = Hash, N = BlockNumber> { - /// The hash of the candidate. - pub candidate_hash: CandidateHash, - /// The candidate's descriptor. - pub descriptor: CandidateDescriptor<H>, - /// The commitments of the candidate. - pub commitments: CandidateCommitments, - /// The candidate's relay parent's number. - pub relay_parent_number: N, - /// The maximum Proof-of-Validity size allowed, in bytes. - pub max_pov_size: u32, -} - -/// The per-parachain state of the backing system, including -/// state-machine constraints and candidates pending availability. -#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] -pub struct BackingState<H = Hash, N = BlockNumber> { - /// The state-machine constraints of the parachain. - pub constraints: Constraints<N>, - /// The candidates pending availability. These should be ordered, i.e. they should form - /// a sub-chain, where the first candidate builds on top of the required parent of the - /// constraints and each subsequent builds on top of the previous head-data. - pub pending_availability: Vec<CandidatePendingAvailability<H, N>>, -} diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md index a48444a46e402b6c30e2280f3923db50943ef2ce..286aeddb986d3db3c7077d8d6227ebcf979073cb 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md @@ -122,7 +122,7 @@ prospective validation data. This is unlikely to change. ### Outgoing -- `RuntimeApiRequest::StagingParaBackingState` +- `RuntimeApiRequest::ParaBackingState` - Gets the backing state of the given para (the constraints of the para and candidates pending availability). 
- `RuntimeApiRequest::AvailabilityCores` diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index bcf01b61f2173f024dc50cccacc877534f22aaf2..6a14a3a013d45f2e823be5ef8cf35617e2614dbe 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -121,10 +121,10 @@ So what are we actually worried about? Things that come to mind: ### Restricting file-system access -A basic security mechanism is to make sure that any thread directly interfacing -with untrusted code does not have access to the file-system. This provides some -protection against attackers accessing sensitive data or modifying data on the -host machine. +A basic security mechanism is to make sure that any process directly interfacing +with untrusted code does not have unnecessary access to the file-system. This +provides some protection against attackers accessing sensitive data or modifying +data on the host machine. ### Clearing env vars diff --git a/polkadot/runtime/kusama/Cargo.toml b/polkadot/runtime/kusama/Cargo.toml index 565b34637e29875169f2de6701e4862f2e7ea984..ebbcfc71e953f3fef63c699e51d97a01f7c19ebf 100644 --- a/polkadot/runtime/kusama/Cargo.toml +++ b/polkadot/runtime/kusama/Cargo.toml @@ -28,6 +28,7 @@ offchain-primitives = { package = "sp-offchain", path = "../../../substrate/prim sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } @@ -211,6 +212,7 @@ std = [ "sp-application-crypto/std", "sp-arithmetic/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-npos-elections/std", diff --git a/polkadot/runtime/kusama/src/lib.rs b/polkadot/runtime/kusama/src/lib.rs index 8d8bd4baacf8e612263c7453a38b0e53ce14974a..1709c1bf8b1c47188d23fea67399aee76534911e 100644 --- a/polkadot/runtime/kusama/src/lib.rs +++ b/polkadot/runtime/kusama/src/lib.rs @@ -46,7 +46,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::v5 as parachains_runtime_api_impl, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -60,8 +60,11 @@ use frame_election_provider_support::{ bounds::ElectionBoundsBuilder, generate_solution_type, onchain, NposSolution, SequentialPhragmen, }; + use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{ fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, 
EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, @@ -2478,6 +2481,16 @@ sp_api::impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } #[cfg(test)] diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index fe9a4e52bd076e2698c9e153a770c0261f6349fe..d07964b691654b9de57b3f108b6439e2c720514f 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -28,7 +28,7 @@ use crate::{ }; use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use pallet_balances::Error as BalancesError; -use primitives::{v5::ValidationCode, BlockNumber, SessionIndex}; +use primitives::{BlockNumber, SessionIndex, ValidationCode}; use sp_std::collections::btree_map::BTreeMap; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 33039cd08ca4b90e58b2a1e415242402d81dad92..f53f986a553fe408b4e9087f9605508baf8e4940 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,7 +26,7 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v6.rs b/polkadot/runtime/parachains/src/configuration/migration/v6.rs index beed54deaffa8ea6c9331948027ecf57766ce67d..19031a90bab45c5c4cf58e50f07111db547365a6 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v6.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v6.rs @@ -21,7 +21,7 @@ use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; use sp_std::vec::Vec; -use primitives::{vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; +use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; #[cfg(feature = "try-runtime")] use sp_std::prelude::*; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v7.rs b/polkadot/runtime/parachains/src/configuration/migration/v7.rs index 11365138120782c7b5909d320f2778401f0e2916..1754b78e0a1d3bd1d764f44352601fdf187b85b5 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v7.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v7.rs @@ -23,7 +23,7 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; +use primitives::{AsyncBackingParams, Balance, ExecutorParams, SessionIndex}; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v8.rs b/polkadot/runtime/parachains/src/configuration/migration/v8.rs index 
5c5b34821835ed2a3295b272ee0987f57584a480..d1bc9005112529d55749a82de78fe17ff28391f7 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v8.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v8.rs @@ -24,8 +24,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::Perbill; use sp_std::vec::Vec; diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index b27a7ab1ad734162e11f808825db46e59979b23b..9b2b7a48dc8b3ace0b4feea937b7efbe98e4950e 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -51,7 +51,7 @@ use frame_support::{ use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - vstaging::slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind}, + slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes, SlashingOffenceKind}, CandidateHash, SessionIndex, ValidatorId, ValidatorIndex, }; use scale_info::TypeInfo; diff --git a/polkadot/runtime/parachains/src/hrmp.rs b/polkadot/runtime/parachains/src/hrmp.rs index 3f0a5e0830cb0ecfacfeeff9b6e3317d7376dd1f..b3bbcb433c0b2da20a77fe0243292e9c80a37cd2 100644 --- a/polkadot/runtime/parachains/src/hrmp.rs +++ b/polkadot/runtime/parachains/src/hrmp.rs @@ -278,24 +278,34 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event<T: Config> { /// Open HRMP channel requested. - /// `[sender, recipient, proposed_max_capacity, proposed_max_message_size]` - OpenChannelRequested(ParaId, ParaId, u32, u32), + OpenChannelRequested { + sender: ParaId, + recipient: ParaId, + proposed_max_capacity: u32, + proposed_max_message_size: u32, + }, /// An HRMP channel request sent by the receiver was canceled by either party. - /// `[by_parachain, channel_id]` - OpenChannelCanceled(ParaId, HrmpChannelId), - /// Open HRMP channel accepted. `[sender, recipient]` - OpenChannelAccepted(ParaId, ParaId), - /// HRMP channel closed. `[by_parachain, channel_id]` - ChannelClosed(ParaId, HrmpChannelId), + OpenChannelCanceled { by_parachain: ParaId, channel_id: HrmpChannelId }, + /// Open HRMP channel accepted. + OpenChannelAccepted { sender: ParaId, recipient: ParaId }, + /// HRMP channel closed. + ChannelClosed { by_parachain: ParaId, channel_id: HrmpChannelId }, /// An HRMP channel was opened via Root origin. - /// `[sender, recipient, proposed_max_capacity, proposed_max_message_size]` - HrmpChannelForceOpened(ParaId, ParaId, u32, u32), + HrmpChannelForceOpened { + sender: ParaId, + recipient: ParaId, + proposed_max_capacity: u32, + proposed_max_message_size: u32, + }, /// An HRMP channel was opened between two system chains. - /// `[sender, recipient, proposed_max_capacity, proposed_max_message_size]` - HrmpSystemChannelOpened(ParaId, ParaId, u32, u32), + HrmpSystemChannelOpened { + sender: ParaId, + recipient: ParaId, + proposed_max_capacity: u32, + proposed_max_message_size: u32, + }, /// An HRMP channel's deposits were updated. 
- /// `[sender, recipient]` - OpenChannelDepositsUpdated(ParaId, ParaId), + OpenChannelDepositsUpdated { sender: ParaId, recipient: ParaId }, } #[pallet::error] @@ -499,12 +509,12 @@ pub mod pallet { proposed_max_capacity, proposed_max_message_size, )?; - Self::deposit_event(Event::OpenChannelRequested( - origin, + Self::deposit_event(Event::OpenChannelRequested { + sender: origin, recipient, proposed_max_capacity, proposed_max_message_size, - )); + }); Ok(()) } @@ -516,7 +526,7 @@ pub mod pallet { pub fn hrmp_accept_open_channel(origin: OriginFor<T>, sender: ParaId) -> DispatchResult { let origin = ensure_parachain(<T as Config>::RuntimeOrigin::from(origin))?; Self::accept_open_channel(origin, sender)?; - Self::deposit_event(Event::OpenChannelAccepted(sender, origin)); + Self::deposit_event(Event::OpenChannelAccepted { sender, recipient: origin }); Ok(()) } @@ -532,7 +542,7 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_parachain(<T as Config>::RuntimeOrigin::from(origin))?; Self::close_channel(origin, channel_id.clone())?; - Self::deposit_event(Event::ChannelClosed(origin, channel_id)); + Self::deposit_event(Event::ChannelClosed { by_parachain: origin, channel_id }); Ok(()) } @@ -611,7 +621,7 @@ pub mod pallet { Error::<T>::WrongWitness ); Self::cancel_open_request(origin, channel_id.clone())?; - Self::deposit_event(Event::OpenChannelCanceled(origin, channel_id)); + Self::deposit_event(Event::OpenChannelCanceled { by_parachain: origin, channel_id }); Ok(()) } @@ -651,12 +661,12 @@ pub mod pallet { // that it will not require deposits from either member. Self::init_open_channel(sender, recipient, max_capacity, max_message_size)?; Self::accept_open_channel(recipient, sender)?; - Self::deposit_event(Event::HrmpChannelForceOpened( + Self::deposit_event(Event::HrmpChannelForceOpened { sender, recipient, - max_capacity, - max_message_size, - )); + proposed_max_capacity: max_capacity, + proposed_max_message_size: max_message_size, + }); Ok(Some(<T as Config>::WeightInfo::force_open_hrmp_channel(cancel_request)).into()) } @@ -695,12 +705,12 @@ pub mod pallet { Self::init_open_channel(sender, recipient, max_capacity, max_message_size)?; Self::accept_open_channel(recipient, sender)?; - Self::deposit_event(Event::HrmpSystemChannelOpened( + Self::deposit_event(Event::HrmpSystemChannelOpened { sender, recipient, - max_capacity, - max_message_size, - )); + proposed_max_capacity: max_capacity, + proposed_max_message_size: max_message_size, + }); Ok(Pays::No.into()) } @@ -796,7 +806,7 @@ pub mod pallet { Ok(()) })?; - Self::deposit_event(Event::OpenChannelDepositsUpdated(sender, recipient)); + Self::deposit_event(Event::OpenChannelDepositsUpdated { sender, recipient }); Ok(()) } diff --git a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs index 9ffa264a7dc86f4a1c5ef637a4b4ab85c7027841..2cb49c88d437cb3e0a4c01a40d343a179316ec9d 100644 --- a/polkadot/runtime/parachains/src/hrmp/benchmarking.rs +++ b/polkadot/runtime/parachains/src/hrmp/benchmarking.rs @@ -14,12 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. 
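
> Reviewer note: the call-site updates above complete the move from positional tuple events to struct variants. A self-contained sketch of why named fields pay off at match sites such as the tests later in this diff; `ParaId` is stubbed as `u32` and the enum is a cut-down mirror, not the pallet's real types.

```rust
// Stub mirror of the migrated event shape; illustrative only.
#[derive(Debug, PartialEq)]
enum Event {
    OpenChannelRequested {
        sender: u32,
        recipient: u32,
        proposed_max_capacity: u32,
        proposed_max_message_size: u32,
    },
    OpenChannelAccepted { sender: u32, recipient: u32 },
}

// With named fields, matchers name only what they need and survive field
// reordering, unlike positional tuple patterns.
fn is_request_from(e: &Event, who: u32) -> bool {
    matches!(e, Event::OpenChannelRequested { sender, .. } if *sender == who)
}
```
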
+#![cfg(feature = "runtime-benchmarks")] + use crate::{ configuration::Pallet as Configuration, hrmp::{Pallet as Hrmp, *}, paras::{Pallet as Paras, ParaKind, ParachainsCache}, shared::Pallet as Shared, }; +use frame_benchmarking::{impl_benchmark_test_suite, v2::*, whitelisted_caller}; use frame_support::{assert_ok, traits::Currency}; type BalanceOf<T> = @@ -138,10 +141,12 @@ static_assertions::const_assert!(MAX_UNIQUE_CHANNELS < PREFIX_0); static_assertions::const_assert!(HRMP_MAX_INBOUND_CHANNELS_BOUND < PREFIX_0); static_assertions::const_assert!(HRMP_MAX_OUTBOUND_CHANNELS_BOUND < PREFIX_0); -frame_benchmarking::benchmarks! { - where_clause { where <T as frame_system::Config>::RuntimeOrigin: From<crate::Origin> } +#[benchmarks(where <T as frame_system::Config>::RuntimeOrigin: From<crate::Origin>)] +mod benchmarks { + use super::*; - hrmp_init_open_channel { + #[benchmark] + fn hrmp_init_open_channel() { let sender_id: ParaId = 1u32.into(); let sender_origin: crate::Origin = 1u32.into(); @@ -149,51 +154,75 @@ frame_benchmarking::benchmarks! { // make sure para is registered, and has enough balance. let ed = T::Currency::minimum_balance(); - let deposit: BalanceOf<T> = Configuration::<T>::config().hrmp_sender_deposit.unique_saturated_into(); + let deposit: BalanceOf<T> = + Configuration::<T>::config().hrmp_sender_deposit.unique_saturated_into(); register_parachain_with_balance::<T>(sender_id, deposit + ed); register_parachain_with_balance::<T>(recipient_id, deposit + ed); let capacity = Configuration::<T>::config().hrmp_channel_max_capacity; let message_size = Configuration::<T>::config().hrmp_channel_max_message_size; - }: _(sender_origin, recipient_id, capacity, message_size) - verify { + + #[extrinsic_call] + _(sender_origin, recipient_id, capacity, message_size); + assert_last_event::<T>( - Event::<T>::OpenChannelRequested(sender_id, recipient_id, capacity, message_size).into() + Event::<T>::OpenChannelRequested { + sender: sender_id, + recipient: recipient_id, + proposed_max_capacity: capacity, + proposed_max_message_size: message_size, + } + .into(), ); } - hrmp_accept_open_channel { + #[benchmark] + fn hrmp_accept_open_channel() { let [(sender, _), (recipient, recipient_origin)] = establish_para_connection::<T>(1, 2, ParachainSetupStep::Requested); - }: _(recipient_origin, sender) - verify { - assert_last_event::<T>(Event::<T>::OpenChannelAccepted(sender, recipient).into()); + + #[extrinsic_call] + _(recipient_origin, sender); + + assert_last_event::<T>(Event::<T>::OpenChannelAccepted { sender, recipient }.into()); } - hrmp_close_channel { + #[benchmark] + fn hrmp_close_channel() { let [(sender, sender_origin), (recipient, _)] = establish_para_connection::<T>(1, 2, ParachainSetupStep::Established); let channel_id = HrmpChannelId { sender, recipient }; - }: _(sender_origin, channel_id.clone()) - verify { - assert_last_event::<T>(Event::<T>::ChannelClosed(sender, channel_id).into()); + + #[extrinsic_call] + _(sender_origin, channel_id.clone()); + + assert_last_event::<T>( + Event::<T>::ChannelClosed { by_parachain: sender, channel_id }.into(), + ); } // NOTE: a single parachain should have the maximum number of allowed ingress and egress // channels. - force_clean_hrmp { + #[benchmark] + fn force_clean_hrmp( // ingress channels to a single leaving parachain that need to be closed. - let i in 0 .. (HRMP_MAX_INBOUND_CHANNELS_BOUND - 1); + i: Linear<0, { HRMP_MAX_INBOUND_CHANNELS_BOUND - 1 }>, // egress channels to a single leaving parachain that need to be closed. - let e in 0 .. 
(HRMP_MAX_OUTBOUND_CHANNELS_BOUND - 1); - + e: Linear<0, { HRMP_MAX_OUTBOUND_CHANNELS_BOUND - 1 }>, + ) { // first, update the configs to support this many open channels... - assert_ok!( - Configuration::<T>::set_hrmp_max_parachain_outbound_channels(frame_system::RawOrigin::Root.into(), e + 1) - ); - assert_ok!( - Configuration::<T>::set_hrmp_max_parachain_inbound_channels(frame_system::RawOrigin::Root.into(), i + 1) - ); + assert_ok!(Configuration::<T>::set_hrmp_max_parachain_outbound_channels( + frame_system::RawOrigin::Root.into(), + e + 1 + )); + assert_ok!(Configuration::<T>::set_hrmp_max_parachain_inbound_channels( + frame_system::RawOrigin::Root.into(), + i + 1 + )); + assert_ok!(Configuration::<T>::set_max_downward_message_size( + frame_system::RawOrigin::Root.into(), + 1024 + )); // .. and enact it. Configuration::<T>::initializer_on_new_session(&Shared::<T>::scheduled_session()); @@ -201,14 +230,17 @@ frame_benchmarking::benchmarks! { let deposit: BalanceOf<T> = config.hrmp_sender_deposit.unique_saturated_into(); let para: ParaId = 1u32.into(); - let para_origin: crate::Origin = 1u32.into(); register_parachain_with_balance::<T>(para, deposit); T::Currency::make_free_balance_be(¶.into_account_truncating(), deposit * 256u32.into()); for ingress_para_id in 0..i { // establish ingress channels to `para`. let ingress_para_id = ingress_para_id + PREFIX_0; - let _ = establish_para_connection::<T>(ingress_para_id, para.into(), ParachainSetupStep::Established); + let _ = establish_para_connection::<T>( + ingress_para_id, + para.into(), + ParachainSetupStep::Established, + ); } // nothing should be left unprocessed. @@ -217,7 +249,11 @@ frame_benchmarking::benchmarks! { for egress_para_id in 0..e { // establish egress channels to `para`. let egress_para_id = egress_para_id + PREFIX_1; - let _ = establish_para_connection::<T>(para.into(), egress_para_id, ParachainSetupStep::Established); + let _ = establish_para_connection::<T>( + para.into(), + egress_para_id, + ParachainSetupStep::Established, + ); } // nothing should be left unprocessed. @@ -225,7 +261,10 @@ frame_benchmarking::benchmarks! { // all in all, we have created this many channels. assert_eq!(HrmpChannels::<T>::iter().count() as u32, i + e); - }: _(frame_system::Origin::<T>::Root, para, i, e) verify { + + #[extrinsic_call] + _(frame_system::Origin::<T>::Root, para, i, e); + // all in all, all of them must be gone by now. assert_eq!(HrmpChannels::<T>::iter().count() as u32, 0); // borrow this function from the tests to make sure state is clear, given that we do a lot @@ -233,73 +272,108 @@ frame_benchmarking::benchmarks! { Hrmp::<T>::assert_storage_consistency_exhaustive(); } - force_process_hrmp_open { + #[benchmark] + fn force_process_hrmp_open( // number of channels that need to be processed. Worse case is an N-M relation: unique // sender and recipients for all channels. - let c in 0 .. MAX_UNIQUE_CHANNELS; - - for id in 0 .. 
c { - let _ = establish_para_connection::<T>(PREFIX_0 + id, PREFIX_1 + id, ParachainSetupStep::Accepted); + c: Linear<0, MAX_UNIQUE_CHANNELS>, + ) { + for id in 0..c { + let _ = establish_para_connection::<T>( + PREFIX_0 + id, + PREFIX_1 + id, + ParachainSetupStep::Accepted, + ); } assert_eq!(HrmpOpenChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32, c); - }: _(frame_system::Origin::<T>::Root, c) - verify { + + #[extrinsic_call] + _(frame_system::Origin::<T>::Root, c); + assert_eq!(HrmpOpenChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32, 0); } - force_process_hrmp_close { + #[benchmark] + fn force_process_hrmp_close( // number of channels that need to be processed. Worse case is an N-M relation: unique // sender and recipients for all channels. - let c in 0 .. MAX_UNIQUE_CHANNELS; - - for id in 0 .. c { - let _ = establish_para_connection::<T>(PREFIX_0 + id, PREFIX_1 + id, ParachainSetupStep::CloseRequested); + c: Linear<0, MAX_UNIQUE_CHANNELS>, + ) { + for id in 0..c { + let _ = establish_para_connection::<T>( + PREFIX_0 + id, + PREFIX_1 + id, + ParachainSetupStep::CloseRequested, + ); } assert_eq!(HrmpCloseChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32, c); - }: _(frame_system::Origin::<T>::Root, c) - verify { + + #[extrinsic_call] + _(frame_system::Origin::<T>::Root, c); + assert_eq!(HrmpCloseChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32, 0); } - hrmp_cancel_open_request { + #[benchmark] + fn hrmp_cancel_open_request( // number of items already existing in the `HrmpOpenChannelRequestsList`, other than the // one that we remove. - let c in 0 .. MAX_UNIQUE_CHANNELS; - - for id in 0 .. c { - let _ = establish_para_connection::<T>(PREFIX_0 + id, PREFIX_1 + id, ParachainSetupStep::Requested); + c: Linear<0, MAX_UNIQUE_CHANNELS>, + ) { + for id in 0..c { + let _ = establish_para_connection::<T>( + PREFIX_0 + id, + PREFIX_1 + id, + ParachainSetupStep::Requested, + ); } let [(sender, sender_origin), (recipient, _)] = establish_para_connection::<T>(1, 2, ParachainSetupStep::Requested); - assert_eq!(HrmpOpenChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32, c + 1); + assert_eq!( + HrmpOpenChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32, + c + 1 + ); let channel_id = HrmpChannelId { sender, recipient }; - }: _(sender_origin, channel_id, c + 1) - verify { + + #[extrinsic_call] + _(sender_origin, channel_id, c + 1); + assert_eq!(HrmpOpenChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32, c); } // worse case will be `n` parachain channel requests, where in all of them either the sender or // the recipient need to be cleaned. This enforces the deposit of at least one to be processed. // No code path for triggering two deposit process exists. - clean_open_channel_requests { - let c in 0 .. MAX_UNIQUE_CHANNELS; - - for id in 0 .. 
c { - let _ = establish_para_connection::<T>(PREFIX_0 + id, PREFIX_1 + id, ParachainSetupStep::Requested); + #[benchmark] + fn clean_open_channel_requests(c: Linear<0, MAX_UNIQUE_CHANNELS>) { + for id in 0..c { + let _ = establish_para_connection::<T>( + PREFIX_0 + id, + PREFIX_1 + id, + ParachainSetupStep::Requested, + ); } assert_eq!(HrmpOpenChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32, c); let outgoing = (0..c).map(|id| (id + PREFIX_1).into()).collect::<Vec<ParaId>>(); let config = Configuration::<T>::config(); - }: { - Hrmp::<T>::clean_open_channel_requests(&config, &outgoing); - } verify { + + #[block] + { + Hrmp::<T>::clean_open_channel_requests(&config, &outgoing); + } + assert_eq!(HrmpOpenChannelRequestsList::<T>::decode_len().unwrap_or_default() as u32, 0); } - force_open_hrmp_channel { + #[benchmark] + fn force_open_hrmp_channel( + // Weight parameter only accepts `u32`, `0` and `1` used to represent `false` and `true`, + // respectively. + c: Linear<0, 1>, + ) { let sender_id: ParaId = 1u32.into(); let sender_origin: crate::Origin = 1u32.into(); let recipient_id: ParaId = 2u32.into(); @@ -315,47 +389,51 @@ frame_benchmarking::benchmarks! { let capacity = Configuration::<T>::config().hrmp_channel_max_capacity; let message_size = Configuration::<T>::config().hrmp_channel_max_message_size; - // Weight parameter only accepts `u32`, `0` and `1` used to represent `false` and `true`, - // respectively. - let c = [0, 1]; let channel_id = HrmpChannelId { sender: sender_id, recipient: recipient_id }; - for channels_to_close in c { - if channels_to_close == 1 { - // this will consume more weight if a channel _request_ already exists, because it - // will need to clear the request. - assert_ok!(Hrmp::<T>::hrmp_init_open_channel( + if c == 1 { + // this will consume more weight if a channel _request_ already exists, because it + // will need to clear the request. 
+ assert_ok!(Hrmp::<T>::hrmp_init_open_channel( + sender_origin.clone().into(), + recipient_id, + capacity, + message_size + )); + assert!(HrmpOpenChannelRequests::<T>::get(&channel_id).is_some()); + } else { + if HrmpOpenChannelRequests::<T>::get(&channel_id).is_some() { + assert_ok!(Hrmp::<T>::hrmp_cancel_open_request( sender_origin.clone().into(), - recipient_id, - capacity, - message_size + channel_id.clone(), + MAX_UNIQUE_CHANNELS, )); - assert!(HrmpOpenChannelRequests::<T>::get(&channel_id).is_some()); - } else { - if HrmpOpenChannelRequests::<T>::get(&channel_id).is_some() { - assert_ok!(Hrmp::<T>::hrmp_cancel_open_request( - sender_origin.clone().into(), - channel_id.clone(), - MAX_UNIQUE_CHANNELS, - )); - } - assert!(HrmpOpenChannelRequests::<T>::get(&channel_id).is_none()); } + assert!(HrmpOpenChannelRequests::<T>::get(&channel_id).is_none()); } // but the _channel_ should not exist assert!(HrmpChannels::<T>::get(&channel_id).is_none()); - }: _(frame_system::Origin::<T>::Root, sender_id, recipient_id, capacity, message_size) - verify { + + #[extrinsic_call] + _(frame_system::Origin::<T>::Root, sender_id, recipient_id, capacity, message_size); + assert_last_event::<T>( - Event::<T>::HrmpChannelForceOpened(sender_id, recipient_id, capacity, message_size).into() + Event::<T>::HrmpChannelForceOpened { + sender: sender_id, + recipient: recipient_id, + proposed_max_capacity: capacity, + proposed_max_message_size: message_size, + } + .into(), ); } - establish_system_channel { + #[benchmark] + fn establish_system_channel() { let sender_id: ParaId = 1u32.into(); let recipient_id: ParaId = 2u32.into(); - let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); + let caller: T::AccountId = whitelisted_caller(); let config = Configuration::<T>::config(); // make sure para is registered, and has zero balance. @@ -364,19 +442,28 @@ frame_benchmarking::benchmarks! { let capacity = config.hrmp_channel_max_capacity; let message_size = config.hrmp_channel_max_message_size; - }: _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id) - verify { + + #[extrinsic_call] + _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id); + assert_last_event::<T>( - Event::<T>::HrmpSystemChannelOpened(sender_id, recipient_id, capacity, message_size).into() + Event::<T>::HrmpSystemChannelOpened { + sender: sender_id, + recipient: recipient_id, + proposed_max_capacity: capacity, + proposed_max_message_size: message_size, + } + .into(), ); } - poke_channel_deposits { + #[benchmark] + fn poke_channel_deposits() { let sender_id: ParaId = 1u32.into(); let recipient_id: ParaId = 2u32.into(); - let channel_id = HrmpChannelId {sender: sender_id, recipient: recipient_id }; + let channel_id = HrmpChannelId { sender: sender_id, recipient: recipient_id }; - let caller: T::AccountId = frame_benchmarking::whitelisted_caller(); + let caller: T::AccountId = whitelisted_caller(); let config = Configuration::<T>::config(); // make sure para is registered, and has balance to reserve. @@ -403,23 +490,32 @@ frame_benchmarking::benchmarks! { // Actually reserve the deposits. 
let _ = T::Currency::reserve(&sender_id.into_account_truncating(), sender_deposit); let _ = T::Currency::reserve(&recipient_id.into_account_truncating(), recipient_deposit); - }: _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id) - verify { + + #[extrinsic_call] + _(frame_system::RawOrigin::Signed(caller), sender_id, recipient_id); + assert_last_event::<T>( - Event::<T>::OpenChannelDepositsUpdated(sender_id, recipient_id).into() + Event::<T>::OpenChannelDepositsUpdated { sender: sender_id, recipient: recipient_id } + .into(), ); let channel = HrmpChannels::<T>::get(&channel_id).unwrap(); // Check that the deposit was updated in the channel state. assert_eq!(channel.sender_deposit, 0); assert_eq!(channel.recipient_deposit, 0); // And that the funds were unreserved. - assert_eq!(T::Currency::reserved_balance(&sender_id.into_account_truncating()), 0u128.unique_saturated_into()); - assert_eq!(T::Currency::reserved_balance(&recipient_id.into_account_truncating()), 0u128.unique_saturated_into()); + assert_eq!( + T::Currency::reserved_balance(&sender_id.into_account_truncating()), + 0u128.unique_saturated_into() + ); + assert_eq!( + T::Currency::reserved_balance(&recipient_id.into_account_truncating()), + 0u128.unique_saturated_into() + ); } -} -frame_benchmarking::impl_benchmark_test_suite!( - Hrmp, - crate::mock::new_test_ext(crate::hrmp::tests::GenesisConfigBuilder::default().build()), - crate::mock::Test -); + impl_benchmark_test_suite!( + Hrmp, + crate::mock::new_test_ext(crate::hrmp::tests::GenesisConfigBuilder::default().build()), + crate::mock::Test + ); +} diff --git a/polkadot/runtime/parachains/src/hrmp/tests.rs b/polkadot/runtime/parachains/src/hrmp/tests.rs index 236745b7cc359370f99c48129cbcc93318f43f28..4fc0b0b448a50658758d4f76e46c7034d2edce01 100644 --- a/polkadot/runtime/parachains/src/hrmp/tests.rs +++ b/polkadot/runtime/parachains/src/hrmp/tests.rs @@ -151,14 +151,17 @@ fn open_channel_works() { Hrmp::hrmp_init_open_channel(para_a_origin.into(), para_b, 2, 8).unwrap(); Hrmp::assert_storage_consistency_exhaustive(); assert!(System::events().iter().any(|record| record.event == - MockEvent::Hrmp(Event::OpenChannelRequested(para_a, para_b, 2, 8)))); + MockEvent::Hrmp(Event::OpenChannelRequested { + sender: para_a, + recipient: para_b, + proposed_max_capacity: 2, + proposed_max_message_size: 8 + }))); Hrmp::hrmp_accept_open_channel(para_b_origin.into(), para_a).unwrap(); Hrmp::assert_storage_consistency_exhaustive(); - assert!(System::events() - .iter() - .any(|record| record.event == - MockEvent::Hrmp(Event::OpenChannelAccepted(para_a, para_b)))); + assert!(System::events().iter().any(|record| record.event == + MockEvent::Hrmp(Event::OpenChannelAccepted { sender: para_a, recipient: para_b }))); // Advance to a block 6, but without session change. That means that the channel has // not been created yet. @@ -189,7 +192,12 @@ fn force_open_channel_works() { Hrmp::force_open_hrmp_channel(RuntimeOrigin::root(), para_a, para_b, 2, 8).unwrap(); Hrmp::assert_storage_consistency_exhaustive(); assert!(System::events().iter().any(|record| record.event == - MockEvent::Hrmp(Event::HrmpChannelForceOpened(para_a, para_b, 2, 8)))); + MockEvent::Hrmp(Event::HrmpChannelForceOpened { + sender: para_a, + recipient: para_b, + proposed_max_capacity: 2, + proposed_max_message_size: 8 + }))); // Advance to a block 6, but without session change. That means that the channel has // not been created yet. 
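The hunks above and below migrate the HRMP pallet events from positional tuple variants to named-field struct variants. For downstream code that pattern-matches on these events, the shape of the match changes accordingly; here is a minimal, self-contained sketch, where only the variant and field names are taken from this diff, while the local `HrmpEvent` enum and `describe` helper are illustrative stand-ins rather than pallet code:

```rust
// Illustrative stand-in for the pallet's event type after this change; only
// the variant shape and field names mirror the diff.
type ParaId = u32;

enum HrmpEvent {
    OpenChannelRequested {
        sender: ParaId,
        recipient: ParaId,
        proposed_max_capacity: u32,
        proposed_max_message_size: u32,
    },
}

fn describe(event: &HrmpEvent) -> String {
    match event {
        // Before: HrmpEvent::OpenChannelRequested(sender, recipient, capacity, size)
        HrmpEvent::OpenChannelRequested {
            sender,
            recipient,
            proposed_max_capacity,
            proposed_max_message_size,
        } => format!(
            "para {sender} -> para {recipient}: capacity {proposed_max_capacity}, \
             max message size {proposed_max_message_size}"
        ),
    }
}
```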
@@ -224,7 +232,12 @@ fn force_open_channel_works_with_existing_request() { Hrmp::hrmp_init_open_channel(para_a_origin.into(), para_b, 2, 8).unwrap(); Hrmp::assert_storage_consistency_exhaustive(); assert!(System::events().iter().any(|record| record.event == - MockEvent::Hrmp(Event::OpenChannelRequested(para_a, para_b, 2, 8)))); + MockEvent::Hrmp(Event::OpenChannelRequested { + sender: para_a, + recipient: para_b, + proposed_max_capacity: 2, + proposed_max_message_size: 8 + }))); run_to_block(5, Some(vec![4, 5])); // the request exists, but no channel. @@ -238,7 +251,12 @@ fn force_open_channel_works_with_existing_request() { Hrmp::force_open_hrmp_channel(RuntimeOrigin::root(), para_a, para_b, 2, 8).unwrap(); Hrmp::assert_storage_consistency_exhaustive(); assert!(System::events().iter().any(|record| record.event == - MockEvent::Hrmp(Event::HrmpChannelForceOpened(para_a, para_b, 2, 8)))); + MockEvent::Hrmp(Event::HrmpChannelForceOpened { + sender: para_a, + recipient: para_b, + proposed_max_capacity: 2, + proposed_max_message_size: 8 + }))); // Advance to a block 6, but without session change. That means that the channel has // not been created yet. @@ -266,7 +284,12 @@ fn open_system_channel_works() { Hrmp::establish_system_channel(RuntimeOrigin::signed(1), para_a, para_b).unwrap(); Hrmp::assert_storage_consistency_exhaustive(); assert!(System::events().iter().any(|record| record.event == - MockEvent::Hrmp(Event::HrmpSystemChannelOpened(para_a, para_b, 2, 8)))); + MockEvent::Hrmp(Event::HrmpSystemChannelOpened { + sender: para_a, + recipient: para_b, + proposed_max_capacity: 2, + proposed_max_message_size: 8 + }))); // Advance to a block 6, but without session change. That means that the channel has // not been created yet. @@ -397,7 +420,10 @@ fn close_channel_works() { assert!(!channel_exists(para_a, para_b)); Hrmp::assert_storage_consistency_exhaustive(); assert!(System::events().iter().any(|record| record.event == - MockEvent::Hrmp(Event::ChannelClosed(para_b, channel_id.clone())))); + MockEvent::Hrmp(Event::ChannelClosed { + by_parachain: para_b, + channel_id: channel_id.clone() + }))); }); } diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs index e066ad825a3300545a4ecf7840084ecbfc838223..ba74e488cd3b7aac66fb4e62938235b491cce839 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/mod.rs @@ -25,5 +25,6 @@ //! 1. Bump the version of the stable module (e.g. `v2` becomes `v3`) //! 2. Move methods from `vstaging` to `v3`. The new stable version should include all methods from //! `vstaging` tagged with the new version number (e.g. all `v3` methods). -pub mod v5; + +pub mod v7; pub mod vstaging; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs similarity index 79% rename from polkadot/runtime/parachains/src/runtime_api_impl/v5.rs rename to polkadot/runtime/parachains/src/runtime_api_impl/v7.rs index 46a609e0368dd65224fb24376245c4883c97e4c9..35d92f71084fe64f1890f29d1ee840606e944beb 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v5.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v7.rs @@ -18,12 +18,16 @@ //! functions. 
use crate::{ - disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, + configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, scheduler::{self, CoreOccupied}, session_info, shared, }; use frame_system::pallet_prelude::*; use primitives::{ + async_backing::{ + AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, + InboundHrmpLimitations, OutboundHrmpChannelLimitations, + }, slashing, AuthorityDiscoveryId, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, @@ -395,3 +399,100 @@ pub fn submit_unsigned_slashing_report<T: disputes::slashing::Config>( key_ownership_proof, ) } + +/// Return the min backing votes threshold from the configuration. +pub fn minimum_backing_votes<T: initializer::Config>() -> u32 { + <configuration::Pallet<T>>::config().minimum_backing_votes +} + +/// Implementation for `ParaBackingState` function from the runtime API +pub fn backing_state<T: initializer::Config>( + para_id: ParaId, +) -> Option<BackingState<T::Hash, BlockNumberFor<T>>> { + let config = <configuration::Pallet<T>>::config(); + // Async backing is only expected to be enabled with a tracker capacity of 1. + // Subsequent configuration update gets applied on new session, which always + // clears the buffer. + // + // Thus, minimum relay parent is ensured to have asynchronous backing enabled. + let now = <frame_system::Pallet<T>>::block_number(); + let min_relay_parent_number = <shared::Pallet<T>>::allowed_relay_parents() + .hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len); + + let required_parent = <paras::Pallet<T>>::para_head(para_id)?; + let validation_code_hash = <paras::Pallet<T>>::current_code_hash(para_id)?; + + let upgrade_restriction = <paras::Pallet<T>>::upgrade_restriction_signal(para_id); + let future_validation_code = + <paras::Pallet<T>>::future_code_upgrade_at(para_id).and_then(|block_num| { + // Only read the storage if there's a pending upgrade. 
+ Some(block_num).zip(<paras::Pallet<T>>::future_code_hash(para_id)) + }); + + let (ump_msg_count, ump_total_bytes) = + <inclusion::Pallet<T>>::relay_dispatch_queue_size(para_id); + let ump_remaining = config.max_upward_queue_count - ump_msg_count; + let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes; + + let dmp_remaining_messages = <dmp::Pallet<T>>::dmq_contents(para_id) + .into_iter() + .map(|msg| msg.sent_at) + .collect(); + + let valid_watermarks = <hrmp::Pallet<T>>::valid_watermarks(para_id); + let hrmp_inbound = InboundHrmpLimitations { valid_watermarks }; + let hrmp_channels_out = <hrmp::Pallet<T>>::outbound_remaining_capacity(para_id) + .into_iter() + .map(|(para, (messages_remaining, bytes_remaining))| { + (para, OutboundHrmpChannelLimitations { messages_remaining, bytes_remaining }) + }) + .collect(); + + let constraints = Constraints { + min_relay_parent_number, + max_pov_size: config.max_pov_size, + max_code_size: config.max_code_size, + ump_remaining, + ump_remaining_bytes, + max_ump_num_per_candidate: config.max_upward_message_num_per_candidate, + dmp_remaining_messages, + hrmp_inbound, + hrmp_channels_out, + max_hrmp_num_per_candidate: config.hrmp_max_message_num_per_candidate, + required_parent, + validation_code_hash, + upgrade_restriction, + future_validation_code, + }; + + let pending_availability = { + // Note: the API deals with a `Vec` as it is future-proof for cases + // where there may be multiple candidates pending availability at a time. + // But at the moment only one candidate can be pending availability per + // parachain. + crate::inclusion::PendingAvailability::<T>::get(¶_id) + .and_then(|pending| { + let commitments = + crate::inclusion::PendingAvailabilityCommitments::<T>::get(¶_id); + commitments.map(move |c| (pending, c)) + }) + .map(|(pending, commitments)| { + CandidatePendingAvailability { + candidate_hash: pending.candidate_hash(), + descriptor: pending.candidate_descriptor().clone(), + commitments, + relay_parent_number: pending.relay_parent_number(), + max_pov_size: constraints.max_pov_size, // assume always same in session. + } + }) + .into_iter() + .collect() + }; + + Some(BackingState { constraints, pending_availability }) +} + +/// Implementation for `AsyncBackingParams` function from the runtime API +pub fn async_backing_params<T: configuration::Config>() -> AsyncBackingParams { + <configuration::Pallet<T>>::config().async_backing_params +} diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index deef19d9071071fa86ec7a79341191a8d6737a70..d01b543630c31c80747d2d1e80480b931ae86886 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -15,111 +15,3 @@ // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. //! Put implementations of functions from staging APIs here. 
- -use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared}; -use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{ - vstaging::{ - AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, - InboundHrmpLimitations, OutboundHrmpChannelLimitations, - }, - Id as ParaId, -}; -use sp_std::prelude::*; - -/// Implementation for `StagingParaBackingState` function from the runtime API -pub fn backing_state<T: initializer::Config>( - para_id: ParaId, -) -> Option<BackingState<T::Hash, BlockNumberFor<T>>> { - let config = <configuration::Pallet<T>>::config(); - // Async backing is only expected to be enabled with a tracker capacity of 1. - // Subsequent configuration update gets applied on new session, which always - // clears the buffer. - // - // Thus, minimum relay parent is ensured to have asynchronous backing enabled. - let now = <frame_system::Pallet<T>>::block_number(); - let min_relay_parent_number = <shared::Pallet<T>>::allowed_relay_parents() - .hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len); - - let required_parent = <paras::Pallet<T>>::para_head(para_id)?; - let validation_code_hash = <paras::Pallet<T>>::current_code_hash(para_id)?; - - let upgrade_restriction = <paras::Pallet<T>>::upgrade_restriction_signal(para_id); - let future_validation_code = - <paras::Pallet<T>>::future_code_upgrade_at(para_id).and_then(|block_num| { - // Only read the storage if there's a pending upgrade. - Some(block_num).zip(<paras::Pallet<T>>::future_code_hash(para_id)) - }); - - let (ump_msg_count, ump_total_bytes) = - <inclusion::Pallet<T>>::relay_dispatch_queue_size(para_id); - let ump_remaining = config.max_upward_queue_count - ump_msg_count; - let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes; - - let dmp_remaining_messages = <dmp::Pallet<T>>::dmq_contents(para_id) - .into_iter() - .map(|msg| msg.sent_at) - .collect(); - - let valid_watermarks = <hrmp::Pallet<T>>::valid_watermarks(para_id); - let hrmp_inbound = InboundHrmpLimitations { valid_watermarks }; - let hrmp_channels_out = <hrmp::Pallet<T>>::outbound_remaining_capacity(para_id) - .into_iter() - .map(|(para, (messages_remaining, bytes_remaining))| { - (para, OutboundHrmpChannelLimitations { messages_remaining, bytes_remaining }) - }) - .collect(); - - let constraints = Constraints { - min_relay_parent_number, - max_pov_size: config.max_pov_size, - max_code_size: config.max_code_size, - ump_remaining, - ump_remaining_bytes, - max_ump_num_per_candidate: config.max_upward_message_num_per_candidate, - dmp_remaining_messages, - hrmp_inbound, - hrmp_channels_out, - max_hrmp_num_per_candidate: config.hrmp_max_message_num_per_candidate, - required_parent, - validation_code_hash, - upgrade_restriction, - future_validation_code, - }; - - let pending_availability = { - // Note: the API deals with a `Vec` as it is future-proof for cases - // where there may be multiple candidates pending availability at a time. - // But at the moment only one candidate can be pending availability per - // parachain. 
- crate::inclusion::PendingAvailability::<T>::get(¶_id) - .and_then(|pending| { - let commitments = - crate::inclusion::PendingAvailabilityCommitments::<T>::get(¶_id); - commitments.map(move |c| (pending, c)) - }) - .map(|(pending, commitments)| { - CandidatePendingAvailability { - candidate_hash: pending.candidate_hash(), - descriptor: pending.candidate_descriptor().clone(), - commitments, - relay_parent_number: pending.relay_parent_number(), - max_pov_size: constraints.max_pov_size, // assume always same in session. - } - }) - .into_iter() - .collect() - }; - - Some(BackingState { constraints, pending_availability }) -} - -/// Implementation for `StagingAsyncBackingParams` function from the runtime API -pub fn async_backing_params<T: configuration::Config>() -> AsyncBackingParams { - <configuration::Pallet<T>>::config().async_backing_params -} - -/// Return the min backing votes threshold from the configuration. -pub fn minimum_backing_votes<T: initializer::Config>() -> u32 { - <configuration::Pallet<T>>::config().minimum_backing_votes -} diff --git a/polkadot/runtime/polkadot/Cargo.toml b/polkadot/runtime/polkadot/Cargo.toml index a659a4553220cd782d62b1b4db2b58030294077a..5e283b496697d2b075ec053830978535d40b3f25 100644 --- a/polkadot/runtime/polkadot/Cargo.toml +++ b/polkadot/runtime/polkadot/Cargo.toml @@ -26,6 +26,7 @@ offchain-primitives = { package = "sp-offchain", path = "../../../substrate/prim tx-pool-api = { package = "sp-transaction-pool", path = "../../../substrate/primitives/transaction-pool", default-features = false } sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } @@ -193,6 +194,7 @@ std = [ "sp-api/std", "sp-arithmetic/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-npos-elections/std", diff --git a/polkadot/runtime/polkadot/src/lib.rs b/polkadot/runtime/polkadot/src/lib.rs index 5956b0e155bb5b6f3a16cb3935b769625fbdb271..0b2dd12b1542ad7719430c84b00a58ddcad225b9 100644 --- a/polkadot/runtime/polkadot/src/lib.rs +++ b/polkadot/runtime/polkadot/src/lib.rs @@ -34,7 +34,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::v5 as parachains_runtime_api_impl, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -45,7 +45,9 @@ use frame_election_provider_support::{ bounds::ElectionBoundsBuilder, generate_solution_type, onchain, SequentialPhragmen, }; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{ fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, @@ 
-2228,6 +2230,17 @@ sp_api::impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } + } #[cfg(test)] diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 8f38659b84f5498d7744a70445205400fc1e420c..49a4a4e8299034dd8af0c72679ca41a17db4540a 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -21,8 +21,10 @@ beefy-primitives = { package = "sp-consensus-beefy", path = "../../../substrate/ binary-merkle-tree = { path = "../../../substrate/utils/binary-merkle-tree", default-features = false } rococo-runtime-constants = { package = "rococo-runtime-constants", path = "constants", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } offchain-primitives = { package = "sp-offchain", path = "../../../substrate/primitives/offchain", default-features = false } +sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } @@ -47,6 +49,7 @@ pallet-state-trie-migration = { path = "../../../substrate/frame/state-trie-migr pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } pallet-collective = { path = "../../../substrate/frame/collective", default-features = false } +pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } pallet-democracy = { path = "../../../substrate/frame/democracy", default-features = false } pallet-elections-phragmen = { path = "../../../substrate/frame/elections-phragmen", default-features = false } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } @@ -62,7 +65,9 @@ pallet-nis = { path = "../../../substrate/frame/nis", default-features = false } pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } pallet-preimage = { path = "../../../substrate/frame/preimage", default-features = false } pallet-proxy = { path = "../../../substrate/frame/proxy", default-features = false } +pallet-ranked-collective = { path = "../../../substrate/frame/ranked-collective", default-features = false } pallet-recovery = { path = "../../../substrate/frame/recovery", default-features = false } +pallet-referenda = { path = "../../../substrate/frame/referenda", default-features = false } pallet-scheduler = { path = "../../../substrate/frame/scheduler", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } pallet-society = { path = "../../../substrate/frame/society", default-features = 
false } @@ -76,6 +81,7 @@ pallet-tips = { path = "../../../substrate/frame/tips", default-features = false pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } +pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } @@ -104,7 +110,7 @@ sp-tracing = { path = "../../../substrate/primitives/tracing", default-features tokio = { version = "1.24.2", features = ["macros"] } [build-dependencies] -substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } [features] default = [ "std" ] @@ -134,6 +140,7 @@ std = [ "pallet-bounties/std", "pallet-child-bounties/std", "pallet-collective/std", + "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-elections-phragmen/std", "pallet-grandpa/std", @@ -148,7 +155,9 @@ std = [ "pallet-offences/std", "pallet-preimage/std", "pallet-proxy/std", + "pallet-ranked-collective/std", "pallet-recovery/std", + "pallet-referenda/std", "pallet-scheduler/std", "pallet-session/std", "pallet-society/std", @@ -162,6 +171,7 @@ std = [ "pallet-treasury/std", "pallet-utility/std", "pallet-vesting/std", + "pallet-whitelist/std", "pallet-xcm-benchmarks?/std", "pallet-xcm/std", "parity-scale-codec/std", @@ -174,7 +184,9 @@ std = [ "serde/std", "serde_derive", "sp-api/std", + "sp-arithmetic/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-runtime/std", @@ -184,6 +196,7 @@ std = [ "sp-storage/std", "sp-tracing/std", "sp-version/std", + "substrate-wasm-builder", "tx-pool-api/std", "xcm-builder/std", "xcm-executor/std", @@ -199,6 +212,7 @@ runtime-benchmarks = [ "pallet-bounties/runtime-benchmarks", "pallet-child-bounties/runtime-benchmarks", "pallet-collective/runtime-benchmarks", + "pallet-conviction-voting/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", "pallet-elections-phragmen/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", @@ -213,7 +227,9 @@ runtime-benchmarks = [ "pallet-offences/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", + "pallet-ranked-collective/runtime-benchmarks", "pallet-recovery/runtime-benchmarks", + "pallet-referenda/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", @@ -224,6 +240,7 @@ runtime-benchmarks = [ "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", + "pallet-whitelist/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", @@ -250,6 +267,7 @@ try-runtime = [ "pallet-bounties/try-runtime", "pallet-child-bounties/try-runtime", "pallet-collective/try-runtime", + "pallet-conviction-voting/try-runtime", "pallet-democracy/try-runtime", "pallet-elections-phragmen/try-runtime", "pallet-grandpa/try-runtime", @@ -264,7 +282,9 @@ try-runtime = [ "pallet-offences/try-runtime", "pallet-preimage/try-runtime", "pallet-proxy/try-runtime", + 
"pallet-ranked-collective/try-runtime", "pallet-recovery/try-runtime", + "pallet-referenda/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", "pallet-society/try-runtime", @@ -277,6 +297,7 @@ try-runtime = [ "pallet-treasury/try-runtime", "pallet-utility/try-runtime", "pallet-vesting/try-runtime", + "pallet-whitelist/try-runtime", "pallet-xcm/try-runtime", "runtime-common/try-runtime", "runtime-parachains/try-runtime", diff --git a/polkadot/runtime/rococo/README.md b/polkadot/runtime/rococo/README.md index 465afd25549b69f8e3ef30f926bdefa4b81ed88f..5b2c296f0ced9a053de46bac5d8207d7a1d835d8 100644 --- a/polkadot/runtime/rococo/README.md +++ b/polkadot/runtime/rococo/README.md @@ -2,6 +2,13 @@ Rococo is a testnet runtime with no stability guarantees. +## How to build `rococo` runtime +The `EpochDurationInBlocks` parameter is configurable via the `ROCOCO_EPOCH_DURATION` environment variable. To build a +Wasm runtime blob with a customized epoch duration, execute the following command: +```bash +ROCOCO_EPOCH_DURATION=10 ./polkadot/scripts/build-only-wasm.sh rococo-runtime /path/to/output/directory/ +``` + ## How to run `rococo-local` The [Cumulus Tutorial](https://docs.substrate.io/tutorials/v3/cumulus/start-relay/) details building, starting, and diff --git a/polkadot/runtime/rococo/build.rs b/polkadot/runtime/rococo/build.rs index e7134e0ef723eae59cbd65a79ad7dda47d958b52..ed32d33105b76c45bac68360e92646019e578b2e 100644 --- a/polkadot/runtime/rococo/build.rs +++ b/polkadot/runtime/rococo/build.rs @@ -14,12 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. -use substrate_wasm_builder::WasmBuilder; - +#[cfg(feature = "std")] fn main() { - WasmBuilder::new() + // note: needs to be kept in sync with the hard-coded string literal in rococo-runtime-constants::time + const ROCOCO_EPOCH_DURATION_ENV: &str = "ROCOCO_EPOCH_DURATION"; + + substrate_wasm_builder::WasmBuilder::new() .with_current_project() .import_memory() .export_heap_base() - .build() + .build(); + + println!("cargo:rerun-if-env-changed={}", ROCOCO_EPOCH_DURATION_ENV); } + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 214e2f3fa980e8793c2683c544cb69b2d0d8d00e..2200f7ddefe1f912f026b98f7232cf7273891d18 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -38,12 +38,13 @@ pub mod currency { /// Time and blocks. pub mod time { use primitives::{BlockNumber, Moment}; - use runtime_common::prod_or_fast; pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; - pub const DEFAULT_EPOCH_DURATION: BlockNumber = prod_or_fast!(1 * HOURS, 1 * MINUTES); + frame_support::parameter_types! { - pub storage EpochDurationInBlocks: BlockNumber = DEFAULT_EPOCH_DURATION; + pub storage EpochDurationInBlocks: BlockNumber = option_env!("ROCOCO_EPOCH_DURATION") + .map(|s| s.parse().expect("`ROCOCO_EPOCH_DURATION` is not a valid `BlockNumber`")) + .unwrap_or(1 * MINUTES); } // These time units are defined in number of blocks. 
diff --git a/polkadot/runtime/rococo/src/governance/fellowship.rs b/polkadot/runtime/rococo/src/governance/fellowship.rs new file mode 100644 index 0000000000000000000000000000000000000000..b5df6cf2df341c5563f31ea6f49f1eeae5b81fc1 --- /dev/null +++ b/polkadot/runtime/rococo/src/governance/fellowship.rs @@ -0,0 +1,343 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Elements of governance concerning the Rococo Fellowship. + +use frame_support::traits::{MapSuccess, TryMapSuccess}; +use sp_runtime::traits::{CheckedReduceBy, ConstU16, Replace}; + +use super::*; +use crate::{CENTS, DAYS}; + +parameter_types! { + pub const AlarmInterval: BlockNumber = 1; + pub const SubmissionDeposit: Balance = 0; + pub const UndecidingTimeout: BlockNumber = 7 * DAYS; +} + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo { + type Id = u16; + type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin; + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] { + static DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 10] = [ + ( + 0u16, + pallet_referenda::TrackInfo { + name: "candidates", + max_deciding: 10, + decision_deposit: 100 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 1u16, + pallet_referenda::TrackInfo { + name: "members", + max_deciding: 10, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 2u16, + pallet_referenda::TrackInfo { + name: "proficients", + max_deciding: 10, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: 
Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 3u16, + pallet_referenda::TrackInfo { + name: "fellows", + max_deciding: 10, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 4u16, + pallet_referenda::TrackInfo { + name: "senior fellows", + max_deciding: 10, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 5u16, + pallet_referenda::TrackInfo { + name: "experts", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 6u16, + pallet_referenda::TrackInfo { + name: "senior experts", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 7u16, + pallet_referenda::TrackInfo { + name: "masters", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 8u16, + pallet_referenda::TrackInfo { + name: "senior masters", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: 
Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + 9u16, + pallet_referenda::TrackInfo { + name: "grand masters", + max_deciding: 10, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 30 * MINUTES, + decision_period: 7 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(50), + }, + }, + ), + ]; + &DATA[..] + } + fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> { + use super::origins::Origin; + + #[cfg(feature = "runtime-benchmarks")] + { + // For benchmarks, we enable a root origin. + // It is important that this is not available in production! + let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); + if &root == id { + return Ok(9) + } + } + + match Origin::try_from(id.clone()) { + Ok(Origin::FellowshipInitiates) => Ok(0), + Ok(Origin::Fellowship1Dan) => Ok(1), + Ok(Origin::Fellowship2Dan) => Ok(2), + Ok(Origin::Fellowship3Dan) | Ok(Origin::Fellows) => Ok(3), + Ok(Origin::Fellowship4Dan) => Ok(4), + Ok(Origin::Fellowship5Dan) | Ok(Origin::FellowshipExperts) => Ok(5), + Ok(Origin::Fellowship6Dan) => Ok(6), + Ok(Origin::Fellowship7Dan | Origin::FellowshipMasters) => Ok(7), + Ok(Origin::Fellowship8Dan) => Ok(8), + Ok(Origin::Fellowship9Dan) => Ok(9), + _ => Err(()), + } + } +} +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); + +pub type FellowshipReferendaInstance = pallet_referenda::Instance2; + +impl pallet_referenda::Config<FellowshipReferendaInstance> for Runtime { + type WeightInfo = weights::pallet_referenda_fellowship_referenda::WeightInfo<Self>; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + type SubmitOrigin = + pallet_ranked_collective::EnsureMember<Runtime, FellowshipCollectiveInstance, 1>; + type CancelOrigin = FellowshipExperts; + type KillOrigin = FellowshipMasters; + type Slash = Treasury; + type Votes = pallet_ranked_collective::Votes; + type Tally = pallet_ranked_collective::TallyOf<Runtime, FellowshipCollectiveInstance>; + type SubmissionDeposit = SubmissionDeposit; + type MaxQueued = ConstU32<100>; + type UndecidingTimeout = UndecidingTimeout; + type AlarmInterval = AlarmInterval; + type Tracks = TracksInfo; + type Preimages = Preimage; +} + +pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; + +impl pallet_ranked_collective::Config<FellowshipCollectiveInstance> for Runtime { + type WeightInfo = weights::pallet_ranked_collective::WeightInfo<Self>; + type RuntimeEvent = RuntimeEvent; + // Promotion is by any of: + // - Root can promote arbitrarily. + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank *above* the new rank. + type PromoteOrigin = EitherOf< + frame_system::EnsureRootWithSuccess<Self::AccountId, ConstU16<65535>>, + EitherOf< + MapSuccess<FellowshipAdmin, Replace<ConstU16<9>>>, + TryMapSuccess<origins::EnsureFellowship, CheckedReduceBy<ConstU16<1>>>, + >, + >; + // Demotion is by any of: + // - Root can demote arbitrarily. 
+ // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank two above the current rank. + type DemoteOrigin = EitherOf< + frame_system::EnsureRootWithSuccess<Self::AccountId, ConstU16<65535>>, + EitherOf< + MapSuccess<FellowshipAdmin, Replace<ConstU16<9>>>, + TryMapSuccess<origins::EnsureFellowship, CheckedReduceBy<ConstU16<2>>>, + >, + >; + type Polls = FellowshipReferenda; + type MinRankOfClass = sp_runtime::traits::Identity; + type VoteWeight = pallet_ranked_collective::Geometric; +} diff --git a/polkadot/runtime/rococo/src/governance/mod.rs b/polkadot/runtime/rococo/src/governance/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..ef2adf60753d541c44eaf6073a3a012e66bf9f5c --- /dev/null +++ b/polkadot/runtime/rococo/src/governance/mod.rs @@ -0,0 +1,93 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! New governance configurations for the Rococo runtime. + +use super::*; +use frame_support::{ + parameter_types, + traits::{ConstU16, EitherOf}, +}; +use frame_system::EnsureRootWithSuccess; + +mod origins; +pub use origins::{ + pallet_custom_origins, AuctionAdmin, Fellows, FellowshipAdmin, FellowshipExperts, + FellowshipInitiates, FellowshipMasters, GeneralAdmin, LeaseAdmin, ReferendumCanceller, + ReferendumKiller, Spender, StakingAdmin, Treasurer, WhitelistedCaller, +}; +mod tracks; +pub use tracks::TracksInfo; +mod fellowship; +pub use fellowship::{FellowshipCollectiveInstance, FellowshipReferendaInstance}; + +parameter_types! { + pub const VoteLockingPeriod: BlockNumber = 7 * DAYS; +} + +impl pallet_conviction_voting::Config for Runtime { + type WeightInfo = weights::pallet_conviction_voting::WeightInfo<Self>; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type VoteLockingPeriod = VoteLockingPeriod; + type MaxVotes = ConstU32<512>; + type MaxTurnout = + frame_support::traits::tokens::currency::ActiveIssuanceOf<Balances, Self::AccountId>; + type Polls = Referenda; +} + +parameter_types! { + pub const AlarmInterval: BlockNumber = 1; + pub const SubmissionDeposit: Balance = 1 * 3 * CENTS; + pub const UndecidingTimeout: BlockNumber = 14 * DAYS; +} + +parameter_types! 
{ + pub const MaxBalance: Balance = Balance::max_value(); +} +pub type TreasurySpender = EitherOf<EnsureRootWithSuccess<AccountId, MaxBalance>, Spender>; + +impl origins::pallet_custom_origins::Config for Runtime {} + +impl pallet_whitelist::Config for Runtime { + type WeightInfo = weights::pallet_whitelist::WeightInfo<Self>; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type WhitelistOrigin = + EitherOf<EnsureRootWithSuccess<Self::AccountId, ConstU16<65535>>, Fellows>; + type DispatchWhitelistedOrigin = EitherOf<EnsureRoot<Self::AccountId>, WhitelistedCaller>; + type Preimages = Preimage; +} + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_referenda::WeightInfo<Self>; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + type SubmitOrigin = frame_system::EnsureSigned<AccountId>; + type CancelOrigin = EitherOf<EnsureRoot<AccountId>, ReferendumCanceller>; + type KillOrigin = EitherOf<EnsureRoot<AccountId>, ReferendumKiller>; + type Slash = Treasury; + type Votes = pallet_conviction_voting::VotesOf<Runtime>; + type Tally = pallet_conviction_voting::TallyOf<Runtime>; + type SubmissionDeposit = SubmissionDeposit; + type MaxQueued = ConstU32<100>; + type UndecidingTimeout = UndecidingTimeout; + type AlarmInterval = AlarmInterval; + type Tracks = TracksInfo; + type Preimages = Preimage; +} diff --git a/polkadot/runtime/rococo/src/governance/origins.rs b/polkadot/runtime/rococo/src/governance/origins.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4639f40dc432473e1222f6ab1094a31ecd26f68 --- /dev/null +++ b/polkadot/runtime/rococo/src/governance/origins.rs @@ -0,0 +1,194 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Custom origins for governance interventions. + +pub use pallet_custom_origins::*; + +#[frame_support::pallet] +pub mod pallet_custom_origins { + use crate::{Balance, CENTS, GRAND}; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet<T>(_); + + #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] + #[pallet::origin] + pub enum Origin { + /// Origin for cancelling slashes. + StakingAdmin, + /// Origin for spending (any amount of) funds. + Treasurer, + /// Origin for managing the composition of the fellowship. + FellowshipAdmin, + /// Origin for managing the registrar. + GeneralAdmin, + /// Origin for starting auctions. + AuctionAdmin, + /// Origin able to force slot leases. + LeaseAdmin, + /// Origin able to cancel referenda. + ReferendumCanceller, + /// Origin able to kill referenda. + ReferendumKiller, + /// Origin able to spend up to 1 KSM from the treasury at once. 
+ SmallTipper, + /// Origin able to spend up to 5 KSM from the treasury at once. + BigTipper, + /// Origin able to spend up to 50 KSM from the treasury at once. + SmallSpender, + /// Origin able to spend up to 500 KSM from the treasury at once. + MediumSpender, + /// Origin able to spend up to 5,000 KSM from the treasury at once. + BigSpender, + /// Origin able to dispatch a whitelisted call. + WhitelistedCaller, + /// Origin commanded by any member of the Polkadot Fellowship (no Dan grade needed). + FellowshipInitiates, + /// Origin commanded by Polkadot Fellows (3rd Dan fellows or greater). + Fellows, + /// Origin commanded by Polkadot Experts (5th Dan fellows or greater). + FellowshipExperts, + /// Origin commanded by Polkadot Masters (7th Dan fellows or greater). + FellowshipMasters, + /// Origin commanded by rank 1 of the Polkadot Fellowship and with a success of 1. + Fellowship1Dan, + /// Origin commanded by rank 2 of the Polkadot Fellowship and with a success of 2. + Fellowship2Dan, + /// Origin commanded by rank 3 of the Polkadot Fellowship and with a success of 3. + Fellowship3Dan, + /// Origin commanded by rank 4 of the Polkadot Fellowship and with a success of 4. + Fellowship4Dan, + /// Origin commanded by rank 5 of the Polkadot Fellowship and with a success of 5. + Fellowship5Dan, + /// Origin commanded by rank 6 of the Polkadot Fellowship and with a success of 6. + Fellowship6Dan, + /// Origin commanded by rank 7 of the Polkadot Fellowship and with a success of 7. + Fellowship7Dan, + /// Origin commanded by rank 8 of the Polkadot Fellowship and with a success of 8. + Fellowship8Dan, + /// Origin commanded by rank 9 of the Polkadot Fellowship and with a success of 9. + Fellowship9Dan, + } + + macro_rules! decl_unit_ensures { + ( $name:ident: $success_type:ty = $success:expr ) => { + pub struct $name; + impl<O: Into<Result<Origin, O>> + From<Origin>> + EnsureOrigin<O> for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result<Self::Success, O> { + o.into().and_then(|o| match o { + Origin::$name => Ok($success), + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result<O, ()> { + Ok(O::from(Origin::$name)) + } + } + }; + ( $name:ident ) => { decl_unit_ensures! { $name : () = () } }; + ( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => { + decl_unit_ensures! { $name: $success_type = $success } + decl_unit_ensures! { $( $rest )* } + }; + ( $name:ident, $( $rest:tt )* ) => { + decl_unit_ensures! { $name } + decl_unit_ensures! { $( $rest )* } + }; + () => {} + } + decl_unit_ensures!( + StakingAdmin, + Treasurer, + FellowshipAdmin, + GeneralAdmin, + AuctionAdmin, + LeaseAdmin, + ReferendumCanceller, + ReferendumKiller, + WhitelistedCaller, + FellowshipInitiates: u16 = 0, + Fellows: u16 = 3, + FellowshipExperts: u16 = 5, + FellowshipMasters: u16 = 7, + ); + + macro_rules! 
decl_ensure { + ( + $vis:vis type $name:ident: EnsureOrigin<Success = $success_type:ty> { + $( $item:ident = $success:expr, )* + } + ) => { + $vis struct $name; + impl<O: Into<Result<Origin, O>> + From<Origin>> + EnsureOrigin<O> for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result<Self::Success, O> { + o.into().and_then(|o| match o { + $( + Origin::$item => Ok($success), + )* + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result<O, ()> { + // By convention the more privileged origins go later, so for greatest chance + // of success, we want the last one. + let _result: Result<O, ()> = Err(()); + $( + let _result: Result<O, ()> = Ok(O::from(Origin::$item)); + )* + _result + } + } + } + } + + decl_ensure! { + pub type Spender: EnsureOrigin<Success = Balance> { + SmallTipper = 250 * 3 * CENTS, + BigTipper = 1 * GRAND, + SmallSpender = 10 * GRAND, + MediumSpender = 100 * GRAND, + BigSpender = 1_000 * GRAND, + Treasurer = 10_000 * GRAND, + } + } + + decl_ensure! { + pub type EnsureFellowship: EnsureOrigin<Success = u16> { + Fellowship1Dan = 1, + Fellowship2Dan = 2, + Fellowship3Dan = 3, + Fellowship4Dan = 4, + Fellowship5Dan = 5, + Fellowship6Dan = 6, + Fellowship7Dan = 7, + Fellowship8Dan = 8, + Fellowship9Dan = 9, + } + } +} diff --git a/polkadot/runtime/rococo/src/governance/tracks.rs b/polkadot/runtime/rococo/src/governance/tracks.rs new file mode 100644 index 0000000000000000000000000000000000000000..3765569f183e0414a10fe2852e528ccc9dedc3d7 --- /dev/null +++ b/polkadot/runtime/rococo/src/governance/tracks.rs @@ -0,0 +1,320 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Track configurations for governance. 
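Each track below pairs a `min_approval` curve with a `min_support` curve, declared via `Curve::make_linear` and `Curve::make_reciprocal`. As a rough illustration of what the linear constructor encodes, under the assumption that `make_linear(length, period, floor, ceil)` produces a `LinearDecreasing` curve whose threshold falls from `ceil` to `floor` over the `length/period` fraction of the decision period (the helper below is hand-rolled for illustration, not the pallet's implementation):

```rust
// Hand-rolled sketch of a linear-decreasing threshold; not pallet code.
fn linear_threshold(ceil: f64, floor: f64, length: f64, elapsed: f64) -> f64 {
    let progress = (elapsed / length).min(1.0);
    ceil - (ceil - floor) * progress
}

fn main() {
    // SUP_ROOT below is `make_linear(28, 28, percent(0), percent(50))`:
    // required support starts at 50%, reaches 0% only at the period's end,
    // and is still 25% halfway through.
    let halfway = linear_threshold(0.50, 0.0, 28.0, 14.0);
    assert!((halfway - 0.25).abs() < 1e-9);
}
```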
+ +use super::*; + +const fn percent(x: i32) -> sp_arithmetic::FixedI64 { + sp_arithmetic::FixedI64::from_rational(x as u128, 100) +} +use pallet_referenda::Curve; +const APP_ROOT: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_ROOT: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_STAKING_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_STAKING_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_TREASURER: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_TREASURER: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_FELLOWSHIP_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_FELLOWSHIP_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_LEASE_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_LEASE_ADMIN: Curve = Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_CANCELLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_CANCELLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_KILLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_KILLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_SMALL_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_SMALL_TIPPER: Curve = Curve::make_reciprocal(1, 28, percent(4), percent(0), percent(50)); +const APP_BIG_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_BIG_TIPPER: Curve = Curve::make_reciprocal(8, 28, percent(1), percent(0), percent(50)); +const APP_SMALL_SPENDER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_SMALL_SPENDER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_MEDIUM_SPENDER: Curve = Curve::make_linear(23, 28, percent(50), percent(100)); +const SUP_MEDIUM_SPENDER: Curve = + Curve::make_reciprocal(16, 28, percent(1), percent(0), percent(50)); +const APP_BIG_SPENDER: Curve = Curve::make_linear(28, 28, percent(50), percent(100)); +const SUP_BIG_SPENDER: Curve = Curve::make_reciprocal(20, 28, percent(1), percent(0), percent(50)); +const APP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(16, 28 * 24, percent(96), percent(50), percent(100)); +const SUP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(1, 28, percent(20), percent(5), percent(50)); + +const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15] = [ + ( + 0, + pallet_referenda::TrackInfo { + name: "root", + max_deciding: 1, + decision_deposit: 100 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_ROOT, + min_support: SUP_ROOT, + }, + ), + ( + 1, + pallet_referenda::TrackInfo { + 
name: "whitelisted_caller", + max_deciding: 100, + decision_deposit: 10 * GRAND, + prepare_period: 6 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_WHITELISTED_CALLER, + min_support: SUP_WHITELISTED_CALLER, + }, + ), + ( + 10, + pallet_referenda::TrackInfo { + name: "staking_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_STAKING_ADMIN, + min_support: SUP_STAKING_ADMIN, + }, + ), + ( + 11, + pallet_referenda::TrackInfo { + name: "treasurer", + max_deciding: 10, + decision_deposit: 1 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_TREASURER, + min_support: SUP_TREASURER, + }, + ), + ( + 12, + pallet_referenda::TrackInfo { + name: "lease_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_LEASE_ADMIN, + min_support: SUP_LEASE_ADMIN, + }, + ), + ( + 13, + pallet_referenda::TrackInfo { + name: "fellowship_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_FELLOWSHIP_ADMIN, + min_support: SUP_FELLOWSHIP_ADMIN, + }, + ), + ( + 14, + pallet_referenda::TrackInfo { + name: "general_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_GENERAL_ADMIN, + min_support: SUP_GENERAL_ADMIN, + }, + ), + ( + 15, + pallet_referenda::TrackInfo { + name: "auction_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_AUCTION_ADMIN, + min_support: SUP_AUCTION_ADMIN, + }, + ), + ( + 20, + pallet_referenda::TrackInfo { + name: "referendum_canceller", + max_deciding: 1_000, + decision_deposit: 10 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_CANCELLER, + min_support: SUP_REFERENDUM_CANCELLER, + }, + ), + ( + 21, + pallet_referenda::TrackInfo { + name: "referendum_killer", + max_deciding: 1_000, + decision_deposit: 50 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_KILLER, + min_support: SUP_REFERENDUM_KILLER, + }, + ), + ( + 30, + pallet_referenda::TrackInfo { + name: "small_tipper", + max_deciding: 200, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 1 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: APP_SMALL_TIPPER, + min_support: SUP_SMALL_TIPPER, + }, + ), + ( + 31, + pallet_referenda::TrackInfo { + name: "big_tipper", + max_deciding: 100, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 4 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 3 * MINUTES, + 
min_approval: APP_BIG_TIPPER, + min_support: SUP_BIG_TIPPER, + }, + ), + ( + 32, + pallet_referenda::TrackInfo { + name: "small_spender", + max_deciding: 50, + decision_deposit: 100 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 10 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_SMALL_SPENDER, + min_support: SUP_SMALL_SPENDER, + }, + ), + ( + 33, + pallet_referenda::TrackInfo { + name: "medium_spender", + max_deciding: 50, + decision_deposit: 200 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_MEDIUM_SPENDER, + min_support: SUP_MEDIUM_SPENDER, + }, + ), + ( + 34, + pallet_referenda::TrackInfo { + name: "big_spender", + max_deciding: 50, + decision_deposit: 400 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 14 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_BIG_SPENDER, + min_support: SUP_BIG_SPENDER, + }, + ), +]; + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo { + type Id = u16; + type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin; + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] { + &TRACKS_DATA[..] + } + fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> { + if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) { + match system_origin { + frame_system::RawOrigin::Root => Ok(0), + _ => Err(()), + } + } else if let Ok(custom_origin) = origins::Origin::try_from(id.clone()) { + match custom_origin { + origins::Origin::WhitelistedCaller => Ok(1), + // General admin + origins::Origin::StakingAdmin => Ok(10), + origins::Origin::Treasurer => Ok(11), + origins::Origin::LeaseAdmin => Ok(12), + origins::Origin::FellowshipAdmin => Ok(13), + origins::Origin::GeneralAdmin => Ok(14), + origins::Origin::AuctionAdmin => Ok(15), + // Referendum admins + origins::Origin::ReferendumCanceller => Ok(20), + origins::Origin::ReferendumKiller => Ok(21), + // Limited treasury spenders + origins::Origin::SmallTipper => Ok(30), + origins::Origin::BigTipper => Ok(31), + origins::Origin::SmallSpender => Ok(32), + origins::Origin::MediumSpender => Ok(33), + origins::Origin::BigSpender => Ok(34), + _ => Err(()), + } + } else { + Err(()) + } + } +} +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 7046c4640c040e4ddc13fb94b945b1ac3780de56..4bdcc1237394e2b690f41f4e616cf9671d661c44 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -46,7 +46,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, - runtime_api_impl::v5 as parachains_runtime_api_impl, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -58,10 +58,12 @@ use beefy_primitives::{ }; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{ - fungible::HoldConsideration, Contains, EitherOfDiverse, 
EverythingBut, InstanceFilter, - KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, PrivilegeCmp, ProcessMessage, + fungible::HoldConsideration, Contains, EitherOf, EitherOfDiverse, EverythingBut, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, @@ -86,7 +88,6 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use static_assertions::const_assert; use xcm::latest::Junction; pub use frame_system::Call as SystemCall; @@ -101,6 +102,13 @@ mod weights; // XCM configurations. pub mod xcm_config; +// Governance configurations. +pub mod governance; +use governance::{ + pallet_custom_origins, AuctionAdmin, Fellows, GeneralAdmin, LeaseAdmin, Treasurer, + TreasurySpender, +}; + mod validator_manager; impl_runtime_weights!(rococo_runtime_constants); @@ -115,7 +123,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 9430, + spec_version: 10020, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 22, @@ -184,11 +192,6 @@ parameter_types! { pub const NoPreimagePostponement: Option<u32> = Some(10); } -type ScheduleOrigin = EitherOfDiverse< - EnsureRoot<AccountId>, - pallet_collective::EnsureProportionAtLeast<AccountId, CouncilCollective, 1, 2>, ->; - /// Used the compare the privilege of an origin inside the scheduler. pub struct OriginPrivilegeCmp; @@ -201,11 +204,6 @@ impl PrivilegeCmp<OriginCaller> for OriginPrivilegeCmp { match (left, right) { // Root is greater than anything. (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater), - // Check which one has more yes votes. - ( - OriginCaller::Council(pallet_collective::RawOrigin::Members(l_yes_votes, l_count)), - OriginCaller::Council(pallet_collective::RawOrigin::Members(r_yes_votes, r_count)), - ) => Some((l_yes_votes * r_count).cmp(&(r_yes_votes * l_count))), // For every other origin we don't care, as they are not used for `ScheduleOrigin`. _ => None, } @@ -218,7 +216,9 @@ impl pallet_scheduler::Config for Runtime { type PalletsOrigin = OriginCaller; type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; - type ScheduleOrigin = ScheduleOrigin; + // The goal of having ScheduleOrigin include AuctionAdmin is to allow the auctions track of + // OpenGov to schedule periodic auctions. + type ScheduleOrigin = EitherOf<EnsureRoot<AccountId>, AuctionAdmin>; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = weights::pallet_scheduler::WeightInfo<Runtime>; type OriginPrivilegeCmp = OriginPrivilegeCmp; @@ -378,171 +378,6 @@ parameter_types! { pub const BondingDuration: sp_staking::EraIndex = 28; } -parameter_types!
{ - pub LaunchPeriod: BlockNumber = prod_or_fast!(7 * DAYS, 1, "ROC_LAUNCH_PERIOD"); - pub VotingPeriod: BlockNumber = prod_or_fast!(7 * DAYS, 1 * MINUTES, "ROC_VOTING_PERIOD"); - pub FastTrackVotingPeriod: BlockNumber = prod_or_fast!(3 * HOURS, 1 * MINUTES, "ROC_FAST_TRACK_VOTING_PERIOD"); - pub const MinimumDeposit: Balance = 100 * CENTS; - pub EnactmentPeriod: BlockNumber = prod_or_fast!(8 * DAYS, 1, "ROC_ENACTMENT_PERIOD"); - pub CooloffPeriod: BlockNumber = prod_or_fast!(7 * DAYS, 1 * MINUTES, "ROC_COOLOFF_PERIOD"); - pub const InstantAllowed: bool = true; - pub const MaxVotes: u32 = 100; - pub const MaxProposals: u32 = 100; -} - -impl pallet_democracy::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type EnactmentPeriod = EnactmentPeriod; - type VoteLockingPeriod = EnactmentPeriod; - type LaunchPeriod = LaunchPeriod; - type VotingPeriod = VotingPeriod; - type MinimumDeposit = MinimumDeposit; - type SubmitOrigin = frame_system::EnsureSigned<AccountId>; - /// A straight majority of the council can decide what their next motion is. - type ExternalOrigin = - pallet_collective::EnsureProportionAtLeast<AccountId, CouncilCollective, 1, 2>; - /// A majority can have the next scheduled referendum be a straight majority-carries vote. - type ExternalMajorityOrigin = - pallet_collective::EnsureProportionAtLeast<AccountId, CouncilCollective, 1, 2>; - /// A unanimous council can have the next scheduled referendum be a straight default-carries - /// (NTB) vote. - type ExternalDefaultOrigin = - pallet_collective::EnsureProportionAtLeast<AccountId, CouncilCollective, 1, 1>; - /// Two thirds of the technical committee can have an `ExternalMajority/ExternalDefault` vote - /// be tabled immediately and with a shorter voting/enactment period. - type FastTrackOrigin = - pallet_collective::EnsureProportionAtLeast<AccountId, TechnicalCollective, 2, 3>; - type InstantOrigin = - pallet_collective::EnsureProportionAtLeast<AccountId, TechnicalCollective, 1, 1>; - type InstantAllowed = InstantAllowed; - type FastTrackVotingPeriod = FastTrackVotingPeriod; - // To cancel a proposal which has been passed, 2/3 of the council must agree to it. - type CancellationOrigin = EitherOfDiverse< - EnsureRoot<AccountId>, - pallet_collective::EnsureProportionAtLeast<AccountId, CouncilCollective, 2, 3>, - >; - type BlacklistOrigin = EnsureRoot<AccountId>; - // To cancel a proposal before it has been passed, the technical committee must be unanimous or - // Root must agree. - type CancelProposalOrigin = EitherOfDiverse< - EnsureRoot<AccountId>, - pallet_collective::EnsureProportionAtLeast<AccountId, TechnicalCollective, 1, 1>, - >; - // Any single technical committee member may veto a coming council proposal, however they can - // only do it once and it lasts only for the cooloff period. - type VetoOrigin = pallet_collective::EnsureMember<AccountId, TechnicalCollective>; - type CooloffPeriod = CooloffPeriod; - type Slash = Treasury; - type Scheduler = Scheduler; - type PalletsOrigin = OriginCaller; - type MaxVotes = MaxVotes; - type WeightInfo = weights::pallet_democracy::WeightInfo<Runtime>; - type MaxProposals = MaxProposals; - type Preimages = Preimage; - type MaxDeposits = ConstU32<100>; - type MaxBlacklisted = ConstU32<100>; -} - -parameter_types! 
{ - pub CouncilMotionDuration: BlockNumber = prod_or_fast!(3 * DAYS, 2 * MINUTES, "ROC_MOTION_DURATION"); - pub const CouncilMaxProposals: u32 = 100; - pub const CouncilMaxMembers: u32 = 100; - pub MaxProposalWeight: Weight = Perbill::from_percent(50) * BlockWeights::get().max_block; -} - -type CouncilCollective = pallet_collective::Instance1; -impl pallet_collective::Config<CouncilCollective> for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type MotionDuration = CouncilMotionDuration; - type MaxProposals = CouncilMaxProposals; - type MaxMembers = CouncilMaxMembers; - type DefaultVote = pallet_collective::PrimeDefaultVote; - type SetMembersOrigin = EnsureRoot<AccountId>; - type WeightInfo = weights::pallet_collective_council::WeightInfo<Runtime>; - type MaxProposalWeight = MaxProposalWeight; -} - -parameter_types! { - pub const CandidacyBond: Balance = 100 * CENTS; - // 1 storage item created, key size is 32 bytes, value size is 16+16. - pub const VotingBondBase: Balance = deposit(1, 64); - // additional data per vote is 32 bytes (account id). - pub const VotingBondFactor: Balance = deposit(0, 32); - /// Daily council elections - pub TermDuration: BlockNumber = prod_or_fast!(24 * HOURS, 2 * MINUTES, "ROC_TERM_DURATION"); - pub const DesiredMembers: u32 = 19; - pub const DesiredRunnersUp: u32 = 19; - pub const MaxVoters: u32 = 10 * 1000; - pub const MaxVotesPerVoter: u32 = 16; - pub const MaxCandidates: u32 = 1000; - pub const PhragmenElectionPalletId: LockIdentifier = *b"phrelect"; -} - -// Make sure that there are no more than MaxMembers members elected via phragmen. -const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); - -impl pallet_elections_phragmen::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type ChangeMembers = Council; - type InitializeMembers = Council; - type CurrencyToVote = runtime_common::CurrencyToVote; - type CandidacyBond = CandidacyBond; - type VotingBondBase = VotingBondBase; - type VotingBondFactor = VotingBondFactor; - type LoserCandidate = Treasury; - type KickedMember = Treasury; - type DesiredMembers = DesiredMembers; - type DesiredRunnersUp = DesiredRunnersUp; - type TermDuration = TermDuration; - type MaxVoters = MaxVoters; - type MaxVotesPerVoter = MaxVotesPerVoter; - type MaxCandidates = MaxCandidates; - type PalletId = PhragmenElectionPalletId; - type WeightInfo = weights::pallet_elections_phragmen::WeightInfo<Runtime>; -} - -parameter_types! 
{ - pub TechnicalMotionDuration: BlockNumber = prod_or_fast!(3 * DAYS, 2 * MINUTES, "ROC_MOTION_DURATION"); - pub const TechnicalMaxProposals: u32 = 100; - pub const TechnicalMaxMembers: u32 = 100; -} - -type TechnicalCollective = pallet_collective::Instance2; -impl pallet_collective::Config<TechnicalCollective> for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type MotionDuration = TechnicalMotionDuration; - type MaxProposals = TechnicalMaxProposals; - type MaxMembers = TechnicalMaxMembers; - type DefaultVote = pallet_collective::PrimeDefaultVote; - type SetMembersOrigin = EnsureRoot<AccountId>; - type WeightInfo = weights::pallet_collective_technical_committee::WeightInfo<Runtime>; - type MaxProposalWeight = MaxProposalWeight; -} - -type MoreThanHalfCouncil = EitherOfDiverse< - EnsureRoot<AccountId>, - pallet_collective::EnsureProportionMoreThan<AccountId, CouncilCollective, 1, 2>, ->; - -impl pallet_membership::Config<pallet_membership::Instance1> for Runtime { - type RuntimeEvent = RuntimeEvent; - type AddOrigin = MoreThanHalfCouncil; - type RemoveOrigin = MoreThanHalfCouncil; - type SwapOrigin = MoreThanHalfCouncil; - type ResetOrigin = MoreThanHalfCouncil; - type PrimeOrigin = MoreThanHalfCouncil; - type MembershipInitialized = TechnicalCommittee; - type MembershipChanged = TechnicalCommittee; - type MaxMembers = TechnicalMaxMembers; - type WeightInfo = weights::pallet_membership::WeightInfo<Runtime>; -} - parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); pub const ProposalBondMinimum: Balance = 2000 * CENTS; @@ -561,16 +396,11 @@ parameter_types! { pub const MaxPeerInHeartbeats: u32 = 10_000; } -type ApproveOrigin = EitherOfDiverse< - EnsureRoot<AccountId>, - pallet_collective::EnsureProportionAtLeast<AccountId, CouncilCollective, 3, 5>, ->; - impl pallet_treasury::Config for Runtime { type PalletId = TreasuryPalletId; type Currency = Balances; - type ApproveOrigin = ApproveOrigin; - type RejectOrigin = MoreThanHalfCouncil; + type ApproveOrigin = EitherOfDiverse<EnsureRoot<AccountId>, Treasurer>; + type RejectOrigin = EitherOfDiverse<EnsureRoot<AccountId>, Treasurer>; type RuntimeEvent = RuntimeEvent; type OnSlash = Treasury; type ProposalBond = ProposalBond; @@ -582,7 +412,7 @@ impl pallet_treasury::Config for Runtime { type MaxApprovals = MaxApprovals; type WeightInfo = weights::pallet_treasury::WeightInfo<Runtime>; type SpendFunds = Bounties; - type SpendOrigin = frame_support::traits::NeverEnsureOrigin<Balance>; + type SpendOrigin = TreasurySpender; } parameter_types! 
{ @@ -623,17 +453,6 @@ impl pallet_child_bounties::Config for Runtime { type WeightInfo = weights::pallet_child_bounties::WeightInfo<Runtime>; } -impl pallet_tips::Config for Runtime { - type MaximumReasonLength = MaximumReasonLength; - type DataDepositPerByte = DataDepositPerByte; - type Tippers = PhragmenElection; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_tips::WeightInfo<Runtime>; -} - impl pallet_offences::Config for Runtime { type RuntimeEvent = RuntimeEvent; type IdentificationTuple = pallet_session::historical::IdentificationTuple<Self>; @@ -744,8 +563,7 @@ impl claims::Config for Runtime { type RuntimeEvent = RuntimeEvent; type VestingSchedule = Vesting; type Prefix = Prefix; - type MoveClaimOrigin = - pallet_collective::EnsureProportionMoreThan<AccountId, CouncilCollective, 1, 2>; + type MoveClaimOrigin = EnsureRoot<AccountId>; type WeightInfo = weights::runtime_common_claims::WeightInfo<Runtime>; } @@ -769,8 +587,8 @@ impl pallet_identity::Config for Runtime { type MaxAdditionalFields = MaxAdditionalFields; type MaxRegistrars = MaxRegistrars; type Slashed = Treasury; - type ForceOrigin = MoreThanHalfCouncil; - type RegistrarOrigin = MoreThanHalfCouncil; + type ForceOrigin = EitherOf<EnsureRoot<Self::AccountId>, GeneralAdmin>; + type RegistrarOrigin = EitherOf<EnsureRoot<Self::AccountId>, GeneralAdmin>; type WeightInfo = weights::pallet_identity::WeightInfo<Runtime>; } @@ -911,15 +729,14 @@ impl InstanceFilter<RuntimeCall> for ProxyType { RuntimeCall::Session(..) | RuntimeCall::Grandpa(..) | RuntimeCall::ImOnline(..) | - RuntimeCall::Democracy(..) | - RuntimeCall::Council(..) | - RuntimeCall::TechnicalCommittee(..) | - RuntimeCall::PhragmenElection(..) | - RuntimeCall::TechnicalMembership(..) | RuntimeCall::Treasury(..) | RuntimeCall::Bounties(..) | RuntimeCall::ChildBounties(..) | - RuntimeCall::Tips(..) | + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::FellowshipCollective(..) | + RuntimeCall::FellowshipReferenda(..) | + RuntimeCall::Whitelist(..) | RuntimeCall::Claims(..) | RuntimeCall::Utility(..) | RuntimeCall::Identity(..) | @@ -946,17 +763,18 @@ impl InstanceFilter<RuntimeCall> for ProxyType { RuntimeCall::Slots(..) | RuntimeCall::Auctions(..) // Specifically omitting the entire XCM Pallet ), - ProxyType::Governance => - matches!( - c, - RuntimeCall::Democracy(..) | - RuntimeCall::Council(..) | RuntimeCall::TechnicalCommittee(..) | - RuntimeCall::PhragmenElection(..) | - RuntimeCall::Treasury(..) | - RuntimeCall::Bounties(..) | - RuntimeCall::Tips(..) | RuntimeCall::Utility(..) | - RuntimeCall::ChildBounties(..) - ), + ProxyType::Governance => matches!( + c, + RuntimeCall::Bounties(..) | + RuntimeCall::Utility(..) | + RuntimeCall::ChildBounties(..) | + // OpenGov calls + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::FellowshipCollective(..) | + RuntimeCall::FellowshipReferenda(..) | + RuntimeCall::Whitelist(..) + ), ProxyType::IdentityJudgement => matches!( c, RuntimeCall::Identity(pallet_identity::Call::provide_judgement { .. 
}) | @@ -1182,7 +1000,7 @@ impl slots::Config for Runtime { type Registrar = Registrar; type LeasePeriod = LeasePeriod; type LeaseOffset = (); - type ForceOrigin = MoreThanHalfCouncil; + type ForceOrigin = EitherOf<EnsureRoot<Self::AccountId>, LeaseAdmin>; type WeightInfo = weights::runtime_common_slots::WeightInfo<Runtime>; } @@ -1215,11 +1033,6 @@ parameter_types! { pub const SampleLength: BlockNumber = 2 * MINUTES; } -type AuctionInitiate = EitherOfDiverse< - EnsureRoot<AccountId>, - pallet_collective::EnsureProportionAtLeast<AccountId, CouncilCollective, 2, 3>, ->; - impl auctions::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Leaser = Slots; @@ -1227,7 +1040,7 @@ impl auctions::Config for Runtime { type EndingPeriod = EndingPeriod; type SampleLength = SampleLength; type Randomness = pallet_babe::RandomnessFromOneEpochAgo<Runtime>; - type InitiateOrigin = AuctionInitiate; + type InitiateOrigin = EitherOf<EnsureRoot<Self::AccountId>, AuctionAdmin>; type WeightInfo = weights::runtime_common_auctions::WeightInfo<Runtime>; } @@ -1423,13 +1236,19 @@ construct_runtime! { AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config<T>} = 12, // Governance stuff; uncallable initially. - Democracy: pallet_democracy::{Pallet, Call, Storage, Config<T>, Event<T>} = 13, - Council: pallet_collective::<Instance1>::{Pallet, Call, Storage, Origin<T>, Event<T>, Config<T>} = 14, - TechnicalCommittee: pallet_collective::<Instance2>::{Pallet, Call, Storage, Origin<T>, Event<T>, Config<T>} = 15, - PhragmenElection: pallet_elections_phragmen::{Pallet, Call, Storage, Event<T>, Config<T>} = 16, - TechnicalMembership: pallet_membership::<Instance1>::{Pallet, Call, Storage, Event<T>, Config<T>} = 17, Treasury: pallet_treasury::{Pallet, Call, Storage, Config<T>, Event<T>} = 18, - + ConvictionVoting: pallet_conviction_voting::{Pallet, Call, Storage, Event<T>} = 20, + Referenda: pallet_referenda::{Pallet, Call, Storage, Event<T>} = 21, + // pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; + FellowshipCollective: pallet_ranked_collective::<Instance1>::{ + Pallet, Call, Storage, Event<T> + } = 22, + // pub type FellowshipReferendaInstance = pallet_referenda::Instance2; + FellowshipReferenda: pallet_referenda::<Instance2>::{ + Pallet, Call, Storage, Event<T> + } = 23, + Origins: pallet_custom_origins::{Origin} = 43, + Whitelist: pallet_whitelist::{Pallet, Call, Storage, Event<T>} = 44, // Claims. Usable initially. Claims: claims::{Pallet, Call, Storage, Event<T>, Config<T>, ValidateUnsigned} = 19, @@ -1464,9 +1283,6 @@ construct_runtime! { Bounties: pallet_bounties::{Pallet, Call, Storage, Event<T>} = 35, ChildBounties: pallet_child_bounties = 40, - // Tips module. - Tips: pallet_tips::{Pallet, Call, Storage, Event<T>} = 36, - // NIS pallet. 
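Several origin types above switch from `EitherOfDiverse` to `EitherOf` (e.g. `ForceOrigin` for slots, `InitiateOrigin` for auctions). The difference, sketched below with plain functions rather than the real `EnsureOrigin` types and under the assumed frame_support semantics: `EitherOf` tries the left check first and falls back to the right, requiring both sides to share one success type, whereas `EitherOfDiverse` wraps the two success types in an `Either`.

```rust
// Minimal, self-contained model of `EitherOf<EnsureRoot<_>, AuctionAdmin>`
// style dispatch. Illustrative only, not the frame_support implementation.
enum Origin {
    Root,
    AuctionAdmin,
    Signed(u64),
}

fn ensure_root(o: Origin) -> Result<(), Origin> {
    match o {
        Origin::Root => Ok(()),
        other => Err(other), // hand the origin back for the next check
    }
}

fn ensure_auction_admin(o: Origin) -> Result<(), Origin> {
    match o {
        Origin::AuctionAdmin => Ok(()),
        other => Err(other),
    }
}

// `EitherOf` composes the two checks: success of either side authorizes
// the call, and both sides yield the same success type.
fn ensure_root_or_auction_admin(o: Origin) -> Result<(), Origin> {
    ensure_root(o).or_else(ensure_auction_admin)
}

fn main() {
    assert!(ensure_root_or_auction_admin(Origin::Root).is_ok());
    assert!(ensure_root_or_auction_admin(Origin::AuctionAdmin).is_ok());
    assert!(ensure_root_or_auction_admin(Origin::Signed(7)).is_err());
}
```

This is why the migrated configs can pass the combined origin straight to pallets expecting a single success type, without unwrapping an `Either` as `EitherOfDiverse` would require.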
Nis: pallet_nis::{Pallet, Call, Storage, Event<T>, HoldReason} = 38, // pub type NisCounterpartInstance = pallet_balances::Instance2; @@ -1561,6 +1377,8 @@ pub mod migrations { parachains_configuration::migration::v8::MigrateToV8<Runtime>, parachains_configuration::migration::v9::MigrateToV9<Runtime>, paras_registrar::migration::VersionCheckedMigrateToV1<Runtime, ()>, + pallet_referenda::migration::v1::MigrateV0ToV1<Runtime, ()>, + pallet_referenda::migration::v1::MigrateV0ToV1<Runtime, pallet_referenda::Instance2>, ); } @@ -1627,28 +1445,27 @@ mod benches { [frame_benchmarking::baseline, Baseline::<Runtime>] [pallet_bounties, Bounties] [pallet_child_bounties, ChildBounties] - [pallet_collective, Council] - [pallet_collective, TechnicalCommittee] - [pallet_democracy, Democracy] - [pallet_elections_phragmen, PhragmenElection] + [pallet_conviction_voting, ConvictionVoting] [pallet_nis, Nis] [pallet_identity, Identity] [pallet_im_online, ImOnline] [pallet_indices, Indices] - [pallet_membership, TechnicalMembership] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_preimage, Preimage] [pallet_proxy, Proxy] + [pallet_ranked_collective, FellowshipCollective] [pallet_recovery, Recovery] + [pallet_referenda, Referenda] + [pallet_referenda, FellowshipReferenda] [pallet_scheduler, Scheduler] [pallet_sudo, Sudo] [frame_system, SystemBench::<Runtime>] [pallet_timestamp, Timestamp] - [pallet_tips, Tips] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] + [pallet_whitelist, Whitelist] // XCM [pallet_xcm, XcmPallet] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::<Runtime>] @@ -1722,6 +1539,7 @@ sp_api::impl_runtime_apis! { } } + #[api_version(7)] impl primitives::runtime_api::ParachainHost<Block, Hash, BlockNumber> for Runtime { fn validators() -> Vec<ValidatorId> { parachains_runtime_api_impl::validators::<Runtime>() @@ -1852,6 +1670,18 @@ sp_api::impl_runtime_apis! { key_ownership_proof, ) } + + fn minimum_backing_votes() -> u32 { + parachains_runtime_api_impl::minimum_backing_votes::<Runtime>() + } + + fn para_backing_state(para_id: ParaId) -> Option<primitives::async_backing::BackingState> { + parachains_runtime_api_impl::backing_state::<Runtime>(para_id) + } + + fn async_backing_params() -> primitives::AsyncBackingParams { + parachains_runtime_api_impl::async_backing_params::<Runtime>() + } } #[api_version(3)] @@ -2128,7 +1958,7 @@ sp_api::impl_runtime_apis! { use sp_storage::TrackedStorageKey; use xcm::latest::prelude::*; use xcm_config::{ - LocalCheckAccount, LocationConverter, Rockmine, TokenLocation, XcmConfig, + LocalCheckAccount, LocationConverter, AssetHub, TokenLocation, XcmConfig, }; impl frame_system_benchmarking::Config for Runtime {} @@ -2137,7 +1967,7 @@ sp_api::impl_runtime_apis! { type XcmConfig = XcmConfig; type AccountIdConverter = LocationConverter; fn valid_destination() -> Result<MultiLocation, BenchmarkError> { - Ok(Rockmine::get()) + Ok(AssetHub::get()) } fn worst_case_holding(_depositable_count: u32) -> MultiAssets { // Rococo only knows about ROC @@ -2150,7 +1980,7 @@ sp_api::impl_runtime_apis! { parameter_types! { pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - Rockmine::get(), + AssetHub::get(), MultiAsset { fun: Fungible(1 * UNITS), id: Concrete(TokenLocation::get()) }, )); pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; @@ -2189,15 +2019,15 @@ sp_api::impl_runtime_apis! 
{ } fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((Rockmine::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + Ok((AssetHub::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) } fn subscribe_origin() -> Result<MultiLocation, BenchmarkError> { - Ok(Rockmine::get()) + Ok(AssetHub::get()) } fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = Rockmine::get(); + let origin = AssetHub::get(); let assets: MultiAssets = (Concrete(TokenLocation::get()), 1_000 * UNITS).into(); let ticket = MultiLocation { parents: 0, interior: Here }; Ok((origin, ticket, assets)) @@ -2232,6 +2062,16 @@ sp_api::impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } #[cfg(test)] diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 21558ca3fb90548f08d90bb79fcea531165d3287..e0c1c4f413515f99424deeafb98f572a75c91044 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -20,27 +20,26 @@ pub mod pallet_balances; pub mod pallet_balances_nis_counterpart_balances; pub mod pallet_bounties; pub mod pallet_child_bounties; -pub mod pallet_collective_council; -pub mod pallet_collective_technical_committee; -pub mod pallet_democracy; -pub mod pallet_elections_phragmen; +pub mod pallet_conviction_voting; pub mod pallet_identity; pub mod pallet_im_online; pub mod pallet_indices; -pub mod pallet_membership; pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_nis; pub mod pallet_preimage; pub mod pallet_proxy; +pub mod pallet_ranked_collective; +pub mod pallet_referenda_fellowship_referenda; +pub mod pallet_referenda_referenda; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_sudo; pub mod pallet_timestamp; -pub mod pallet_tips; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; +pub mod pallet_whitelist; pub mod pallet_xcm; pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; diff --git a/polkadot/runtime/rococo/src/weights/pallet_collective.rs b/polkadot/runtime/rococo/src/weights/pallet_collective.rs deleted file mode 100644 index 9bf6671e2297112790655e99037f0ab412556ddc..0000000000000000000000000000000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_collective.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. -//! Autogenerated weights for `pallet_collective` -//! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_collective -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::{Weight}}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_collective`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_collective::WeightInfo for WeightInfo<T> { - // Storage: Collective Members (r:1 w:1) - // Storage: Collective Proposals (r:1 w:0) - // Storage: Collective Voting (r:100 w:100) - // Storage: Collective Prime (r:0 w:1) - /// The range of component `m` is `[1, 100]`. - /// The range of component `n` is `[1, 100]`. - /// The range of component `p` is `[1, 100]`. - fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - Weight::from_parts(0 as u64, 0) - // Standard Error: 15_000 - .saturating_add(Weight::from_parts(10_832_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 15_000 - .saturating_add(Weight::from_parts(12_894_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(p as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) - } - // Storage: Collective Members (r:1 w:0) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn execute(b: u32, m: u32, ) -> Weight { - Weight::from_parts(19_069_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(2_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(13_000 as u64, 0).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - } - // Storage: Collective Members (r:1 w:0) - // Storage: Collective ProposalOf (r:1 w:0) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn propose_execute(b: u32, m: u32, ) -> Weight { - Weight::from_parts(20_794_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(2_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(22_000 as u64, 0).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - } - // Storage: Collective Members (r:1 w:0) - // Storage: Collective ProposalOf (r:1 w:1) - // Storage: Collective Proposals (r:1 w:1) - // Storage: Collective ProposalCount (r:1 w:1) - // Storage: Collective Voting (r:0 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. 
- fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_parts(27_870_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(3_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_parts(22_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_parts(94_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - } - // Storage: Collective Members (r:1 w:0) - // Storage: Collective Voting (r:1 w:1) - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - Weight::from_parts(27_249_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(35_000 as u64, 0).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Collective Voting (r:1 w:1) - // Storage: Collective Members (r:1 w:0) - // Storage: Collective Proposals (r:1 w:1) - // Storage: Collective ProposalOf (r:0 w:1) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_parts(30_754_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(28_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(81_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: Collective Voting (r:1 w:1) - // Storage: Collective Members (r:1 w:0) - // Storage: Collective ProposalOf (r:1 w:1) - // Storage: Collective Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_parts(39_508_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(29_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(90_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: Collective Voting (r:1 w:1) - // Storage: Collective Members (r:1 w:0) - // Storage: Collective Prime (r:1 w:0) - // Storage: Collective Proposals (r:1 w:1) - // Storage: Collective ProposalOf (r:0 w:1) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_parts(32_769_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(31_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(83_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: Collective Voting (r:1 w:1) - // Storage: Collective Members (r:1 w:0) - // Storage: Collective Prime (r:1 w:0) - // Storage: Collective ProposalOf (r:1 w:1) - // Storage: Collective Proposals (r:1 w:1) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[4, 100]`. 
- /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_parts(41_704_000 as u64, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_000 as u64, 0).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(28_000 as u64, 0).saturating_mul(m as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(92_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: Collective Proposals (r:1 w:1) - // Storage: Collective Voting (r:0 w:1) - // Storage: Collective ProposalOf (r:0 w:1) - /// The range of component `p` is `[1, 100]`. - fn disapprove_proposal(p: u32, ) -> Weight { - Weight::from_parts(22_720_000 as u64, 0) - // Standard Error: 2_000 - .saturating_add(Weight::from_parts(74_000 as u64, 0).saturating_mul(p as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_collective_council.rs b/polkadot/runtime/rococo/src/weights/pallet_collective_council.rs deleted file mode 100644 index 835bdef7e67387d97edd1e7004a4a5d454711960..0000000000000000000000000000000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_collective_council.rs +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. - -//! Autogenerated weights for `pallet_collective` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_collective -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collective`. 
-pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_collective::WeightInfo for WeightInfo<T> { - /// Storage: Council Members (r:1 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:100 w:100) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15795 + m * (1967 ±16) + p * (4332 ±16)` - // Minimum execution time: 17_182_000 picoseconds. - Weight::from_parts(17_462_000, 0) - .saturating_add(Weight::from_parts(0, 15795)) - // Standard Error: 42_032 - .saturating_add(Weight::from_parts(4_868_618, 0).saturating_mul(m.into())) - // Standard Error: 42_032 - .saturating_add(Weight::from_parts(7_289_594, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `136 + m * (32 ±0)` - // Estimated: `1622 + m * (32 ±0)` - // Minimum execution time: 16_507_000 picoseconds. - Weight::from_parts(16_066_632, 0) - .saturating_add(Weight::from_parts(0, 1622)) - // Standard Error: 21 - .saturating_add(Weight::from_parts(982, 0).saturating_mul(b.into())) - // Standard Error: 220 - .saturating_add(Weight::from_parts(14_026, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:0) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn propose_execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `136 + m * (32 ±0)` - // Estimated: `3602 + m * (32 ±0)` - // Minimum execution time: 18_990_000 picoseconds. 
- Weight::from_parts(18_411_713, 0) - .saturating_add(Weight::from_parts(0, 3602)) - // Standard Error: 15 - .saturating_add(Weight::from_parts(1_166, 0).saturating_mul(b.into())) - // Standard Error: 164 - .saturating_add(Weight::from_parts(23_067, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalCount (r:1 w:1) - /// Proof Skipped: Council ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `426 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `3818 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 25_500_000 picoseconds. - Weight::from_parts(26_304_307, 0) - .saturating_add(Weight::from_parts(0, 3818)) - // Standard Error: 49 - .saturating_add(Weight::from_parts(2_243, 0).saturating_mul(b.into())) - // Standard Error: 515 - .saturating_add(Weight::from_parts(18_905, 0).saturating_mul(m.into())) - // Standard Error: 508 - .saturating_add(Weight::from_parts(120_761, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[5, 100]`. - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `875 + m * (64 ±0)` - // Estimated: `4339 + m * (64 ±0)` - // Minimum execution time: 22_166_000 picoseconds. 
- Weight::from_parts(22_901_859, 0) - .saturating_add(Weight::from_parts(0, 4339)) - // Standard Error: 238 - .saturating_add(Weight::from_parts(40_475, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `464 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3909 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 27_064_000 picoseconds. - Weight::from_parts(27_961_599, 0) - .saturating_add(Weight::from_parts(0, 3909)) - // Standard Error: 401 - .saturating_add(Weight::from_parts(22_196, 0).saturating_mul(m.into())) - // Standard Error: 391 - .saturating_add(Weight::from_parts(115_698, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `766 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4083 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 38_302_000 picoseconds. 
- Weight::from_parts(40_639_640, 0) - .saturating_add(Weight::from_parts(0, 4083)) - // Standard Error: 123 - .saturating_add(Weight::from_parts(1_914, 0).saturating_mul(b.into())) - // Standard Error: 1_272 - .saturating_add(Weight::from_parts(150_067, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `484 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3929 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 30_017_000 picoseconds. - Weight::from_parts(30_565_580, 0) - .saturating_add(Weight::from_parts(0, 3929)) - // Standard Error: 378 - .saturating_add(Weight::from_parts(24_396, 0).saturating_mul(m.into())) - // Standard Error: 369 - .saturating_add(Weight::from_parts(114_807, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: Council Voting (r:1 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Members (r:1 w:0) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:0) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:1 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `786 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4103 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 40_911_000 picoseconds. 
- Weight::from_parts(42_312_485, 0) - .saturating_add(Weight::from_parts(0, 4103)) - // Standard Error: 83 - .saturating_add(Weight::from_parts(2_208, 0).saturating_mul(b.into())) - // Standard Error: 879 - .saturating_add(Weight::from_parts(20_173, 0).saturating_mul(m.into())) - // Standard Error: 857 - .saturating_add(Weight::from_parts(146_302, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: Council Proposals (r:1 w:1) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Voting (r:0 w:1) - /// Proof Skipped: Council Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council ProposalOf (r:0 w:1) - /// Proof Skipped: Council ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `p` is `[1, 100]`. - /// The range of component `p` is `[1, 100]`. - fn disapprove_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `293 + p * (32 ±0)` - // Estimated: `1778 + p * (32 ±0)` - // Minimum execution time: 15_465_000 picoseconds. - Weight::from_parts(17_387_663, 0) - .saturating_add(Weight::from_parts(0, 1778)) - // Standard Error: 450 - .saturating_add(Weight::from_parts(110_406, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_collective_technical_committee.rs b/polkadot/runtime/rococo/src/weights/pallet_collective_technical_committee.rs deleted file mode 100644 index 6d66dc871cd54edcd9c846fc0a3e059d1f8a42ce..0000000000000000000000000000000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_collective_technical_committee.rs +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. - -//! Autogenerated weights for `pallet_collective` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_collective -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collective`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_collective::WeightInfo for WeightInfo<T> { - /// Storage: TechnicalCommittee Members (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Voting (r:100 w:100) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - /// The range of component `m` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15766 + m * (1967 ±16) + p * (4332 ±16)` - // Minimum execution time: 17_826_000 picoseconds. - Weight::from_parts(18_046_000, 0) - .saturating_add(Weight::from_parts(0, 15766)) - // Standard Error: 42_164 - .saturating_add(Weight::from_parts(4_858_188, 0).saturating_mul(m.into())) - // Standard Error: 42_164 - .saturating_add(Weight::from_parts(7_379_354, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `107 + m * (32 ±0)` - // Estimated: `1593 + m * (32 ±0)` - // Minimum execution time: 16_992_000 picoseconds. 
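// Note (editorial sketch, not generated output): every entry pairs a ref_time
// estimate in picoseconds with a proof_size (PoV) estimate in bytes. The
// `.saturating_add(Weight::from_parts(0, 1593))` term below carries the base PoV
// for `execute`, and the per-`m` proof term extends it, e.g. at m = 100:
//   1_593 + 100 * 32 = 4_793 bytes of estimated proof size.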
- Weight::from_parts(16_555_669, 0) - .saturating_add(Weight::from_parts(0, 1593)) - // Standard Error: 18 - .saturating_add(Weight::from_parts(976, 0).saturating_mul(b.into())) - // Standard Error: 189 - .saturating_add(Weight::from_parts(12_101, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:1 w:0) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn propose_execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `107 + m * (32 ±0)` - // Estimated: `3573 + m * (32 ±0)` - // Minimum execution time: 19_900_000 picoseconds. - Weight::from_parts(19_068_072, 0) - .saturating_add(Weight::from_parts(0, 3573)) - // Standard Error: 12 - .saturating_add(Weight::from_parts(1_161, 0).saturating_mul(b.into())) - // Standard Error: 129 - .saturating_add(Weight::from_parts(22_376, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:1 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalCount (r:1 w:1) - /// Proof Skipped: TechnicalCommittee ProposalCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Voting (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `397 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `3789 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 26_264_000 picoseconds. 
- Weight::from_parts(27_099_606, 0) - .saturating_add(Weight::from_parts(0, 3789)) - // Standard Error: 50 - .saturating_add(Weight::from_parts(2_278, 0).saturating_mul(b.into())) - // Standard Error: 525 - .saturating_add(Weight::from_parts(19_424, 0).saturating_mul(m.into())) - // Standard Error: 519 - .saturating_add(Weight::from_parts(120_852, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[5, 100]`. - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `846 + m * (64 ±0)` - // Estimated: `4310 + m * (64 ±0)` - // Minimum execution time: 22_954_000 picoseconds. - Weight::from_parts(23_675_214, 0) - .saturating_add(Weight::from_parts(0, 4310)) - // Standard Error: 256 - .saturating_add(Weight::from_parts(40_562, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:0 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `435 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3880 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 27_797_000 picoseconds. 
- Weight::from_parts(28_934_600, 0) - .saturating_add(Weight::from_parts(0, 3880)) - // Standard Error: 374 - .saturating_add(Weight::from_parts(20_716, 0).saturating_mul(m.into())) - // Standard Error: 364 - .saturating_add(Weight::from_parts(115_491, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:1 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `737 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4054 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 39_160_000 picoseconds. - Weight::from_parts(40_470_419, 0) - .saturating_add(Weight::from_parts(0, 4054)) - // Standard Error: 82 - .saturating_add(Weight::from_parts(2_146, 0).saturating_mul(b.into())) - // Standard Error: 869 - .saturating_add(Weight::from_parts(21_442, 0).saturating_mul(m.into())) - // Standard Error: 847 - .saturating_add(Weight::from_parts(144_479, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:0 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. 
- fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `455 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3900 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 30_953_000 picoseconds. - Weight::from_parts(31_427_489, 0) - .saturating_add(Weight::from_parts(0, 3900)) - // Standard Error: 397 - .saturating_add(Weight::from_parts(24_280, 0).saturating_mul(m.into())) - // Standard Error: 387 - .saturating_add(Weight::from_parts(116_864, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Voting (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:1 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `757 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `4074 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 41_468_000 picoseconds. - Weight::from_parts(43_538_242, 0) - .saturating_add(Weight::from_parts(0, 4074)) - // Standard Error: 80 - .saturating_add(Weight::from_parts(1_994, 0).saturating_mul(b.into())) - // Standard Error: 853 - .saturating_add(Weight::from_parts(19_637, 0).saturating_mul(m.into())) - // Standard Error: 831 - .saturating_add(Weight::from_parts(144_674, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: TechnicalCommittee Proposals (r:1 w:1) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Voting (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: TechnicalCommittee ProposalOf (r:0 w:1) - /// Proof Skipped: TechnicalCommittee ProposalOf (max_values: None, max_size: None, mode: Measured) - /// The range of component `p` is `[1, 100]`. - /// The range of component `p` is `[1, 100]`. - fn disapprove_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `264 + p * (32 ±0)` - // Estimated: `1749 + p * (32 ±0)` - // Minimum execution time: 15_998_000 picoseconds. 
- Weight::from_parts(17_837_641, 0) - .saturating_add(Weight::from_parts(0, 1749)) - // Standard Error: 422 - .saturating_add(Weight::from_parts(111_526, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs new file mode 100644 index 0000000000000000000000000000000000000000..ba505737f1b070d21931c7e467b4853de1f119d2 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs @@ -0,0 +1,195 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_conviction_voting` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_conviction_voting +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_conviction_voting`. 
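// How a runtime typically consumes a generated file like this one (a sketch
// under the usual wiring conventions; `Runtime` and the `weights` module path
// are assumptions, not taken from this diff):
//
//   impl pallet_conviction_voting::Config for Runtime {
//       /* ...other associated types... */
//       type WeightInfo = weights::pallet_conviction_voting::WeightInfo<Runtime>;
//   }
//
// Each extrinsic then charges the corresponding method's return value as its
// block-weight contribution and as an input to fee calculation.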
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_conviction_voting::WeightInfo for WeightInfo<T> { + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: ConvictionVoting VotingFor (r:1 w:1) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) + /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn vote_new() -> Weight { + // Proof Size summary in bytes: + // Measured: `13445` + // Estimated: `42428` + // Minimum execution time: 151_077_000 picoseconds. + Weight::from_parts(165_283_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: ConvictionVoting VotingFor (r:1 w:1) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) + /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn vote_existing() -> Weight { + // Proof Size summary in bytes: + // Measured: `14166` + // Estimated: `83866` + // Minimum execution time: 232_420_000 picoseconds. + Weight::from_parts(244_439_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: ConvictionVoting VotingFor (r:1 w:1) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn remove_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13918` + // Estimated: `83866` + // Minimum execution time: 205_017_000 picoseconds. 
+ Weight::from_parts(216_594_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: ConvictionVoting VotingFor (r:1 w:1) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn remove_other_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13004` + // Estimated: `30706` + // Minimum execution time: 84_226_000 picoseconds. + Weight::from_parts(91_255_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: ConvictionVoting VotingFor (r:2 w:2) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:512 w:512) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) + /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// The range of component `r` is `[0, 512]`. + fn delegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29640 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 78_708_000 picoseconds. + Weight::from_parts(2_053_488_615, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 179_271 + .saturating_add(Weight::from_parts(47_806_482, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: ConvictionVoting VotingFor (r:2 w:2) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:512 w:512) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// The range of component `r` is `[0, 512]`. + fn undelegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29555 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 45_232_000 picoseconds. 
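// Note (editorial, not generated output): the fitted base below (~2.05 ms of
// ref_time) sits far above the recorded minimum execution time (~45 µs). The
// formula is a least-squares line over r in [0, 512], so the intercept likely
// absorbs fixed costs from the high-`r` samples; at the r = 512 worst case the
// total is 2_045_021_014 + 512 * 47_896_011 = 26_567_778_646 ps, i.e. ~26.6 ms
// of ref_time.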
+ Weight::from_parts(2_045_021_014, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 185_130 + .saturating_add(Weight::from_parts(47_896_011, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: ConvictionVoting VotingFor (r:1 w:1) + /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) + /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) + /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + fn unlock() -> Weight { + // Proof Size summary in bytes: + // Measured: `12218` + // Estimated: `30706` + // Minimum execution time: 116_446_000 picoseconds. + Weight::from_parts(124_043_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_democracy.rs b/polkadot/runtime/rococo/src/weights/pallet_democracy.rs deleted file mode 100644 index 00629a5c1103cba6813566097b9590104e712347..0000000000000000000000000000000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_democracy.rs +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. - -//! Autogenerated weights for `pallet_democracy` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_democracy -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_democracy`. 
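// Note on proof annotations (editorial): this file and the conviction-voting
// file above use `Proof: ... mode: MaxEncodedLen`, meaning the PoV bound is
// derived from the storage value's statically known maximum encoded size. The
// collective weight files removed earlier in this diff instead use
// `Proof Skipped: ... mode: Measured`, meaning no static bound was available
// and the size observed during benchmarking is used as the estimate.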
-pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_democracy::WeightInfo for WeightInfo<T> { - /// Storage: Democracy PublicPropCount (r:1 w:1) - /// Proof: Democracy PublicPropCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:0 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - fn propose() -> Weight { - // Proof Size summary in bytes: - // Measured: `4734` - // Estimated: `18187` - // Minimum execution time: 39_492_000 picoseconds. - Weight::from_parts(39_853_000, 0) - .saturating_add(Weight::from_parts(0, 18187)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - fn second() -> Weight { - // Proof Size summary in bytes: - // Measured: `3489` - // Estimated: `6695` - // Minimum execution time: 36_683_000 picoseconds. - Weight::from_parts(37_121_000, 0) - .saturating_add(Weight::from_parts(0, 6695)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - fn vote_new() -> Weight { - // Proof Size summary in bytes: - // Measured: `3365` - // Estimated: `7260` - // Minimum execution time: 48_191_000 picoseconds. - Weight::from_parts(48_936_000, 0) - .saturating_add(Weight::from_parts(0, 7260)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - fn vote_existing() -> Weight { - // Proof Size summary in bytes: - // Measured: `3387` - // Estimated: `7260` - // Minimum execution time: 52_175_000 picoseconds. 
- Weight::from_parts(53_011_000, 0) - .saturating_add(Weight::from_parts(0, 7260)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Cancellations (r:1 w:1) - /// Proof: Democracy Cancellations (max_values: None, max_size: Some(33), added: 2508, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn emergency_cancel() -> Weight { - // Proof Size summary in bytes: - // Measured: `299` - // Estimated: `3666` - // Minimum execution time: 26_255_000 picoseconds. - Weight::from_parts(26_768_000, 0) - .saturating_add(Weight::from_parts(0, 3666)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:3 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:0 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - fn blacklist() -> Weight { - // Proof Size summary in bytes: - // Measured: `5843` - // Estimated: `18187` - // Minimum execution time: 96_376_000 picoseconds. - Weight::from_parts(97_222_000, 0) - .saturating_add(Weight::from_parts(0, 18187)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:0) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - fn external_propose() -> Weight { - // Proof Size summary in bytes: - // Measured: `3349` - // Estimated: `6703` - // Minimum execution time: 13_815_000 picoseconds. - Weight::from_parts(14_071_000, 0) - .saturating_add(Weight::from_parts(0, 6703)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - fn external_propose_majority() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_456_000 picoseconds. 
- Weight::from_parts(3_716_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy NextExternal (r:0 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - fn external_propose_default() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_610_000 picoseconds. - Weight::from_parts(3_768_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:1) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:2) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - fn fast_track() -> Weight { - // Proof Size summary in bytes: - // Measured: `219` - // Estimated: `3518` - // Minimum execution time: 27_514_000 picoseconds. - Weight::from_parts(27_905_000, 0) - .saturating_add(Weight::from_parts(0, 3518)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: Democracy NextExternal (r:1 w:1) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy Blacklist (r:1 w:1) - /// Proof: Democracy Blacklist (max_values: None, max_size: Some(3238), added: 5713, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn veto_external() -> Weight { - // Proof Size summary in bytes: - // Measured: `3452` - // Estimated: `6703` - // Minimum execution time: 31_250_000 picoseconds. - Weight::from_parts(31_604_000, 0) - .saturating_add(Weight::from_parts(0, 6703)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy PublicProps (r:1 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy DepositOf (r:1 w:1) - /// Proof: Democracy DepositOf (max_values: None, max_size: Some(3230), added: 5705, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn cancel_proposal() -> Weight { - // Proof Size summary in bytes: - // Measured: `5754` - // Estimated: `18187` - // Minimum execution time: 79_757_000 picoseconds. 
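// Note (editorial, not generated output): the `T::DbWeight::get().reads(n)` and
// `.writes(n)` terms below are kept separate from the measured execution time so
// that storage I/O is priced by the runtime's configured `DbWeight` (a per-read
// and per-write ref_time cost) rather than baked into the benchmark constants;
// a runtime on a different database backend only needs to change `DbWeight`.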
- Weight::from_parts(83_603_000, 0) - .saturating_add(Weight::from_parts(0, 18187)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:0 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - fn cancel_referendum() -> Weight { - // Proof Size summary in bytes: - // Measured: `204` - // Estimated: `3518` - // Minimum execution time: 20_034_000 picoseconds. - Weight::from_parts(20_674_000, 0) - .saturating_add(Weight::from_parts(0, 3518)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn on_initialize_base(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `177 + r * (86 ±0)` - // Estimated: `1489 + r * (2676 ±0)` - // Minimum execution time: 7_053_000 picoseconds. - Weight::from_parts(10_157_848, 0) - .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 5_462 - .saturating_add(Weight::from_parts(2_710_889, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) - } - /// Storage: Democracy LowestUnbaked (r:1 w:1) - /// Proof: Democracy LowestUnbaked (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumCount (r:1 w:0) - /// Proof: Democracy ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Democracy LastTabledWasExternal (r:1 w:0) - /// Proof: Democracy LastTabledWasExternal (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `177 + r * (86 ±0)` - // Estimated: `18187 + r * (2676 ±0)` - // Minimum execution time: 9_585_000 picoseconds. 
- Weight::from_parts(13_021_372, 0) - .saturating_add(Weight::from_parts(0, 18187)) - // Standard Error: 6_031 - .saturating_add(Weight::from_parts(2_707_449, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) - } - /// Storage: Democracy VotingOf (r:3 w:3) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn delegate(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `729 + r * (108 ±0)` - // Estimated: `19800 + r * (2676 ±0)` - // Minimum execution time: 41_109_000 picoseconds. - Weight::from_parts(46_477_334, 0) - .saturating_add(Weight::from_parts(0, 19800)) - // Standard Error: 9_372 - .saturating_add(Weight::from_parts(3_815_232, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) - } - /// Storage: Democracy VotingOf (r:2 w:2) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Democracy ReferendumInfoOf (r:99 w:99) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn undelegate(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `426 + r * (108 ±0)` - // Estimated: `13530 + r * (2676 ±0)` - // Minimum execution time: 21_283_000 picoseconds. - Weight::from_parts(23_372_139, 0) - .saturating_add(Weight::from_parts(0, 13530)) - // Standard Error: 6_191 - .saturating_add(Weight::from_parts(3_768_585, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2676).saturating_mul(r.into())) - } - /// Storage: Democracy PublicProps (r:0 w:1) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - fn clear_public_proposals() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_510_000 picoseconds. 
- Weight::from_parts(3_642_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn unlock_remove(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `458` - // Estimated: `7260` - // Minimum execution time: 23_647_000 picoseconds. - Weight::from_parts(36_627_552, 0) - .saturating_add(Weight::from_parts(0, 7260)) - // Standard Error: 2_937 - .saturating_add(Weight::from_parts(34_132, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `r` is `[0, 99]`. - fn unlock_set(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `459 + r * (22 ±0)` - // Estimated: `7260` - // Minimum execution time: 33_932_000 picoseconds. - Weight::from_parts(35_331_660, 0) - .saturating_add(Weight::from_parts(0, 7260)) - // Standard Error: 615 - .saturating_add(Weight::from_parts(60_730, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 100]`. - fn remove_vote(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `661 + r * (26 ±0)` - // Estimated: `7260` - // Minimum execution time: 16_605_000 picoseconds. - Weight::from_parts(19_057_092, 0) - .saturating_add(Weight::from_parts(0, 7260)) - // Standard Error: 873 - .saturating_add(Weight::from_parts(68_964, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Democracy ReferendumInfoOf (r:1 w:1) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy VotingOf (r:1 w:1) - /// Proof: Democracy VotingOf (max_values: None, max_size: Some(3795), added: 6270, mode: MaxEncodedLen) - /// The range of component `r` is `[1, 100]`. 
- fn remove_other_vote(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `661 + r * (26 ±0)` - // Estimated: `7260` - // Minimum execution time: 16_801_000 picoseconds. - Weight::from_parts(19_166_788, 0) - .saturating_add(Weight::from_parts(0, 7260)) - // Standard Error: 1_008 - .saturating_add(Weight::from_parts(69_851, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn set_external_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `361` - // Estimated: `3556` - // Minimum execution time: 19_207_000 picoseconds. - Weight::from_parts(19_693_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy NextExternal (r:1 w:0) - /// Proof: Democracy NextExternal (max_values: Some(1), max_size: Some(132), added: 627, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn clear_external_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `219` - // Estimated: `3518` - // Minimum execution time: 17_333_000 picoseconds. - Weight::from_parts(17_555_000, 0) - .saturating_add(Weight::from_parts(0, 3518)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn set_proposal_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `4893` - // Estimated: `18187` - // Minimum execution time: 33_859_000 picoseconds. - Weight::from_parts(34_538_000, 0) - .saturating_add(Weight::from_parts(0, 18187)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy PublicProps (r:1 w:0) - /// Proof: Democracy PublicProps (max_values: Some(1), max_size: Some(16702), added: 17197, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn clear_proposal_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `4755` - // Estimated: `18187` - // Minimum execution time: 31_155_000 picoseconds. 
- Weight::from_parts(31_520_000, 0) - .saturating_add(Weight::from_parts(0, 18187)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Preimage StatusFor (r:1 w:0) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:0 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn set_referendum_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `216` - // Estimated: `3556` - // Minimum execution time: 15_924_000 picoseconds. - Weight::from_parts(16_151_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Democracy ReferendumInfoOf (r:1 w:0) - /// Proof: Democracy ReferendumInfoOf (max_values: None, max_size: Some(201), added: 2676, mode: MaxEncodedLen) - /// Storage: Democracy MetadataOf (r:1 w:1) - /// Proof: Democracy MetadataOf (max_values: None, max_size: Some(53), added: 2528, mode: MaxEncodedLen) - fn clear_referendum_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `235` - // Estimated: `3666` - // Minimum execution time: 18_983_000 picoseconds. - Weight::from_parts(19_280_000, 0) - .saturating_add(Weight::from_parts(0, 3666)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_elections_phragmen.rs b/polkadot/runtime/rococo/src/weights/pallet_elections_phragmen.rs deleted file mode 100644 index fe6aca5ab881d457ac8c40c8191f655a43261bb2..0000000000000000000000000000000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_elections_phragmen.rs +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. - -//! Autogenerated weights for `pallet_elections_phragmen` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_elections_phragmen -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_elections_phragmen`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_elections_phragmen::WeightInfo for WeightInfo<T> { - /// Storage: PhragmenElection Candidates (r:1 w:0) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:0) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Voting (r:1 w:1) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// The range of component `v` is `[1, 16]`. - fn vote_equal(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `331 + v * (80 ±0)` - // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 30_910_000 picoseconds. - Weight::from_parts(31_851_802, 0) - .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 4_099 - .saturating_add(Weight::from_parts(137_675, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) - } - /// Storage: PhragmenElection Candidates (r:1 w:0) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:0) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Voting (r:1 w:1) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// The range of component `v` is `[2, 16]`. - fn vote_more(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `299 + v * (80 ±0)` - // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 42_670_000 picoseconds. 
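// Worked example (editorial): for `vote_more` at the maximum v = 16, the
// formula below gives 43_351_345 + 16 * 142_231 = 45_627_041 ps of ref_time,
// i.e. the per-additional-vote slope (~0.14 µs) is small next to the ~43 µs base.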
- Weight::from_parts(43_351_345, 0) - .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_986 - .saturating_add(Weight::from_parts(142_231, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) - } - /// Storage: PhragmenElection Candidates (r:1 w:0) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:0) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Voting (r:1 w:1) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// The range of component `v` is `[2, 16]`. - fn vote_less(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `331 + v * (80 ±0)` - // Estimated: `4764 + v * (80 ±0)` - // Minimum execution time: 42_782_000 picoseconds. - Weight::from_parts(43_611_866, 0) - .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_968 - .saturating_add(Weight::from_parts(125_939, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 80).saturating_mul(v.into())) - } - /// Storage: PhragmenElection Voting (r:1 w:1) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - fn remove_voter() -> Weight { - // Proof Size summary in bytes: - // Measured: `853` - // Estimated: `4764` - // Minimum execution time: 44_301_000 picoseconds. - Weight::from_parts(44_843_000, 0) - .saturating_add(Weight::from_parts(0, 4764)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: PhragmenElection Candidates (r:1 w:1) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:0) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `c` is `[1, 1000]`. - fn submit_candidacy(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2678 + c * (48 ±0)` - // Estimated: `4161 + c * (48 ±0)` - // Minimum execution time: 33_576_000 picoseconds. 
- Weight::from_parts(26_859_487, 0) - .saturating_add(Weight::from_parts(0, 4161)) - // Standard Error: 854 - .saturating_add(Weight::from_parts(81_887, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) - } - /// Storage: PhragmenElection Candidates (r:1 w:1) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `c` is `[1, 1000]`. - fn renounce_candidacy_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `250 + c * (48 ±0)` - // Estimated: `1722 + c * (48 ±0)` - // Minimum execution time: 29_671_000 picoseconds. - Weight::from_parts(22_509_800, 0) - .saturating_add(Weight::from_parts(0, 1722)) - // Standard Error: 908 - .saturating_add(Weight::from_parts(58_320, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 48).saturating_mul(c.into())) - } - /// Storage: PhragmenElection Members (r:1 w:1) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:1) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - fn renounce_candidacy_members() -> Weight { - // Proof Size summary in bytes: - // Measured: `2952` - // Estimated: `4437` - // Minimum execution time: 45_934_000 picoseconds. - Weight::from_parts(46_279_000, 0) - .saturating_add(Weight::from_parts(0, 4437)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: PhragmenElection RunnersUp (r:1 w:1) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - fn renounce_candidacy_runners_up() -> Weight { - // Proof Size summary in bytes: - // Measured: `1647` - // Estimated: `3132` - // Minimum execution time: 30_291_000 picoseconds. - Weight::from_parts(30_611_000, 0) - .saturating_add(Weight::from_parts(0, 3132)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) - fn remove_member_without_replacement() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_000_000_000_000 picoseconds. 
- Weight::from_parts(2_000_000_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: PhragmenElection Members (r:1 w:1) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: PhragmenElection RunnersUp (r:1 w:1) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:1 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - fn remove_member_with_replacement() -> Weight { - // Proof Size summary in bytes: - // Measured: `2952` - // Estimated: `4437` - // Minimum execution time: 63_178_000 picoseconds. - Weight::from_parts(63_850_000, 0) - .saturating_add(Weight::from_parts(0, 4437)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: PhragmenElection Voting (r:10001 w:10000) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:0) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Candidates (r:1 w:0) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Balances Locks (r:10000 w:10000) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:10000 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:10000 w:10000) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `v` is `[5000, 10000]`. - /// The range of component `d` is `[0, 5000]`. - fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `35961 + v * (808 ±0)` - // Estimated: `39702 + v * (3774 ±0)` - // Minimum execution time: 379_638_846_000 picoseconds. 
- Weight::from_parts(380_443_068_000, 0) - .saturating_add(Weight::from_parts(0, 39702)) - // Standard Error: 318_371 - .saturating_add(Weight::from_parts(46_236_987, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 3774).saturating_mul(v.into())) - } - /// Storage: PhragmenElection Candidates (r:1 w:1) - /// Proof Skipped: PhragmenElection Candidates (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:1) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection RunnersUp (r:1 w:1) - /// Proof Skipped: PhragmenElection RunnersUp (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PhragmenElection Voting (r:10001 w:0) - /// Proof Skipped: PhragmenElection Voting (max_values: None, max_size: None, mode: Measured) - /// Storage: Council Proposals (r:1 w:0) - /// Proof Skipped: Council Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:962 w:962) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: PhragmenElection ElectionRounds (r:1 w:1) - /// Proof Skipped: PhragmenElection ElectionRounds (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Members (r:0 w:1) - /// Proof Skipped: Council Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Council Prime (r:0 w:1) - /// Proof Skipped: Council Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `c` is `[1, 1000]`. - /// The range of component `v` is `[1, 10000]`. - /// The range of component `e` is `[10000, 160000]`. - fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + e * (28 ±0) + v * (607 ±0)` - // Estimated: `2771509 + c * (2560 ±0) + e * (16 ±0) + v * (2744 ±4)` - // Minimum execution time: 35_941_980_000 picoseconds. - Weight::from_parts(36_032_688_000, 0) - .saturating_add(Weight::from_parts(0, 2771509)) - // Standard Error: 554_972 - .saturating_add(Weight::from_parts(43_733_923, 0).saturating_mul(v.into())) - // Standard Error: 35_614 - .saturating_add(Weight::from_parts(2_430_249, 0).saturating_mul(e.into())) - .saturating_add(T::DbWeight::get().reads(265)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes(6)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2560).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 16).saturating_mul(e.into())) - .saturating_add(Weight::from_parts(0, 2744).saturating_mul(v.into())) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_membership.rs b/polkadot/runtime/rococo/src/weights/pallet_membership.rs deleted file mode 100644 index 4486c7a270c47e790f51f76148b7592a15ff0fbc..0000000000000000000000000000000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_membership.rs +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. 
- -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. - -//! Autogenerated weights for `pallet_membership` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_membership -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_membership`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_membership::WeightInfo for WeightInfo<T> { - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 99]`. - fn add_member(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `140 + m * (64 ±0)` - // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 17_084_000 picoseconds. 
- Weight::from_parts(17_897_754, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 295 - .saturating_add(Weight::from_parts(30_882, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[2, 100]`. - fn remove_member(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `244 + m * (64 ±0)` - // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 19_550_000 picoseconds. - Weight::from_parts(20_467_978, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 330 - .saturating_add(Weight::from_parts(31_881, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[2, 100]`. - fn swap_member(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `244 + m * (64 ±0)` - // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 19_994_000 picoseconds. 
- Weight::from_parts(20_663_824, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 337 - .saturating_add(Weight::from_parts(44_806, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:0) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 100]`. - fn reset_member(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `244 + m * (64 ±0)` - // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 18_978_000 picoseconds. - Weight::from_parts(21_273_577, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 2_765 - .saturating_add(Weight::from_parts(152_082, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Members (r:1 w:1) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Proposals (r:1 w:0) - /// Proof Skipped: TechnicalCommittee Proposals (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalMembership Prime (r:1 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Members (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 100]`. - fn change_key(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `244 + m * (64 ±0)` - // Estimated: `4687 + m * (64 ±0)` - // Minimum execution time: 20_005_000 picoseconds. 
- Weight::from_parts(21_280_089, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 672 - .saturating_add(Weight::from_parts(41_961, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Members (r:1 w:0) - /// Proof: TechnicalMembership Members (max_values: Some(1), max_size: Some(3202), added: 3697, mode: MaxEncodedLen) - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 100]`. - fn set_prime(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `32 + m * (32 ±0)` - // Estimated: `4687 + m * (32 ±0)` - // Minimum execution time: 8_168_000 picoseconds. - Weight::from_parts(8_579_141, 0) - .saturating_add(Weight::from_parts(0, 4687)) - // Standard Error: 215 - .saturating_add(Weight::from_parts(9_557, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: TechnicalMembership Prime (r:0 w:1) - /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: TechnicalCommittee Prime (r:0 w:1) - /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 100]`. - fn clear_prime(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_344_000 picoseconds. - Weight::from_parts(3_551_700, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 86 - .saturating_add(Weight::from_parts(832, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs new file mode 100644 index 0000000000000000000000000000000000000000..8a556c3a248ef46fdb2b5b5449c4b0c75972a9fd --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs @@ -0,0 +1,175 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_ranked_collective` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_ranked_collective +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_ranked_collective`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_ranked_collective::WeightInfo for WeightInfo<T> { + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn add_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3507` + // Minimum execution time: 17_632_000 picoseconds. + Weight::from_parts(18_252_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:11 w:11) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:11 w:11) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:11 w:11) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + fn remove_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `517 + r * (281 ±0)` + // Estimated: `3519 + r * (2529 ±0)` + // Minimum execution time: 27_960_000 picoseconds. 
+ Weight::from_parts(30_632_408, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 22_806 + .saturating_add(Weight::from_parts(13_000_901, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + fn promote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `214 + r * (17 ±0)` + // Estimated: `3507` + // Minimum execution time: 19_900_000 picoseconds. + Weight::from_parts(20_908_316, 0) + .saturating_add(Weight::from_parts(0, 3507)) + // Standard Error: 4_878 + .saturating_add(Weight::from_parts(330_385, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:1 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + fn demote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `532 + r * (72 ±0)` + // Estimated: `3519` + // Minimum execution time: 27_697_000 picoseconds. 
+ Weight::from_parts(30_341_815, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 17_010 + .saturating_add(Weight::from_parts(642_213, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:1 w:1) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `638` + // Estimated: `83866` + // Minimum execution time: 48_275_000 picoseconds. + Weight::from_parts(49_326_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::VotingCleanup` (r:1 w:0) + /// Proof: `FellowshipCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:100 w:100) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + fn cleanup_poll(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `434 + n * (50 ±0)` + // Estimated: `4365 + n * (2540 ±0)` + // Minimum execution time: 15_506_000 picoseconds. + Weight::from_parts(17_634_029, 0) + .saturating_add(Weight::from_parts(0, 4365)) + // Standard Error: 2_117 + .saturating_add(Weight::from_parts(1_126_879, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2540).saturating_mul(n.into())) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs new file mode 100644 index 0000000000000000000000000000000000000000..96f172230e13f5aed92b47338b2387f4db79fa27 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs @@ -0,0 +1,524 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_referenda +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> { + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `327` + // Estimated: `42428` + // Minimum execution time: 29_909_000 picoseconds. + Weight::from_parts(30_645_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `438` + // Estimated: `83866` + // Minimum execution time: 54_405_000 picoseconds. 
+ Weight::from_parts(55_583_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2076` + // Estimated: `42428` + // Minimum execution time: 110_477_000 picoseconds. + Weight::from_parts(119_187_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2117` + // Estimated: `42428` + // Minimum execution time: 111_467_000 picoseconds. + Weight::from_parts(117_758_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `83866` + // Minimum execution time: 191_135_000 picoseconds. 
+ Weight::from_parts(210_535_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `639` + // Estimated: `83866` + // Minimum execution time: 67_168_000 picoseconds. + Weight::from_parts(68_895_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `4365` + // Minimum execution time: 31_298_000 picoseconds. + Weight::from_parts(32_570_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `201` + // Estimated: `4365` + // Minimum execution time: 15_674_000 picoseconds. + Weight::from_parts(16_190_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `383` + // Estimated: `83866` + // Minimum execution time: 38_927_000 picoseconds. 
+ Weight::from_parts(40_545_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `484` + // Estimated: `83866` + // Minimum execution time: 80_209_000 picoseconds. + Weight::from_parts(82_084_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `4277` + // Minimum execution time: 9_520_000 picoseconds. + Weight::from_parts(10_088_000, 0) + .saturating_add(Weight::from_parts(0, 4277)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2376` + // Estimated: `42428` + // Minimum execution time: 93_893_000 picoseconds. 
+ Weight::from_parts(101_065_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2362` + // Estimated: `42428` + // Minimum execution time: 98_811_000 picoseconds. + Weight::from_parts(103_590_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `1841` + // Estimated: `4365` + // Minimum execution time: 43_230_000 picoseconds. + Weight::from_parts(46_120_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `1808` + // Estimated: `4365` + // Minimum execution time: 43_092_000 picoseconds. + Weight::from_parts(46_018_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1824` + // Estimated: `4365` + // Minimum execution time: 49_697_000 picoseconds. 
+ Weight::from_parts(53_795_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1865` + // Estimated: `4365` + // Minimum execution time: 50_417_000 picoseconds. + Weight::from_parts(53_214_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `335` + // Estimated: `42428` + // Minimum execution time: 25_688_000 picoseconds. + Weight::from_parts(26_575_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `383` + // Estimated: `42428` + // Minimum execution time: 26_230_000 picoseconds. + Weight::from_parts(27_235_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4365` + // Minimum execution time: 17_585_000 picoseconds. 
+ Weight::from_parts(18_225_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `584` + // Estimated: `42428` + // Minimum execution time: 38_243_000 picoseconds. + Weight::from_parts(39_959_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `719` + // Estimated: `42428` + // Minimum execution time: 88_424_000 picoseconds. + Weight::from_parts(92_969_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `770` + // Estimated: `42428` + // Minimum execution time: 138_207_000 picoseconds. 
+ Weight::from_parts(151_726_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `755` + // Estimated: `42428` + // Minimum execution time: 131_001_000 picoseconds. + Weight::from_parts(148_651_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `770` + // Estimated: `42428` + // Minimum execution time: 109_612_000 picoseconds. + Weight::from_parts(143_626_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `42428` + // Minimum execution time: 71_754_000 picoseconds. 
+ Weight::from_parts(77_329_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `83866` + // Minimum execution time: 153_244_000 picoseconds. + Weight::from_parts(169_961_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `772` + // Estimated: `42428` + // Minimum execution time: 137_997_000 picoseconds. + Weight::from_parts(157_862_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `458` + // Estimated: `4365` + // Minimum execution time: 21_794_000 picoseconds. 
+ Weight::from_parts(22_341_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `319` + // Estimated: `4365` + // Minimum execution time: 18_458_000 picoseconds. + Weight::from_parts(19_097_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs new file mode 100644 index 0000000000000000000000000000000000000000..b7cc5df28b91cc2cc36cade14784b41d7d34ff71 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs @@ -0,0 +1,522 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_referenda +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. 
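+// NOTE (editorial, illustrative only; not part of the generated output): every function
+// below returns a two-dimensional `Weight`: a ref-time part in picoseconds and a
+// proof-size part in bytes. A minimal sketch of the composition pattern, reusing the
+// numbers from `submit()` below and assuming the standard `frame_support` weight API:
+//
+//     let w = Weight::from_parts(41_610_000, 0)            // base ref-time, no proof size
+//         .saturating_add(Weight::from_parts(0, 42428))    // proof-size bound in bytes
+//         .saturating_add(T::DbWeight::get().reads(2))     // cost of 2 DB reads
+//         .saturating_add(T::DbWeight::get().writes(3));   // cost of 3 DB writes
+//
+// `saturating_add` clamps at `Weight::MAX` instead of overflowing, which is why the
+// generated code uses it rather than plain `+`.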
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> { + /// Storage: `Referenda::ReferendumCount` (r:1 w:1) + /// Proof: `Referenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `324` + // Estimated: `42428` + // Minimum execution time: 39_852_000 picoseconds. + Weight::from_parts(41_610_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `577` + // Estimated: `83866` + // Minimum execution time: 52_588_000 picoseconds. + Weight::from_parts(54_154_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3334` + // Estimated: `42428` + // Minimum execution time: 70_483_000 picoseconds. 
+ Weight::from_parts(72_731_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3354` + // Estimated: `42428` + // Minimum execution time: 68_099_000 picoseconds. + Weight::from_parts(71_560_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `577` + // Estimated: `83866` + // Minimum execution time: 64_357_000 picoseconds. + Weight::from_parts(66_081_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `577` + // Estimated: `83866` + // Minimum execution time: 62_709_000 picoseconds. 
+ Weight::from_parts(64_534_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `417` + // Estimated: `4401` + // Minimum execution time: 31_296_000 picoseconds. + Weight::from_parts(32_221_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `407` + // Estimated: `4401` + // Minimum execution time: 31_209_000 picoseconds. + Weight::from_parts(32_168_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `485` + // Estimated: `83866` + // Minimum execution time: 38_887_000 picoseconds. + Weight::from_parts(40_193_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:0) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `726` + // Estimated: `83866` + // Minimum execution time: 106_054_000 picoseconds. + Weight::from_parts(108_318_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::TrackQueue` (r:1 w:0) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `240` + // Estimated: `5477` + // Minimum execution time: 9_263_000 picoseconds. 
+ Weight::from_parts(9_763_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3254` + // Estimated: `42428` + // Minimum execution time: 50_080_000 picoseconds. + Weight::from_parts(51_858_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3254` + // Estimated: `42428` + // Minimum execution time: 53_889_000 picoseconds. + Weight::from_parts(55_959_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `3077` + // Estimated: `5477` + // Minimum execution time: 23_266_000 picoseconds. + Weight::from_parts(24_624_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `3077` + // Estimated: `5477` + // Minimum execution time: 22_846_000 picoseconds. 
+ Weight::from_parts(24_793_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3081` + // Estimated: `5477` + // Minimum execution time: 28_284_000 picoseconds. + Weight::from_parts(29_940_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3101` + // Estimated: `5477` + // Minimum execution time: 28_133_000 picoseconds. + Weight::from_parts(29_638_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `437` + // Estimated: `42428` + // Minimum execution time: 25_710_000 picoseconds. + Weight::from_parts(26_500_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `485` + // Estimated: `42428` + // Minimum execution time: 25_935_000 picoseconds. 
+ Weight::from_parts(26_803_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `344` + // Estimated: `4401` + // Minimum execution time: 17_390_000 picoseconds. + Weight::from_parts(18_042_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `485` + // Estimated: `42428` + // Minimum execution time: 35_141_000 picoseconds. + Weight::from_parts(36_318_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `485` + // Estimated: `42428` + // Minimum execution time: 37_815_000 picoseconds. + Weight::from_parts(39_243_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `538` + // Estimated: `42428` + // Minimum execution time: 30_779_000 picoseconds. 
+ Weight::from_parts(31_845_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `521` + // Estimated: `42428` + // Minimum execution time: 31_908_000 picoseconds. + Weight::from_parts(33_253_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `538` + // Estimated: `42428` + // Minimum execution time: 28_951_000 picoseconds. + Weight::from_parts(30_004_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `542` + // Estimated: `42428` + // Minimum execution time: 27_750_000 picoseconds. 
+ Weight::from_parts(28_588_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `542` + // Estimated: `83866` + // Minimum execution time: 43_950_000 picoseconds. + Weight::from_parts(46_164_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `538` + // Estimated: `42428` + // Minimum execution time: 31_050_000 picoseconds. + Weight::from_parts(32_169_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:0 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `560` + // Estimated: `4401` + // Minimum execution time: 21_193_000 picoseconds. + Weight::from_parts(22_116_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `421` + // Estimated: `4401` + // Minimum execution time: 18_065_000 picoseconds. 
+ Weight::from_parts(18_781_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_tips.rs b/polkadot/runtime/rococo/src/weights/pallet_tips.rs deleted file mode 100644 index c4710afd78e2bf2fa6b0501140845681295eee0b..0000000000000000000000000000000000000000 --- a/polkadot/runtime/rococo/src/weights/pallet_tips.rs +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. - -//! Autogenerated weights for `pallet_tips` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_tips -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_tips`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_tips::WeightInfo for WeightInfo<T> { - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 16384]`. - fn report_awesome(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `4` - // Estimated: `3469` - // Minimum execution time: 27_741_000 picoseconds. - Weight::from_parts(28_495_173, 0) - .saturating_add(Weight::from_parts(0, 3469)) - // Standard Error: 4 - .saturating_add(Weight::from_parts(1_433, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - fn retract_tip() -> Weight { - // Proof Size summary in bytes: - // Measured: `221` - // Estimated: `3686` - // Minimum execution time: 27_275_000 picoseconds. 
- Weight::from_parts(27_649_000, 0) - .saturating_add(Weight::from_parts(0, 3686)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:1 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Tips (r:0 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// The range of component `r` is `[0, 16384]`. - /// The range of component `t` is `[1, 19]`. - fn tip_new(r: u32, t: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `74 + t * (64 ±0)` - // Estimated: `3539 + t * (64 ±0)` - // Minimum execution time: 19_809_000 picoseconds. - Weight::from_parts(18_182_607, 0) - .saturating_add(Weight::from_parts(0, 3539)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_303, 0).saturating_mul(r.into())) - // Standard Error: 5_156 - .saturating_add(Weight::from_parts(151_789, 0).saturating_mul(t.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(t.into())) - } - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// The range of component `t` is `[1, 19]`. - fn tip(t: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `295 + t * (112 ±0)` - // Estimated: `3760 + t * (112 ±0)` - // Minimum execution time: 15_528_000 picoseconds. - Weight::from_parts(15_717_755, 0) - .saturating_add(Weight::from_parts(0, 3760)) - // Standard Error: 6_569 - .saturating_add(Weight::from_parts(146_426, 0).saturating_mul(t.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) - } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: PhragmenElection Members (r:1 w:0) - /// Proof Skipped: PhragmenElection Members (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// The range of component `t` is `[1, 19]`. - fn close_tip(t: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `334 + t * (112 ±0)` - // Estimated: `3790 + t * (112 ±0)` - // Minimum execution time: 58_304_000 picoseconds. 
- Weight::from_parts(60_138_785, 0) - .saturating_add(Weight::from_parts(0, 3790)) - // Standard Error: 7_636 - .saturating_add(Weight::from_parts(86_665, 0).saturating_mul(t.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 112).saturating_mul(t.into())) - } - /// Storage: Tips Tips (r:1 w:1) - /// Proof Skipped: Tips Tips (max_values: None, max_size: None, mode: Measured) - /// Storage: Tips Reasons (r:0 w:1) - /// Proof Skipped: Tips Reasons (max_values: None, max_size: None, mode: Measured) - /// The range of component `t` is `[1, 19]`. - fn slash_tip(t: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `269` - // Estimated: `3734` - // Minimum execution time: 15_097_000 picoseconds. - Weight::from_parts(15_497_872, 0) - .saturating_add(Weight::from_parts(0, 3734)) - // Standard Error: 785 - .saturating_add(Weight::from_parts(18_377, 0).saturating_mul(t.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs new file mode 100644 index 0000000000000000000000000000000000000000..7c307deec4c6f8480019b5559117cfc47ee84b40 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs @@ -0,0 +1,116 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_whitelist` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-aahe6cbd-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_whitelist +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_whitelist`. 
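+// NOTE (editorial, illustrative only; not part of the generated output): two functions
+// below take a linear component `n`, the length in bytes of the encoded whitelisted
+// call, and add a per-byte term on top of the base weight. As a worked example under
+// that reading, `dispatch_whitelisted_call` with n = 1_000 costs about
+//
+//     32_855_000 + 1_000 * 1_223 = 34_078_000 picoseconds of ref-time,
+//
+// plus one extra byte of proof size per byte of `n`, plus the fixed DB reads/writes.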
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_whitelist::WeightInfo for WeightInfo<T> { + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn whitelist_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `223` + // Estimated: `3556` + // Minimum execution time: 20_035_000 picoseconds. + Weight::from_parts(20_452_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn remove_whitelisted_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `352` + // Estimated: `3556` + // Minimum execution time: 20_247_000 picoseconds. + Weight::from_parts(20_808_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 4194294]`. + fn dispatch_whitelisted_call(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `428 + n * (1 ±0)` + // Estimated: `3892 + n * (1 ±0)` + // Minimum execution time: 32_633_000 picoseconds. + Weight::from_parts(32_855_000, 0) + .saturating_add(Weight::from_parts(0, 3892)) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 10000]`. + fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `352` + // Estimated: `3556` + // Minimum execution time: 23_833_000 picoseconds. 
+ Weight::from_parts(24_698_994, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_454, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 445cb8014357d98584cac2486fbf3717ace74f8b..b84d2335a6990e7c4e92093c5cff68e4f1c266b3 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -17,9 +17,12 @@ //! XCM configuration for Rococo. use super::{ - parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, ParaId, Runtime, + parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, Fellows, ParaId, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmPallet, }; + +use crate::governance::StakingAdmin; + use frame_support::{ match_types, parameter_types, traits::{Everything, Nothing}, @@ -38,9 +41,9 @@ use xcm_builder::{ AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, DescribeFamily, FixedWeightBounds, HashedDescription, IsChildSystemParachain, IsConcrete, - MintLocation, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, - TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, - WithUniqueTopic, + MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::XcmExecutor; @@ -110,7 +113,7 @@ pub type XcmRouter = WithUniqueTopic<( parameter_types! { pub const Roc: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(TokenLocation::get()) }); - pub const Rockmine: MultiLocation = Parachain(1000).into_location(); + pub const AssetHub: MultiLocation = Parachain(1000).into_location(); pub const Contracts: MultiLocation = Parachain(1002).into_location(); pub const Encointer: MultiLocation = Parachain(1003).into_location(); pub const Tick: MultiLocation = Parachain(100).into_location(); @@ -119,7 +122,7 @@ parameter_types! { pub const RocForTick: (MultiAssetFilter, MultiLocation) = (Roc::get(), Tick::get()); pub const RocForTrick: (MultiAssetFilter, MultiLocation) = (Roc::get(), Trick::get()); pub const RocForTrack: (MultiAssetFilter, MultiLocation) = (Roc::get(), Track::get()); - pub const RocForRockmine: (MultiAssetFilter, MultiLocation) = (Roc::get(), Rockmine::get()); + pub const RocForAssetHub: (MultiAssetFilter, MultiLocation) = (Roc::get(), AssetHub::get()); pub const RocForContracts: (MultiAssetFilter, MultiLocation) = (Roc::get(), Contracts::get()); pub const RocForEncointer: (MultiAssetFilter, MultiLocation) = (Roc::get(), Encointer::get()); pub const MaxInstructions: u32 = 100; @@ -129,7 +132,7 @@ pub type TrustedTeleporters = ( xcm_builder::Case<RocForTick>, xcm_builder::Case<RocForTrick>, xcm_builder::Case<RocForTrack>, - xcm_builder::Case<RocForRockmine>, + xcm_builder::Case<RocForAssetHub>, xcm_builder::Case<RocForContracts>, xcm_builder::Case<RocForEncointer>, ); @@ -193,6 +196,14 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } +parameter_types! { + pub const CollectiveBodyId: BodyId = BodyId::Unit; + // StakingAdmin pluralistic body. 
+	pub const StakingAdminBodyId: BodyId = BodyId::Defense;
+	// Fellows pluralistic body.
+	pub const FellowsBodyId: BodyId = BodyId::Technical;
+}
+
 #[cfg(feature = "runtime-benchmarks")]
 parameter_types! {
 	pub ReachableDest: Option<MultiLocation> = Some(Parachain(1000).into());
@@ -201,12 +212,33 @@ parameter_types! {
 /// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior
 /// location of this chain.
 pub type LocalOriginToLocation = (
-	// A usual Signed origin to be used in XCM as a corresponding AccountId32
+	// And a usual Signed origin to be used in XCM as a corresponding AccountId32
 	SignedToAccountId32<RuntimeOrigin, AccountId, ThisNetwork>,
 );
+
+/// Type to convert the `StakingAdmin` origin to a Plurality `MultiLocation` value.
+pub type StakingAdminToPlurality =
+	OriginToPluralityVoice<RuntimeOrigin, StakingAdmin, StakingAdminBodyId>;
+
+/// Type to convert the Fellows origin to a Plurality `MultiLocation` value.
+pub type FellowsToPlurality = OriginToPluralityVoice<RuntimeOrigin, Fellows, FellowsBodyId>;
+
+/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an
+/// interior location of this chain for a destination chain.
+pub type LocalPalletOriginToLocation = (
+	// StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value.
+	StakingAdminToPlurality,
+	// Fellows origin to be used in XCM as a corresponding Plurality `MultiLocation` value.
+	FellowsToPlurality,
+);
+
 impl pallet_xcm::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
-	type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
+	// We only allow the root, fellows and the staking admin to send messages.
+	// This is basically safe to enable for everyone (save the possibility of someone spamming the
+	// parachain if they're willing to pay the ROC to send from the Relay-chain), but it's useless
+	// until we bring in XCM v3 which will make `DescendOrigin` a bit more useful.
+	type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalPalletOriginToLocation>;
 	type XcmRouter = XcmRouter;
 	// Anyone can execute XCM messages locally.
type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>; diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index a8f26eb5b3d951a09c72a9b40cb78599a24299b6..2e9c773a3f8cf17da29e6e52ea1efb15da147d85 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -28,6 +28,7 @@ sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } sp-session = { path = "../../../substrate/primitives/session", default-features = false } sp-version = { path = "../../../substrate/primitives/version", default-features = false } @@ -124,6 +125,7 @@ std = [ "serde_derive", "sp-api/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-runtime/std", diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 94852ad39f5aae110931c701793d104430ffc199..99fd2198400bf821f406345ac5daabe13d4b009a 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -30,7 +30,7 @@ use polkadot_runtime_parachains::{ disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, runtime_api_impl::v5 as runtime_impl, + paras_inherent as parachains_paras_inherent, runtime_api_impl::v7 as runtime_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -42,7 +42,9 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{Everything, KeyOwnerProofSystem, WithdrawReasons}, }; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; @@ -1137,4 +1139,14 @@ sp_api::impl_runtime_apis! 
{ Timestamp::now() } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index f4bb66f68cdfee559da0f7825a019e764dc2fde4..de9d6666059183f2f182cb94cd97e86f7ebe34a7 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -24,7 +24,9 @@ inherents = { package = "sp-inherents", path = "../../../substrate/primitives/in offchain-primitives = { package = "sp-offchain", path = "../../../substrate/primitives/offchain", default-features = false } sp-api = { path = "../../../substrate/primitives/api", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } +sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-mmr-primitives = { path = "../../../substrate/primitives/merkle-mountain-range", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } @@ -64,10 +66,12 @@ pallet-message-queue = { path = "../../../substrate/frame/message-queue", defaul pallet-mmr = { path = "../../../substrate/frame/merkle-mountain-range", default-features = false } pallet-multisig = { path = "../../../substrate/frame/multisig", default-features = false } pallet-nomination-pools = { path = "../../../substrate/frame/nomination-pools", default-features = false } +pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } pallet-offences = { path = "../../../substrate/frame/offences", default-features = false } pallet-preimage = { path = "../../../substrate/frame/preimage", default-features = false } pallet-proxy = { path = "../../../substrate/frame/proxy", default-features = false } pallet-recovery = { path = "../../../substrate/frame/recovery", default-features = false } +pallet-referenda = { path = "../../../substrate/frame/referenda", default-features = false } pallet-scheduler = { path = "../../../substrate/frame/scheduler", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } pallet-society = { path = "../../../substrate/frame/society", default-features = false } @@ -83,6 +87,7 @@ pallet-nomination-pools-runtime-api = { path = "../../../substrate/frame/nominat pallet-treasury = { path = "../../../substrate/frame/treasury", default-features = false } pallet-utility = { path = "../../../substrate/frame/utility", default-features = false } pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = false } +pallet-whitelist = { path = "../../../substrate/frame/whitelist", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } @@ -146,6 +151,7 @@ std = [ "pallet-beefy-mmr/std", "pallet-beefy/std", 
"pallet-collective/std", + "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-election-provider-multi-phase/std", "pallet-election-provider-support-benchmarking?/std", @@ -167,6 +173,7 @@ std = [ "pallet-preimage/std", "pallet-proxy/std", "pallet-recovery/std", + "pallet-referenda/std", "pallet-scheduler/std", "pallet-session-benchmarking?/std", "pallet-session/std", @@ -181,6 +188,7 @@ std = [ "pallet-treasury/std", "pallet-utility/std", "pallet-vesting/std", + "pallet-whitelist/std", "pallet-xcm-benchmarks?/std", "pallet-xcm/std", "parity-scale-codec/std", @@ -194,7 +202,9 @@ std = [ "serde_derive", "sp-api/std", "sp-application-crypto/std", + "sp-arithmetic/std", "sp-core/std", + "sp-genesis-builder/std", "sp-io/std", "sp-mmr-primitives/std", "sp-npos-elections/std", @@ -222,6 +232,7 @@ runtime-benchmarks = [ "pallet-bags-list/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collective/runtime-benchmarks", + "pallet-conviction-voting/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-election-provider-support-benchmarking/runtime-benchmarks", @@ -242,6 +253,7 @@ runtime-benchmarks = [ "pallet-preimage/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-recovery/runtime-benchmarks", + "pallet-referenda/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-session-benchmarking/runtime-benchmarks", "pallet-society/runtime-benchmarks", @@ -252,6 +264,7 @@ runtime-benchmarks = [ "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", + "pallet-whitelist/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", @@ -278,6 +291,7 @@ try-runtime = [ "pallet-beefy-mmr/try-runtime", "pallet-beefy/try-runtime", "pallet-collective/try-runtime", + "pallet-conviction-voting/try-runtime", "pallet-democracy/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-elections-phragmen/try-runtime", @@ -295,6 +309,7 @@ try-runtime = [ "pallet-preimage/try-runtime", "pallet-proxy/try-runtime", "pallet-recovery/try-runtime", + "pallet-referenda/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", "pallet-society/try-runtime", @@ -306,6 +321,7 @@ try-runtime = [ "pallet-treasury/try-runtime", "pallet-utility/try-runtime", "pallet-vesting/try-runtime", + "pallet-whitelist/try-runtime", "pallet-xcm/try-runtime", "runtime-common/try-runtime", "runtime-parachains/try-runtime", diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index f9830dab3325b62b2a9003498911e79e7778e0d3..0dd64d092c34512dc963499ab418201d253fa167 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -96,6 +96,26 @@ pub mod fee { } } +/// XCM protocol related constants. +pub mod xcm { + /// Pluralistic bodies existing within the consensus. + pub mod body { + // Preallocated for the Root body. + #[allow(dead_code)] + const ROOT_INDEX: u32 = 0; + // The bodies corresponding to the Polkadot OpenGov Origins. + pub const FELLOWSHIP_ADMIN_INDEX: u32 = 1; + } +} + +/// System Parachains. +pub mod system_parachain { + /// Statemint parachain ID. + pub const ASSET_HUB_ID: u32 = 1000; + /// Collectives parachain ID. 
+ pub const COLLECTIVES_ID: u32 = 1001; +} + #[cfg(test)] mod tests { use super::{ diff --git a/polkadot/runtime/westend/src/governance/mod.rs b/polkadot/runtime/westend/src/governance/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..d027f788d71f6bf255b71157bfe21ba1fdc16f5a --- /dev/null +++ b/polkadot/runtime/westend/src/governance/mod.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! New governance configurations for the Westend runtime. + +use super::*; +use crate::xcm_config::Collectives; +use frame_support::{parameter_types, traits::EitherOf}; +use frame_system::EnsureRootWithSuccess; +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use xcm::latest::BodyId; + +mod origins; +pub use origins::{ + pallet_custom_origins, AuctionAdmin, FellowshipAdmin, GeneralAdmin, LeaseAdmin, + ReferendumCanceller, ReferendumKiller, Spender, StakingAdmin, Treasurer, WhitelistedCaller, +}; +mod tracks; +pub use tracks::TracksInfo; + +parameter_types! { + pub const VoteLockingPeriod: BlockNumber = 7 * DAYS; +} + +impl pallet_conviction_voting::Config for Runtime { + type WeightInfo = weights::pallet_conviction_voting::WeightInfo<Self>; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type VoteLockingPeriod = VoteLockingPeriod; + type MaxVotes = ConstU32<512>; + type MaxTurnout = + frame_support::traits::tokens::currency::ActiveIssuanceOf<Balances, Self::AccountId>; + type Polls = Referenda; +} + +parameter_types! { + pub const AlarmInterval: BlockNumber = 1; + pub const SubmissionDeposit: Balance = 1 * 3 * CENTS; + pub const UndecidingTimeout: BlockNumber = 14 * DAYS; +} + +parameter_types! { + pub const MaxBalance: Balance = Balance::max_value(); +} +pub type TreasurySpender = EitherOf<EnsureRootWithSuccess<AccountId, MaxBalance>, Spender>; + +impl origins::pallet_custom_origins::Config for Runtime {} + +parameter_types! { + // Fellows pluralistic body.
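+ // `BodyId::Technical` stands in for the Fellows body: XCM messages claiming to be + // the voice of this body on the Collectives parachain are matched by + // `IsVoiceOfBody` in the whitelist config below.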
+ pub const FellowsBodyId: BodyId = BodyId::Technical; +} + +impl pallet_whitelist::Config for Runtime { + type WeightInfo = weights::pallet_whitelist::WeightInfo<Self>; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type WhitelistOrigin = EitherOfDiverse< + EnsureRoot<Self::AccountId>, + EnsureXcm<IsVoiceOfBody<Collectives, FellowsBodyId>>, + >; + type DispatchWhitelistedOrigin = EitherOf<EnsureRoot<Self::AccountId>, WhitelistedCaller>; + type Preimages = Preimage; +} + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_referenda::WeightInfo<Self>; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + type SubmitOrigin = frame_system::EnsureSigned<AccountId>; + type CancelOrigin = EitherOf<EnsureRoot<AccountId>, ReferendumCanceller>; + type KillOrigin = EitherOf<EnsureRoot<AccountId>, ReferendumKiller>; + type Slash = Treasury; + type Votes = pallet_conviction_voting::VotesOf<Runtime>; + type Tally = pallet_conviction_voting::TallyOf<Runtime>; + type SubmissionDeposit = SubmissionDeposit; + type MaxQueued = ConstU32<100>; + type UndecidingTimeout = UndecidingTimeout; + type AlarmInterval = AlarmInterval; + type Tracks = TracksInfo; + type Preimages = Preimage; +} diff --git a/polkadot/runtime/westend/src/governance/origins.rs b/polkadot/runtime/westend/src/governance/origins.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4639f40dc432473e1222f6ab1094a31ecd26f68 --- /dev/null +++ b/polkadot/runtime/westend/src/governance/origins.rs @@ -0,0 +1,194 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Custom origins for governance interventions. + +pub use pallet_custom_origins::*; + +#[frame_support::pallet] +pub mod pallet_custom_origins { + use crate::{Balance, CENTS, GRAND}; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet<T>(_); + + #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] + #[pallet::origin] + pub enum Origin { + /// Origin for cancelling slashes. + StakingAdmin, + /// Origin for spending (any amount of) funds. + Treasurer, + /// Origin for managing the composition of the fellowship. + FellowshipAdmin, + /// Origin for managing the registrar. + GeneralAdmin, + /// Origin for starting auctions. + AuctionAdmin, + /// Origin able to force slot leases. + LeaseAdmin, + /// Origin able to cancel referenda. + ReferendumCanceller, + /// Origin able to kill referenda. + ReferendumKiller, + /// Origin able to spend up to 1 WND from the treasury at once. + SmallTipper, + /// Origin able to spend up to 5 WND from the treasury at once. + BigTipper, + /// Origin able to spend up to 50 WND from the treasury at once.
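+ /// (The enforceable limit for each spend origin is the corresponding `Spender` + /// value declared at the bottom of this module.)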
+ SmallSpender, + /// Origin able to spend up to 500 WND from the treasury at once. + MediumSpender, + /// Origin able to spend up to 5,000 WND from the treasury at once. + BigSpender, + /// Origin able to dispatch a whitelisted call. + WhitelistedCaller, + /// Origin commanded by any member of the Polkadot Fellowship (no Dan grade needed). + FellowshipInitiates, + /// Origin commanded by Polkadot Fellows (3rd Dan fellows or greater). + Fellows, + /// Origin commanded by Polkadot Experts (5th Dan fellows or greater). + FellowshipExperts, + /// Origin commanded by Polkadot Masters (7th Dan fellows or greater). + FellowshipMasters, + /// Origin commanded by rank 1 of the Polkadot Fellowship and with a success of 1. + Fellowship1Dan, + /// Origin commanded by rank 2 of the Polkadot Fellowship and with a success of 2. + Fellowship2Dan, + /// Origin commanded by rank 3 of the Polkadot Fellowship and with a success of 3. + Fellowship3Dan, + /// Origin commanded by rank 4 of the Polkadot Fellowship and with a success of 4. + Fellowship4Dan, + /// Origin commanded by rank 5 of the Polkadot Fellowship and with a success of 5. + Fellowship5Dan, + /// Origin commanded by rank 6 of the Polkadot Fellowship and with a success of 6. + Fellowship6Dan, + /// Origin commanded by rank 7 of the Polkadot Fellowship and with a success of 7. + Fellowship7Dan, + /// Origin commanded by rank 8 of the Polkadot Fellowship and with a success of 8. + Fellowship8Dan, + /// Origin commanded by rank 9 of the Polkadot Fellowship and with a success of 9. + Fellowship9Dan, + } + + macro_rules! decl_unit_ensures { + ( $name:ident: $success_type:ty = $success:expr ) => { + pub struct $name; + impl<O: Into<Result<Origin, O>> + From<Origin>> + EnsureOrigin<O> for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result<Self::Success, O> { + o.into().and_then(|o| match o { + Origin::$name => Ok($success), + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result<O, ()> { + Ok(O::from(Origin::$name)) + } + } + }; + ( $name:ident ) => { decl_unit_ensures! { $name : () = () } }; + ( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => { + decl_unit_ensures! { $name: $success_type = $success } + decl_unit_ensures! { $( $rest )* } + }; + ( $name:ident, $( $rest:tt )* ) => { + decl_unit_ensures! { $name } + decl_unit_ensures! { $( $rest )* } + }; + () => {} + } + decl_unit_ensures!( + StakingAdmin, + Treasurer, + FellowshipAdmin, + GeneralAdmin, + AuctionAdmin, + LeaseAdmin, + ReferendumCanceller, + ReferendumKiller, + WhitelistedCaller, + FellowshipInitiates: u16 = 0, + Fellows: u16 = 3, + FellowshipExperts: u16 = 5, + FellowshipMasters: u16 = 7, + ); + + macro_rules! decl_ensure { + ( + $vis:vis type $name:ident: EnsureOrigin<Success = $success_type:ty> { + $( $item:ident = $success:expr, )* + } + ) => { + $vis struct $name; + impl<O: Into<Result<Origin, O>> + From<Origin>> + EnsureOrigin<O> for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result<Self::Success, O> { + o.into().and_then(|o| match o { + $( + Origin::$item => Ok($success), + )* + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result<O, ()> { + // By convention the more privileged origins go later, so for greatest chance + // of success, we want the last one.
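+ // For example, in the `Spender` declaration below the repetition re-binds + // `_result` once per item, so the value finally returned is + // `Ok(O::from(Origin::Treasurer))`, the most privileged spend origin.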
+ let _result: Result<O, ()> = Err(()); + $( + let _result: Result<O, ()> = Ok(O::from(Origin::$item)); + )* + _result + } + } + } + } + + decl_ensure! { + pub type Spender: EnsureOrigin<Success = Balance> { + SmallTipper = 250 * 3 * CENTS, + BigTipper = 1 * GRAND, + SmallSpender = 10 * GRAND, + MediumSpender = 100 * GRAND, + BigSpender = 1_000 * GRAND, + Treasurer = 10_000 * GRAND, + } + } + + decl_ensure! { + pub type EnsureFellowship: EnsureOrigin<Success = u16> { + Fellowship1Dan = 1, + Fellowship2Dan = 2, + Fellowship3Dan = 3, + Fellowship4Dan = 4, + Fellowship5Dan = 5, + Fellowship6Dan = 6, + Fellowship7Dan = 7, + Fellowship8Dan = 8, + Fellowship9Dan = 9, + } + } +} diff --git a/polkadot/runtime/westend/src/governance/tracks.rs b/polkadot/runtime/westend/src/governance/tracks.rs new file mode 100644 index 0000000000000000000000000000000000000000..3765569f183e0414a10fe2852e528ccc9dedc3d7 --- /dev/null +++ b/polkadot/runtime/westend/src/governance/tracks.rs @@ -0,0 +1,320 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Track configurations for governance. + +use super::*; + +const fn percent(x: i32) -> sp_arithmetic::FixedI64 { + sp_arithmetic::FixedI64::from_rational(x as u128, 100) +} +use pallet_referenda::Curve; +const APP_ROOT: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_ROOT: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_STAKING_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_STAKING_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_TREASURER: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_TREASURER: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_FELLOWSHIP_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_FELLOWSHIP_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_LEASE_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_LEASE_ADMIN: Curve = Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_CANCELLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_CANCELLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_KILLER: Curve = 
Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_KILLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_SMALL_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_SMALL_TIPPER: Curve = Curve::make_reciprocal(1, 28, percent(4), percent(0), percent(50)); +const APP_BIG_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_BIG_TIPPER: Curve = Curve::make_reciprocal(8, 28, percent(1), percent(0), percent(50)); +const APP_SMALL_SPENDER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_SMALL_SPENDER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_MEDIUM_SPENDER: Curve = Curve::make_linear(23, 28, percent(50), percent(100)); +const SUP_MEDIUM_SPENDER: Curve = + Curve::make_reciprocal(16, 28, percent(1), percent(0), percent(50)); +const APP_BIG_SPENDER: Curve = Curve::make_linear(28, 28, percent(50), percent(100)); +const SUP_BIG_SPENDER: Curve = Curve::make_reciprocal(20, 28, percent(1), percent(0), percent(50)); +const APP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(16, 28 * 24, percent(96), percent(50), percent(100)); +const SUP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(1, 28, percent(20), percent(5), percent(50)); + +const TRACKS_DATA: [(u16, pallet_referenda::TrackInfo<Balance, BlockNumber>); 15] = [ + ( + 0, + pallet_referenda::TrackInfo { + name: "root", + max_deciding: 1, + decision_deposit: 100 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_ROOT, + min_support: SUP_ROOT, + }, + ), + ( + 1, + pallet_referenda::TrackInfo { + name: "whitelisted_caller", + max_deciding: 100, + decision_deposit: 10 * GRAND, + prepare_period: 6 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_WHITELISTED_CALLER, + min_support: SUP_WHITELISTED_CALLER, + }, + ), + ( + 10, + pallet_referenda::TrackInfo { + name: "staking_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_STAKING_ADMIN, + min_support: SUP_STAKING_ADMIN, + }, + ), + ( + 11, + pallet_referenda::TrackInfo { + name: "treasurer", + max_deciding: 10, + decision_deposit: 1 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_TREASURER, + min_support: SUP_TREASURER, + }, + ), + ( + 12, + pallet_referenda::TrackInfo { + name: "lease_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_LEASE_ADMIN, + min_support: SUP_LEASE_ADMIN, + }, + ), + ( + 13, + pallet_referenda::TrackInfo { + name: "fellowship_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_FELLOWSHIP_ADMIN, + min_support: SUP_FELLOWSHIP_ADMIN, + }, + ), + ( + 14, + pallet_referenda::TrackInfo { + name: "general_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + 
decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_GENERAL_ADMIN, + min_support: SUP_GENERAL_ADMIN, + }, + ), + ( + 15, + pallet_referenda::TrackInfo { + name: "auction_admin", + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_AUCTION_ADMIN, + min_support: SUP_AUCTION_ADMIN, + }, + ), + ( + 20, + pallet_referenda::TrackInfo { + name: "referendum_canceller", + max_deciding: 1_000, + decision_deposit: 10 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_CANCELLER, + min_support: SUP_REFERENDUM_CANCELLER, + }, + ), + ( + 21, + pallet_referenda::TrackInfo { + name: "referendum_killer", + max_deciding: 1_000, + decision_deposit: 50 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_KILLER, + min_support: SUP_REFERENDUM_KILLER, + }, + ), + ( + 30, + pallet_referenda::TrackInfo { + name: "small_tipper", + max_deciding: 200, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 1 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: APP_SMALL_TIPPER, + min_support: SUP_SMALL_TIPPER, + }, + ), + ( + 31, + pallet_referenda::TrackInfo { + name: "big_tipper", + max_deciding: 100, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 4 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_BIG_TIPPER, + min_support: SUP_BIG_TIPPER, + }, + ), + ( + 32, + pallet_referenda::TrackInfo { + name: "small_spender", + max_deciding: 50, + decision_deposit: 100 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 10 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_SMALL_SPENDER, + min_support: SUP_SMALL_SPENDER, + }, + ), + ( + 33, + pallet_referenda::TrackInfo { + name: "medium_spender", + max_deciding: 50, + decision_deposit: 200 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_MEDIUM_SPENDER, + min_support: SUP_MEDIUM_SPENDER, + }, + ), + ( + 34, + pallet_referenda::TrackInfo { + name: "big_spender", + max_deciding: 50, + decision_deposit: 400 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 14 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_BIG_SPENDER, + min_support: SUP_BIG_SPENDER, + }, + ), +]; + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo<Balance, BlockNumber> for TracksInfo { + type Id = u16; + type RuntimeOrigin = <RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin; + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo<Balance, BlockNumber>)] { + &TRACKS_DATA[..] 
+ } + fn track_for(id: &Self::RuntimeOrigin) -> Result<Self::Id, ()> { + if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) { + match system_origin { + frame_system::RawOrigin::Root => Ok(0), + _ => Err(()), + } + } else if let Ok(custom_origin) = origins::Origin::try_from(id.clone()) { + match custom_origin { + origins::Origin::WhitelistedCaller => Ok(1), + // Admin origins + origins::Origin::StakingAdmin => Ok(10), + origins::Origin::Treasurer => Ok(11), + origins::Origin::LeaseAdmin => Ok(12), + origins::Origin::FellowshipAdmin => Ok(13), + origins::Origin::GeneralAdmin => Ok(14), + origins::Origin::AuctionAdmin => Ok(15), + // Referendum admins + origins::Origin::ReferendumCanceller => Ok(20), + origins::Origin::ReferendumKiller => Ok(21), + // Limited treasury spenders + origins::Origin::SmallTipper => Ok(30), + origins::Origin::BigTipper => Ok(31), + origins::Origin::SmallSpender => Ok(32), + origins::Origin::MediumSpender => Ok(33), + origins::Origin::BigSpender => Ok(34), + _ => Err(()), + } + } else { + Err(()) + } + } +} +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 9af18b5be2bbcb4300796eccc4a57afa8814110b..b1231a5d95fd002c7ccebe7007b2ff84d678e291 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -17,7 +17,7 @@ //! The Westend runtime. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 512. +// `construct_runtime!` does a lot of recursion and requires us to increase the limit. #![recursion_limit = "512"] use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; @@ -27,11 +27,13 @@ use beefy_primitives::{ }; use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, Contains, EverythingBut, InstanceFilter, - KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, - WithdrawReasons, + fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, + ProcessMessageError, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -63,9 +65,7 @@ use runtime_parachains::{ inclusion::{AggregateMessageOrigin, UmpQueueId}, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, - runtime_api_impl::{ - v5 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, - }, + runtime_api_impl::v7 as parachains_runtime_api_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -80,7 +80,7 @@ use sp_runtime::{ Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap,
prelude::*}; @@ -107,6 +107,13 @@ mod bag_thresholds; mod weights; pub mod xcm_config; +// Governance and configurations. +pub mod governance; +use governance::{ + pallet_custom_origins, AuctionAdmin, FellowshipAdmin, GeneralAdmin, LeaseAdmin, StakingAdmin, + Treasurer, TreasurySpender, +}; + #[cfg(test)] mod tests; @@ -122,7 +129,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 9430, + spec_version: 10020, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 22, @@ -197,7 +204,9 @@ impl pallet_scheduler::Config for Runtime { type PalletsOrigin = OriginCaller; type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; - type ScheduleOrigin = EnsureRoot<AccountId>; + // The goal of having ScheduleOrigin include AuctionAdmin is to allow the auctions track of + // OpenGov to schedule periodic auctions. + type ScheduleOrigin = EitherOf<EnsureRoot<AccountId>, AuctionAdmin>; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = weights::pallet_scheduler::WeightInfo<Runtime>; type OriginPrivilegeCmp = frame_support::traits::EqualPrivilegeOnly; @@ -683,7 +692,40 @@ impl pallet_fast_unstake::Config for Runtime { } parameter_types! { + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: Balance = 2000 * CENTS; + pub const ProposalBondMaximum: Balance = 1 * GRAND; + pub const SpendPeriod: BlockNumber = 6 * DAYS; + pub const Burn: Permill = Permill::from_perthousand(2); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); + + pub const TipCountdown: BlockNumber = 1 * DAYS; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: Balance = 100 * CENTS; + pub const DataDepositPerByte: Balance = 1 * CENTS; + pub const MaxApprovals: u32 = 100; pub const MaxAuthorities: u32 = 100_000; + pub const MaxKeys: u32 = 10_000; + pub const MaxPeerInHeartbeats: u32 = 10_000; +} + +impl pallet_treasury::Config for Runtime { + type PalletId = TreasuryPalletId; + type Currency = Balances; + type ApproveOrigin = EitherOfDiverse<EnsureRoot<AccountId>, Treasurer>; + type RejectOrigin = EitherOfDiverse<EnsureRoot<AccountId>, Treasurer>; + type RuntimeEvent = RuntimeEvent; + type OnSlash = Treasury; + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type ProposalBondMaximum = ProposalBondMaximum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; + type BurnDestination = (); + type MaxApprovals = MaxApprovals; + type WeightInfo = weights::pallet_treasury::WeightInfo<Runtime>; + type SpendFunds = (); + type SpendOrigin = TreasurySpender; } impl pallet_offences::Config for Runtime { @@ -699,8 +741,6 @@ impl pallet_authority_discovery::Config for Runtime { parameter_types! 
{ pub const NposSolutionPriority: TransactionPriority = TransactionPriority::max_value() / 2; pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); - pub const MaxKeys: u32 = 10_000; - pub const MaxPeerInHeartbeats: u32 = 10_000; } impl pallet_im_online::Config for Runtime { @@ -814,8 +854,8 @@ impl pallet_identity::Config for Runtime { type MaxSubAccounts = MaxSubAccounts; type MaxAdditionalFields = MaxAdditionalFields; type MaxRegistrars = MaxRegistrars; - type RegistrarOrigin = frame_system::EnsureRoot<AccountId>; - type ForceOrigin = frame_system::EnsureRoot<AccountId>; + type ForceOrigin = EitherOf<EnsureRoot<Self::AccountId>, GeneralAdmin>; + type RegistrarOrigin = EitherOf<EnsureRoot<Self::AccountId>, GeneralAdmin>; type WeightInfo = weights::pallet_identity::WeightInfo<Runtime>; } @@ -912,6 +952,7 @@ parameter_types! { pub enum ProxyType { Any, NonTransfer, + Governance, Staking, SudoBalances, IdentityJudgement, @@ -944,6 +985,9 @@ impl InstanceFilter<RuntimeCall> for ProxyType { RuntimeCall::ImOnline(..) | RuntimeCall::Utility(..) | RuntimeCall::Identity(..) | + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::Whitelist(..) | RuntimeCall::Recovery(pallet_recovery::Call::as_recovered{..}) | RuntimeCall::Recovery(pallet_recovery::Call::vouch_recovery{..}) | RuntimeCall::Recovery(pallet_recovery::Call::claim_recovery{..}) | @@ -989,6 +1033,13 @@ impl InstanceFilter<RuntimeCall> for ProxyType { RuntimeCall::Utility(..) => true, _ => false, }, + ProxyType::Governance => matches!( + c, + // OpenGov calls + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::Whitelist(..) + ), ProxyType::IdentityJudgement => matches!( c, RuntimeCall::Identity(pallet_identity::Call::provide_judgement { .. }) | @@ -1184,7 +1235,7 @@ impl parachains_slashing::Config for Runtime { parameter_types! { pub const ParaDeposit: Balance = 2000 * CENTS; - pub const DataDepositPerByte: Balance = deposit(0, 1); + pub const RegistrarDataDepositPerByte: Balance = deposit(0, 1); } impl paras_registrar::Config for Runtime { @@ -1193,7 +1244,7 @@ impl paras_registrar::Config for Runtime { type Currency = Balances; type OnSwap = (Crowdloan, Slots); type ParaDeposit = ParaDeposit; - type DataDepositPerByte = DataDepositPerByte; + type DataDepositPerByte = RegistrarDataDepositPerByte; type WeightInfo = weights::runtime_common_paras_registrar::WeightInfo<Runtime>; } @@ -1207,7 +1258,7 @@ impl slots::Config for Runtime { type Registrar = Registrar; type LeasePeriod = LeasePeriod; type LeaseOffset = (); - type ForceOrigin = EnsureRoot<AccountId>; + type ForceOrigin = EitherOf<EnsureRoot<Self::AccountId>, LeaseAdmin>; type WeightInfo = weights::runtime_common_slots::WeightInfo<Runtime>; } @@ -1247,7 +1298,7 @@ impl auctions::Config for Runtime { type EndingPeriod = EndingPeriod; type SampleLength = SampleLength; type Randomness = pallet_babe::RandomnessFromOneEpochAgo<Runtime>; - type InitiateOrigin = EnsureRoot<AccountId>; + type InitiateOrigin = EitherOf<EnsureRoot<Self::AccountId>, AuctionAdmin>; type WeightInfo = weights::runtime_common_auctions::WeightInfo<Runtime>; } @@ -1352,6 +1403,15 @@ construct_runtime! { // Fast unstake pallet: extension to staking. 
FastUnstake: pallet_fast_unstake = 30, + // OpenGov + ConvictionVoting: pallet_conviction_voting::{Pallet, Call, Storage, Event<T>} = 31, + Referenda: pallet_referenda::{Pallet, Call, Storage, Event<T>} = 32, + Origins: pallet_custom_origins::{Origin} = 35, + Whitelist: pallet_whitelist::{Pallet, Call, Storage, Event<T>} = 36, + + // Treasury + Treasury: pallet_treasury::{Pallet, Call, Storage, Config<T>, Event<T>} = 37, + // Parachains pallets. Start indices at 40 to leave room. ParachainsOrigin: parachains_origin::{Pallet, Origin} = 41, Configuration: parachains_configuration::{Pallet, Call, Storage, Config<T>} = 42, @@ -1445,6 +1505,7 @@ pub mod migrations { UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9<Runtime>, paras_registrar::migration::VersionCheckedMigrateToV1<Runtime, ()>, + pallet_referenda::migration::v1::MigrateV0ToV1<Runtime, ()>, ); } @@ -1485,6 +1546,7 @@ mod benches { // Substrate [pallet_bags_list, VoterList] [pallet_balances, Balances] + [pallet_conviction_voting, ConvictionVoting] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [frame_election_provider_support, ElectionProviderBench::<Runtime>] [pallet_fast_unstake, FastUnstake] @@ -1498,14 +1560,17 @@ mod benches { [pallet_preimage, Preimage] [pallet_proxy, Proxy] [pallet_recovery, Recovery] + [pallet_referenda, Referenda] [pallet_scheduler, Scheduler] [pallet_session, SessionBench::<Runtime>] [pallet_staking, Staking] [pallet_sudo, Sudo] [frame_system, SystemBench::<Runtime>] [pallet_timestamp, Timestamp] + [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] + [pallet_whitelist, Whitelist] // XCM [pallet_xcm, XcmPallet] // NOTE: Make sure you point to the individual modules below. @@ -1580,7 +1645,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(6)] + #[api_version(7)] impl primitives::runtime_api::ParachainHost<Block, Hash, BlockNumber> for Runtime { fn validators() -> Vec<ValidatorId> { parachains_runtime_api_impl::validators::<Runtime>() @@ -1713,7 +1778,15 @@ sp_api::impl_runtime_apis! { } fn minimum_backing_votes() -> u32 { - parachains_staging_runtime_api_impl::minimum_backing_votes::<Runtime>() + parachains_runtime_api_impl::minimum_backing_votes::<Runtime>() + } + + fn para_backing_state(para_id: ParaId) -> Option<primitives::async_backing::BackingState> { + parachains_runtime_api_impl::backing_state::<Runtime>(para_id) + } + + fn async_backing_params() -> primitives::AsyncBackingParams { + parachains_runtime_api_impl::async_backing_params::<Runtime>() } } @@ -2051,13 +2124,13 @@ sp_api::impl_runtime_apis! { AssetId::*, Fungibility::*, InteriorMultiLocation, Junction, Junctions::*, MultiAsset, MultiAssets, MultiLocation, NetworkId, Response, }; - use xcm_config::{Westmint, TokenLocation}; + use xcm_config::{AssetHub, TokenLocation}; impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationConverter; fn valid_destination() -> Result<MultiLocation, BenchmarkError> { - Ok(Westmint::get()) + Ok(AssetHub::get()) } fn worst_case_holding(_depositable_count: u32) -> MultiAssets { // Westend only knows about WND. @@ -2070,7 +2143,7 @@ sp_api::impl_runtime_apis! { parameter_types! 
{ pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - Westmint::get(), + AssetHub::get(), MultiAsset { fun: Fungible(1 * UNITS), id: Concrete(TokenLocation::get()) }, )); pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; @@ -2109,15 +2182,15 @@ sp_api::impl_runtime_apis! { } fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((Westmint::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + Ok((AssetHub::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) } fn subscribe_origin() -> Result<MultiLocation, BenchmarkError> { - Ok(Westmint::get()) + Ok(AssetHub::get()) } fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = Westmint::get(); + let origin = AssetHub::get(); let assets: MultiAssets = (Concrete(TokenLocation::get()), 1_000 * UNITS).into(); let ticket = MultiLocation { parents: 0, interior: Here }; Ok((origin, ticket, assets)) @@ -2153,6 +2226,16 @@ sp_api::impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } #[cfg(all(test, feature = "try-runtime"))] diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 531de5527de52deb2d3979658f6a28f5b5194787..faa94bcac586224b1b8357a0a0885f7bb10d3875 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -19,6 +19,7 @@ pub mod frame_election_provider_support; pub mod frame_system; pub mod pallet_bags_list; pub mod pallet_balances; +pub mod pallet_conviction_voting; pub mod pallet_election_provider_multi_phase; pub mod pallet_fast_unstake; pub mod pallet_identity; @@ -29,13 +30,17 @@ pub mod pallet_multisig; pub mod pallet_nomination_pools; pub mod pallet_preimage; pub mod pallet_proxy; +pub mod pallet_referenda_fellowship_referenda; +pub mod pallet_referenda_referenda; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_staking; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; +pub mod pallet_whitelist; pub mod pallet_xcm; pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; diff --git a/polkadot/runtime/westend/src/weights/pallet_conviction_voting.rs b/polkadot/runtime/westend/src/weights/pallet_conviction_voting.rs new file mode 100644 index 0000000000000000000000000000000000000000..8965a7392ed2859b245ff96a80e13dd5eea66403 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_conviction_voting.rs @@ -0,0 +1,194 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_conviction_voting` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_conviction_voting +// --chain=westend-dev +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_conviction_voting`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_conviction_voting::WeightInfo for WeightInfo<T> { + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn vote_new() -> Weight { + // Proof Size summary in bytes: + // Measured: `13445` + // Estimated: `42428` + // Minimum execution time: 152_223_000 picoseconds. 
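+ // Reading the generated code: the first argument of `from_parts` is the + // benchmarked ref-time in picoseconds; the proof-size bound (42428 bytes) and + // the per-operation DB read/write weights are then accumulated with `saturating_add`.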
+ Weight::from_parts(162_148_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn vote_existing() -> Weight { + // Proof Size summary in bytes: + // Measured: `14166` + // Estimated: `83866` + // Minimum execution time: 220_361_000 picoseconds. + Weight::from_parts(236_478_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn remove_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13918` + // Estimated: `83866` + // Minimum execution time: 198_787_000 picoseconds. + Weight::from_parts(204_983_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn remove_other_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13004` + // Estimated: `30706` + // Minimum execution time: 88_469_000 picoseconds. 
+ Weight::from_parts(95_942_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 512]`. + fn delegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29640 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 79_951_000 picoseconds. + Weight::from_parts(1_844_983_097, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 160_158 + .saturating_add(Weight::from_parts(43_973_863, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 512]`. + fn undelegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29555 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 47_976_000 picoseconds. 
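+ // Reading the generated code: each unit of `r` (referenda to undelegate from) + // adds ref-time, one DB read, one DB write, and 3411 bytes to the proof-size bound.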
+ Weight::from_parts(1_877_857_335, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 168_477 + .saturating_add(Weight::from_parts(43_303_902, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + fn unlock() -> Weight { + // Proof Size summary in bytes: + // Measured: `12218` + // Estimated: `30706` + // Minimum execution time: 102_868_000 picoseconds. + Weight::from_parts(110_438_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs new file mode 100644 index 0000000000000000000000000000000000000000..a4ac066791168a67b17289a1c87b1ede681f92ad --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_referenda_fellowship_referenda.rs @@ -0,0 +1,525 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> { + /// Storage: FellowshipCollective Members (r:1 w:0) + /// Proof: FellowshipCollective Members (max_values: None, max_size: Some(42), added: 2517, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda ReferendumCount (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda ReferendumInfoFor (r:0 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `327` + // Estimated: `42428` + // Minimum execution time: 28_969_000 picoseconds. + Weight::from_parts(30_902_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `83866` + // Minimum execution time: 53_500_000 picoseconds. + Weight::from_parts(54_447_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2042` + // Estimated: `42428` + // Minimum execution time: 114_321_000 picoseconds. 
+ Weight::from_parts(122_607_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2083` + // Estimated: `42428` + // Minimum execution time: 113_476_000 picoseconds. + Weight::from_parts(120_078_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `83866` + // Minimum execution time: 194_798_000 picoseconds. + Weight::from_parts(208_378_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `639` + // Estimated: `83866` + // Minimum execution time: 69_502_000 picoseconds. 
+ Weight::from_parts(71_500_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `317` + // Estimated: `4365` + // Minimum execution time: 30_561_000 picoseconds. + Weight::from_parts(31_427_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `167` + // Estimated: `4365` + // Minimum execution time: 14_535_000 picoseconds. + Weight::from_parts(14_999_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `349` + // Estimated: `83866` + // Minimum execution time: 38_532_000 picoseconds. + Weight::from_parts(39_361_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda MetadataOf (r:1 w:0) + /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `450` + // Estimated: `83866` + // Minimum execution time: 78_956_000 picoseconds. + Weight::from_parts(80_594_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda TrackQueue (r:1 w:0) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `4277` + // Minimum execution time: 9_450_000 picoseconds. 
+ Weight::from_parts(9_881_000, 0) + .saturating_add(Weight::from_parts(0, 4277)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2376` + // Estimated: `42428` + // Minimum execution time: 98_126_000 picoseconds. + Weight::from_parts(102_511_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2362` + // Estimated: `42428` + // Minimum execution time: 99_398_000 picoseconds. + Weight::from_parts(104_045_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `1807` + // Estimated: `4365` + // Minimum execution time: 43_734_000 picoseconds. 
+ Weight::from_parts(46_962_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `1774` + // Estimated: `4365` + // Minimum execution time: 42_863_000 picoseconds. + Weight::from_parts(46_241_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1790` + // Estimated: `4365` + // Minimum execution time: 57_511_000 picoseconds. + Weight::from_parts(64_027_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:0) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda TrackQueue (r:1 w:1) + /// Proof: FellowshipReferenda TrackQueue (max_values: None, max_size: Some(812), added: 3287, mode: MaxEncodedLen) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1831` + // Estimated: `4365` + // Minimum execution time: 56_726_000 picoseconds. + Weight::from_parts(61_962_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `301` + // Estimated: `42428` + // Minimum execution time: 24_870_000 picoseconds. 
+ Weight::from_parts(25_837_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `349` + // Estimated: `42428` + // Minimum execution time: 25_297_000 picoseconds. + Weight::from_parts(26_086_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `208` + // Estimated: `4365` + // Minimum execution time: 16_776_000 picoseconds. + Weight::from_parts(17_396_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `584` + // Estimated: `42428` + // Minimum execution time: 37_780_000 picoseconds. + Weight::from_parts(38_626_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda DecidingCount (r:1 w:1) + /// Proof: FellowshipReferenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `719` + // Estimated: `42428` + // Minimum execution time: 85_265_000 picoseconds. 
+ Weight::from_parts(89_986_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `770` + // Estimated: `42428` + // Minimum execution time: 143_283_000 picoseconds. + Weight::from_parts(158_540_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `755` + // Estimated: `42428` + // Minimum execution time: 143_736_000 picoseconds. + Weight::from_parts(162_755_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `770` + // Estimated: `42428` + // Minimum execution time: 139_021_000 picoseconds. + Weight::from_parts(157_398_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `42428` + // Minimum execution time: 78_530_000 picoseconds. 
+ Weight::from_parts(83_556_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Scheduler Lookup (r:1 w:1) + /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `776` + // Estimated: `83866` + // Minimum execution time: 174_165_000 picoseconds. + Weight::from_parts(188_496_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:1) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipCollective MemberCount (r:1 w:0) + /// Proof: FellowshipCollective MemberCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `772` + // Estimated: `42428` + // Minimum execution time: 142_964_000 picoseconds. + Weight::from_parts(157_257_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: Preimage StatusFor (r:1 w:0) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda MetadataOf (r:0 w:1) + /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `352` + // Estimated: `4365` + // Minimum execution time: 20_126_000 picoseconds. + Weight::from_parts(20_635_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: FellowshipReferenda ReferendumInfoFor (r:1 w:0) + /// Proof: FellowshipReferenda ReferendumInfoFor (max_values: None, max_size: Some(900), added: 3375, mode: MaxEncodedLen) + /// Storage: FellowshipReferenda MetadataOf (r:1 w:1) + /// Proof: FellowshipReferenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `4365` + // Minimum execution time: 17_716_000 picoseconds. 
+ Weight::from_parts(18_324_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/westend/src/weights/pallet_referenda_referenda.rs b/polkadot/runtime/westend/src/weights/pallet_referenda_referenda.rs new file mode 100644 index 0000000000000000000000000000000000000000..accaa0ef10d970839ff1d2470154f8952ad70ce9 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_referenda_referenda.rs @@ -0,0 +1,523 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> { + /// Storage: Referenda ReferendumCount (r:1 w:1) + /// Proof: Referenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:0 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `186` + // Estimated: `42428` + // Minimum execution time: 39_146_000 picoseconds. 
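+ // Illustrative arithmetic only (the runtime's real `T::DbWeight` is a
+ // benchmarked constant): with the generic `RocksDbWeight` defaults of
+ // 25_000_000 ps per read and 100_000_000 ps per write, the database
+ // component below would come to 2 * 25_000_000 + 3 * 100_000_000
+ // = 350_000_000 ps on top of the measured base.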
+ Weight::from_parts(40_383_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 51_385_000 picoseconds. + Weight::from_parts(52_701_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3196` + // Estimated: `42428` + // Minimum execution time: 70_018_000 picoseconds. + Weight::from_parts(75_868_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3216` + // Estimated: `42428` + // Minimum execution time: 69_311_000 picoseconds. 
+ Weight::from_parts(72_425_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 64_385_000 picoseconds. + Weight::from_parts(66_178_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 62_200_000 picoseconds. + Weight::from_parts(63_782_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `279` + // Estimated: `4401` + // Minimum execution time: 29_677_000 picoseconds. + Weight::from_parts(30_603_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `269` + // Estimated: `4401` + // Minimum execution time: 29_897_000 picoseconds. 
+ Weight::from_parts(30_618_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `83866` + // Minimum execution time: 37_697_000 picoseconds. + Weight::from_parts(38_953_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:1 w:0) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `588` + // Estimated: `83866` + // Minimum execution time: 106_001_000 picoseconds. + Weight::from_parts(107_102_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda TrackQueue (r:1 w:0) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `102` + // Estimated: `5477` + // Minimum execution time: 8_987_000 picoseconds. + Weight::from_parts(9_431_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3116` + // Estimated: `42428` + // Minimum execution time: 55_344_000 picoseconds. 
+ Weight::from_parts(58_026_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3116` + // Estimated: `42428` + // Minimum execution time: 57_003_000 picoseconds. + Weight::from_parts(60_347_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 23_001_000 picoseconds. + Weight::from_parts(24_812_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 23_299_000 picoseconds. + Weight::from_parts(24_465_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2943` + // Estimated: `5477` + // Minimum execution time: 28_223_000 picoseconds. 
+ Weight::from_parts(29_664_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2963` + // Estimated: `5477` + // Minimum execution time: 27_474_000 picoseconds. + Weight::from_parts(29_072_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `299` + // Estimated: `42428` + // Minimum execution time: 24_405_000 picoseconds. + Weight::from_parts(25_184_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 24_572_000 picoseconds. + Weight::from_parts(25_287_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `4401` + // Minimum execution time: 16_042_000 picoseconds. 
+ Weight::from_parts(16_610_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 33_639_000 picoseconds. + Weight::from_parts(34_749_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 36_467_000 picoseconds. + Weight::from_parts(37_693_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 29_857_000 picoseconds. 
+ Weight::from_parts(30_840_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `383` + // Estimated: `42428` + // Minimum execution time: 31_028_000 picoseconds. + Weight::from_parts(32_154_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 28_594_000 picoseconds. + Weight::from_parts(29_092_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `42428` + // Minimum execution time: 27_246_000 picoseconds. + Weight::from_parts(28_003_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Scheduler Lookup (r:1 w:1) + /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `83866` + // Minimum execution time: 43_426_000 picoseconds. 
+ Weight::from_parts(44_917_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 30_285_000 picoseconds. + Weight::from_parts(31_575_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Preimage StatusFor (r:1 w:0) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:0 w:1) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4401` + // Minimum execution time: 19_254_000 picoseconds. + Weight::from_parts(19_855_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:1 w:1) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `283` + // Estimated: `4401` + // Minimum execution time: 16_957_000 picoseconds. + Weight::from_parts(17_556_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/westend/src/weights/pallet_treasury.rs b/polkadot/runtime/westend/src/weights/pallet_treasury.rs new file mode 100644 index 0000000000000000000000000000000000000000..e2eb6abfc7bbb7bf90a40457d9ef1fccfb124d63 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_treasury.rs @@ -0,0 +1,150 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. 
If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_treasury` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-o7yfgx5n-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_treasury +// --chain=westend-dev +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_treasury`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_treasury::WeightInfo for WeightInfo<T> { + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + fn spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `1887` + // Minimum execution time: 13_644_000 picoseconds. + Weight::from_parts(13_988_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + fn propose_spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `107` + // Estimated: `1489` + // Minimum execution time: 26_304_000 picoseconds. + Weight::from_parts(26_850_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Treasury::Proposals` (r:1 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn reject_proposal() -> Weight { + // Proof Size summary in bytes: + // Measured: `265` + // Estimated: `3593` + // Minimum execution time: 40_318_000 picoseconds. 
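+ // The parameterized functions further below (`approve_proposal`,
+ // `on_initialize_proposals`) add a fitted per-component term. As a
+ // worked example, `approve_proposal` with p = 50 queued approvals costs
+ // roughly 10_937_873 + 50 * 82_426 = 15_059_173 ps of ref_time; the
+ // quoted `Standard Error` lines give the per-unit uncertainty of that
+ // linear fit.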
+ Weight::from_parts(41_598_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Treasury::Proposals` (r:1 w:0) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 99]`. + fn approve_proposal(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `433 + p * (8 ±0)` + // Estimated: `3573` + // Minimum execution time: 8_250_000 picoseconds. + Weight::from_parts(10_937_873, 0) + .saturating_add(Weight::from_parts(0, 3573)) + // Standard Error: 1_239 + .saturating_add(Weight::from_parts(82_426, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + fn remove_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `90` + // Estimated: `1887` + // Minimum execution time: 6_170_000 picoseconds. + Weight::from_parts(6_366_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::InactiveIssuance` (r:1 w:1) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:100 w:100) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:200 w:200) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 100]`. + fn on_initialize_proposals(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `175 + p * (251 ±0)` + // Estimated: `1887 + p * (5206 ±0)` + // Minimum execution time: 39_691_000 picoseconds. + Weight::from_parts(29_703_313, 0) + .saturating_add(Weight::from_parts(0, 1887)) + // Standard Error: 18_540 + .saturating_add(Weight::from_parts(42_601_290, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) + } +} diff --git a/polkadot/runtime/westend/src/weights/pallet_whitelist.rs b/polkadot/runtime/westend/src/weights/pallet_whitelist.rs new file mode 100644 index 0000000000000000000000000000000000000000..6177ac799e6a41b47f0216bf145e1009acf39679 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_whitelist.rs @@ -0,0 +1,116 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_whitelist` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-o7yfgx5n-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_whitelist +// --chain=westend-dev +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_whitelist`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_whitelist::WeightInfo for WeightInfo<T> { + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn whitelist_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `122` + // Estimated: `3556` + // Minimum execution time: 21_188_000 picoseconds. + Weight::from_parts(21_804_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn remove_whitelisted_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `3556` + // Minimum execution time: 17_655_000 picoseconds. 
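+ // For the length-parameterized `dispatch_whitelisted_call` below, both
+ // weight dimensions scale with the preimage length n: illustratively, a
+ // 1_000_000-byte call adds about 1_000_000 * 1_779 = 1_779_000_000 ps
+ // (~1.8 ms) of ref_time to the 30_886_000 ps base, plus a further
+ // 1_000_000 bytes of proof size.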
+ Weight::from_parts(19_443_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 4194294]`. + fn dispatch_whitelisted_call(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `327 + n * (1 ±0)` + // Estimated: `3791 + n * (1 ±0)` + // Minimum execution time: 30_540_000 picoseconds. + Weight::from_parts(30_886_000, 0) + .saturating_add(Weight::from_parts(0, 3791)) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_779, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 10000]`. + fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `3556` + // Minimum execution time: 21_082_000 picoseconds. + Weight::from_parts(21_922_294, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_412, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 92dcee150aab27a1939d2cff39e9002bdd7f8422..66a2e2230ccd6652c3b9f5e8d54d8ca0dae7d122 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -17,26 +17,31 @@ //! XCM configurations for Westend. 
use super::{ - parachains_origin, weights, AccountId, AllPalletsWithSystem, Balances, Dmp, ParaId, Runtime, - RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmPallet, + parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, FellowshipAdmin, + GeneralAdmin, ParaId, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, StakingAdmin, + TransactionByteFee, WeightToFee, XcmPallet, }; + use frame_support::{ - parameter_types, + match_types, parameter_types, traits::{Everything, Nothing}, }; use frame_system::EnsureRoot; +use pallet_xcm::XcmPassthrough; use runtime_common::{ xcm_sender::{ChildParachainRouter, ExponentialPrice}, ToAuthor, }; use sp_core::ConstU32; -use westend_runtime_constants::currency::CENTS; +use westend_runtime_constants::{ + currency::CENTS, system_parachain::*, xcm::body::FELLOWSHIP_ADMIN_INDEX, +}; use xcm::latest::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, CurrencyAdapter as XcmCurrencyAdapter, DescribeBodyTerminal, - DescribeFamily, HashedDescription, IsChildSystemParachain, IsConcrete, MintLocation, + DescribeFamily, HashedDescription, IsConcrete, MintLocation, OriginToPluralityVoice, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, }; @@ -45,7 +50,7 @@ use xcm_executor::XcmExecutor; parameter_types! { pub const TokenLocation: MultiLocation = Here.into_location(); pub const ThisNetwork: NetworkId = Westend; - pub UniversalLocation: InteriorMultiLocation = ThisNetwork::get().into(); + pub const UniversalLocation: InteriorMultiLocation = X1(GlobalConsensus(ThisNetwork::get())); pub CheckAccount: AccountId = XcmPallet::check_account(); pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); /// The asset ID for the asset that we use to pay for message delivery fees. @@ -77,9 +82,17 @@ pub type LocalAssetTransactor = XcmCurrencyAdapter< >; type LocalOriginConverter = ( + // If the origin kind is `Sovereign`, then return a `Signed` origin with the account determined + // by the `LocationConverter` converter. SovereignSignedViaLocation<LocationConverter, RuntimeOrigin>, + // If the origin kind is `Native` and the XCM origin is a child parachain, then we can express + // it with the special `parachains_origin::Origin` origin variant. ChildParachainAsNative<parachains_origin::Origin, RuntimeOrigin>, + // If the origin kind is `Native` and the XCM origin is the `AccountId32` location, then it can + // be expressed using the `Signed` origin variant. SignedAccountId32AsNative<ThisNetwork, RuntimeOrigin>, + // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + XcmPassthrough<RuntimeOrigin>, ); /// The XCM router. When we want to send an XCM message, we use this type. It amalgamates all of our @@ -94,22 +107,27 @@ pub type XcmRouter = WithUniqueTopic<( )>; parameter_types! 
{ - pub const Westmint: MultiLocation = Parachain(1000).into_location(); - pub const Collectives: MultiLocation = Parachain(1001).into_location(); pub const Wnd: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete(TokenLocation::get()) }); - pub const WndForWestmint: (MultiAssetFilter, MultiLocation) = (Wnd::get(), Westmint::get()); + pub const AssetHub: MultiLocation = Parachain(ASSET_HUB_ID).into_location(); + pub const WndForAssetHub: (MultiAssetFilter, MultiLocation) = (Wnd::get(), AssetHub::get()); + pub const Collectives: MultiLocation = Parachain(COLLECTIVES_ID).into_location(); pub const WndForCollectives: (MultiAssetFilter, MultiLocation) = (Wnd::get(), Collectives::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option<MultiLocation> = Some(Parachain(1000).into()); -} - pub type TrustedTeleporters = - (xcm_builder::Case<WndForWestmint>, xcm_builder::Case<WndForCollectives>); + (xcm_builder::Case<WndForAssetHub>, xcm_builder::Case<WndForCollectives>); + +match_types! { + pub type OnlyParachains: impl Contains<MultiLocation> = { + MultiLocation { parents: 0, interior: X1(Parachain(_)) } + }; + pub type CollectivesOrFellows: impl Contains<MultiLocation> = { + MultiLocation { parents: 0, interior: X1(Parachain(COLLECTIVES_ID)) } | + MultiLocation { parents: 0, interior: X2(Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }) } + }; +} /// The barriers one of which must be passed for an XCM message to be executed. pub type Barrier = TrailingSetTopicAsId<( @@ -121,10 +139,10 @@ pub type Barrier = TrailingSetTopicAsId<( ( // If the message is one that immediately attempts to pay for execution, then allow it. AllowTopLevelPaidExecutionFrom<Everything>, - // Messages coming from system parachains need not pay for execution. - AllowExplicitUnpaidExecutionFrom<IsChildSystemParachain<ParaId>>, // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom<Everything>, + AllowSubscriptionsFrom<OnlyParachains>, + // Collectives and Fellows plurality get free execution. + AllowExplicitUnpaidExecutionFrom<CollectivesOrFellows>, ), UniversalLocation, ConstU32<8>, @@ -141,8 +159,11 @@ impl xcm_executor::Config for XcmConfig { type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; - type Weigher = - WeightInfoBounds<weights::xcm::WestendXcmWeight<RuntimeCall>, RuntimeCall, MaxInstructions>; + type Weigher = WeightInfoBounds< + crate::weights::xcm::WestendXcmWeight<RuntimeCall>, + RuntimeCall, + MaxInstructions, + >; type Trader = UsingComponents<WeightToFee, TokenLocation, AccountId, Balances, ToAuthor<Runtime>>; type ResponseHandler = XcmPallet; @@ -161,16 +182,54 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } +parameter_types! { + // `GeneralAdmin` pluralistic body. + pub const GeneralAdminBodyId: BodyId = BodyId::Administration; + // StakingAdmin pluralistic body. + pub const StakingAdminBodyId: BodyId = BodyId::Defense; + // FellowshipAdmin pluralistic body. + pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); +} + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + pub ReachableDest: Option<MultiLocation> = Some(Parachain(1000).into()); +} + +/// Type to convert the `GeneralAdmin` origin to a Plurality `MultiLocation` value.
+pub type GeneralAdminToPlurality = + OriginToPluralityVoice<RuntimeOrigin, GeneralAdmin, GeneralAdminBodyId>; + /// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior /// location of this chain. pub type LocalOriginToLocation = ( + GeneralAdminToPlurality, // And a usual Signed origin to be used in XCM as a corresponding AccountId32 SignedToAccountId32<RuntimeOrigin, AccountId, ThisNetwork>, ); +/// Type to convert the `StakingAdmin` origin to a Plurality `MultiLocation` value. +pub type StakingAdminToPlurality = + OriginToPluralityVoice<RuntimeOrigin, StakingAdmin, StakingAdminBodyId>; + +/// Type to convert the `FellowshipAdmin` origin to a Plurality `MultiLocation` value. +pub type FellowshipAdminToPlurality = + OriginToPluralityVoice<RuntimeOrigin, FellowshipAdmin, FellowshipAdminBodyId>; + +/// Type to convert a pallet `Origin` type value into a `MultiLocation` value which represents an +/// interior location of this chain for a destination chain. +pub type LocalPalletOriginToLocation = ( + // GeneralAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + GeneralAdminToPlurality, + // StakingAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + StakingAdminToPlurality, + // FellowshipAdmin origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + FellowshipAdminToPlurality, +); + impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>; + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalPalletOriginToLocation>; type XcmRouter = XcmRouter; // Anyone can execute XCM messages locally... type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>; @@ -179,8 +238,11 @@ impl pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor<XcmConfig>; type XcmTeleportFilter = Everything; type XcmReserveTransferFilter = Everything; - type Weigher = - WeightInfoBounds<weights::xcm::WestendXcmWeight<RuntimeCall>, RuntimeCall, MaxInstructions>; + type Weigher = WeightInfoBounds< + crate::weights::xcm::WestendXcmWeight<RuntimeCall>, + RuntimeCall, + MaxInstructions, + >; type UniversalLocation = UniversalLocation; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/polkadot/scripts/init.sh b/polkadot/scripts/init.sh deleted file mode 100755 index cf5ecf97926fea7a5e8fd2a91df96853f90e8ee7..0000000000000000000000000000000000000000 --- a/polkadot/scripts/init.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -set -e - -echo "*** Initializing WASM build environment" - -if [ -z $CI_PROJECT_NAME ] ; then - rustup update nightly - rustup update stable -fi - -rustup target add wasm32-unknown-unknown --toolchain nightly - -# Install wasm-gc. It's useful for stripping slimming down wasm binaries. -command -v wasm-gc || \ - cargo +nightly install --git https://github.com/alexcrichton/wasm-gc --force diff --git a/polkadot/scripts/update-rust-stable.sh b/polkadot/scripts/update-rust-stable.sh deleted file mode 100755 index 6aae75d8cb2daa2dee5a332d19c31cd8f4fe818f..0000000000000000000000000000000000000000 --- a/polkadot/scripts/update-rust-stable.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -# -# Script for updating the UI tests for a new rust stable version. 
-# -# It needs to be called like this: -# -# update-rust-stable.sh 1.61 -# -# This will run all UI tests with the rust stable 1.61. The script -# requires that rustup is installed. -set -e - -if [ "$#" -ne 1 ]; then - echo "Please specify the rust version to use. E.g. update-rust-stable.sh 1.61" - exit -fi - -RUST_VERSION=$1 - -if ! command -v rustup &> /dev/null -then - echo "rustup needs to be installed" - exit -fi - -rustup install $RUST_VERSION -rustup component add rust-src --toolchain $RUST_VERSION - -# Ensure we run the ui tests -export RUN_UI_TESTS=1 -# We don't need any wasm files for ui tests -export SKIP_WASM_BUILD=1 -# Let trybuild overwrite the .stderr files -export TRYBUILD=overwrite - -# Run all the relevant UI tests -# -# Any new UI tests in different crates need to be added here as well. -rustup run $RUST_VERSION cargo test -p orchestra ui diff --git a/polkadot/tests/common.rs b/polkadot/tests/common.rs index 10859ead5fe8b8e8ebe782bbbcd83d2c9bafd811..15721c990e01b30b97db32151b0899c1e5a3888b 100644 --- a/polkadot/tests/common.rs +++ b/polkadot/tests/common.rs @@ -33,9 +33,7 @@ pub async fn wait_n_finalized_blocks(n: usize, url: &str) { let mut interval = tokio::time::interval(Duration::from_secs(6)); loop { - let Ok(rpc) = ws_client(url).await else { - continue; - }; + let Ok(rpc) = ws_client(url).await else { continue }; if let Ok(block) = ChainApi::<(), Hash, Header, Block>::finalized_head(&rpc).await { built_blocks.insert(block); diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 5acd093d3bca259691e817921640d6d9907f8e03..321bb294b88d89187924a88a98302c33dd860fb9 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -203,10 +203,10 @@ pub mod pallet { >; /// Our XCM filter which messages to be executed using `XcmExecutor` must pass. - type XcmExecuteFilter: Contains<(MultiLocation, Xcm<<Self as SysConfig>::RuntimeCall>)>; + type XcmExecuteFilter: Contains<(MultiLocation, Xcm<<Self as Config>::RuntimeCall>)>; /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<<Self as SysConfig>::RuntimeCall>; + type XcmExecutor: ExecuteXcm<<Self as Config>::RuntimeCall>; /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. type XcmTeleportFilter: Contains<(MultiLocation, Vec<MultiAsset>)>; @@ -216,7 +216,7 @@ pub mod pallet { type XcmReserveTransferFilter: Contains<(MultiLocation, Vec<MultiAsset>)>; /// Means of measuring the weight consumed by an XCM message locally. - type Weigher: WeightBounds<<Self as SysConfig>::RuntimeCall>; + type Weigher: WeightBounds<<Self as Config>::RuntimeCall>; /// This chain's Universal Location. type UniversalLocation: Get<InteriorMultiLocation>; @@ -227,7 +227,6 @@ pub mod pallet { /// The runtime `Call` type. 
type RuntimeCall: Parameter + GetDispatchInfo - + IsType<<Self as frame_system::Config>::RuntimeCall> + Dispatchable< RuntimeOrigin = <Self as Config>::RuntimeOrigin, PostInfo = PostDispatchInfo, @@ -902,7 +901,7 @@ pub mod pallet { #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( origin: OriginFor<T>, - message: Box<VersionedXcm<<T as SysConfig>::RuntimeCall>>, + message: Box<VersionedXcm<<T as Config>::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; diff --git a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml b/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml deleted file mode 100644 index 918fb5bf4f62c6be92009a62143fb5972d8c2292..0000000000000000000000000000000000000000 --- a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.toml +++ /dev/null @@ -1,34 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" -default_command = "polkadot" - - [relaychain.default_resources] - limits = { memory = "4G", cpu = "2" } - requests = { memory = "2G", cpu = "1" } - - [[relaychain.nodes]] - name = "alice" - args = [ "-lparachain=debug,runtime=debug"] - - [[relaychain.nodes]] - name = "bob" - image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}" - args = [ "-lparachain=debug,runtime=debug"] - -[[parachains]] -id = 100 - - [parachains.collator] - name = "collator01" - image = "{{COL_IMAGE}}" - command = "undying-collator" - args = ["-lparachain=debug"] - -[types.Header] -number = "u64" -parent_hash = "Hash" -post_state = "Hash" diff --git a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl b/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl deleted file mode 100644 index 46c1d77acf46cec2a0925ed80bc93487e7c12452..0000000000000000000000000000000000000000 --- a/polkadot/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl +++ /dev/null @@ -1,23 +0,0 @@ -Description: Async Backing Compatibility Test -Network: ./001-async-backing-compatibility.toml -Creds: config - -# General -alice: is up -bob: is up - -# Check authority status -alice: reports node_roles is 4 -bob: reports node_roles is 4 - -# Check peers -alice: reports peers count is at least 2 within 20 seconds -bob: reports peers count is at least 2 within 20 seconds - -# Parachain registration -alice: parachain 100 is registered within 225 seconds -bob: parachain 100 is registered within 225 seconds - -# Ensure parachain progress -alice: parachain 100 block height is at least 10 within 250 seconds -bob: parachain 100 block height is at least 10 within 250 seconds diff --git a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml b/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml deleted file mode 100644 index e61f7dd47ef6e09ed82173b21e58cd4445a8da28..0000000000000000000000000000000000000000 --- a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml +++ /dev/null @@ -1,54 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" -default_command = "polkadot" - - [relaychain.default_resources] - limits = { memory = "4G", cpu = "2" } - requests = { memory = "2G", cpu = "1" } - - [[relaychain.nodes]] - name = "alice" - args = [ "-lparachain=debug,runtime=debug"] 
- - [[relaychain.nodes]] - name = "bob" - args = [ "-lparachain=debug,runtime=debug"] - - [[relaychain.nodes]] - name = "charlie" - image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}" - args = [ "-lparachain=debug,runtime=debug"] - - [[relaychain.nodes]] - name = "dave" - image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}" - args = [ "-lparachain=debug,runtime=debug"] - -[[parachains]] -id = 100 -addToGenesis = true - - [parachains.collator] - name = "collator02" - image = "{{COL_IMAGE}}" - command = "undying-collator" - args = ["-lparachain=debug"] - -[[parachains]] -id = 101 -addToGenesis = true - - [parachains.collator] - name = "collator02" - image = "{{COL_IMAGE}}" - command = "undying-collator" - args = ["-lparachain=debug"] - -[types.Header] -number = "u64" -parent_hash = "Hash" -post_state = "Hash" diff --git a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl b/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl deleted file mode 100644 index 6213d1afb81e23ecd441cc566551461b0a802033..0000000000000000000000000000000000000000 --- a/polkadot/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl +++ /dev/null @@ -1,34 +0,0 @@ -Description: Async Backing Runtime Upgrade Test -Network: ./002-async-backing-runtime-upgrade.toml -Creds: config - -# General -alice: is up -bob: is up -charlie: is up -dave: is up - -# Check peers -alice: reports peers count is at least 3 within 20 seconds -bob: reports peers count is at least 3 within 20 seconds - -# Parachain registration -alice: parachain 100 is registered within 225 seconds -bob: parachain 100 is registered within 225 seconds -charlie: parachain 100 is registered within 225 seconds -dave: parachain 100 is registered within 225 seconds -alice: parachain 101 is registered within 225 seconds -bob: parachain 101 is registered within 225 seconds -charlie: parachain 101 is registered within 225 seconds -dave: parachain 101 is registered within 225 seconds - -# Ensure parachain progress -alice: parachain 100 block height is at least 10 within 250 seconds -bob: parachain 100 block height is at least 10 within 250 seconds - -# Runtime upgrade (according to previous runtime tests, avg. 
is 30s) -alice: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds -bob: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds - -# Bootstrap the runtime upgrade -sleep 30 seconds diff --git a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml b/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml deleted file mode 100644 index 4dca4d3d531268c45026a7da938b186334aee5de..0000000000000000000000000000000000000000 --- a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.toml +++ /dev/null @@ -1,40 +0,0 @@ -[settings] -timeout = 1000 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "rococo-local" -default_command = "polkadot" - - [relaychain.default_resources] - limits = { memory = "4G", cpu = "2" } - requests = { memory = "2G", cpu = "1" } - - [[relaychain.nodes]] - name = "alice" - args = [ "-lparachain=debug"] - - [[relaychain.nodes]] - name = "bob" - image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}" - args = [ "-lparachain=debug"] - -[[parachains]] -id = 100 - - [[parachains.collators]] - name = "collator01" - image = "docker.io/paritypr/colander:master" - command = "undying-collator" - args = ["-lparachain=debug"] - - [[parachains.collators]] - name = "collator02" - image = "{{COL_IMAGE}}" - command = "undying-collator" - args = ["-lparachain=debug"] - -[types.Header] -number = "u64" -parent_hash = "Hash" -post_state = "Hash" diff --git a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl b/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl deleted file mode 100644 index 98436b0459cf80a4f7f494d7af9a6684da73cdf9..0000000000000000000000000000000000000000 --- a/polkadot/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl +++ /dev/null @@ -1,19 +0,0 @@ -Description: Async Backing Collator Mix Test -Network: ./003-async-backing-collator-mix.toml -Creds: config - -# General -alice: is up -bob: is up - -# Check peers -alice: reports peers count is at least 3 within 20 seconds -bob: reports peers count is at least 3 within 20 seconds - -# Parachain registration -alice: parachain 100 is registered within 225 seconds -bob: parachain 100 is registered within 225 seconds - -# Ensure parachain progress -alice: parachain 100 block height is at least 10 within 250 seconds -bob: parachain 100 block height is at least 10 within 250 seconds diff --git a/polkadot/zombienet_tests/async_backing/README.md b/polkadot/zombienet_tests/async_backing/README.md deleted file mode 100644 index 9774ea3c25c96d434643d1e565a3e933fbc2412c..0000000000000000000000000000000000000000 --- a/polkadot/zombienet_tests/async_backing/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# async-backing zombienet tests - -This directory contains zombienet tests made explicitly for the async-backing feature branch. - -## coverage - -- Network protocol upgrade deploying both master and async branch (compatibility). -- Runtime ugprade while running both master and async backing branch nodes. -- Async backing test with a mix of collators collating via async backing and sync backing. 
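The `polkadot/zombienet_tests/misc/0002-download-polkadot-from-pr.sh` hunk below replaces three sequential `curl` invocations with backgrounded ones followed by `wait`, so the three binaries download concurrently. A minimal sketch of that shell pattern, assuming `$1` is the artifacts base URL exactly as in the script (the loop and the `bin` variable are illustrative and not part of the actual change):

#!/usr/bin/env bash
set -e
# Start all three downloads in the background so they run concurrently.
for bin in polkadot polkadot-prepare-worker polkadot-execute-worker; do
  curl -L -O "$1/$bin" &
done
# Block until every backgrounded download has completed. Note that `wait`
# with no arguments returns 0 even if a job failed, so verify the files
# exist afterwards if failure detection matters.
wait

The diff itself keeps three explicit `curl ... &` lines rather than a loop, which stays closer to the original script while gaining the same concurrency.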
diff --git a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml index a0a87d60d4e3ec381686a8dd02dbb605209dd2af..e6aeb8e245c2bab3e53628498aeb2c7caa626e0f 100644 --- a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml +++ b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml @@ -1,7 +1,7 @@ [settings] timeout = 1000 -[relaychain.genesis.runtime.runtime_genesis_config.configuration.config] +[relaychain.genesis.runtime.configuration.config] max_validators_per_core = 5 needed_approvals = 8 diff --git a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml index 7c4f5a9f1bcab8550755bfefadd1a1c425b4ac06..ef27d7b92f029d7978c9b7ab1a175c882fbb1d70 100644 --- a/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml +++ b/polkadot/zombienet_tests/functional/0004-parachains-garbage-candidate.toml @@ -2,7 +2,7 @@ timeout = 1000 bootnode = true -[relaychain.genesis.runtime.runtime_genesis_config.configuration.config] +[relaychain.genesis.runtime.configuration.config] max_validators_per_core = 1 needed_approvals = 2 diff --git a/polkadot/zombienet_tests/misc/0001-paritydb.toml b/polkadot/zombienet_tests/misc/0001-paritydb.toml index 38fa56898196ae21934e786a26cb6265ed915282..99dc9c66e26ee6b04bfea1951d7818e23be70118 100644 --- a/polkadot/zombienet_tests/misc/0001-paritydb.toml +++ b/polkadot/zombienet_tests/misc/0001-paritydb.toml @@ -2,7 +2,7 @@ timeout = 1000 bootnode = true -[relaychain.genesis.runtime.runtime_genesis_config.configuration.config] +[relaychain.genesis.runtime.configuration.config] max_validators_per_core = 1 needed_approvals = 3 diff --git a/polkadot/zombienet_tests/misc/0002-download-polkadot-from-pr.sh b/polkadot/zombienet_tests/misc/0002-download-polkadot-from-pr.sh index 0d4b2807579564f0fcbad3e43f400f8a68f082fc..b55534559278df65cab369ea02d924abdc5746ad 100644 --- a/polkadot/zombienet_tests/misc/0002-download-polkadot-from-pr.sh +++ b/polkadot/zombienet_tests/misc/0002-download-polkadot-from-pr.sh @@ -12,8 +12,10 @@ export PATH=$CFG_DIR:$PATH cd $CFG_DIR # see 0002-upgrade-node.zndsl to view the args. -curl -L -O $1/polkadot -curl -L -O $1/polkadot-prepare-worker -curl -L -O $1/polkadot-execute-worker +curl -L -O $1/polkadot & +curl -L -O $1/polkadot-prepare-worker & +curl -L -O $1/polkadot-execute-worker & +wait + chmod +x $CFG_DIR/polkadot $CFG_DIR/polkadot-prepare-worker $CFG_DIR/polkadot-execute-worker echo $(polkadot --version) diff --git a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl index fdf16b7286c910bef0d2052a8a1f6c5c595352d7..9191fb027de0d0240805d6cb65e68adfc4f15942 100644 --- a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl +++ b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl @@ -11,8 +11,8 @@ dave: parachain 2001 block height is at least 10 within 200 seconds # with the version of polkadot you want to download. 
# avg 30s in our infra -alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 40 seconds -bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 40 seconds +alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds +bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds alice: restart after 5 seconds bob: restart after 5 seconds diff --git a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml index 0becb408550a61e4a05af7ca80b4df79acedee00..d72e3ebdb3354dedc5c90e50dd697d1883365d0f 100644 --- a/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml +++ b/polkadot/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.toml @@ -30,8 +30,8 @@ cumulus_based = true [parachains.collator] name = "collator01" - image = "{{COL_IMAGE}}" - command = "polkadot-collator" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" [[parachains.collator.env]] name = "RUST_LOG" diff --git a/scripts/update-ui-tests.sh b/scripts/update-ui-tests.sh new file mode 100755 index 0000000000000000000000000000000000000000..785cc7bd3291ccdaff1d80558e9f3d52418068c3 --- /dev/null +++ b/scripts/update-ui-tests.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Script for updating the UI tests for a new rust stable version. +# Exit on error +set -e + +# by default current rust stable will be used +RUSTUP_RUN="" +# check if we have a parameter +# ./scripts/update-ui-tests.sh 1.70 +if [ ! -z "$1" ]; then + echo "RUST_VERSION: $1" + # This will run all UI tests with the rust stable 1.70. + # The script requires that rustup is installed. + RUST_VERSION=$1 + RUSTUP_RUN="rustup run $RUST_VERSION" + + + echo "installing rustup $RUST_VERSION" + if ! command -v rustup &> /dev/null + then + echo "rustup needs to be installed" + exit + fi + + rustup install $RUST_VERSION + rustup component add rust-src --toolchain $RUST_VERSION +fi + +# Ensure we run the ui tests +export RUN_UI_TESTS=1 +# We don't need any wasm files for ui tests +export SKIP_WASM_BUILD=1 +# Let trybuild overwrite the .stderr files +export TRYBUILD=overwrite + +# ./substrate +$RUSTUP_RUN cargo test -p sp-runtime-interface ui +$RUSTUP_RUN cargo test -p sp-api-test ui +$RUSTUP_RUN cargo test -p frame-election-provider-solution-type ui +$RUSTUP_RUN cargo test -p frame-support-test ui diff --git a/substrate/.maintain/init.sh b/substrate/.maintain/init.sh deleted file mode 100755 index 1405a41ef333e6af863080d83f854d3edb5fb4fa..0000000000000000000000000000000000000000 --- a/substrate/.maintain/init.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e - -echo "*** Initializing WASM build environment" - -if [ -z $CI_PROJECT_NAME ] ; then - rustup update nightly - rustup update stable -fi - -rustup target add wasm32-unknown-unknown --toolchain nightly diff --git a/substrate/.maintain/update-rust-stable.sh b/substrate/.maintain/update-rust-stable.sh deleted file mode 100755 index b253bb4105313c6663948e55ee826dff3059d22d..0000000000000000000000000000000000000000 --- a/substrate/.maintain/update-rust-stable.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# -# Script for updating the UI tests for a new rust stable version. -# -# It needs to be called like this: -# -# update-rust-stable.sh 1.61 -# -# This will run all UI tests with the rust stable 1.61. 
The script -# requires that rustup is installed. -set -e - -if [ "$#" -ne 1 ]; then - echo "Please specify the rust version to use. E.g. update-rust-stable.sh 1.61" - exit -fi - -RUST_VERSION=$1 - -if ! command -v rustup &> /dev/null -then - echo "rustup needs to be installed" - exit -fi - -rustup install $RUST_VERSION -rustup component add rust-src --toolchain $RUST_VERSION - -# Ensure we run the ui tests -export RUN_UI_TESTS=1 -# We don't need any wasm files for ui tests -export SKIP_WASM_BUILD=1 -# Let trybuild overwrite the .stderr files -export TRYBUILD=overwrite - -# Run all the relevant UI tests -# -# Any new UI tests in different crates need to be added here as well. -rustup run $RUST_VERSION cargo test -p sp-runtime-interface ui -rustup run $RUST_VERSION cargo test -p sp-api-test ui -rustup run $RUST_VERSION cargo test -p frame-election-provider-solution-type ui -rustup run $RUST_VERSION cargo test -p frame-support-test ui diff --git a/substrate/bin/node-template/runtime/Cargo.toml b/substrate/bin/node-template/runtime/Cargo.toml index 65d0cfca59c418baaf1bd3d57013f3c6108c2cf7..caca54ce2ba4e87bfb3eefcf10b85ee7edf19185 100644 --- a/substrate/bin/node-template/runtime/Cargo.toml +++ b/substrate/bin/node-template/runtime/Cargo.toml @@ -39,6 +39,7 @@ sp-std = { path = "../../../primitives/std", default-features = false} sp-storage = { path = "../../../primitives/storage", default-features = false} sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false} sp-version = { path = "../../../primitives/version", default-features = false} +sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../../primitives/genesis-builder" } # Used for the node template's RPCs frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false} @@ -79,6 +80,7 @@ std = [ "sp-consensus-aura/std", "sp-consensus-grandpa/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/substrate/bin/node-template/runtime/src/lib.rs index 216be9588bca1ad936a4a5941a24c100b79e22e6..4653b49bf2c344c6cd433a5881565488a0d50fa9 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/substrate/bin/node-template/runtime/src/lib.rs @@ -23,6 +23,7 @@ use sp_std::prelude::*; use sp_version::NativeVersion; use sp_version::RuntimeVersion; +use frame_support::genesis_builder_helper::{build_config, create_default_config}; // A few exports that help ease life for downstream crates. pub use frame_support::{ construct_runtime, parameter_types, @@ -571,4 +572,14 @@ impl_runtime_apis! 
{ Executive::try_execute_block(block, state_root_check, signature_check, select).expect("execute-block failed") } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } diff --git a/substrate/bin/node-template/scripts/init.sh b/substrate/bin/node-template/scripts/init.sh deleted file mode 100755 index f976f7235d700c8f2e5064bd638ff9fb4d7ff48b..0000000000000000000000000000000000000000 --- a/substrate/bin/node-template/scripts/init.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -# This script is meant to be run on Unix/Linux based systems -set -e - -echo "*** Initializing WASM build environment" - -if [ -z $CI_PROJECT_NAME ] ; then - rustup update nightly - rustup update stable -fi - -rustup target add wasm32-unknown-unknown --toolchain nightly diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs index f14f89fcd3ab2da9aab235a89606730295b30666..23d0a0cc1ee5e1974e7394e841837e074ae04fc4 100644 --- a/substrate/bin/node/bench/src/construct.rs +++ b/substrate/bin/node/bench/src/construct.rs @@ -35,7 +35,7 @@ use sc_transaction_pool_api::{ }; use sp_consensus::{Environment, Proposer}; use sp_inherents::InherentDataProvider; -use sp_runtime::{generic::BlockId, traits::NumberFor, OpaqueExtrinsic}; +use sp_runtime::{traits::NumberFor, OpaqueExtrinsic}; use crate::{ common::SizeType, @@ -233,7 +233,7 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { /// Returns a future that imports a bunch of unverified transactions to the pool. fn submit_at( &self, - _at: &BlockId<Self::Block>, + _at: Self::Hash, _source: TransactionSource, _xts: Vec<TransactionFor<Self>>, ) -> PoolFuture<Vec<Result<node_primitives::Hash, Self::Error>>, Self::Error> { @@ -243,7 +243,7 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { /// Returns a future that imports one unverified transaction to the pool. 
fn submit_one( &self, - _at: &BlockId<Self::Block>, + _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor<Self>, ) -> PoolFuture<TxHash<Self>, Self::Error> { @@ -252,7 +252,7 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { fn submit_and_watch( &self, - _at: &BlockId<Self::Block>, + _at: Self::Hash, _source: TransactionSource, _xt: TransactionFor<Self>, ) -> PoolFuture<Pin<Box<TransactionStatusStreamFor<Self>>>, Self::Error> { diff --git a/substrate/bin/node/bench/src/txpool.rs b/substrate/bin/node/bench/src/txpool.rs index a3524ac5bc8901be636e23c2cc666feb7cfb460d..e56bb559a7b555f192f552e750d20f2313a2a0a9 100644 --- a/substrate/bin/node/bench/src/txpool.rs +++ b/substrate/bin/node/bench/src/txpool.rs @@ -27,7 +27,6 @@ use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes}; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{TransactionPool, TransactionSource}; -use sp_runtime::generic::BlockId; use crate::core::{self, Mode, Path}; @@ -58,10 +57,11 @@ impl core::BenchmarkDescription for PoolBenchmarkDescription { impl core::Benchmark for PoolBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { let context = self.database.create_context(); + let genesis_hash = context.client.chain_info().genesis_hash; let _ = context .client - .runtime_version_at(context.client.chain_info().genesis_hash) + .runtime_version_at(genesis_hash) .expect("Failed to get runtime version") .spec_version; @@ -90,7 +90,7 @@ impl core::Benchmark for PoolBenchmark { let start = std::time::Instant::now(); let submissions = generated_transactions .into_iter() - .map(|tx| txpool.submit_one(&BlockId::Number(0), TransactionSource::External, tx)); + .map(|tx| txpool.submit_one(genesis_hash, TransactionSource::External, tx)); futures::executor::block_on(futures::future::join_all(submissions)); let elapsed = start.elapsed(); diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index 9847b8d3cba5760616a4b574f0285cb18a8d075f..a212bb64c5d1201f768ea6b6529a07b21a2f22a2 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -34,7 +34,7 @@ use sc_transaction_pool::PoolLimit; use sc_transaction_pool_api::{TransactionPool as _, TransactionSource, TransactionStatus}; use sp_core::{crypto::Pair, sr25519}; use sp_keyring::Sr25519Keyring; -use sp_runtime::{generic::BlockId, OpaqueExtrinsic}; +use sp_runtime::OpaqueExtrinsic; use tokio::runtime::Handle; fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { @@ -192,7 +192,7 @@ async fn submit_tx_and_wait_for_inclusion( let best_hash = client.chain_info().best_hash; let mut watch = tx_pool - .submit_and_watch(&BlockId::Hash(best_hash), TransactionSource::External, tx.clone()) + .submit_and_watch(best_hash, TransactionSource::External, tx.clone()) .await .expect("Submits tx to pool") .fuse(); diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 09c1fd9c6f315888a54c65b066a5850a7e11a2b8..7771b5f209719a49391da03e73eba68ceea1dd7e 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -32,6 +32,7 @@ sp-authority-discovery = { path = "../../../primitives/authority-discovery", def sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false} sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false} sp-block-builder = { path = 
"../../../primitives/block-builder", default-features = false} +sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../../primitives/genesis-builder" } sp-inherents = { path = "../../../primitives/inherents", default-features = false} node-primitives = { path = "../primitives", default-features = false} sp-offchain = { path = "../../../primitives/offchain", default-features = false} @@ -231,6 +232,7 @@ std = [ "sp-consensus-babe/std", "sp-consensus-grandpa/std", "sp-core/std", + "sp-genesis-builder/std", "sp-inherents/std", "sp-io/std", "sp-offchain/std", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index df8fb06467d99dd74a5b749e66263170c34a3607..2fcb20ad8daa19a5028aef6b20b627b914fe74c4 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -30,6 +30,7 @@ use frame_election_provider_support::{ use frame_support::{ construct_runtime, dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, instances::{Instance1, Instance2}, ord_parameter_types, pallet_prelude::Get, @@ -1288,6 +1289,7 @@ impl pallet_tips::Config for Runtime { type TipCountdown = TipCountdown; type TipFindersFee = TipFindersFee; type TipReportDepositBase = TipReportDepositBase; + type MaxTipAmount = ConstU128<{ 500 * DOLLARS }>; type WeightInfo = pallet_tips::weights::SubstrateWeight<Runtime>; } @@ -2750,6 +2752,16 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_genesis_builder::GenesisBuilder<Block> for Runtime { + fn create_default_config() -> Vec<u8> { + create_default_config::<RuntimeGenesisConfig>() + } + + fn build_config(config: Vec<u8>) -> sp_genesis_builder::Result { + build_config::<RuntimeGenesisConfig>(config) + } + } } #[cfg(test)] diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index b3a8f0d8970b6148a68abe9abbfa98080a84e4d3..0fb61b6fab1f6fb0f4f3efb68e8751e29466ae7b 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -642,8 +642,8 @@ mod tests { client.clone(), ); - block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)])) - .unwrap(); + let hashof0 = client.info().genesis_hash; + block_on(txpool.submit_at(hashof0, SOURCE, vec![extrinsic(0), extrinsic(1)])).unwrap(); block_on( txpool.maintain(chain_event( @@ -658,7 +658,7 @@ mod tests { let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( - &client.expect_header(client.info().genesis_hash).unwrap(), + &client.expect_header(hashof0).unwrap(), Box::new(move || { let mut value = cell.lock(); if !value.0 { @@ -736,7 +736,7 @@ mod tests { let genesis_hash = client.info().best_hash; - block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)])).unwrap(); + block_on(txpool.submit_at(genesis_hash, SOURCE, vec![extrinsic(0)])).unwrap(); block_on( txpool.maintain(chain_event( @@ -800,7 +800,7 @@ mod tests { }; block_on(txpool.submit_at( - &BlockId::number(0), + client.info().genesis_hash, SOURCE, vec![medium(0), medium(1), huge(2), medium(3), huge(4), medium(5), medium(6)], )) @@ -897,9 +897,8 @@ mod tests { spawner.clone(), client.clone(), ); - let genesis_header = client - .expect_header(client.info().genesis_hash) - .expect("there should be header"); + let genesis_hash = client.info().genesis_hash; + let genesis_header = 
client.expect_header(genesis_hash).expect("there should be header"); let extrinsics_num = 5; let extrinsics = std::iter::once( @@ -922,7 +921,7 @@ mod tests { .sum::<usize>() + Vec::<Extrinsic>::new().encoded_size(); - block_on(txpool.submit_at(&BlockId::number(0), SOURCE, extrinsics.clone())).unwrap(); + block_on(txpool.submit_at(genesis_hash, SOURCE, extrinsics.clone())).unwrap(); block_on(txpool.maintain(chain_event(genesis_header.clone()))); @@ -999,6 +998,7 @@ mod tests { spawner.clone(), client.clone(), ); + let genesis_hash = client.info().genesis_hash; let tiny = |nonce| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)).nonce(nonce).build() @@ -1011,7 +1011,7 @@ mod tests { block_on( txpool.submit_at( - &BlockId::number(0), + genesis_hash, SOURCE, // add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources (0..MAX_SKIPPED_TRANSACTIONS * 2) @@ -1024,13 +1024,9 @@ mod tests { ) .unwrap(); - block_on( - txpool.maintain(chain_event( - client - .expect_header(client.info().genesis_hash) - .expect("there should be header"), - )), - ); + block_on(txpool.maintain(chain_event( + client.expect_header(genesis_hash).expect("there should be header"), + ))); assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3); let mut proposer_factory = @@ -1038,7 +1034,7 @@ mod tests { let cell = Mutex::new(time::Instant::now()); let proposer = proposer_factory.init_with_now( - &client.expect_header(client.info().genesis_hash).unwrap(), + &client.expect_header(genesis_hash).unwrap(), Box::new(move || { let mut value = cell.lock(); let old = *value; @@ -1071,6 +1067,7 @@ mod tests { spawner.clone(), client.clone(), ); + let genesis_hash = client.info().genesis_hash; let tiny = |who| { ExtrinsicBuilder::new_fill_block(Perbill::from_parts(TINY)) @@ -1086,7 +1083,7 @@ mod tests { block_on( txpool.submit_at( - &BlockId::number(0), + genesis_hash, SOURCE, (0..MAX_SKIPPED_TRANSACTIONS + 2) .into_iter() @@ -1098,13 +1095,9 @@ mod tests { ) .unwrap(); - block_on( - txpool.maintain(chain_event( - client - .expect_header(client.info().genesis_hash) - .expect("there should be header"), - )), - ); + block_on(txpool.maintain(chain_event( + client.expect_header(genesis_hash).expect("there should be header"), + ))); assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 4); let mut proposer_factory = @@ -1114,7 +1107,7 @@ mod tests { let cell = Arc::new(Mutex::new((0, time::Instant::now()))); let cell2 = cell.clone(); let proposer = proposer_factory.init_with_now( - &client.expect_header(client.info().genesis_hash).unwrap(), + &client.expect_header(genesis_hash).unwrap(), Box::new(move || { let mut value = cell.lock(); let (called, old) = *value; diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs index 67acb82c2c3042fd8eb72f8241ec3e85e1a43688..c55b97675da25d871736d517081280508ce5ed1e 100644 --- a/substrate/client/cli/src/arg_enums.rs +++ b/substrate/client/cli/src/arg_enums.rs @@ -125,7 +125,7 @@ pub enum CryptoScheme { Ed25519, /// Use sr25519. Sr25519, - /// Use + /// Use ecdsa. 
Ecdsa, } diff --git a/substrate/client/cli/src/commands/utils.rs b/substrate/client/cli/src/commands/utils.rs index ff159909b879c18f6f081021c4e97ed4e8417fdb..fc725f570e5ec2c3011012145bce1d390fa636a9 100644 --- a/substrate/client/cli/src/commands/utils.rs +++ b/substrate/client/cli/src/commands/utils.rs @@ -136,7 +136,7 @@ pub fn print_from_uri<Pair>( OutputType::Text => { println!( "Secret Key URI `{}` is account:\n \ - Network ID: {} \n \ + Network ID: {}\n \ Secret seed: {}\n \ Public key (hex): {}\n \ Account ID: {}\n \ diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index 1e5db966e66dbdbc453f9cdc04ff80d2f15ad9b2..41cd5f3127e8ead7508bb2a7c375aab9bc355892 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -351,7 +351,7 @@ mod tests { use sc_transaction_pool::{BasicPool, FullChainApi, Options, RevalidationType}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; use sp_inherents::InherentData; - use sp_runtime::generic::{BlockId, Digest, DigestItem}; + use sp_runtime::generic::{Digest, DigestItem}; use substrate_test_runtime_client::{ AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; @@ -400,10 +400,11 @@ mod tests { let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); let genesis_hash = client.info().genesis_hash; + let pool_api = Arc::new(FullChainApi::new(client.clone(), None, &spawner.clone())); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), - api(), + pool_api, None, RevalidationType::Full, spawner.clone(), @@ -444,7 +445,7 @@ mod tests { rt.block_on(future); }); // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); // assert that the background task returns ok @@ -475,10 +476,11 @@ mod tests { let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); let genesis_hash = client.info().genesis_hash; + let pool_api = Arc::new(FullChainApi::new(client.clone(), None, &spawner.clone())); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), - api(), + pool_api, None, RevalidationType::Full, spawner.clone(), @@ -535,7 +537,7 @@ mod tests { let mut finality_stream = client.finality_notification_stream(); // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); // assert that the background task returns ok @@ -571,10 +573,11 @@ mod tests { let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); let genesis_hash = client.info().genesis_hash; + let pool_api = Arc::new(FullChainApi::new(client.clone(), None, &spawner.clone())); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), - api(), + pool_api, None, RevalidationType::Full, spawner.clone(), @@ -602,7 +605,7 @@ mod tests { rt.block_on(future); }); // submit a transaction to pool. 
- let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); let (tx, rx) = futures::channel::oneshot::channel(); @@ -688,7 +691,7 @@ mod tests { rt.block_on(future); }); // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; + let result = pool.submit_one(genesis_hash, SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); @@ -719,7 +722,7 @@ mod tests { } ); - assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); + assert!(pool.submit_one(created_block.hash, SOURCE, uxt(Alice, 1)).await.is_ok()); let header = client.header(created_block.hash).expect("db error").expect("imported above"); assert_eq!(header.number, 1); @@ -741,7 +744,7 @@ mod tests { .is_ok()); assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); - assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok()); + assert!(pool.submit_one(created_block.hash, SOURCE, uxt(Bob, 0)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); assert!(sink .send(EngineCommand::SealNewBlock { diff --git a/substrate/client/network/common/src/sync.rs b/substrate/client/network/common/src/sync.rs index 5a6f90b290d2f20fc76737d1d32fa804ade51f2c..8ef0fafa1c795ce2d5eb1b6f801c2816edf56784 100644 --- a/substrate/client/network/common/src/sync.rs +++ b/substrate/client/network/common/src/sync.rs @@ -36,7 +36,7 @@ use sp_runtime::{ }; use warp::WarpSyncProgress; -use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc, task::Poll}; +use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc}; /// The sync status of a peer we are trying to sync with #[derive(Debug)] @@ -204,6 +204,23 @@ pub enum PeerRequest<B: BlockT> { WarpProof, } +#[derive(Debug)] +pub enum PeerRequestType { + Block, + State, + WarpProof, +} + +impl<B: BlockT> PeerRequest<B> { + pub fn get_type(&self) -> PeerRequestType { + match self { + PeerRequest::Block(_) => PeerRequestType::Block, + PeerRequest::State => PeerRequestType::State, + PeerRequest::WarpProof => PeerRequestType::WarpProof, + } + } +} + /// Wrapper for implementation-specific state request. /// /// NOTE: Implementation must be able to encode and decode it for network purposes. @@ -289,9 +306,6 @@ pub trait ChainSync<Block: BlockT>: Send { /// Returns the current number of peers stored within this state machine. fn num_peers(&self) -> usize; - /// Returns the number of peers we're connected to and that are being queried. - fn num_active_peers(&self) -> usize; - /// Handle a new connected peer. /// /// Call this method whenever we connect to a new peer. @@ -369,10 +383,4 @@ pub trait ChainSync<Block: BlockT>: Send { /// Return some key metrics. 
fn metrics(&self) -> Metrics; - - /// Advance the state of `ChainSync` - fn poll(&mut self, cx: &mut std::task::Context) -> Poll<()>; - - /// Send block request to peer - fn send_block_request(&mut self, who: PeerId, request: BlockRequest<Block>); } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index f10dd7869bbbc9f14522fae50c6fa166b84a716f..39312cc4b327dbc024a8cc3d3a9b934ae6bee315 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -29,6 +29,7 @@ prost = "0.11" schnellru = "0.2.1" smallvec = "1.11.0" thiserror = "1.0" +tokio-stream = "0.1.14" fork-tree = { path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index c93ba89c6220e7625cfd662bea984eebfa81d1b7..8b8874ad4ebde1c26ffe5ca8cf8b3dcd283811ce 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -23,10 +23,12 @@ use crate::{ block_announce_validator::{ BlockAnnounceValidationResult, BlockAnnounceValidator as BlockAnnounceValidatorStream, }, - block_relay_protocol::BlockDownloader, + block_relay_protocol::{BlockDownloader, BlockResponseError}, + pending_responses::{PendingResponses, ResponseEvent}, + schema::v1::{StateRequest, StateResponse}, service::{self, chain_sync::ToServiceCommand}, warp::WarpSyncParams, - ChainSync, ClientError, SyncingService, + BlockRequestEvent, ChainSync, ClientError, SyncingService, }; use codec::{Decode, Encode}; @@ -36,24 +38,32 @@ use futures::{ FutureExt, StreamExt, }; use futures_timer::Delay; -use libp2p::PeerId; +use libp2p::{request_response::OutboundFailure, PeerId}; +use log::{debug, trace}; use prometheus_endpoint::{ register, Gauge, GaugeVec, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64, }; +use prost::Message; use schnellru::{ByLength, LruMap}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::ImportQueueService; use sc_network::{ - config::{FullNetworkConfiguration, NonDefaultSetConfig, ProtocolId}, + config::{ + FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, + ProtocolId, SetConfig, + }, + request_responses::{IfDisconnected, RequestFailure}, utils::LruHashSet, NotificationsSink, ProtocolName, ReputationChange, }; use sc_network_common::{ role::Roles, sync::{ - message::{BlockAnnounce, BlockAnnouncesHandshake, BlockState}, - BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, SyncEvent, + message::{BlockAnnounce, BlockAnnouncesHandshake, BlockRequest, BlockState}, + warp::{EncodedProof, WarpProofRequest}, + BadPeer, ChainSync as ChainSyncT, ExtendedPeerInfo, OpaqueStateRequest, + OpaqueStateResponse, PeerRequest, SyncEvent, }, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -63,6 +73,7 @@ use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero}; use std::{ collections::{HashMap, HashSet}, + iter, num::NonZeroUsize, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, @@ -98,6 +109,9 @@ const INACTIVITY_EVICT_THRESHOLD: Duration = Duration::from_secs(30); /// before it starts evicting peers. const INITIAL_EVICTION_WAIT_PERIOD: Duration = Duration::from_secs(2 * 60); +/// Maximum allowed size for a block announce. 
+const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; + mod rep { use sc_network::ReputationChange as Rep; /// Peer has different genesis. @@ -106,6 +120,14 @@ mod rep { pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement"); /// Block announce substream with the peer has been inactive too long pub const INACTIVE_SUBSTREAM: Rep = Rep::new(-(1 << 10), "Inactive block announce substream"); + /// We received a message that failed to decode. + pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); + /// Peer is on unsupported protocol version. + pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); + /// Reputation change when a peer refuses a request. + pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); + /// Reputation change when a peer doesn't respond in time to our messages. + pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); } struct Metrics { @@ -277,6 +299,18 @@ pub struct SyncingEngine<B: BlockT, Client> { /// Instant when the last notification was sent or received. last_notification_io: Instant, + + /// Pending responses + pending_responses: PendingResponses<B>, + + /// Block downloader + block_downloader: Arc<dyn BlockDownloader<B>>, + + /// Protocol name used to send out state requests + state_request_protocol_name: ProtocolName, + + /// Protocol name used to send out warp sync requests + warp_sync_protocol_name: Option<ProtocolName>, } impl<B: BlockT, Client> SyncingEngine<B, Client> @@ -381,24 +415,32 @@ where let warp_sync_target_block_header_rx = warp_sync_target_block_header_rx .map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse()); - let (chain_sync, block_announce_config) = ChainSync::new( - mode, - client.clone(), + let block_announce_config = Self::get_block_announce_proto_config( protocol_id, fork_id, roles, + client.info().best_number, + client.info().best_hash, + client + .block_hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + ); + let block_announce_protocol_name = block_announce_config.notifications_protocol.clone(); + + let chain_sync = ChainSync::new( + mode, + client.clone(), + block_announce_protocol_name.clone(), max_parallel_downloads, max_blocks_per_request, warp_sync_config, metrics_registry, network_service.clone(), import_queue, - block_downloader, - state_request_protocol_name, - warp_sync_protocol_name, )?; - let block_announce_protocol_name = block_announce_config.notifications_protocol.clone(); let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000); let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); @@ -455,6 +497,10 @@ where } else { None }, + pending_responses: PendingResponses::new(), + block_downloader, + state_request_protocol_name, + warp_sync_protocol_name, }, SyncingService::new(tx, num_connected, is_major_syncing), block_announce_config, @@ -682,11 +728,23 @@ where ToServiceCommand::BlocksProcessed(imported, count, results) => { for result in self.chain_sync.on_blocks_processed(imported, count, results) { match result { - Ok((id, req)) => self.chain_sync.send_block_request(id, req), - Err(BadPeer(id, repu)) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(id, repu) + Ok(event) => match event { + BlockRequestEvent::SendRequest { peer_id, request } => { + // drop obsolete pending response first + self.pending_responses.remove(&peer_id); + 
self.send_block_request(peer_id, request); }, BlockRequestEvent::RemoveStale { peer_id } => { self.pending_responses.remove(&peer_id); }, }, Err(BadPeer(peer_id, repu)) => { self.pending_responses.remove(&peer_id); self.network_service.disconnect_peer( peer_id, self.block_announce_protocol_name.clone(), ); self.network_service.report_peer(peer_id, repu) }, } } @@ -715,7 +773,7 @@ where let _ = tx.send(status); }, ToServiceCommand::NumActivePeers(tx) => { - let _ = tx.send(self.chain_sync.num_active_peers()); + let _ = tx.send(self.num_active_peers()); }, ToServiceCommand::SyncState(tx) => { let _ = tx.send(self.chain_sync.status()); @@ -817,8 +875,13 @@ where Poll::Pending => {}, } - // Drive `ChainSync`. - while let Poll::Ready(()) = self.chain_sync.poll(cx) {} + // Send outbound requests on `ChainSync`'s behalf. + self.send_chain_sync_requests(); + + // Poll & process pending responses. + while let Poll::Ready(Some(event)) = self.pending_responses.poll_next_unpin(cx) { + self.process_response_event(event); + } // Poll block announce validations last, because if a block announcement was received // through the event stream between `SyncingEngine` and `Protocol` and the validation @@ -860,6 +923,7 @@ where } self.chain_sync.peer_disconnected(&peer_id); + self.pending_responses.remove(&peer_id); self.event_streams.retain(|stream| { stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok() }); @@ -991,7 +1055,7 @@ where } if let Some(req) = req { - self.chain_sync.send_block_request(peer_id, req); + self.send_block_request(peer_id, req); } self.event_streams @@ -999,4 +1063,278 @@ where Ok(()) } + + fn send_chain_sync_requests(&mut self) { + for (peer_id, request) in self.chain_sync.block_requests() { + self.send_block_request(peer_id, request); + } + + if let Some((peer_id, request)) = self.chain_sync.state_request() { + self.send_state_request(peer_id, request); + } + + for (peer_id, request) in self.chain_sync.justification_requests() { + self.send_block_request(peer_id, request); + } + + if let Some((peer_id, request)) = self.chain_sync.warp_sync_request() { + self.send_warp_sync_request(peer_id, request); + } + } + + fn send_block_request(&mut self, peer_id: PeerId, request: BlockRequest<B>) { + if !self.chain_sync.is_peer_known(&peer_id) { + trace!(target: LOG_TARGET, "Cannot send block request to unknown peer {peer_id}"); + debug_assert!(false); + return + } + + let downloader = self.block_downloader.clone(); + + self.pending_responses.insert( + peer_id, + PeerRequest::Block(request.clone()), + async move { downloader.download_blocks(peer_id, request).await }.boxed(), + ); + } + + fn send_state_request(&mut self, peer_id: PeerId, request: OpaqueStateRequest) { + if !self.chain_sync.is_peer_known(&peer_id) { + trace!(target: LOG_TARGET, "Cannot send state request to unknown peer {peer_id}"); + debug_assert!(false); + return + } + + let (tx, rx) = oneshot::channel(); + + self.pending_responses.insert(peer_id, PeerRequest::State, rx.boxed()); + + match Self::encode_state_request(&request) { + Ok(data) => { + self.network_service.start_request( + peer_id, + self.state_request_protocol_name.clone(), + data, + tx, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: LOG_TARGET, + "Failed to encode state request {request:?}: {err:?}", + ); + }, + } + } + + fn send_warp_sync_request(&mut self, peer_id: PeerId, request: WarpProofRequest<B>) { + if !self.chain_sync.is_peer_known(&peer_id) { + trace!(target: LOG_TARGET, "Cannot send
warp proof request to unknown peer {peer_id}"); + debug_assert!(false); + return + } + + let (tx, rx) = oneshot::channel(); + + self.pending_responses.insert(peer_id, PeerRequest::WarpProof, rx.boxed()); + + match &self.warp_sync_protocol_name { + Some(name) => self.network_service.start_request( + peer_id, + name.clone(), + request.encode(), + tx, + IfDisconnected::ImmediateError, + ), + None => { + log::warn!( + target: LOG_TARGET, + "Trying to send warp sync request when no protocol is configured {request:?}", + ); + }, + } + } + + fn encode_state_request(request: &OpaqueStateRequest) -> Result<Vec<u8>, String> { + let request: &StateRequest = request.0.downcast_ref().ok_or_else(|| { + "Failed to downcast opaque state response during encoding, this is an \ + implementation bug." + .to_string() + })?; + + Ok(request.encode_to_vec()) + } + + fn decode_state_response(response: &[u8]) -> Result<OpaqueStateResponse, String> { + let response = StateResponse::decode(response) + .map_err(|error| format!("Failed to decode state response: {error}"))?; + + Ok(OpaqueStateResponse(Box::new(response))) + } + + fn process_response_event(&mut self, response_event: ResponseEvent<B>) { + let ResponseEvent { peer_id, request, response } = response_event; + + match response { + Ok(Ok(resp)) => match request { + PeerRequest::Block(req) => { + match self.block_downloader.block_response_into_blocks(&req, resp) { + Ok(blocks) => { + if let Some((peer_id, new_req)) = + self.chain_sync.on_block_response(peer_id, req, blocks) + { + self.send_block_request(peer_id, new_req); + } + }, + Err(BlockResponseError::DecodeFailed(e)) => { + debug!( + target: LOG_TARGET, + "Failed to decode block response from peer {:?}: {:?}.", + peer_id, + e + ); + self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); + self.network_service.disconnect_peer( + peer_id, + self.block_announce_protocol_name.clone(), + ); + return + }, + Err(BlockResponseError::ExtractionFailed(e)) => { + debug!( + target: LOG_TARGET, + "Failed to extract blocks from peer response {:?}: {:?}.", + peer_id, + e + ); + self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); + return + }, + } + }, + PeerRequest::State => { + let response = match Self::decode_state_response(&resp[..]) { + Ok(proto) => proto, + Err(e) => { + debug!( + target: LOG_TARGET, + "Failed to decode state response from peer {peer_id:?}: {e:?}.", + ); + self.network_service.report_peer(peer_id, rep::BAD_MESSAGE); + self.network_service.disconnect_peer( + peer_id, + self.block_announce_protocol_name.clone(), + ); + return + }, + }; + + self.chain_sync.on_state_response(peer_id, response); + }, + PeerRequest::WarpProof => { + self.chain_sync.on_warp_sync_response(peer_id, EncodedProof(resp)); + }, + }, + Ok(Err(e)) => { + debug!(target: LOG_TARGET, "Request to peer {peer_id:?} failed: {e:?}."); + + match e { + RequestFailure::Network(OutboundFailure::Timeout) => { + self.network_service.report_peer(peer_id, rep::TIMEOUT); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { + self.network_service.report_peer(peer_id, rep::BAD_PROTOCOL); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::Network(OutboundFailure::DialFailure) => { + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::Refused => { + self.network_service.report_peer(peer_id, 
rep::REFUSED); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::Network(OutboundFailure::ConnectionClosed) | + RequestFailure::NotConnected => { + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + RequestFailure::UnknownProtocol => { + debug_assert!(false, "Block request protocol should always be known."); + }, + RequestFailure::Obsolete => { + debug_assert!( + false, + "Can not receive `RequestFailure::Obsolete` after dropping the \ + response receiver.", + ); + }, + } + }, + Err(oneshot::Canceled) => { + trace!( + target: LOG_TARGET, + "Request to peer {peer_id:?} failed due to oneshot being canceled.", + ); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + }, + } + } + + /// Returns the number of peers we're connected to and that are being queried. + fn num_active_peers(&self) -> usize { + self.pending_responses.len() + } + + /// Get config for the block announcement protocol + fn get_block_announce_proto_config( + protocol_id: ProtocolId, + fork_id: &Option<String>, + roles: Roles, + best_number: NumberFor<B>, + best_hash: B::Hash, + genesis_hash: B::Hash, + ) -> NonDefaultSetConfig { + let block_announces_protocol = { + let genesis_hash = genesis_hash.as_ref(); + if let Some(ref fork_id) = fork_id { + format!( + "/{}/{}/block-announces/1", + array_bytes::bytes2hex("", genesis_hash), + fork_id + ) + } else { + format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) + } + }; + + NonDefaultSetConfig { + notifications_protocol: block_announces_protocol.into(), + fallback_names: iter::once( + format!("/{}/block-announces/1", protocol_id.as_ref()).into(), + ) + .collect(), + max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, + handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::<B>::build( + roles, + best_number, + best_hash, + genesis_hash, + ))), + // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement + // protocol is still hardcoded into the peerset. + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } } diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index 20c0034966d5438b0b219c80e9721f6bde8df084..ff00e8f90f82bf5ad76d6a9d83845e7215ed7265 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -29,44 +29,31 @@ //! order to update it. 
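For orientation, the routing that `process_response_event` above performs, condensed into a schematic (all names appear in this diff; this is a sketch, not part of the patch):
// PeerRequest::Block(req) -> block_downloader.block_response_into_blocks(&req, resp)
//                            -> chain_sync.on_block_response(peer_id, req, blocks)
//                               (which may hand back a follow-up block request to send)
// PeerRequest::State      -> Self::decode_state_response(&resp)
//                            -> chain_sync.on_state_response(peer_id, response)
// PeerRequest::WarpProof  -> chain_sync.on_warp_sync_response(peer_id, EncodedProof(resp))
// RequestFailure / canceled oneshot -> reputation change and/or disconnect, per the arms above.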
use crate::{ - block_relay_protocol::{BlockDownloader, BlockResponseError}, blocks::BlockCollection, - schema::v1::{StateRequest, StateResponse}, + schema::v1::StateResponse, state::StateSync, warp::{WarpProofImportResult, WarpSync, WarpSyncConfig}, }; use codec::Encode; use extra_requests::ExtraRequests; -use futures::{channel::oneshot, task::Poll, Future, FutureExt}; -use libp2p::{request_response::OutboundFailure, PeerId}; +use libp2p::PeerId; use log::{debug, error, info, trace, warn}; -use prost::Message; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{ import_queue::ImportQueueService, BlockImportError, BlockImportStatus, IncomingBlock, }; -use sc_network::{ - config::{ - NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, - }, - request_responses::{IfDisconnected, RequestFailure}, - types::ProtocolName, -}; -use sc_network_common::{ - role::Roles, - sync::{ - message::{ - BlockAnnounce, BlockAnnouncesHandshake, BlockAttributes, BlockData, BlockRequest, - BlockResponse, Direction, FromBlock, - }, - warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress}, - BadPeer, ChainSync as ChainSyncT, ImportResult, Metrics, OnBlockData, OnBlockJustification, - OnStateData, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, PeerRequest, SyncMode, - SyncState, SyncStatus, +use sc_network::types::ProtocolName; +use sc_network_common::sync::{ + message::{ + BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, + FromBlock, }, + warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress}, + BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, + OpaqueStateRequest, OpaqueStateResponse, PeerInfo, SyncMode, SyncState, SyncStatus, }; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; @@ -81,9 +68,7 @@ use sp_runtime::{ use std::{ collections::{HashMap, HashSet}, - iter, ops::Range, - pin::Pin, sync::Arc, }; @@ -92,6 +77,7 @@ pub use service::chain_sync::SyncingService; mod block_announce_validator; mod extra_requests; mod futures_stream; +mod pending_responses; mod schema; pub mod block_relay_protocol; @@ -131,9 +117,6 @@ const MAJOR_SYNC_BLOCKS: u8 = 5; /// Number of peers that need to be connected before warp sync is started. const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; -/// Maximum allowed size for a block announce. -const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; - /// Maximum blocks per response. pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128; @@ -170,18 +153,6 @@ mod rep { /// Peer response data does not have requested bits. pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); - - /// Reputation change when a peer doesn't respond in time to our messages. - pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); - - /// Peer is on unsupported protocol version. - pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); - - /// Reputation change when a peer refuses a request. - pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); - - /// We received a message that failed to decode. 
- pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); } enum AllowedRequests { @@ -261,17 +232,12 @@ struct GapSync<B: BlockT> { target: NumberFor<B>, } -type PendingResponse<B> = Pin< - Box< - dyn Future< - Output = ( - PeerId, - PeerRequest<B>, - Result<Result<Vec<u8>, RequestFailure>, oneshot::Canceled>, - ), - > + Send, - >, ->; +/// An event used to notify [`engine::SyncingEngine`] if we want to perform a block request +/// or drop an obsolete pending response. +enum BlockRequestEvent<B: BlockT> { + SendRequest { peer_id: PeerId, request: BlockRequest<B> }, + RemoveStale { peer_id: PeerId }, +} /// The main data structure which contains all the state for a chains /// active syncing strategy. @@ -322,14 +288,6 @@ pub struct ChainSync<B: BlockT, Client> { network_service: service::network::NetworkServiceHandle, /// Protocol name used for block announcements block_announce_protocol_name: ProtocolName, - /// Block downloader stub - block_downloader: Arc<dyn BlockDownloader<B>>, - /// Protocol name used to send out state requests - state_request_protocol_name: ProtocolName, - /// Protocol name used to send out warp sync requests - warp_sync_protocol_name: Option<ProtocolName>, - /// Pending responses - pending_responses: HashMap<PeerId, PendingResponse<B>>, /// Handle to import queue. import_queue: Box<dyn ImportQueueService<B>>, /// Metrics. @@ -491,10 +449,6 @@ where self.peers.len() } - fn num_active_peers(&self) -> usize { - self.pending_responses.len() - } - fn new_peer( &mut self, who: PeerId, @@ -1145,7 +1099,6 @@ where gap_sync.blocks.clear_peer_download(who) } self.peers.remove(who); - self.pending_responses.remove(who); self.extra_justifications.peer_disconnected(who); self.allowed_requests.set_all(); self.fork_targets.retain(|_, target| { @@ -1168,36 +1121,6 @@ where justifications: self.extra_justifications.metrics(), } } - - fn poll(&mut self, cx: &mut std::task::Context) -> Poll<()> { - self.process_outbound_requests(); - - while let Poll::Ready(result) = self.poll_pending_responses(cx) { - match result { - ImportResult::BlockImport(origin, blocks) => self.import_blocks(origin, blocks), - ImportResult::JustificationImport(who, hash, number, justifications) => - self.import_justifications(who, hash, number, justifications), - } - } - - Poll::Pending - } - - fn send_block_request(&mut self, who: PeerId, request: BlockRequest<B>) { - if self.peers.contains_key(&who) { - let downloader = self.block_downloader.clone(); - self.pending_responses.insert( - who, - Box::pin(async move { - ( - who, - PeerRequest::Block(request.clone()), - downloader.download_blocks(who, request).await, - ) - }), - ); - } - } } impl<B, Client> ChainSync<B, Client> @@ -1216,32 +1139,14 @@ where pub fn new( mode: SyncMode, client: Arc<Client>, - protocol_id: ProtocolId, - fork_id: &Option<String>, - roles: Roles, + block_announce_protocol_name: ProtocolName, max_parallel_downloads: u32, max_blocks_per_request: u32, warp_sync_config: Option<WarpSyncConfig<B>>, metrics_registry: Option<&Registry>, network_service: service::network::NetworkServiceHandle, import_queue: Box<dyn ImportQueueService<B>>, - block_downloader: Arc<dyn BlockDownloader<B>>, - state_request_protocol_name: ProtocolName, - warp_sync_protocol_name: Option<ProtocolName>, - ) -> Result<(Self, NonDefaultSetConfig), ClientError> { - let block_announce_config = Self::get_block_announce_proto_config( - protocol_id, - fork_id, - roles, - client.info().best_number, - client.info().best_hash, - client - .block_hash(Zero::zero()) - 
.ok() - .flatten() - .expect("Genesis block exists; qed"), - ); - + ) -> Result<Self, ClientError> { let mut sync = Self { client, peers: HashMap::new(), @@ -1261,16 +1166,9 @@ where import_existing: false, gap_sync: None, network_service, - block_downloader, - state_request_protocol_name, warp_sync_config, warp_sync_target_block_header: None, - warp_sync_protocol_name, - block_announce_protocol_name: block_announce_config - .notifications_protocol - .clone() - .into(), - pending_responses: HashMap::new(), + block_announce_protocol_name, import_queue, metrics: if let Some(r) = &metrics_registry { match SyncingMetrics::register(r) { @@ -1289,7 +1187,7 @@ where }; sync.reset_sync_start_point()?; - Ok((sync, block_announce_config)) + Ok(sync) } /// Returns the median seen block number. @@ -1413,7 +1311,7 @@ where /// Restart the sync process. This will reset all pending block requests and return an iterator /// of new block requests to make to peers. Peers that were downloading finality data (i.e. /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. - fn restart(&mut self) -> impl Iterator<Item = Result<(PeerId, BlockRequest<B>), BadPeer>> + '_ { + fn restart(&mut self) -> impl Iterator<Item = Result<BlockRequestEvent<B>, BadPeer>> + '_ { self.blocks.clear(); if let Err(e) = self.reset_sync_start_point() { warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}"); @@ -1427,23 +1325,23 @@ where ); let old_peers = std::mem::take(&mut self.peers); - old_peers.into_iter().filter_map(move |(id, mut p)| { + old_peers.into_iter().filter_map(move |(peer_id, mut p)| { // peers that were downloading justifications // should be kept in that state. if let PeerSyncState::DownloadingJustification(_) = p.state { // We make sure our common number is at least something we have. p.common_number = self.best_queued_number; - self.peers.insert(id, p); + self.peers.insert(peer_id, p); return None } - // since the request is not a justification, remove it from pending responses - self.pending_responses.remove(&id); - // handle peers that were in other states. - match self.new_peer(id, p.best_hash, p.best_number) { - Ok(None) => None, - Ok(Some(x)) => Some(Ok((id, x))), + match self.new_peer(peer_id, p.best_hash, p.best_number) { + // since the request is not a justification, remove it from pending responses + Ok(None) => Some(Ok(BlockRequestEvent::RemoveStale { peer_id })), + // update the request if the new one is available + Ok(Some(request)) => Some(Ok(BlockRequestEvent::SendRequest { peer_id, request })), + // this implies that we need to drop pending response from the peer Err(e) => Some(Err(e)), } }) @@ -1524,6 +1422,11 @@ where .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) } + /// Is the peer known to the sync state machine? + pub fn is_peer_known(&self, peer_id: &PeerId) -> bool { + self.peers.contains_key(peer_id) + } + + /// Get the set of downloaded blocks that are ready to be queued for import. 
fn ready_blocks(&mut self) -> Vec<IncomingBlock<B>> { self.blocks @@ -1588,117 +1491,12 @@ where None } - /// Get config for the block announcement protocol - pub fn get_block_announce_proto_config( - protocol_id: ProtocolId, - fork_id: &Option<String>, - roles: Roles, - best_number: NumberFor<B>, - best_hash: B::Hash, - genesis_hash: B::Hash, - ) -> NonDefaultSetConfig { - let block_announces_protocol = { - let genesis_hash = genesis_hash.as_ref(); - if let Some(ref fork_id) = fork_id { - format!( - "/{}/{}/block-announces/1", - array_bytes::bytes2hex("", genesis_hash), - fork_id - ) - } else { - format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) - } - }; - - NonDefaultSetConfig { - notifications_protocol: block_announces_protocol.into(), - fallback_names: iter::once( - format!("/{}/block-announces/1", protocol_id.as_ref()).into(), - ) - .collect(), - max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::<B>::build( - roles, - best_number, - best_hash, - genesis_hash, - ))), - // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement - // protocol is still hardcoded into the peerset. - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - } - } - - fn decode_state_response(response: &[u8]) -> Result<OpaqueStateResponse, String> { - let response = StateResponse::decode(response) - .map_err(|error| format!("Failed to decode state response: {error}"))?; - - Ok(OpaqueStateResponse(Box::new(response))) - } - - fn send_state_request(&mut self, who: PeerId, request: OpaqueStateRequest) { - let (tx, rx) = oneshot::channel(); - - if self.peers.contains_key(&who) { - self.pending_responses - .insert(who, Box::pin(async move { (who, PeerRequest::State, rx.await) })); - } - - match self.encode_state_request(&request) { - Ok(data) => { - self.network_service.start_request( - who, - self.state_request_protocol_name.clone(), - data, - tx, - IfDisconnected::ImmediateError, - ); - }, - Err(err) => { - log::warn!( - target: LOG_TARGET, - "Failed to encode state request {request:?}: {err:?}", - ); - }, - } - } - - fn send_warp_sync_request(&mut self, who: PeerId, request: WarpProofRequest<B>) { - let (tx, rx) = oneshot::channel(); - - if self.peers.contains_key(&who) { - self.pending_responses - .insert(who, Box::pin(async move { (who, PeerRequest::WarpProof, rx.await) })); - } - - match &self.warp_sync_protocol_name { - Some(name) => self.network_service.start_request( - who, - name.clone(), - request.encode(), - tx, - IfDisconnected::ImmediateError, - ), - None => { - log::warn!( - target: LOG_TARGET, - "Trying to send warp sync request when no protocol is configured {request:?}", - ); - }, - } - } - - fn on_block_response( + pub(crate) fn on_block_response( &mut self, peer_id: PeerId, request: BlockRequest<B>, blocks: Vec<BlockData<B>>, - ) -> Option<ImportResult<B>> { + ) -> Option<(PeerId, BlockRequest<B>)> { let block_response = BlockResponse::<B> { id: request.id, blocks }; let blocks_range = || match ( @@ -1741,10 +1539,7 @@ where self.import_blocks(origin, blocks); None }, - Ok(OnBlockData::Request(peer, req)) => { - self.send_block_request(peer, req); - None - }, + Ok(OnBlockData::Request(peer, req)) => Some((peer, req)), Ok(OnBlockData::Continue) => None, Err(BadPeer(id, repu)) => { self.network_service @@ -1756,20 +1551,14 @@ where } } - pub fn on_state_response( - &mut self, - peer_id: PeerId, - 
response: OpaqueStateResponse, - ) -> Option<ImportResult<B>> { + pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) { match self.on_state_data(&peer_id, response) { - Ok(OnStateData::Import(origin, block)) => - Some(ImportResult::BlockImport(origin, vec![block])), - Ok(OnStateData::Continue) => None, + Ok(OnStateData::Import(origin, block)) => self.import_blocks(origin, vec![block]), + Ok(OnStateData::Continue) => {}, Err(BadPeer(id, repu)) => { self.network_service .disconnect_peer(id, self.block_announce_protocol_name.clone()); self.network_service.report_peer(id, repu); - None }, } } @@ -1782,165 +1571,10 @@ where } } - fn process_outbound_requests(&mut self) { - for (id, request) in self.block_requests() { - self.send_block_request(id, request); - } - - if let Some((id, request)) = self.state_request() { - self.send_state_request(id, request); - } - - for (id, request) in self.justification_requests().collect::<Vec<_>>() { - self.send_block_request(id, request); - } - - if let Some((id, request)) = self.warp_sync_request() { - self.send_warp_sync_request(id, request); - } - } - - fn poll_pending_responses(&mut self, cx: &mut std::task::Context) -> Poll<ImportResult<B>> { - let ready_responses = self - .pending_responses - .values_mut() - .filter_map(|future| match future.poll_unpin(cx) { - Poll::Pending => None, - Poll::Ready(result) => Some(result), - }) - .collect::<Vec<_>>(); - - for (id, request, response) in ready_responses { - self.pending_responses - .remove(&id) - .expect("Logic error: peer id from pending response is missing in the map."); - - match response { - Ok(Ok(resp)) => match request { - PeerRequest::Block(req) => { - match self.block_downloader.block_response_into_blocks(&req, resp) { - Ok(blocks) => { - if let Some(import) = self.on_block_response(id, req, blocks) { - return Poll::Ready(import) - } - }, - Err(BlockResponseError::DecodeFailed(e)) => { - debug!( - target: LOG_TARGET, - "Failed to decode block response from peer {:?}: {:?}.", - id, - e - ); - self.network_service.report_peer(id, rep::BAD_MESSAGE); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - continue - }, - Err(BlockResponseError::ExtractionFailed(e)) => { - debug!( - target: LOG_TARGET, - "Failed to extract blocks from peer response {:?}: {:?}.", - id, - e - ); - self.network_service.report_peer(id, rep::BAD_MESSAGE); - continue - }, - } - }, - PeerRequest::State => { - let response = match Self::decode_state_response(&resp[..]) { - Ok(proto) => proto, - Err(e) => { - debug!( - target: LOG_TARGET, - "Failed to decode state response from peer {id:?}: {e:?}.", - ); - self.network_service.report_peer(id, rep::BAD_MESSAGE); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - continue - }, - }; - - if let Some(import) = self.on_state_response(id, response) { - return Poll::Ready(import) - } - }, - PeerRequest::WarpProof => { - self.on_warp_sync_response(id, EncodedProof(resp)); - }, - }, - Ok(Err(e)) => { - debug!(target: LOG_TARGET, "Request to peer {id:?} failed: {e:?}."); - - match e { - RequestFailure::Network(OutboundFailure::Timeout) => { - self.network_service.report_peer(id, rep::TIMEOUT); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { - self.network_service.report_peer(id, rep::BAD_PROTOCOL); - self.network_service - .disconnect_peer(id, 
self.block_announce_protocol_name.clone()); - }, - RequestFailure::Network(OutboundFailure::DialFailure) => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - RequestFailure::Refused => { - self.network_service.report_peer(id, rep::REFUSED); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - RequestFailure::Network(OutboundFailure::ConnectionClosed) | - RequestFailure::NotConnected => { - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - RequestFailure::UnknownProtocol => { - debug_assert!(false, "Block request protocol should always be known."); - }, - RequestFailure::Obsolete => { - debug_assert!( - false, - "Can not receive `RequestFailure::Obsolete` after dropping the \ - response receiver.", - ); - }, - } - }, - Err(oneshot::Canceled) => { - trace!( - target: LOG_TARGET, - "Request to peer {id:?} failed due to oneshot being canceled.", - ); - self.network_service - .disconnect_peer(id, self.block_announce_protocol_name.clone()); - }, - } - } - - Poll::Pending - } - - fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result<Vec<u8>, String> { - let request: &StateRequest = request.0.downcast_ref().ok_or_else(|| { - "Failed to downcast opaque state response during encoding, this is an \ - implementation bug." - .to_string() - })?; - - Ok(request.encode_to_vec()) - } - - fn justification_requests<'a>( - &'a mut self, - ) -> Box<dyn Iterator<Item = (PeerId, BlockRequest<B>)> + 'a> { + fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest<B>)> { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); - Box::new(std::iter::from_fn(move || { + std::iter::from_fn(move || { if let Some((peer, request)) = matcher.next(peers) { peers .get_mut(&peer) @@ -1959,7 +1593,8 @@ where } else { None } - })) + }) + .collect() } fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest<B>)> { @@ -2086,7 +1721,6 @@ where } }) .collect() - // Box::new(iter) } fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { @@ -2288,13 +1922,14 @@ where /// A batch of blocks have been processed, with or without errors. /// /// Call this when a batch of blocks have been processed by the import - /// queue, with or without errors. + /// queue, with or without errors. If an error is returned, the pending response + /// from the peer must be dropped. 
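/// (`SyncingEngine` honours this: in its handling of these events, shown at the top of this diff, `Err(BadPeer)` removes the peer's `PendingResponses` entry before reporting and disconnecting the peer.)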
fn on_blocks_processed( &mut self, imported: usize, count: usize, results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>, - ) -> Box<dyn Iterator<Item = Result<(PeerId, BlockRequest<B>), BadPeer>>> { + ) -> Box<dyn Iterator<Item = Result<BlockRequestEvent<B>, BadPeer>>> { trace!(target: LOG_TARGET, "Imported {imported} of {count}"); let mut output = Vec::new(); @@ -2799,13 +2434,10 @@ fn validate_blocks<Block: BlockT>( #[cfg(test)] mod test { use super::*; - use crate::{mock::MockBlockDownloader, service::network::NetworkServiceProvider}; + use crate::service::network::NetworkServiceProvider; use futures::executor::block_on; use sc_block_builder::BlockBuilderProvider; - use sc_network_common::{ - role::Role, - sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}, - }; + use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock}; use sp_blockchain::HeaderBackend; use substrate_test_runtime_client::{ runtime::{Block, Hash, Header}, @@ -2825,21 +2457,16 @@ mod test { let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), + ProtocolName::from("test-block-announce-protocol"), 1, 64, None, None, chain_sync_network_handle, import_queue, - Arc::new(MockBlockDownloader::new()), - ProtocolName::from("state-request"), - None, ) .unwrap(); @@ -2857,7 +2484,8 @@ mod test { // the justification request should be scheduled to that peer assert!(sync .justification_requests() - .any(|(who, request)| { who == peer_id && request.from == FromBlock::Hash(a1_hash) })); + .iter() + .any(|(who, request)| { *who == peer_id && request.from == FromBlock::Hash(a1_hash) })); // there are no extra pending requests assert_eq!(sync.extra_justifications.pending_requests().count(), 0); @@ -2891,21 +2519,16 @@ mod test { let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), + ProtocolName::from("test-block-announce-protocol"), 1, 64, None, None, chain_sync_network_handle, import_queue, - Arc::new(MockBlockDownloader::new()), - ProtocolName::from("state-request"), - None, ) .unwrap(); @@ -2944,8 +2567,8 @@ mod test { // the justification request should be scheduled to the // new peer which is at the given block - assert!(sync.justification_requests().any(|(p, r)| { - p == peer_id3 && + assert!(sync.justification_requests().iter().any(|(p, r)| { + *p == peer_id3 && r.fields == BlockAttributes::JUSTIFICATION && r.from == FromBlock::Hash(b1_hash) })); @@ -2959,9 +2582,11 @@ mod test { let block_requests = sync.restart(); // which should make us send out block requests to the first two peers - assert!(block_requests - .map(|r| r.unwrap()) - .all(|(p, _)| { p == peer_id1 || p == peer_id2 })); + assert!(block_requests.map(|r| r.unwrap()).all(|event| match event { + BlockRequestEvent::SendRequest { peer_id, .. } => + peer_id == peer_id1 || peer_id == peer_id2, + BlockRequestEvent::RemoveStale { .. 
} => false, + })); // peer 3 should be unaffected it was downloading finality data assert_eq!( @@ -3065,21 +2690,16 @@ mod test { let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), + ProtocolName::from("test-block-announce-protocol"), 5, 64, None, None, chain_sync_network_handle, import_queue, - Arc::new(MockBlockDownloader::new()), - ProtocolName::from("state-request"), - None, ) .unwrap(); @@ -3191,21 +2811,16 @@ mod test { NetworkServiceProvider::new(); let info = client.info(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), + ProtocolName::from("test-block-announce-protocol"), 5, 64, None, None, chain_sync_network_handle, import_queue, - Arc::new(MockBlockDownloader::new()), - ProtocolName::from("state-request"), - None, ) .unwrap(); @@ -3348,21 +2963,16 @@ mod test { let info = client.info(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), + ProtocolName::from("test-block-announce-protocol"), 5, 64, None, None, chain_sync_network_handle, import_queue, - Arc::new(MockBlockDownloader::new()), - ProtocolName::from("state-request"), - None, ) .unwrap(); @@ -3490,21 +3100,16 @@ mod test { let info = client.info(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), + ProtocolName::from("test-block-announce-protocol"), 5, 64, None, None, chain_sync_network_handle, import_queue, - Arc::new(MockBlockDownloader::new()), - ProtocolName::from("state-request"), - None, ) .unwrap(); @@ -3634,21 +3239,16 @@ mod test { let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::<Vec<_>>(); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), + ProtocolName::from("test-block-announce-protocol"), 1, 64, None, None, chain_sync_network_handle, import_queue, - Arc::new(MockBlockDownloader::new()), - ProtocolName::from("state-request"), - None, ) .unwrap(); @@ -3679,21 +3279,16 @@ mod test { let empty_client = Arc::new(TestClientBuilder::new().build()); - let (mut sync, _) = ChainSync::new( + let mut sync = ChainSync::new( SyncMode::Full, empty_client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), + ProtocolName::from("test-block-announce-protocol"), 1, 64, None, None, chain_sync_network_handle, import_queue, - Arc::new(MockBlockDownloader::new()), - ProtocolName::from("state-request"), - None, ) .unwrap(); @@ -3731,21 +3326,16 @@ mod test { let import_queue = Box::new(sc_consensus::import_queue::mock::MockImportQueueHandle::new()); let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); - let (mut sync, _) = ChainSync::new( + let mut 
sync = ChainSync::new( SyncMode::Full, client.clone(), - ProtocolId::from("test-protocol-name"), - &Some(String::from("test-fork-id")), - Roles::from(&Role::Full), + ProtocolName::from("test-block-announce-protocol"), 1, 64, None, None, chain_sync_network_handle, import_queue, - Arc::new(MockBlockDownloader::new()), - ProtocolName::from("state-request"), - None, ) .unwrap(); @@ -3766,10 +3356,14 @@ mod test { // add new peer and request blocks from them sync.new_peer(peers[0], Hash::random(), 42).unwrap(); + // we don't actually perform any requests, just keep track of peers waiting for a response + let mut pending_responses = HashSet::new(); + // we will send block requests to these peers // for these blocks we don't know about - for (peer, request) in sync.block_requests() { - sync.send_block_request(peer, request); + for (peer, _request) in sync.block_requests() { + // "send" request + pending_responses.insert(peer); } // add a new peer at a known block @@ -3780,10 +3374,11 @@ mod test { // the justification request should be scheduled to the // new peer which is at the given block - let mut requests = sync.justification_requests().collect::<Vec<_>>(); + let mut requests = sync.justification_requests(); assert_eq!(requests.len(), 1); - let (peer, request) = requests.remove(0); - sync.send_block_request(peer, request); + let (peer, _request) = requests.remove(0); + // "send" request + assert!(pending_responses.insert(peer)); assert!(!std::matches!( sync.peers.get(&peers[0]).unwrap().state, @@ -3793,18 +3388,37 @@ mod test { sync.peers.get(&peers[1]).unwrap().state, PeerSyncState::DownloadingJustification(b1_hash), ); - assert_eq!(sync.pending_responses.len(), 2); - - let requests = sync.restart().collect::<Vec<_>>(); - assert!(requests.iter().any(|res| res.as_ref().unwrap().0 == peers[0])); + assert_eq!(pending_responses.len(), 2); + + // restart sync + let request_events = sync.restart().collect::<Vec<_>>(); + for event in request_events.iter() { + match event.as_ref().unwrap() { + BlockRequestEvent::RemoveStale { peer_id } => { + pending_responses.remove(&peer_id); + }, + BlockRequestEvent::SendRequest { peer_id, .. } => { + // we drop obsolete response, but don't register a new request, it's checked in + // the `assert!` below + pending_responses.remove(&peer_id); + }, + } + } + assert!(request_events.iter().any(|event| { + match event.as_ref().unwrap() { + BlockRequestEvent::RemoveStale { .. } => false, + BlockRequestEvent::SendRequest { peer_id, .. 
} => peer_id == &peers[0], + } + })); - assert_eq!(sync.pending_responses.len(), 1); - assert!(sync.pending_responses.get(&peers[1]).is_some()); + assert_eq!(pending_responses.len(), 1); + assert!(pending_responses.contains(&peers[1])); assert_eq!( sync.peers.get(&peers[1]).unwrap().state, PeerSyncState::DownloadingJustification(b1_hash), ); sync.peer_disconnected(&peers[1]); - assert_eq!(sync.pending_responses.len(), 0); + pending_responses.remove(&peers[1]); + assert_eq!(pending_responses.len(), 0); } } diff --git a/substrate/client/network/sync/src/mock.rs b/substrate/client/network/sync/src/mock.rs index 859f9fb9c54b620f67fc2729043e8a9a145d97af..b51eec8d1499ea8258b3e47daa4a943782b9f743 100644 --- a/substrate/client/network/sync/src/mock.rs +++ b/substrate/client/network/sync/src/mock.rs @@ -20,7 +20,7 @@ use crate::block_relay_protocol::{BlockDownloader as BlockDownloaderT, BlockResponseError}; -use futures::{channel::oneshot, task::Poll}; +use futures::channel::oneshot; use libp2p::PeerId; use sc_network::RequestFailure; use sc_network_common::sync::{ @@ -39,7 +39,6 @@ mockall::mock! { fn num_sync_requests(&self) -> usize; fn num_downloaded_blocks(&self) -> usize; fn num_peers(&self) -> usize; - fn num_active_peers(&self) -> usize; fn new_peer( &mut self, who: PeerId, @@ -81,15 +80,6 @@ mockall::mock! { ); fn peer_disconnected(&mut self, who: &PeerId); fn metrics(&self) -> Metrics; - fn poll<'a>( - &mut self, - cx: &mut std::task::Context<'a>, - ) -> Poll<()>; - fn send_block_request( - &mut self, - who: PeerId, - request: BlockRequest<Block>, - ); } } diff --git a/substrate/client/network/sync/src/pending_responses.rs b/substrate/client/network/sync/src/pending_responses.rs new file mode 100644 index 0000000000000000000000000000000000000000..c863267e7808f13104eacd3d30f39987946443bf --- /dev/null +++ b/substrate/client/network/sync/src/pending_responses.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//! [`PendingResponses`] is responsible for keeping track of pending responses and +//! polling them. + +use futures::{ + channel::oneshot, + future::BoxFuture, + stream::{BoxStream, Stream}, + FutureExt, StreamExt, +}; +use libp2p::PeerId; +use log::error; +use sc_network::request_responses::RequestFailure; +use sc_network_common::sync::PeerRequest; +use sp_runtime::traits::Block as BlockT; +use std::task::{Context, Poll}; +use tokio_stream::StreamMap; + +/// Response result. +type ResponseResult = Result<Result<Vec<u8>, RequestFailure>, oneshot::Canceled>; + +/// A future yielding [`ResponseResult`]. +type ResponseFuture = BoxFuture<'static, ResponseResult>; + +/// An event we receive once a pending response future resolves. 
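/// (Bundles the `peer_id` the request was sent to, the original `PeerRequest`, and the raw `ResponseResult` of the underlying channel; `SyncingEngine::process_response_event` consumes it.)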
+pub(crate) struct ResponseEvent<B: BlockT> { + pub peer_id: PeerId, + pub request: PeerRequest<B>, + pub response: ResponseResult, +} + +/// Stream taking care of polling pending responses. +pub(crate) struct PendingResponses<B: BlockT> { + /// Pending responses + pending_responses: StreamMap<PeerId, BoxStream<'static, (PeerRequest<B>, ResponseResult)>>, +} + +impl<B: BlockT> PendingResponses<B> { + pub fn new() -> Self { + Self { pending_responses: StreamMap::new() } + } + + pub fn insert( + &mut self, + peer_id: PeerId, + request: PeerRequest<B>, + response_future: ResponseFuture, + ) { + let request_type = request.get_type(); + + if self + .pending_responses + .insert( + peer_id, + Box::pin(async move { (request, response_future.await) }.into_stream()), + ) + .is_some() + { + error!( + target: crate::LOG_TARGET, + "Discarded pending response from peer {peer_id}, request type: {request_type:?}.", + ); + debug_assert!(false); + } + } + + pub fn remove(&mut self, peer_id: &PeerId) -> bool { + self.pending_responses.remove(peer_id).is_some() + } + + pub fn len(&self) -> usize { + self.pending_responses.len() + } +} + +impl<B: BlockT> Unpin for PendingResponses<B> {} + +impl<B: BlockT> Stream for PendingResponses<B> { + type Item = ResponseEvent<B>; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll<Option<Self::Item>> { + match futures::ready!(self.pending_responses.poll_next_unpin(cx)) { + Some((peer_id, (request, response))) => { + // We need to manually remove the stream, because `StreamMap` doesn't know yet that + // it's going to yield `None`, so may not remove it before the next request is made + // to the same peer. + self.pending_responses.remove(&peer_id); + + Poll::Ready(Some(ResponseEvent { peer_id, request, response })) + }, + None => Poll::Ready(None), + } + } +} diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index 92b31937a0cbb1e018cacf17c5e19f418293877b..dc625c3d6c4cf44d37b865d9e022047066b34e58 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -145,6 +145,8 @@ fn hosts_filtering(enabled: bool, addrs: &[SocketAddr]) -> AllowHosts { fn build_rpc_api<M: Send + Sync + 'static>(mut rpc_api: RpcModule<M>) -> RpcModule<M> { let mut available_methods = rpc_api.method_names().collect::<Vec<_>>(); + // The "rpc_methods" is defined below and we want it to be part of the reported methods. + available_methods.push("rpc_methods"); available_methods.sort(); rpc_api diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index c93006753afbbefedef3de6b27863aab0269fbfc..f4068d1bc59406085b399ce3ce5bbb53b166cad4 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -31,7 +31,7 @@ serde = "1.0" hex = "0.4" futures = "0.3.21" parking_lot = "0.12.1" -tokio-stream = { version = "0.1", features = ["sync"] } +tokio-stream = { version = "0.1.14", features = ["sync"] } tokio = { version = "1.22.0", features = ["sync"] } array-bytes = "6.1" log = "0.4.17" diff --git a/substrate/client/rpc-spec-v2/src/archive/api.rs b/substrate/client/rpc-spec-v2/src/archive/api.rs index ca94779c887cba2dcfae25eb46113ec343a51746..0583111cb488d1eeab5a175e13c6a592b2c7df3d 100644 --- a/substrate/client/rpc-spec-v2/src/archive/api.rs +++ b/substrate/client/rpc-spec-v2/src/archive/api.rs @@ -18,6 +18,7 @@ //! API trait of the archive methods. 
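The `pending_responses.rs` module added above builds on `tokio_stream::StreamMap` keyed by `PeerId`. A minimal, self-contained sketch of that pattern, assuming only the `futures`, `tokio` (with the `macros`/`rt` features) and `tokio-stream` crates, with a toy peer name and payload in place of the Substrate types:

use futures::{channel::oneshot, FutureExt, StreamExt};
use tokio_stream::StreamMap;

#[tokio::main]
async fn main() {
    // Keyed by a peer name here; `PendingResponses` keys by `PeerId`.
    let mut pending = StreamMap::new();

    let (tx, rx) = oneshot::channel::<Vec<u8>>();
    // A oneshot receiver turned into a stream that yields exactly once,
    // mirroring what `PendingResponses::insert` does with its boxed future.
    pending.insert("peer-1", rx.into_stream());

    // Inserting again under the same key would replace (and thereby cancel)
    // the previous in-flight request; the diff guards that case with a
    // `debug_assert!`.

    tx.send(b"block response".to_vec()).unwrap();

    // `StreamMap` yields `(key, item)` pairs. The one-shot stream is only
    // detected as exhausted on a later poll, which is why the `poll_next`
    // impl above removes the entry manually as soon as it yields.
    while let Some((peer, response)) = pending.next().await {
        println!("{peer}: {response:?}");
    }
}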
+use crate::MethodResult; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; #[rpc(client, server)] @@ -53,4 +54,38 @@ pub trait ArchiveApi<Hash> { /// This method is unstable and subject to change in the future. #[method(name = "archive_unstable_header")] fn archive_unstable_header(&self, hash: Hash) -> RpcResult<Option<String>>; + + /// Get the height of the current finalized block. + /// + /// Returns an integer height of the current finalized block of the chain. + /// + /// # Unstable + /// + /// This method is unstable and subject to change in the future. + #[method(name = "archive_unstable_finalizedHeight")] + fn archive_unstable_finalized_height(&self) -> RpcResult<u64>; + + /// Get the hashes of blocks from the given height. + /// + /// Returns an array (possibly empty) of strings containing an hexadecimal-encoded hash of a + /// block header. + /// + /// # Unstable + /// + /// This method is unstable and subject to change in the future. + #[method(name = "archive_unstable_hashByHeight")] + fn archive_unstable_hash_by_height(&self, height: u64) -> RpcResult<Vec<String>>; + + /// Call into the Runtime API at a specified block's state. + /// + /// # Unstable + /// + /// This method is unstable and subject to change in the future. + #[method(name = "archive_unstable_call")] + fn archive_unstable_call( + &self, + hash: Hash, + function: String, + call_parameters: String, + ) -> RpcResult<MethodResult>; } diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index 4fb2e5671d30044f6e609f34d5453ffce8005b84..bded842d8fd0de408b0d233af12010d41a9a7df7 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -18,20 +18,34 @@ //! API implementation for `archive`. -use super::ArchiveApiServer; -use crate::chain_head::hex_string; +use crate::{ + archive::{error::Error as ArchiveError, ArchiveApiServer}, + chain_head::hex_string, + MethodResult, +}; + use codec::Encode; use jsonrpsee::core::{async_trait, RpcResult}; -use sc_client_api::{Backend, BlockBackend, BlockchainEvents, ExecutorProvider, StorageProvider}; -use sp_api::CallApiAt; -use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; -use sp_runtime::traits::Block as BlockT; -use std::{marker::PhantomData, sync::Arc}; +use sc_client_api::{ + Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, StorageProvider, +}; +use sp_api::{CallApiAt, CallContext, NumberFor}; +use sp_blockchain::{ + Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, +}; +use sp_core::Bytes; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT}, + SaturatedConversion, +}; +use std::{collections::HashSet, marker::PhantomData, sync::Arc}; /// An API for archive RPC calls. pub struct Archive<BE: Backend<Block>, Block: BlockT, Client> { /// Substrate client. client: Arc<Client>, + /// Backend of the chain. + backend: Arc<BE>, /// The hexadecimal encoded hash of the genesis block. genesis_hash: String, /// Phantom member to pin the block type. @@ -40,10 +54,26 @@ pub struct Archive<BE: Backend<Block>, Block: BlockT, Client> { impl<BE: Backend<Block>, Block: BlockT, Client> Archive<BE, Block, Client> { /// Create a new [`Archive`]. 
- pub fn new<GenesisHash: AsRef<[u8]>>(client: Arc<Client>, genesis_hash: GenesisHash) -> Self { + pub fn new<GenesisHash: AsRef<[u8]>>( + client: Arc<Client>, + backend: Arc<BE>, + genesis_hash: GenesisHash, + ) -> Self { let genesis_hash = hex_string(&genesis_hash.as_ref()); - Self { client, genesis_hash, _phantom: PhantomData } + Self { client, backend, genesis_hash, _phantom: PhantomData } + } +} + +/// Parse hex-encoded string parameter as raw bytes. +/// +/// If the parsing fails, returns an error propagated to the RPC method. +fn parse_hex_param(param: String) -> Result<Vec<u8>, ArchiveError> { + // Methods can accept empty parameters. + if param.is_empty() { + return Ok(Default::default()) } + + array_bytes::hex2bytes(¶m).map_err(|_| ArchiveError::InvalidParam(param)) } #[async_trait] @@ -51,6 +81,7 @@ impl<BE, Block, Client> ArchiveApiServer<Block::Hash> for Archive<BE, Block, Cli where Block: BlockT + 'static, Block::Header: Unpin, + <<Block as BlockT>::Header as HeaderT>::Number: From<u64>, BE: Backend<Block> + 'static, Client: BlockBackend<Block> + ExecutorProvider<Block> @@ -83,4 +114,75 @@ where Ok(Some(hex_string(&header.encode()))) } + + fn archive_unstable_finalized_height(&self) -> RpcResult<u64> { + Ok(self.client.info().finalized_number.saturated_into()) + } + + fn archive_unstable_hash_by_height(&self, height: u64) -> RpcResult<Vec<String>> { + let height: NumberFor<Block> = height.into(); + let finalized_num = self.client.info().finalized_number; + + if finalized_num >= height { + let Ok(Some(hash)) = self.client.block_hash(height.into()) else { return Ok(vec![]) }; + return Ok(vec![hex_string(&hash.as_ref())]) + } + + let blockchain = self.backend.blockchain(); + // Fetch all the leaves of the blockchain that are on a higher or equal height. + let mut headers: Vec<_> = blockchain + .leaves() + .map_err(|error| ArchiveError::FetchLeaves(error.to_string()))? + .into_iter() + .filter_map(|hash| { + let Ok(Some(header)) = self.client.header(hash) else { return None }; + + if header.number() < &height { + return None + } + + Some(header) + }) + .collect(); + + let mut result = Vec::new(); + let mut visited = HashSet::new(); + + while let Some(header) = headers.pop() { + if header.number() == &height { + result.push(hex_string(&header.hash().as_ref())); + continue + } + + let parent_hash = *header.parent_hash(); + + // Continue the iteration for unique hashes. + // Forks might intersect on a common chain that is not yet finalized. + if visited.insert(parent_hash) { + let Ok(Some(next_header)) = self.client.header(parent_hash) else { continue }; + headers.push(next_header); + } + } + + Ok(result) + } + + fn archive_unstable_call( + &self, + hash: Block::Hash, + function: String, + call_parameters: String, + ) -> RpcResult<MethodResult> { + let call_parameters = Bytes::from(parse_hex_param(call_parameters)?); + + let result = + self.client + .executor() + .call(hash, &function, &call_parameters, CallContext::Offchain); + + Ok(match result { + Ok(result) => MethodResult::ok(hex_string(&result)), + Err(error) => MethodResult::err(error.to_string()), + }) + } } diff --git a/substrate/client/rpc-spec-v2/src/archive/error.rs b/substrate/client/rpc-spec-v2/src/archive/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..b858212399ce713abbb76e2182f3ccd8cf8cd880 --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/archive/error.rs @@ -0,0 +1,66 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +//! Error helpers for `archive` RPC module. + +use jsonrpsee::{ + core::Error as RpcError, + types::error::{CallError, ErrorObject}, +}; + +/// Archive RPC errors. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Invalid parameter provided to the RPC method. + #[error("Invalid parameter: {0}")] + InvalidParam(String), + /// Runtime call failed. + #[error("Runtime call: {0}")] + RuntimeCall(String), + /// Failed to fetch leaves. + #[error("Failed to fetch leaves of the chain: {0}")] + FetchLeaves(String), +} + +// Base code for all `archive` errors. +const BASE_ERROR: i32 = 3000; +/// Invalid parameter error. +const INVALID_PARAM_ERROR: i32 = BASE_ERROR + 1; +/// Runtime call error. +const RUNTIME_CALL_ERROR: i32 = BASE_ERROR + 2; +/// Failed to fetch leaves. +const FETCH_LEAVES_ERROR: i32 = BASE_ERROR + 3; + +impl From<Error> for ErrorObject<'static> { + fn from(e: Error) -> Self { + let msg = e.to_string(); + + match e { + Error::InvalidParam(_) => ErrorObject::owned(INVALID_PARAM_ERROR, msg, None::<()>), + Error::RuntimeCall(_) => ErrorObject::owned(RUNTIME_CALL_ERROR, msg, None::<()>), + Error::FetchLeaves(_) => ErrorObject::owned(FETCH_LEAVES_ERROR, msg, None::<()>), + } + .into() + } +} + +impl From<Error> for RpcError { + fn from(e: Error) -> Self { + CallError::Custom(e.into()).into() + } +} diff --git a/substrate/client/rpc-spec-v2/src/archive/mod.rs b/substrate/client/rpc-spec-v2/src/archive/mod.rs index 767f658ecd7537074be16f9d72db97821f743cca..eb7d71d702f616133ec037d75f97784b781307c7 100644 --- a/substrate/client/rpc-spec-v2/src/archive/mod.rs +++ b/substrate/client/rpc-spec-v2/src/archive/mod.rs @@ -27,5 +27,6 @@ mod tests; pub mod api; pub mod archive; +pub mod error; pub use api::ArchiveApiServer; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index bc75fc749acc06f6e9638a0d7ff29696d1f64ee3..36f7716e393cee5f46d68f63e5b495ef1f98b5f7 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -16,16 +16,23 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. 
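For orientation: with `BASE_ERROR` = 3000 above, an `Error::InvalidParam` reaches JSON-RPC clients roughly as the error object {"code": 3001, "message": "Invalid parameter: 0x0"} (the exact envelope is jsonrpsee's; the payload value here is illustrative). The `archive_call` tests below assert on precisely this code and message prefix.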
-use crate::chain_head::hex_string; +use crate::{chain_head::hex_string, MethodResult}; use super::{archive::Archive, *}; +use assert_matches::assert_matches; use codec::{Decode, Encode}; -use jsonrpsee::{types::EmptyServerParams as EmptyParams, RpcModule}; +use jsonrpsee::{ + core::error::Error, + types::{error::CallError, EmptyServerParams as EmptyParams}, + RpcModule, +}; use sc_block_builder::BlockBuilderProvider; - +use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; +use sp_runtime::SaturatedConversion; use std::sync::Arc; +use substrate_test_runtime::Transfer; use substrate_test_runtime_client::{ prelude::*, runtime, Backend, BlockBuilderExt, Client, ClientBlockImportExt, }; @@ -38,9 +45,10 @@ type Block = substrate_test_runtime_client::runtime::Block; fn setup_api() -> (Arc<Client<Backend>>, RpcModule<Archive<Backend, Block, Client<Backend>>>) { let builder = TestClientBuilder::new(); + let backend = builder.backend(); let client = Arc::new(builder.build()); - let api = Archive::new(client.clone(), CHAIN_GENESIS).into_rpc(); + let api = Archive::new(client.clone(), backend, CHAIN_GENESIS).into_rpc(); (client, api) } @@ -111,3 +119,140 @@ async fn archive_header() { let header: Header = Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(header, block.header); } + +#[tokio::test] +async fn archive_finalized_height() { + let (client, api) = setup_api(); + + let client_height: u32 = client.info().finalized_number.saturated_into(); + + let height: u32 = + api.call("archive_unstable_finalizedHeight", EmptyParams::new()).await.unwrap(); + + assert_eq!(client_height, height); +} + +#[tokio::test] +async fn archive_hash_by_height() { + let (mut client, api) = setup_api(); + + // Genesis height. + let hashes: Vec<String> = api.call("archive_unstable_hashByHeight", [0]).await.unwrap(); + assert_eq!(hashes, vec![format!("{:?}", client.genesis_hash())]); + + // Block tree: + // genesis -> finalized -> block 1 -> block 2 -> block 3 + // -> block 1 -> block 4 + // + // ^^^ h = N + // ^^^ h = N + 1 + // ^^^ h = N + 2 + let finalized = client.new_block(Default::default()).unwrap().build().unwrap().block; + let finalized_hash = finalized.header.hash(); + client.import(BlockOrigin::Own, finalized.clone()).await.unwrap(); + client.finalize_block(finalized_hash, None).unwrap(); + + let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_1_hash = block_1.header.hash(); + client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); + + let block_2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_2_hash = block_2.header.hash(); + client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); + let block_3 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_3_hash = block_3.header.hash(); + client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); + + // Import block 4 fork. + let mut block_builder = client.new_block_at(block_1_hash, Default::default(), false).unwrap(); + // This push is required as otherwise block 4 has the same hash as block 2 and won't get + // imported + block_builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let block_4 = block_builder.build().unwrap().block; + let block_4_hash = block_4.header.hash(); + client.import(BlockOrigin::Own, block_4.clone()).await.unwrap(); + + // Check finalized height. 
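// (The finalized chain at this point is genesis at height 0 plus the single finalized block built above, so height 1 must return exactly `finalized_hash`.)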
+ let hashes: Vec<String> = api.call("archive_unstable_hashByHeight", [1]).await.unwrap(); + assert_eq!(hashes, vec![format!("{:?}", finalized_hash)]); + + // Test non-finalized heights. + // Height N must include block 1. + let mut height = block_1.header.number; + let hashes: Vec<String> = api.call("archive_unstable_hashByHeight", [height]).await.unwrap(); + assert_eq!(hashes, vec![format!("{:?}", block_1_hash)]); + + // Height (N + 1) must include blocks 2 and 4. + height += 1; + let hashes: Vec<String> = api.call("archive_unstable_hashByHeight", [height]).await.unwrap(); + assert_eq!(hashes, vec![format!("{:?}", block_4_hash), format!("{:?}", block_2_hash)]); + + // Height (N + 2) must include block 3. + height += 1; + let hashes: Vec<String> = api.call("archive_unstable_hashByHeight", [height]).await.unwrap(); + assert_eq!(hashes, vec![format!("{:?}", block_3_hash)]); + + // Height (N + 3) has no blocks. + height += 1; + let hashes: Vec<String> = api.call("archive_unstable_hashByHeight", [height]).await.unwrap(); + assert!(hashes.is_empty()); +} + +#[tokio::test] +async fn archive_call() { + let (mut client, api) = setup_api(); + let invalid_hash = hex_string(&INVALID_HASH); + + // Invalid parameter (non-hex). + let err = api + .call::<_, serde_json::Value>( + "archive_unstable_call", + [&invalid_hash, "BabeApi_current_epoch", "0x00X"], + ) + .await + .unwrap_err(); + assert_matches!(err, Error::Call(CallError::Custom(ref err)) if err.code() == 3001 && err.message().contains("Invalid parameter")); + + // Pass invalid parameters that cannot be decoded. + let err = api + .call::<_, serde_json::Value>( + "archive_unstable_call", + // 0x0 is invalid. + [&invalid_hash, "BabeApi_current_epoch", "0x0"], + ) + .await + .unwrap_err(); + assert_matches!(err, Error::Call(CallError::Custom(ref err)) if err.code() == 3001 && err.message().contains("Invalid parameter")); + + // Invalid hash. + let result: MethodResult = api + .call("archive_unstable_call", [&invalid_hash, "BabeApi_current_epoch", "0x00"]) + .await + .unwrap(); + assert_matches!(result, MethodResult::Err(_)); + + let block_1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_1_hash = block_1.header.hash(); + client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); + + // Valid call. + let alice_id = AccountKeyring::Alice.to_account_id(); + // Hex encoded scale encoded bytes representing the call parameters. + let call_parameters = hex_string(&alice_id.encode()); + let result: MethodResult = api + .call( + "archive_unstable_call", + [&format!("{:?}", block_1_hash), "AccountNonceApi_account_nonce", &call_parameters], + ) + .await + .unwrap(); + let expected = MethodResult::ok("0x0000000000000000"); + assert_eq!(result, expected); +} diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs index 7095548a2b16cd518e874a86acd44cc0fad26b73..6e19f59a5d68747dce1c9a9506dc4f36e662a5d3 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs @@ -71,8 +71,10 @@ impl<Client, Block, BE> ChainHeadStorage<Client, Block, BE> { /// Query to iterate over storage. struct QueryIter { - /// The next key from which the iteration should continue. - next_key: StorageKey, + /// The key from which the iteration was started. + query_key: StorageKey, + /// The key after which pagination should resume. 
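/// (`None` when the query starts; afterwards it holds the last key reported, so the next page resumes right after it via `next_pagination_key` below.)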
+ pagination_start_key: Option<StorageKey>, /// The type of the query (either value or hash). ty: IterQueryType, } @@ -184,20 +186,27 @@ where hash: Block::Hash, child_key: Option<&ChildInfo>, ) -> QueryIterResult { - let QueryIter { next_key, ty } = query; + let QueryIter { ty, query_key, pagination_start_key } = query; let mut keys_iter = if let Some(child_key) = child_key { - self.client - .child_storage_keys(hash, child_key.to_owned(), Some(&next_key), None) + self.client.child_storage_keys( + hash, + child_key.to_owned(), + Some(&query_key), + pagination_start_key.as_ref(), + ) } else { - self.client.storage_keys(hash, Some(&next_key), None) + self.client.storage_keys(hash, Some(&query_key), pagination_start_key.as_ref()) } .map_err(|err| err.to_string())?; let mut ret = Vec::with_capacity(self.operation_max_storage_items); + let mut next_pagination_key = None; for _ in 0..self.operation_max_storage_items { let Some(key) = keys_iter.next() else { break }; + next_pagination_key = Some(key.clone()); + let result = match ty { IterQueryType::Value => self.query_storage_value(hash, &key, child_key), IterQueryType::Hash => self.query_storage_hash(hash, &key, child_key), @@ -209,7 +218,11 @@ where } // Save the next key if any to continue the iteration. - let maybe_next_query = keys_iter.next().map(|next_key| QueryIter { next_key, ty }); + let maybe_next_query = keys_iter.next().map(|_| QueryIter { + ty, + query_key, + pagination_start_key: next_pagination_key, + }); Ok((ret, maybe_next_query)) } @@ -325,12 +338,16 @@ where return }, }, - StorageQueryType::DescendantsValues => self - .iter_operations - .push_back(QueryIter { next_key: item.key, ty: IterQueryType::Value }), - StorageQueryType::DescendantsHashes => self - .iter_operations - .push_back(QueryIter { next_key: item.key, ty: IterQueryType::Hash }), + StorageQueryType::DescendantsValues => self.iter_operations.push_back(QueryIter { + query_key: item.key, + ty: IterQueryType::Value, + pagination_start_key: None, + }), + StorageQueryType::DescendantsHashes => self.iter_operations.push_back(QueryIter { + query_key: item.key, + ty: IterQueryType::Hash, + pagination_start_key: None, + }), }; } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 3ab47991c4e551456241707d4ad7ef828b042ce7..1d5b45260a22df9943383d589d2f3b3473fac51e 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -2352,6 +2352,7 @@ async fn check_continue_operation() { builder.push_storage_change(b":m".to_vec(), Some(b"a".to_vec())).unwrap(); builder.push_storage_change(b":mo".to_vec(), Some(b"ab".to_vec())).unwrap(); builder.push_storage_change(b":moc".to_vec(), Some(b"abc".to_vec())).unwrap(); + builder.push_storage_change(b":moD".to_vec(), Some(b"abcmoD".to_vec())).unwrap(); builder.push_storage_change(b":mock".to_vec(), Some(b"abcd".to_vec())).unwrap(); let block = builder.build().unwrap().block; let block_hash = format!("{:?}", block.header.hash()); @@ -2430,6 +2431,25 @@ async fn check_continue_operation() { res.items[0].result == StorageResultType::Value(hex_string(b"ab")) ); + // Pagination event. 
+	assert_matches!(
+		get_next_event::<FollowEvent<String>>(&mut sub).await,
+		FollowEvent::OperationWaitingForContinue(res) if res.operation_id == operation_id
+	);
+	does_not_produce_event::<FollowEvent<String>>(
+		&mut sub,
+		std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS),
+	)
+	.await;
+	let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap();
+	assert_matches!(
+		get_next_event::<FollowEvent<String>>(&mut sub).await,
+		FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id &&
+			res.items.len() == 1 &&
+			res.items[0].key == hex_string(b":moD") &&
+			res.items[0].result == StorageResultType::Value(hex_string(b"abcmoD"))
+	);
+
 	// Pagination event.
 	assert_matches!(
 		get_next_event::<FollowEvent<String>>(&mut sub).await,
diff --git a/substrate/client/rpc-spec-v2/src/lib.rs b/substrate/client/rpc-spec-v2/src/lib.rs
index 9a455c5984a5f758c7b0b274cdf0c89be2685163..d202bfef4a74ced5a9f32d50705d596a4403db42 100644
--- a/substrate/client/rpc-spec-v2/src/lib.rs
+++ b/substrate/client/rpc-spec-v2/src/lib.rs
@@ -23,6 +23,8 @@
 #![warn(missing_docs)]
 #![deny(unused_crate_dependencies)]
 
+use serde::{Deserialize, Serialize};
+
 pub mod archive;
 pub mod chain_head;
 pub mod chain_spec;
@@ -30,3 +32,74 @@ pub mod transaction;
 
 /// Task executor that is being used by RPC subscriptions.
 pub type SubscriptionTaskExecutor = std::sync::Arc<dyn sp_core::traits::SpawnNamed>;
+
+/// The result of an RPC method.
+#[derive(Debug, Deserialize, Serialize, PartialEq)]
+#[serde(untagged)]
+pub enum MethodResult {
+	/// Method generated a result.
+	Ok(MethodResultOk),
+	/// Method encountered an error.
+	Err(MethodResultErr),
+}
+
+impl MethodResult {
+	/// Constructs a successful result.
+	pub fn ok(result: impl Into<String>) -> MethodResult {
+		MethodResult::Ok(MethodResultOk { success: true, result: result.into() })
+	}
+
+	/// Constructs an error result.
+	pub fn err(error: impl Into<String>) -> MethodResult {
+		MethodResult::Err(MethodResultErr { success: false, error: error.into() })
+	}
+}
+
+/// The successful result of an RPC method.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct MethodResultOk {
+	/// Method was successful.
+	success: bool,
+	/// The result of the method.
+	pub result: String,
+}
+
+/// The error result of an RPC method.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct MethodResultErr {
+	/// Method encountered an error.
+	success: bool,
+	/// The error of the method.
+	pub error: String,
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn method_result_ok() {
+		let ok = MethodResult::ok("hello");
+
+		let ser = serde_json::to_string(&ok).unwrap();
+		let exp = r#"{"success":true,"result":"hello"}"#;
+		assert_eq!(ser, exp);
+
+		let ok_dec: MethodResult = serde_json::from_str(exp).unwrap();
+		assert_eq!(ok_dec, ok);
+	}
+
+	#[test]
+	fn method_result_error() {
+		let err = MethodResult::err("hello");
+
+		let ser = serde_json::to_string(&err).unwrap();
+		let exp = r#"{"success":false,"error":"hello"}"#;
+		assert_eq!(ser, exp);
+
+		let err_dec: MethodResult = serde_json::from_str(exp).unwrap();
+		assert_eq!(err_dec, err);
+	}
+}
diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs
index 44f4bd36c8b8b655d9207f8687c5ad41144a8cda..fe16310aeffa126cb66ababe68c58f445d9611fe 100644
--- a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs
+++ b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs
@@ -46,7 +46,7 @@ use std::sync::Arc;
 use sp_api::ProvideRuntimeApi;
 use sp_blockchain::HeaderBackend;
 use sp_core::Bytes;
-use sp_runtime::{generic, traits::Block as BlockT};
+use sp_runtime::traits::Block as BlockT;
 
 use codec::Decode;
 use futures::{FutureExt, StreamExt, TryFutureExt};
@@ -110,11 +110,7 @@ where
 
 		let submit = self
 			.pool
-			.submit_and_watch(
-				&generic::BlockId::hash(best_block_hash),
-				TX_SOURCE,
-				decoded_extrinsic,
-			)
+			.submit_and_watch(best_block_hash, TX_SOURCE, decoded_extrinsic)
 			.map_err(|e| {
 				e.into_pool_error()
 					.map(Error::from)
diff --git a/substrate/client/rpc/src/author/mod.rs b/substrate/client/rpc/src/author/mod.rs
index feee22641ef3469fa23d8b5bc048e985bf1f7bd7..55d0a504aa678b82a3327683b5d417d61c8ad4c5 100644
--- a/substrate/client/rpc/src/author/mod.rs
+++ b/substrate/client/rpc/src/author/mod.rs
@@ -41,7 +41,7 @@ use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_blockchain::HeaderBackend;
 use sp_core::Bytes;
 use sp_keystore::{KeystoreExt, KeystorePtr};
-use sp_runtime::{generic, traits::Block as BlockT};
+use sp_runtime::traits::Block as BlockT;
 use sp_session::SessionKeys;
 
 use self::error::{Error, Result};
@@ -97,15 +97,12 @@ where
 			Err(err) => return Err(Error::Client(Box::new(err)).into()),
 		};
 		let best_block_hash = self.client.info().best_hash;
-		self.pool
-			.submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt)
-			.await
-			.map_err(|e| {
-				e.into_pool_error()
-					.map(|e| Error::Pool(e))
-					.unwrap_or_else(|e| Error::Verification(Box::new(e)))
-					.into()
-			})
+		self.pool.submit_one(best_block_hash, TX_SOURCE, xt).await.map_err(|e| {
+			e.into_pool_error()
+				.map(|e| Error::Pool(e))
+				.unwrap_or_else(|e| Error::Verification(Box::new(e)))
+				.into()
+		})
 	}
 
 	fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> RpcResult<()> {
@@ -191,14 +188,11 @@ where
 			},
 		};
 
-		let submit = self
-			.pool
-			.submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt)
-			.map_err(|e| {
-				e.into_pool_error()
-					.map(error::Error::from)
-					.unwrap_or_else(|e| error::Error::Verification(Box::new(e)))
-			});
+		let submit = self.pool.submit_and_watch(best_block_hash, TX_SOURCE, dxt).map_err(|e| {
+			e.into_pool_error()
+				.map(error::Error::from)
+				.unwrap_or_else(|e| error::Error::Verification(Box::new(e)))
+		});
 
 		let fut = async move {
 			let stream = match submit.await {
diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml
index
ccf23bc8994b344fb66ac658b0f47ac1d8ad5a50..2386bebf24d26d9ec98ab51b1bdad7d078efc734 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -80,7 +80,7 @@ tracing-futures = { version = "0.2.4" } async-trait = "0.1.57" tokio = { version = "1.22.0", features = ["time", "rt-multi-thread", "parking_lot"] } tempfile = "3.1.0" -directories = "4.0.1" +directories = "5.0.1" static_init = "1.0.3" [dev-dependencies] diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 0d927880d9d7824c72cc0af756d2191873eb3706..54b37eb8bf41dfdaa839a87691a0c0ee3e733925 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -487,8 +487,7 @@ where CallExecutor::runtime_version(&self.executor, hash) } - /// Apply a checked and validated block to an operation. If a justification is provided - /// then `finalized` *must* be true. + /// Apply a checked and validated block to an operation. fn apply_block( &self, operation: &mut ClientImportOperation<Block, B>, @@ -1769,8 +1768,7 @@ where { type Error = ConsensusError; - /// Import a checked and validated block. If a justification is provided in - /// `BlockImportParams` then `finalized` *must* be true. + /// Import a checked and validated block. /// /// NOTE: only use this implementation when there are NO consensus-level BlockImport /// objects. Otherwise, importing blocks directly into the client would be bypassing diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index cd720e1c1e096671a07fd849be40071cdfbea979..ff9eb982b862f9ebb36132c730835dc2087ef291 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -48,10 +48,7 @@ use sc_network_sync::SyncingService; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; use sp_consensus::SyncOracle; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; pub use self::{ builder::{ @@ -481,10 +478,8 @@ where }, }; - let best_block_id = BlockId::hash(self.client.info().best_hash); - let import_future = self.pool.submit_one( - &best_block_id, + self.client.info().best_hash, sc_transaction_pool_api::TransactionSource::External, uxt, ); @@ -549,10 +544,9 @@ mod tests { to: AccountKeyring::Bob.into(), } .into_unchecked_extrinsic(); - block_on(pool.submit_one(&BlockId::hash(best.hash()), source, transaction.clone())) - .unwrap(); + block_on(pool.submit_one(best.hash(), source, transaction.clone())).unwrap(); block_on(pool.submit_one( - &BlockId::hash(best.hash()), + best.hash(), source, ExtrinsicBuilder::new_call_do_not_propagate().nonce(1).build(), )) diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index 99a0a79ce02a34937740941e95e4d619dcca61dc..087794affc5c63171e5e496363f82b05571133bb 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -34,7 +34,6 @@ use sc_service::{ RuntimeGenesis, SpawnTaskHandle, TaskManager, }; use sc_transaction_pool_api::TransactionPool; -use sp_api::BlockId; use sp_blockchain::HeaderBackend; use sp_runtime::traits::Block as BlockT; use std::{iter, net::Ipv4Addr, pin::Pin, sync::Arc, task::Context, time::Duration}; @@ -502,15 +501,13 @@ pub fn sync<G, E, Fb, F, B, ExF, U>( info!("Checking extrinsic propagation"); let first_service = network.full_nodes[0].1.clone(); 
let first_user_data = &network.full_nodes[0].2; - let best_block = BlockId::number(first_service.client().info().best_number); + let best_block = first_service.client().info().best_hash; let extrinsic = extrinsic_factory(&first_service, first_user_data); let source = sc_transaction_pool_api::TransactionSource::External; - futures::executor::block_on(first_service.transaction_pool().submit_one( - &best_block, - source, - extrinsic, - )) + futures::executor::block_on( + first_service.transaction_pool().submit_one(best_block, source, extrinsic), + ) .expect("failed to submit extrinsic"); network.run_until_all_full(|_index, service| service.transaction_pool().ready().count() == 1); diff --git a/substrate/client/transaction-pool/api/src/lib.rs b/substrate/client/transaction-pool/api/src/lib.rs index 73cc513708d2d8e55f85ac2b5c35e861c3383a65..a795917528f9cca0831068fc77dd346fd2390838 100644 --- a/substrate/client/transaction-pool/api/src/lib.rs +++ b/substrate/client/transaction-pool/api/src/lib.rs @@ -26,10 +26,7 @@ use codec::Codec; use futures::{Future, Stream}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_core::offchain::TransactionPoolExt; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Member, NumberFor}, -}; +use sp_runtime::traits::{Block as BlockT, Member, NumberFor}; use std::{collections::HashMap, hash::Hash, marker::PhantomData, pin::Pin, sync::Arc}; const LOG_TARGET: &str = "txpool::api"; @@ -202,7 +199,7 @@ pub trait TransactionPool: Send + Sync { /// Returns a future that imports a bunch of unverified transactions to the pool. fn submit_at( &self, - at: &BlockId<Self::Block>, + at: <Self::Block as BlockT>::Hash, source: TransactionSource, xts: Vec<TransactionFor<Self>>, ) -> PoolFuture<Vec<Result<TxHash<Self>, Self::Error>>, Self::Error>; @@ -210,7 +207,7 @@ pub trait TransactionPool: Send + Sync { /// Returns a future that imports one unverified transaction to the pool. fn submit_one( &self, - at: &BlockId<Self::Block>, + at: <Self::Block as BlockT>::Hash, source: TransactionSource, xt: TransactionFor<Self>, ) -> PoolFuture<TxHash<Self>, Self::Error>; @@ -219,7 +216,7 @@ pub trait TransactionPool: Send + Sync { /// pool. 
fn submit_and_watch( &self, - at: &BlockId<Self::Block>, + at: <Self::Block as BlockT>::Hash, source: TransactionSource, xt: TransactionFor<Self>, ) -> PoolFuture<Pin<Box<TransactionStatusStreamFor<Self>>>, Self::Error>; diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index d114acc343d500909ac3c64cae8580bcad922c66..0caf00bf29551d009d7c9164a9ad2067040de198 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -33,6 +33,7 @@ use sp_runtime::{ ValidTransaction, }, }; +use std::sync::Arc; use substrate_test_runtime::{AccountId, Block, Extrinsic, ExtrinsicBuilder, TransferData, H256}; #[derive(Clone, Debug, Default)] @@ -61,7 +62,7 @@ impl ChainApi for TestApi { fn validate_transaction( &self, - at: &BlockId<Self::Block>, + at: <Self::Block as BlockT>::Hash, _source: TransactionSource, uxt: <Self::Block as BlockT>::Extrinsic, ) -> Self::ValidationFuture { @@ -70,7 +71,7 @@ impl ChainApi for TestApi { let nonce = transfer.nonce; let from = transfer.from; - match self.block_id_to_number(at) { + match self.block_id_to_number(&BlockId::Hash(at)) { Ok(Some(num)) if num > 5 => return ready(Ok(Err(InvalidTransaction::Stale.into()))), _ => {}, } @@ -94,6 +95,8 @@ impl ChainApi for TestApi { ) -> Result<Option<NumberFor<Self::Block>>, Self::Error> { Ok(match at { BlockId::Number(num) => Some(*num), + BlockId::Hash(hash) if *hash == H256::from_low_u64_be(hash.to_low_u64_be()) => + Some(hash.to_low_u64_be()), BlockId::Hash(_) => None, }) } @@ -104,7 +107,7 @@ impl ChainApi for TestApi { ) -> Result<Option<<Self::Block as BlockT>::Hash>, Self::Error> { Ok(match at { BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(), - BlockId::Hash(_) => None, + BlockId::Hash(hash) => Some(*hash), }) } @@ -137,7 +140,7 @@ fn uxt(transfer: TransferData) -> Extrinsic { ExtrinsicBuilder::new_bench_call(transfer).build() } -fn bench_configured(pool: Pool<TestApi>, number: u64) { +fn bench_configured(pool: Pool<TestApi>, number: u64, api: Arc<TestApi>) { let source = TransactionSource::External; let mut futures = Vec::new(); let mut tags = Vec::new(); @@ -151,7 +154,12 @@ fn bench_configured(pool: Pool<TestApi>, number: u64) { }); tags.push(to_tag(nonce, AccountId::from_h256(H256::from_low_u64_be(1)))); - futures.push(pool.submit_one(&BlockId::Number(1), source, xt)); + + futures.push(pool.submit_one( + api.block_id_to_hash(&BlockId::Number(1)).unwrap().unwrap(), + source, + xt, + )); } let res = block_on(futures::future::join_all(futures.into_iter())); @@ -162,7 +170,12 @@ fn bench_configured(pool: Pool<TestApi>, number: u64) { // Prune all transactions. 
let block_num = 6; - block_on(pool.prune_tags(&BlockId::Number(block_num), tags, vec![])).expect("Prune failed"); + block_on(pool.prune_tags( + api.block_id_to_hash(&BlockId::Number(block_num)).unwrap().unwrap(), + tags, + vec![], + )) + .expect("Prune failed"); // pool is empty assert_eq!(pool.validated_pool().status().ready, 0); @@ -172,19 +185,15 @@ fn bench_configured(pool: Pool<TestApi>, number: u64) { fn benchmark_main(c: &mut Criterion) { c.bench_function("sequential 50 tx", |b| { b.iter(|| { - bench_configured( - Pool::new(Default::default(), true.into(), TestApi::new_dependant().into()), - 50, - ); + let api = Arc::from(TestApi::new_dependant()); + bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 50, api); }); }); c.bench_function("random 100 tx", |b| { b.iter(|| { - bench_configured( - Pool::new(Default::default(), true.into(), TestApi::default().into()), - 100, - ); + let api = Arc::from(TestApi::default()); + bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 100, api); }); }); } diff --git a/substrate/client/transaction-pool/src/api.rs b/substrate/client/transaction-pool/src/api.rs index 871d8e9c81707b9fd3454ce201f898de692f76df..cccaad7c89949cb82e7f7576edf11dc48baab41c 100644 --- a/substrate/client/transaction-pool/src/api.rs +++ b/substrate/client/transaction-pool/src/api.rs @@ -133,13 +133,12 @@ where fn validate_transaction( &self, - at: &BlockId<Self::Block>, + at: <Self::Block as BlockT>::Hash, source: TransactionSource, uxt: graph::ExtrinsicFor<Self>, ) -> Self::ValidationFuture { let (tx, rx) = oneshot::channel(); let client = self.client.clone(); - let at = *at; let validation_pool = self.validation_pool.clone(); let metrics = self.metrics.clone(); @@ -151,7 +150,7 @@ where .await .send( async move { - let res = validate_transaction_blocking(&*client, &at, source, uxt); + let res = validate_transaction_blocking(&*client, at, source, uxt); let _ = tx.send(res); metrics.report(|m| m.validations_finished.inc()); } @@ -209,7 +208,7 @@ where /// This method will call into the runtime to perform the validation. fn validate_transaction_blocking<Client, Block>( client: &Client, - at: &BlockId<Block>, + at: Block::Hash, source: TransactionSource, uxt: graph::ExtrinsicFor<FullChainApi<Client, Block>>, ) -> error::Result<TransactionValidity> @@ -225,14 +224,10 @@ where { sp_tracing::within_span!(sp_tracing::Level::TRACE, "validate_transaction"; { - let block_hash = client.to_hash(at) - .map_err(|e| Error::RuntimeApi(e.to_string()))? - .ok_or_else(|| Error::RuntimeApi(format!("Could not get hash for block `{:?}`.", at)))?; - let runtime_api = client.runtime_api(); let api_version = sp_tracing::within_span! { sp_tracing::Level::TRACE, "check_version"; runtime_api - .api_version::<dyn TaggedTransactionQueue<Block>>(block_hash) + .api_version::<dyn TaggedTransactionQueue<Block>>(at) .map_err(|e| Error::RuntimeApi(e.to_string()))? .ok_or_else(|| Error::RuntimeApi( format!("Could not find `TaggedTransactionQueue` api for block `{:?}`.", at) @@ -245,31 +240,31 @@ where sp_tracing::Level::TRACE, "runtime::validate_transaction"; { if api_version >= 3 { - runtime_api.validate_transaction(block_hash, source, uxt, block_hash) + runtime_api.validate_transaction(at, source, uxt, at) .map_err(|e| Error::RuntimeApi(e.to_string())) } else { - let block_number = client.to_number(at) + let block_number = client.to_number(&BlockId::Hash(at)) .map_err(|e| Error::RuntimeApi(e.to_string()))? 
.ok_or_else(|| Error::RuntimeApi(format!("Could not get number for block `{:?}`.", at))
				)?;
 
 				// The old versions require us to call `initialize_block` before.
-				runtime_api.initialize_block(block_hash, &sp_runtime::traits::Header::new(
+				runtime_api.initialize_block(at, &sp_runtime::traits::Header::new(
 					block_number + sp_runtime::traits::One::one(),
 					Default::default(),
 					Default::default(),
-					block_hash,
+					at,
 					Default::default()),
 				).map_err(|e| Error::RuntimeApi(e.to_string()))?;
 
 				if api_version == 2 {
 					#[allow(deprecated)] // old validate_transaction
-					runtime_api.validate_transaction_before_version_3(block_hash, source, uxt)
+					runtime_api.validate_transaction_before_version_3(at, source, uxt)
 						.map_err(|e| Error::RuntimeApi(e.to_string()))
 				} else {
 					#[allow(deprecated)] // old validate_transaction
-					runtime_api.validate_transaction_before_version_2(block_hash, uxt)
+					runtime_api.validate_transaction_before_version_2(at, uxt)
 						.map_err(|e| Error::RuntimeApi(e.to_string()))
 				}
 			}
@@ -294,7 +289,7 @@ where
 	/// the runtime locally.
 	pub fn validate_transaction_blocking(
 		&self,
-		at: &BlockId<Block>,
+		at: Block::Hash,
 		source: TransactionSource,
 		uxt: graph::ExtrinsicFor<Self>,
 	) -> error::Result<TransactionValidity> {
diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs
index 4d34737a7ba704d72aaad67de60f6925499db6c3..5305b5f1c12ebe99668413f87e8becfe4ba45e82 100644
--- a/substrate/client/transaction-pool/src/graph/pool.rs
+++ b/substrate/client/transaction-pool/src/graph/pool.rs
@@ -71,7 +71,7 @@ pub trait ChainApi: Send + Sync {
 	/// Verify extrinsic at given block.
 	fn validate_transaction(
 		&self,
-		at: &BlockId<Self::Block>,
+		at: <Self::Block as BlockT>::Hash,
 		source: TransactionSource,
 		uxt: ExtrinsicFor<Self>,
 	) -> Self::ValidationFuture;
@@ -154,7 +154,7 @@ impl<B: ChainApi> Pool<B> {
 	/// Imports a bunch of unverified extrinsics to the pool
 	pub async fn submit_at(
 		&self,
-		at: &BlockId<B::Block>,
+		at: <B::Block as BlockT>::Hash,
 		source: TransactionSource,
 		xts: impl IntoIterator<Item = ExtrinsicFor<B>>,
 	) -> Result<Vec<Result<ExtrinsicHash<B>, B::Error>>, B::Error> {
@@ -168,7 +168,7 @@ impl<B: ChainApi> Pool<B> {
 	/// This does not check if a transaction is banned before we verify it again.
 	pub async fn resubmit_at(
 		&self,
-		at: &BlockId<B::Block>,
+		at: <B::Block as BlockT>::Hash,
 		source: TransactionSource,
 		xts: impl IntoIterator<Item = ExtrinsicFor<B>>,
 	) -> Result<Vec<Result<ExtrinsicHash<B>, B::Error>>, B::Error> {
@@ -180,7 +180,7 @@ impl<B: ChainApi> Pool<B> {
 	/// Imports one unverified extrinsic to the pool
 	pub async fn submit_one(
 		&self,
-		at: &BlockId<B::Block>,
+		at: <B::Block as BlockT>::Hash,
 		source: TransactionSource,
 		xt: ExtrinsicFor<B>,
 	) -> Result<ExtrinsicHash<B>, B::Error> {
@@ -191,11 +191,11 @@ impl<B: ChainApi> Pool<B> {
 	/// Imports a single extrinsic and starts to watch its progress in the pool.
 	pub async fn submit_and_watch(
 		&self,
-		at: &BlockId<B::Block>,
+		at: <B::Block as BlockT>::Hash,
 		source: TransactionSource,
 		xt: ExtrinsicFor<B>,
 	) -> Result<Watcher<ExtrinsicHash<B>, ExtrinsicHash<B>>, B::Error> {
-		let block_number = self.resolve_block_number(at)?;
+		let block_number = self.resolve_block_number(&BlockId::Hash(at))?;
 		let (_, tx) = self
 			.verify_one(at, block_number, source, xt, CheckBannedBeforeVerify::Yes)
 			.await;
@@ -246,8 +246,8 @@ impl<B: ChainApi> Pool<B> {
 	/// their provided tags from there. Otherwise we query the runtime at the `parent` block.
pub async fn prune( &self, - at: &BlockId<B::Block>, - parent: &BlockId<B::Block>, + at: <B::Block as BlockT>::Hash, + parent: <B::Block as BlockT>::Hash, extrinsics: &[ExtrinsicFor<B>], ) -> Result<(), B::Error> { log::debug!( @@ -324,7 +324,7 @@ impl<B: ChainApi> Pool<B> { /// prevent importing them in the (near) future. pub async fn prune_tags( &self, - at: &BlockId<B::Block>, + at: <B::Block as BlockT>::Hash, tags: impl IntoIterator<Item = Tag>, known_imported_hashes: impl IntoIterator<Item = ExtrinsicHash<B>> + Clone, ) -> Result<(), B::Error> { @@ -351,7 +351,7 @@ impl<B: ChainApi> Pool<B> { // And finally - submit reverified transactions back to the pool self.validated_pool.resubmit_pruned( - at, + &BlockId::Hash(at), known_imported_hashes, pruned_hashes, reverified_transactions.into_values().collect(), @@ -373,12 +373,12 @@ impl<B: ChainApi> Pool<B> { /// Returns future that validates a bunch of transactions at given block. async fn verify( &self, - at: &BlockId<B::Block>, + at: <B::Block as BlockT>::Hash, xts: impl IntoIterator<Item = (TransactionSource, ExtrinsicFor<B>)>, check: CheckBannedBeforeVerify, ) -> Result<HashMap<ExtrinsicHash<B>, ValidatedTransactionFor<B>>, B::Error> { // we need a block number to compute tx validity - let block_number = self.resolve_block_number(at)?; + let block_number = self.resolve_block_number(&BlockId::Hash(at))?; let res = futures::future::join_all( xts.into_iter() @@ -394,7 +394,7 @@ impl<B: ChainApi> Pool<B> { /// Returns future that validates single transaction at given block. async fn verify_one( &self, - block_id: &BlockId<B::Block>, + block_hash: <B::Block as BlockT>::Hash, block_number: NumberFor<B>, source: TransactionSource, xt: ExtrinsicFor<B>, @@ -410,7 +410,7 @@ impl<B: ChainApi> Pool<B> { let validation_result = self .validated_pool .api() - .validate_transaction(block_id, source, xt.clone()) + .validate_transaction(block_hash, source, xt.clone()) .await; let status = match validation_result { @@ -458,6 +458,7 @@ mod tests { use super::{super::base_pool::Limit, *}; use crate::tests::{pool, uxt, TestApi, INVALID_NONCE}; use assert_matches::assert_matches; + use codec::Encode; use futures::executor::block_on; use parking_lot::Mutex; use sc_transaction_pool_api::TransactionStatus; @@ -471,11 +472,11 @@ mod tests { #[test] fn should_validate_and_import_transaction() { // given - let pool = pool(); + let (pool, api) = pool(); // when let hash = block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -493,7 +494,7 @@ mod tests { #[test] fn should_reject_if_temporarily_banned() { // given - let pool = pool(); + let (pool, api) = pool(); let uxt = uxt(Transfer { from: Alice.into(), to: AccountId::from_h256(H256::from_low_u64_be(2)), @@ -503,7 +504,7 @@ mod tests { // when pool.validated_pool.ban(&Instant::now(), vec![pool.hash_of(&uxt)]); - let res = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt)); + let res = block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt)); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -514,18 +515,19 @@ mod tests { #[test] fn should_reject_unactionable_transactions() { // given + let api = Arc::new(TestApi::default()); let pool = Pool::new( Default::default(), // the node does not author blocks false.into(), - TestApi::default().into(), + api.clone(), ); // after validation `IncludeData` will be set to non-propagable (validate_transaction mock) let uxt 
= ExtrinsicBuilder::new_include_data(vec![42]).build(); // when - let res = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt)); + let res = block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt)); // then assert_matches!(res.unwrap_err(), error::Error::Unactionable); @@ -535,12 +537,13 @@ mod tests { fn should_notify_about_pool_events() { let (stream, hash0, hash1) = { // given - let pool = pool(); + let (pool, api) = pool(); + let hash_of_block0 = api.expect_hash_from_number(0); let stream = pool.validated_pool().import_notification_stream(); // when let hash0 = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -551,7 +554,7 @@ mod tests { )) .unwrap(); let hash1 = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -563,7 +566,7 @@ mod tests { .unwrap(); // future doesn't count let _hash = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -590,9 +593,10 @@ mod tests { #[test] fn should_clear_stale_transactions() { // given - let pool = pool(); + let (pool, api) = pool(); + let hash_of_block0 = api.expect_hash_from_number(0); let hash1 = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -603,7 +607,7 @@ mod tests { )) .unwrap(); let hash2 = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -614,7 +618,7 @@ mod tests { )) .unwrap(); let hash3 = block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -641,9 +645,9 @@ mod tests { #[test] fn should_ban_mined_transactions() { // given - let pool = pool(); + let (pool, api) = pool(); let hash1 = block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -655,12 +659,12 @@ mod tests { .unwrap(); // when - block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1])).unwrap(); + block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![vec![0]], vec![hash1])) + .unwrap(); // then assert!(pool.validated_pool.is_banned(&hash1)); } - use codec::Encode; #[test] fn should_limit_futures() { @@ -678,14 +682,15 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + let hash1 = block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().future, 1); // when let hash2 = block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Bob.into(), @@ -709,11 +714,12 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); // when block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -732,11 +738,11 @@ mod tests { #[test] fn should_reject_transactions_with_no_provides() { // given 
- let pool = pool(); + let (pool, api) = pool(); // when let err = block_on(pool.submit_one( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -759,9 +765,9 @@ mod tests { #[test] fn should_trigger_ready_and_finalized() { // given - let pool = pool(); + let (pool, api) = pool(); let watcher = block_on(pool.submit_and_watch( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -774,26 +780,25 @@ mod tests { assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); + let hash_of_block2 = api.expect_hash_from_number(2); + // when - block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![])).unwrap(); + block_on(pool.prune_tags(hash_of_block2, vec![vec![0u8]], vec![])).unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!( - stream.next(), - Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), - ); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((hash_of_block2.into(), 0))),); } #[test] fn should_trigger_ready_and_finalized_when_pruning_via_hash() { // given - let pool = pool(); + let (pool, api) = pool(); let watcher = block_on(pool.submit_and_watch( - &BlockId::Number(0), + api.expect_hash_from_number(0), SOURCE, uxt(Transfer { from: Alice.into(), @@ -806,8 +811,10 @@ mod tests { assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); + let hash_of_block2 = api.expect_hash_from_number(2); + // when - block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![*watcher.hash()])) + block_on(pool.prune_tags(hash_of_block2, vec![vec![0u8]], vec![*watcher.hash()])) .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -815,18 +822,17 @@ mod tests { // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!( - stream.next(), - Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), - ); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((hash_of_block2.into(), 0))),); } #[test] fn should_trigger_future_and_ready_after_promoted() { // given - let pool = pool(); + let (pool, api) = pool(); + let hash_of_block0 = api.expect_hash_from_number(0); + let watcher = block_on(pool.submit_and_watch( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -841,7 +847,7 @@ mod tests { // when block_on(pool.submit_one( - &BlockId::Number(0), + hash_of_block0, SOURCE, uxt(Transfer { from: Alice.into(), @@ -862,7 +868,7 @@ mod tests { #[test] fn should_trigger_invalid_and_ban() { // given - let pool = pool(); + let (pool, api) = pool(); let uxt = uxt(Transfer { from: Alice.into(), to: AccountId::from_h256(H256::from_low_u64_be(2)), @@ -870,7 +876,8 @@ mod tests { nonce: 0, }); let watcher = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, uxt)) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -886,7 +893,7 @@ mod tests { #[test] fn should_trigger_broadcasted() { // given - let pool = pool(); + let (pool, 
api) = pool(); let uxt = uxt(Transfer { from: Alice.into(), to: AccountId::from_h256(H256::from_low_u64_be(2)), @@ -894,7 +901,8 @@ mod tests { nonce: 0, }); let watcher = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, uxt)) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -916,7 +924,8 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); let xt = uxt(Transfer { from: Alice.into(), @@ -924,7 +933,9 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, xt)).unwrap(); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt)) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -934,7 +945,7 @@ mod tests { amount: 4, nonce: 1, }); - block_on(pool.submit_one(&BlockId::Number(1), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // then @@ -951,12 +962,13 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // then @@ -968,7 +980,7 @@ mod tests { amount: 4, nonce: 1, }); - let result = block_on(pool.submit_one(&BlockId::Number(1), SOURCE, xt)); + let result = block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt)); assert!(matches!( result, Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped) @@ -980,12 +992,15 @@ mod tests { let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; - let pool = Pool::new(options, true.into(), TestApi::default().into()); + let api = Arc::new(TestApi::default()); + let pool = Pool::new(options, true.into(), api.clone()); + + let hash_of_block0 = api.expect_hash_from_number(0); // after validation `IncludeData` will have priority set to 9001 // (validate_transaction mock) let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, xt)).unwrap(); + block_on(pool.submit_and_watch(hash_of_block0, SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // after validation `Transfer` will have priority set to 4 (validate_transaction @@ -996,15 +1011,14 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, xt)).unwrap(); + let watcher = block_on(pool.submit_and_watch(hash_of_block0, SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); // when // after validation `Store` will have priority set to 9001 (validate_transaction // mock) let xt = 
ExtrinsicBuilder::new_indexed_call(Vec::new()).build(); - block_on(pool.submit_one(&BlockId::Number(1), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); // then @@ -1021,7 +1035,10 @@ mod tests { let (tx, rx) = std::sync::mpsc::sync_channel(1); let mut api = TestApi::default(); api.delay = Arc::new(Mutex::new(rx.into())); - let pool = Arc::new(Pool::new(Default::default(), true.into(), api.into())); + let api = Arc::new(api); + let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + + let hash_of_block0 = api.expect_hash_from_number(0); // when let xt = uxt(Transfer { @@ -1034,7 +1051,7 @@ mod tests { // This transaction should go to future, since we use `nonce: 1` let pool2 = pool.clone(); std::thread::spawn(move || { - block_on(pool2.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + block_on(pool2.submit_one(hash_of_block0, SOURCE, xt)).unwrap(); ready.send(()).unwrap(); }); @@ -1048,12 +1065,13 @@ mod tests { }); // The tag the above transaction provides (TestApi is using just nonce as u8) let provides = vec![0_u8]; - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + block_on(pool.submit_one(hash_of_block0, SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // Now block import happens before the second transaction is able to finish // verification. - block_on(pool.prune_tags(&BlockId::Number(1), vec![provides], vec![])).unwrap(); + block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![provides], vec![])) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); // so when we release the verification of the previous one it will have diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index ffaab89d982360b33d67efffac1255e11f9ad1e5..faa3f455a580c8713ced21b14818da874fd972a7 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -166,8 +166,11 @@ where finalized_hash: Block::Hash, ) -> (Self, Pin<Box<dyn Future<Output = ()> + Send>>) { let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone())); - let (revalidation_queue, background_task) = - revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); + let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background( + pool_api.clone(), + pool.clone(), + finalized_hash, + ); ( Self { api: pool_api, @@ -203,8 +206,11 @@ where RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), RevalidationType::Full => { - let (queue, background) = - revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); + let (queue, background) = revalidation::RevalidationQueue::new_background( + pool_api.clone(), + pool.clone(), + finalized_hash, + ); (queue, Some(background)) }, }; @@ -254,46 +260,43 @@ where fn submit_at( &self, - at: &BlockId<Self::Block>, + at: <Self::Block as BlockT>::Hash, source: TransactionSource, xts: Vec<TransactionFor<Self>>, ) -> PoolFuture<Vec<Result<TxHash<Self>, Self::Error>>, Self::Error> { let pool = self.pool.clone(); - let at = *at; self.metrics .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); - async move { pool.submit_at(&at, source, xts).await }.boxed() + async move { pool.submit_at(at, source, xts).await }.boxed() } fn submit_one( &self, - at: 
&BlockId<Self::Block>, + at: <Self::Block as BlockT>::Hash, source: TransactionSource, xt: TransactionFor<Self>, ) -> PoolFuture<TxHash<Self>, Self::Error> { let pool = self.pool.clone(); - let at = *at; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - async move { pool.submit_one(&at, source, xt).await }.boxed() + async move { pool.submit_one(at, source, xt).await }.boxed() } fn submit_and_watch( &self, - at: &BlockId<Self::Block>, + at: <Self::Block as BlockT>::Hash, source: TransactionSource, xt: TransactionFor<Self>, ) -> PoolFuture<Pin<Box<TransactionStatusStreamFor<Self>>>, Self::Error> { - let at = *at; let pool = self.pool.clone(); self.metrics.report(|metrics| metrics.submitted_transactions.inc()); async move { - let watcher = pool.submit_and_watch(&at, source, xt).await?; + let watcher = pool.submit_and_watch(at, source, xt).await?; Ok(watcher.into_stream().boxed()) } @@ -433,11 +436,7 @@ where let validity = self .api - .validate_transaction_blocking( - &BlockId::hash(at), - TransactionSource::Local, - xt.clone(), - )? + .validate_transaction_blocking(at, TransactionSource::Local, xt.clone())? .map_err(|e| { Self::Error::Pool(match e { TransactionValidityError::Invalid(i) => TxPoolError::InvalidTransaction(i), @@ -577,10 +576,7 @@ async fn prune_known_txs_for_block<Block: BlockT, Api: graph::ChainApi<Block = B }, }; - if let Err(e) = pool - .prune(&BlockId::Hash(block_hash), &BlockId::hash(*header.parent_hash()), &extrinsics) - .await - { + if let Err(e) = pool.prune(block_hash, *header.parent_hash(), &extrinsics).await { log::error!("Cannot prune known in the pool: {}", e); } @@ -691,7 +687,7 @@ where if let Err(e) = pool .resubmit_at( - &BlockId::Hash(*hash), + *hash, // These transactions are coming from retracted blocks, we should // simply consider them external. TransactionSource::External, @@ -717,7 +713,7 @@ where if next_action.revalidate { let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect(); - self.revalidation_queue.revalidate_later(*block_number, hashes).await; + self.revalidation_queue.revalidate_later(*hash, hashes).await; self.revalidation_strategy.lock().clear(); } diff --git a/substrate/client/transaction-pool/src/revalidation.rs b/substrate/client/transaction-pool/src/revalidation.rs index b2c41be92eea3cdbe55bf2db5ddfe5d23d91d8bd..488ab19d8eabc3c11cb575f588400d03ffea39b0 100644 --- a/substrate/client/transaction-pool/src/revalidation.rs +++ b/substrate/client/transaction-pool/src/revalidation.rs @@ -25,14 +25,12 @@ use std::{ }; use crate::{ - graph::{ChainApi, ExtrinsicHash, NumberFor, Pool, ValidatedTransaction}, + graph::{BlockHash, ChainApi, ExtrinsicHash, Pool, ValidatedTransaction}, LOG_TARGET, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::{ - generic::BlockId, - traits::{SaturatedConversion, Zero}, - transaction_validity::TransactionValidityError, + generic::BlockId, traits::SaturatedConversion, transaction_validity::TransactionValidityError, }; use futures::prelude::*; @@ -44,7 +42,7 @@ const MIN_BACKGROUND_REVALIDATION_BATCH_SIZE: usize = 20; /// Payload from queue to worker. 
struct WorkerPayload<Api: ChainApi> {
-	at: NumberFor<Api>,
+	at: BlockHash<Api>,
 	transactions: Vec<ExtrinsicHash<Api>>,
 }
@@ -54,9 +52,9 @@ struct WorkerPayload<Api: ChainApi> {
 struct RevalidationWorker<Api: ChainApi> {
 	api: Arc<Api>,
 	pool: Arc<Pool<Api>>,
-	best_block: NumberFor<Api>,
-	block_ordered: BTreeMap<NumberFor<Api>, HashSet<ExtrinsicHash<Api>>>,
-	members: HashMap<ExtrinsicHash<Api>, NumberFor<Api>>,
+	best_block: BlockHash<Api>,
+	block_ordered: BTreeMap<BlockHash<Api>, HashSet<ExtrinsicHash<Api>>>,
+	members: HashMap<ExtrinsicHash<Api>, BlockHash<Api>>,
 }
 
 impl<Api: ChainApi> Unpin for RevalidationWorker<Api> {}
@@ -68,15 +66,30 @@ impl<Api: ChainApi> Unpin for RevalidationWorker<Api> {}
 async fn batch_revalidate<Api: ChainApi>(
 	pool: Arc<Pool<Api>>,
 	api: Arc<Api>,
-	at: NumberFor<Api>,
+	at: BlockHash<Api>,
 	batch: impl IntoIterator<Item = ExtrinsicHash<Api>>,
 ) {
+	// This conversion should work. Otherwise, for an unknown block, revalidation is skipped:
+	// all the transactions are kept in the validated pool and can be scheduled for
+	// revalidation with the next request.
+	let block_number = match api.block_id_to_number(&BlockId::Hash(at)) {
+		Ok(Some(n)) => n,
+		Ok(None) => {
+			log::debug!(target: LOG_TARGET, "revalidation skipped at block {at:?}, could not get block number.");
+			return
+		},
+		Err(e) => {
+			log::debug!(target: LOG_TARGET, "revalidation skipped at block {at:?}: {e:?}.");
+			return
+		},
+	};
+
 	let mut invalid_hashes = Vec::new();
 	let mut revalidated = HashMap::new();
 
 	let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| {
 		pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| {
-			api.validate_transaction(&BlockId::Number(at), ext.source, ext.data.clone())
+			api.validate_transaction(at, ext.source, ext.data.clone())
 				.map(move |validation_result| (validation_result, ext_hash, ext))
 		})
 	}))
@@ -107,7 +120,7 @@ async fn batch_revalidate<Api: ChainApi>(
 			revalidated.insert(
 				ext_hash,
 				ValidatedTransaction::valid_at(
-					at.saturated_into::<u64>(),
+					block_number.saturated_into::<u64>(),
 					ext_hash,
 					ext.source,
 					ext.data.clone(),
@@ -135,13 +148,13 @@
 }
 
 impl<Api: ChainApi> RevalidationWorker<Api> {
-	fn new(api: Arc<Api>, pool: Arc<Pool<Api>>) -> Self {
+	fn new(api: Arc<Api>, pool: Arc<Pool<Api>>, best_block: BlockHash<Api>) -> Self {
 		Self {
 			api,
 			pool,
+			best_block,
 			block_ordered: Default::default(),
 			members: Default::default(),
-			best_block: Zero::zero(),
 		}
 	}
@@ -303,10 +316,11 @@ where
 		api: Arc<Api>,
 		pool: Arc<Pool<Api>>,
 		interval: Duration,
+		best_block: BlockHash<Api>,
 	) -> (Self, Pin<Box<dyn Future<Output = ()> + Send>>) {
 		let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue", 100_000);
 
-		let worker = RevalidationWorker::new(api.clone(), pool.clone());
+		let worker = RevalidationWorker::new(api.clone(), pool.clone(), best_block);
 
 		let queue = Self { api, pool, background: Some(to_worker) };
 
@@ -317,8 +331,9 @@
 	pub fn new_background(
 		api: Arc<Api>,
 		pool: Arc<Pool<Api>>,
+		best_block: BlockHash<Api>,
 	) -> (Self, Pin<Box<dyn Future<Output = ()> + Send>>) {
-		Self::new_with_interval(api, pool, BACKGROUND_REVALIDATION_INTERVAL)
+		Self::new_with_interval(api, pool, BACKGROUND_REVALIDATION_INTERVAL, best_block)
 	}
 
 	/// Queue some transaction for later revalidation.
@@ -328,7 +343,7 @@
 	/// revalidation is actually done.
pub async fn revalidate_later( &self, - at: NumberFor<Api>, + at: BlockHash<Api>, transactions: Vec<ExtrinsicHash<Api>>, ) { if transactions.len() > 0 { @@ -360,9 +375,8 @@ mod tests { }; use futures::executor::block_on; use sc_transaction_pool_api::TransactionSource; - use sp_runtime::generic::BlockId; use substrate_test_runtime::{AccountId, Transfer, H256}; - use substrate_test_runtime_client::AccountKeyring::Alice; + use substrate_test_runtime_client::AccountKeyring::{Alice, Bob}; #[test] fn revalidation_queue_works() { @@ -376,18 +390,63 @@ mod tests { amount: 5, nonce: 0, }); - let uxt_hash = block_on(pool.submit_one( - &BlockId::number(0), - TransactionSource::External, - uxt.clone(), - )) - .expect("Should be valid"); - block_on(queue.revalidate_later(0, vec![uxt_hash])); + let hash_of_block0 = api.expect_hash_from_number(0); + + let uxt_hash = + block_on(pool.submit_one(hash_of_block0, TransactionSource::External, uxt.clone())) + .expect("Should be valid"); + + block_on(queue.revalidate_later(hash_of_block0, vec![uxt_hash])); // revalidated in sync offload 2nd time assert_eq!(api.validation_requests().len(), 2); // number of ready assert_eq!(pool.validated_pool().status().ready, 1); } + + #[test] + fn revalidation_queue_skips_revalidation_for_unknown_block_hash() { + let api = Arc::new(TestApi::default()); + let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone())); + let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); + + let uxt0 = uxt(Transfer { + from: Alice.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + let uxt1 = uxt(Transfer { + from: Bob.into(), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 4, + nonce: 1, + }); + + let hash_of_block0 = api.expect_hash_from_number(0); + let unknown_block = H256::repeat_byte(0x13); + + let uxt_hashes = + block_on(pool.submit_at(hash_of_block0, TransactionSource::External, vec![uxt0, uxt1])) + .expect("Should be valid") + .into_iter() + .map(|r| r.expect("Should be valid")) + .collect::<Vec<_>>(); + + assert_eq!(api.validation_requests().len(), 2); + assert_eq!(pool.validated_pool().status().ready, 2); + + // revalidation works fine for block 0: + block_on(queue.revalidate_later(hash_of_block0, uxt_hashes.clone())); + assert_eq!(api.validation_requests().len(), 4); + assert_eq!(pool.validated_pool().status().ready, 2); + + // revalidation shall be skipped for unknown block: + block_on(queue.revalidate_later(unknown_block, uxt_hashes)); + // no revalidation shall be done + assert_eq!(api.validation_requests().len(), 4); + // number of ready shall not change + assert_eq!(pool.validated_pool().status().ready, 2); + } } diff --git a/substrate/client/transaction-pool/src/tests.rs b/substrate/client/transaction-pool/src/tests.rs index 62911d5cbb471e37f7075d5506d98b60f8f46b83..325add3fb1c5a81a56a4a2591ed12ac4630b0b06 100644 --- a/substrate/client/transaction-pool/src/tests.rs +++ b/substrate/client/transaction-pool/src/tests.rs @@ -32,7 +32,7 @@ use sp_runtime::{ }; use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime::{ - substrate_test_pallet::pallet::Call as PalletCall, BalancesCall, Block, Extrinsic, + substrate_test_pallet::pallet::Call as PalletCall, BalancesCall, Block, BlockNumber, Extrinsic, ExtrinsicBuilder, Hashing, RuntimeCall, Transfer, TransferData, H256, }; @@ -53,6 +53,11 @@ impl TestApi { pub fn validation_requests(&self) -> Vec<Extrinsic> { self.validation_requests.lock().clone() } + + /// Helper function for 
mapping a block number to a hash. Use only when the mapping is expected to succeed.
+	pub fn expect_hash_from_number(&self, n: BlockNumber) -> H256 {
+		self.block_id_to_hash(&BlockId::Number(n)).unwrap().unwrap()
+	}
 }
 
 impl ChainApi for TestApi {
@@ -64,13 +69,13 @@ impl ChainApi for TestApi {
 	/// Verify extrinsic at given block.
 	fn validate_transaction(
 		&self,
-		at: &BlockId<Self::Block>,
+		at: <Self::Block as BlockT>::Hash,
 		_source: TransactionSource,
 		uxt: ExtrinsicFor<Self>,
 	) -> Self::ValidationFuture {
 		self.validation_requests.lock().push(uxt.clone());
 		let hash = self.hash_and_length(&uxt).0;
-		let block_number = self.block_id_to_number(at).unwrap().unwrap();
+		let block_number = self.block_id_to_number(&BlockId::Hash(at)).unwrap().unwrap();
 
 		let res = match uxt {
 			Extrinsic {
@@ -153,6 +158,8 @@
 	) -> Result<Option<NumberFor<Self>>, Self::Error> {
 		Ok(match at {
 			BlockId::Number(num) => Some(*num),
+			BlockId::Hash(hash) if *hash == H256::from_low_u64_be(hash.to_low_u64_be()) =>
+				Some(hash.to_low_u64_be()),
 			BlockId::Hash(_) => None,
 		})
 	}
@@ -164,7 +171,7 @@
 	) -> Result<Option<<Self::Block as BlockT>::Hash>, Self::Error> {
 		Ok(match at {
 			BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(),
-			BlockId::Hash(_) => None,
+			BlockId::Hash(hash) => Some(*hash),
 		})
 	}
@@ -199,6 +206,7 @@
 pub(crate) fn uxt(transfer: Transfer) -> Extrinsic {
 	ExtrinsicBuilder::new_transfer(transfer).build()
 }
 
-pub(crate) fn pool() -> Pool<TestApi> {
-	Pool::new(Default::default(), true.into(), TestApi::default().into())
+pub(crate) fn pool() -> (Pool<TestApi>, Arc<TestApi>) {
+	let api = Arc::new(TestApi::default());
+	(Pool::new(Default::default(), true.into(), api.clone()), api)
 }
diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs
index 4adf811b42521c165bcb777f45cc43e674940146..e5ab01ffbf09f73bb4f86632aeada334ec6dafc0 100644
--- a/substrate/client/transaction-pool/tests/pool.rs
+++ b/substrate/client/transaction-pool/tests/pool.rs
@@ -47,8 +47,9 @@ use substrate_test_runtime_transaction_pool::{uxt, TestApi};
 
 const LOG_TARGET: &str = "txpool";
 
-fn pool() -> Pool<TestApi> {
-	Pool::new(Default::default(), true.into(), TestApi::with_alice_nonce(209).into())
+fn pool() -> (Pool<TestApi>, Arc<TestApi>) {
+	let api = Arc::new(TestApi::with_alice_nonce(209));
+	(Pool::new(Default::default(), true.into(), api.clone()), api)
 }
 
 fn maintained_pool() -> (BasicPool<TestApi, Block>, Arc<TestApi>, futures::executor::ThreadPool) {
@@ -83,8 +84,8 @@ const SOURCE: TransactionSource = TransactionSource::External;
 
 #[test]
 fn submission_should_work() {
-	let pool = pool();
-	block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap();
+	let (pool, api) = pool();
+	block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap();
 
 	let pending: Vec<_> = pool
 		.validated_pool()
@@ -96,9 +97,9 @@
 
 #[test]
 fn multiple_submission_should_work() {
-	let pool = pool();
-	block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap();
-	block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap();
+	let (pool, api) = pool();
+	block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap();
+	block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 210))).unwrap();
 
 	let pending: Vec<_> = pool
 		.validated_pool()
@@ -111,8 +112,8 @@
 
 #[test]
 fn
early_nonce_should_be_culled() { sp_tracing::try_init_simple(); - let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 208))).unwrap(); + let (pool, api) = pool(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 208))).unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -124,9 +125,9 @@ fn early_nonce_should_be_culled() { #[test] fn late_nonce_should_be_queued() { - let pool = pool(); + let (pool, api) = pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 210))).unwrap(); let pending: Vec<_> = pool .validated_pool() .ready() @@ -134,7 +135,7 @@ fn late_nonce_should_be_queued() { .collect(); assert_eq!(pending, Vec::<Nonce>::new()); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); let pending: Vec<_> = pool .validated_pool() .ready() @@ -145,9 +146,10 @@ fn late_nonce_should_be_queued() { #[test] fn prune_tags_should_work() { - let pool = pool(); - let hash209 = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); + let (pool, api) = pool(); + let hash209 = + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt(Alice, 210))).unwrap(); let pending: Vec<_> = pool .validated_pool() @@ -157,7 +159,7 @@ fn prune_tags_should_work() { assert_eq!(pending, vec![209, 210]); pool.validated_pool().api().push_block(1, Vec::new(), true); - block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![hash209])) + block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![vec![209]], vec![hash209])) .expect("Prune tags"); let pending: Vec<_> = pool @@ -170,11 +172,12 @@ fn prune_tags_should_work() { #[test] fn should_ban_invalid_transactions() { - let pool = pool(); + let (pool, api) = pool(); let uxt = uxt(Alice, 209); - let hash = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap(); + let hash = + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt.clone())).unwrap(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt.clone())).unwrap_err(); // when let pending: Vec<_> = pool @@ -185,7 +188,7 @@ fn should_ban_invalid_transactions() { assert_eq!(pending, Vec::<Nonce>::new()); // then - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, uxt.clone())).unwrap_err(); } #[test] @@ -193,9 +196,9 @@ fn only_prune_on_new_best() { let (pool, api, _) = maintained_pool(); let uxt = uxt(Alice, 209); - let _ = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, uxt.clone())) + let _ = block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, uxt.clone())) .expect("1. 
Imported"); - pool.api().push_block(1, vec![uxt.clone()], true); + api.push_block(1, vec![uxt.clone()], true); assert_eq!(pool.status().ready, 1); let header = api.push_block(2, vec![uxt], true); @@ -212,13 +215,15 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { })); let pool = Pool::new(Default::default(), true.into(), api.clone()); let xt = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); // remove the transaction that just got imported. api.increment_nonce(Alice.into()); api.push_block(1, Vec::new(), true); - block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![])).expect("1. Pruned"); + block_on(pool.prune_tags(api.expect_hash_from_number(1), vec![vec![209]], vec![])) + .expect("1. Pruned"); assert_eq!(pool.validated_pool().status().ready, 0); // it's re-imported to future assert_eq!(pool.validated_pool().status().future, 1); @@ -227,7 +232,8 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { api.increment_nonce(Alice.into()); api.push_block(2, Vec::new(), true); let xt = uxt(Alice, 211); - block_on(pool.submit_one(&BlockId::number(2), SOURCE, xt.clone())).expect("2. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(2), SOURCE, xt.clone())) + .expect("2. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 1); let pending: Vec<_> = pool @@ -240,7 +246,8 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { // prune it and make sure the pool is empty api.increment_nonce(Alice.into()); api.push_block(3, Vec::new(), true); - block_on(pool.prune_tags(&BlockId::number(3), vec![vec![155]], vec![])).expect("2. Pruned"); + block_on(pool.prune_tags(api.expect_hash_from_number(3), vec![vec![155]], vec![])) + .expect("2. Pruned"); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 2); } @@ -270,7 +277,8 @@ fn should_prune_old_during_maintenance() { let (pool, api, _guard) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt.clone()], true); @@ -285,9 +293,11 @@ fn should_revalidate_during_maintenance() { let xt2 = uxt(Alice, 210); let (pool, api, _guard) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt2.clone())) - .expect("2. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt1.clone())) + .expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt2.clone())) + .expect("2. Imported"); assert_eq!(pool.status().ready, 2); assert_eq!(api.validation_requests().len(), 2); @@ -311,7 +321,8 @@ fn should_resubmit_from_retracted_during_maintenance() { let (pool, api, _guard) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![], true); @@ -329,7 +340,8 @@ fn should_not_resubmit_from_retracted_during_maintenance_if_tx_is_also_in_enacte let (pool, api, _guard) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt.clone()], true); @@ -347,8 +359,9 @@ fn should_not_retain_invalid_hashes_from_retracted() { let (pool, api, _guard) = maintained_pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) - .expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![], true); @@ -374,15 +387,18 @@ fn should_revalidate_across_many_blocks() { let (pool, api, _guard) = maintained_pool(); - let watcher1 = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt1.clone())) + let watcher1 = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt1.clone())) + .expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt2.clone())) .expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt2.clone())).expect("1. Imported"); assert_eq!(pool.status().ready, 2); let header = api.push_block(1, vec![], true); block_on(pool.maintain(block_event(header))); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt3.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt3.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 3); let header = api.push_block(2, vec![xt1.clone()], true); @@ -409,19 +425,24 @@ fn should_push_watchers_during_maintenance() { let tx0 = alice_uxt(0); let watcher0 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx0.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx0.clone())) + .unwrap(); let tx1 = alice_uxt(1); let watcher1 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx1.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx1.clone())) + .unwrap(); let tx2 = alice_uxt(2); let watcher2 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx2.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx2.clone())) + .unwrap(); let tx3 = alice_uxt(3); let watcher3 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx3.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx3.clone())) + .unwrap(); let tx4 = alice_uxt(4); let watcher4 = - block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx4.clone())).unwrap(); + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, tx4.clone())) + .unwrap(); assert_eq!(pool.status().ready, 5); // when @@ -489,11 +510,13 @@ fn finalization() { let api = TestApi::with_alice_nonce(209); api.push_block(1, vec![], true); let pool = create_basic_pool(api); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) - .expect("1. 
Imported"); - pool.api().push_block(2, vec![xt.clone()], true); + let api = pool.api(); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, xt.clone())) + .expect("1. Imported"); + api.push_block(2, vec![xt.clone()], true); - let header = pool.api().chain().read().block_by_number.get(&2).unwrap()[0].0.header().clone(); + let header = api.chain().read().block_by_number.get(&2).unwrap()[0].0.header().clone(); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); @@ -515,16 +538,17 @@ fn fork_aware_finalization() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let mut canon_watchers = vec![]; let from_alice = uxt(Alice, 1); let from_dave = uxt(Dave, 2); let from_bob = uxt(Bob, 1); let from_charlie = uxt(Charlie, 1); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Dave.into()); - pool.api().increment_nonce(Charlie.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Dave.into()); + api.increment_nonce(Charlie.into()); + api.increment_nonce(Bob.into()); let from_dave_watcher; let from_bob_watcher; @@ -538,10 +562,13 @@ fn fork_aware_finalization() { // block B1 { - let watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(2, vec![from_alice.clone()], true); + let watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block(2, vec![from_alice.clone()], true); canon_watchers.push((watcher, header.hash())); assert_eq!(pool.status().ready, 1); @@ -556,10 +583,13 @@ fn fork_aware_finalization() { // block C2 { - let header = pool.api().push_block_with_parent(b1, vec![from_dave.clone()], true); - from_dave_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone())) - .expect("1. Imported"); + let header = api.push_block_with_parent(b1, vec![from_dave.clone()], true); + from_dave_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_dave.clone(), + )) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> C2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -570,11 +600,14 @@ fn fork_aware_finalization() { // block D2 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api().push_block_with_parent(c2, vec![from_bob.clone()], true); + let header = api.push_block_with_parent(c2, vec![from_bob.clone()], true); log::trace!(target: LOG_TARGET, ">> D2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -585,15 +618,18 @@ fn fork_aware_finalization() { // block C1 { - let watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) - .expect("1.Imported"); + let watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_charlie.clone(), + )) + .expect("1.Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api().push_block_with_parent(b1, vec![from_charlie.clone()], true); + let header = api.push_block_with_parent(b1, vec![from_charlie.clone()], true); log::trace!(target: LOG_TARGET, ">> C1: {:?} {:?}", header.hash(), header); c1 = header.hash(); canon_watchers.push((watcher, header.hash())); - let event = block_event_with_retracted(header.clone(), d2, pool.api()); + let event = block_event_with_retracted(header.clone(), d2, api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); @@ -604,10 +640,10 @@ fn fork_aware_finalization() { // block D1 { let xt = uxt(Eve, 0); - let w = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) + let w = block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, xt.clone())) .expect("1. Imported"); assert_eq!(pool.status().ready, 3); - let header = pool.api().push_block_with_parent(c1, vec![xt.clone()], true); + let header = api.push_block_with_parent(c1, vec![xt.clone()], true); log::trace!(target: LOG_TARGET, ">> D1: {:?} {:?}", header.hash(), header); d1 = header.hash(); canon_watchers.push((w, header.hash())); @@ -623,7 +659,7 @@ fn fork_aware_finalization() { // block E1 { - let header = pool.api().push_block_with_parent(d1, vec![from_dave, from_bob], true); + let header = api.push_block_with_parent(d1, vec![from_dave, from_bob], true); log::trace!(target: LOG_TARGET, ">> E1: {:?} {:?}", header.hash(), header); e1 = header.hash(); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -673,16 +709,18 @@ fn prune_and_retract_tx_at_same_time() { api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); - pool.api().increment_nonce(Alice.into()); + api.increment_nonce(Alice.into()); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, from_alice.clone())) + .expect("1. 
Imported"); // Block B1 let b1 = { - let header = pool.api().push_block(2, vec![from_alice.clone()], true); + let header = api.push_block(2, vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -693,10 +731,10 @@ fn prune_and_retract_tx_at_same_time() { // Block B2 let b2 = { - let header = pool.api().push_block(2, vec![from_alice.clone()], true); + let header = api.push_block(2, vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 0); - let event = block_event_with_retracted(header.clone(), b1, pool.api()); + let event = block_event_with_retracted(header.clone(), b1, api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -739,19 +777,21 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Dave.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Dave.into()); let d0; // Block D0 { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(2, vec![tx0.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx0.clone())) + .expect("1. Imported"); + let header = api.push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; @@ -762,17 +802,18 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { // Block D1 { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone())) - .expect("1. Imported"); - pool.api().push_block(2, vec![tx1.clone()], false); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx1.clone())) + .expect("1. Imported"); + api.push_block(2, vec![tx1.clone()], false); assert_eq!(pool.status().ready, 1); } // Block D2 { //push new best block - let header = pool.api().push_block(2, vec![], true); - let event = block_event_with_retracted(header, d0, pool.api()); + let header = api.push_block(2, vec![], true); + let event = block_event_with_retracted(header, d0, api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); } @@ -786,6 +827,8 @@ fn resubmit_from_retracted_fork() { let pool = create_basic_pool(api); + let api = pool.api(); + let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); let tx2 = uxt(Bob, 3); @@ -795,18 +838,19 @@ fn resubmit_from_retracted_fork() { let tx4 = uxt(Ferdie, 2); let tx5 = uxt(One, 3); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Dave.into()); - pool.api().increment_nonce(Bob.into()); - pool.api().increment_nonce(Eve.into()); - pool.api().increment_nonce(Ferdie.into()); - pool.api().increment_nonce(One.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Dave.into()); + api.increment_nonce(Bob.into()); + api.increment_nonce(Eve.into()); + api.increment_nonce(Ferdie.into()); + api.increment_nonce(One.into()); // Block D0 { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(2, vec![tx0.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx0.clone())) + .expect("1. 
Imported"); + let header = api.push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); block_on(pool.maintain(block_event(header))); @@ -815,18 +859,20 @@ fn resubmit_from_retracted_fork() { // Block E0 { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(3, vec![tx1.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx1.clone())) + .expect("1. Imported"); + let header = api.push_block(3, vec![tx1.clone()], true); block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 0); } // Block F0 let f0 = { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx2.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(4, vec![tx2.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx2.clone())) + .expect("1. Imported"); + let header = api.push_block(4, vec![tx2.clone()], true); block_on(pool.maintain(block_event(header.clone()))); assert_eq!(pool.status().ready, 0); header.hash() @@ -834,27 +880,30 @@ fn resubmit_from_retracted_fork() { // Block D1 let d1 = { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx3.clone())) - .expect("1. Imported"); - let header = pool.api().push_block(2, vec![tx3.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx3.clone())) + .expect("1. Imported"); + let header = api.push_block(2, vec![tx3.clone()], true); assert_eq!(pool.status().ready, 1); header.hash() }; // Block E1 let e1 = { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx4.clone())) - .expect("1. Imported"); - let header = pool.api().push_block_with_parent(d1, vec![tx4.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx4.clone())) + .expect("1. Imported"); + let header = api.push_block_with_parent(d1, vec![tx4.clone()], true); assert_eq!(pool.status().ready, 2); header.hash() }; // Block F1 let f1_header = { - let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone())) - .expect("1. Imported"); - let header = pool.api().push_block_with_parent(e1, vec![tx5.clone()], true); + let _ = + block_on(pool.submit_and_watch(api.expect_hash_from_number(1), SOURCE, tx5.clone())) + .expect("1. Imported"); + let header = api.push_block_with_parent(e1, vec![tx5.clone()], true); // Don't announce the block event to the pool directly, because we will // re-org to this block. assert_eq!(pool.status().ready, 3); @@ -865,7 +914,7 @@ fn resubmit_from_retracted_fork() { let expected_ready = vec![tx3, tx4, tx5].iter().map(Encode::encode).collect::<BTreeSet<_>>(); assert_eq!(expected_ready, ready); - let event = block_event_with_retracted(f1_header, f0, pool.api()); + let event = block_event_with_retracted(f1_header, f0, api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 3); @@ -876,9 +925,10 @@ fn resubmit_from_retracted_fork() { #[test] fn ready_set_should_not_resolve_before_block_update() { - let (pool, _api, _guard) = maintained_pool(); + let (pool, api, _guard) = maintained_pool(); let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt1.clone())) + .expect("1. 
Imported"); assert!(pool.ready_at(1).now_or_never().is_none()); } @@ -890,7 +940,8 @@ fn ready_set_should_resolve_after_block_update() { let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt1.clone())) + .expect("1. Imported"); block_on(pool.maintain(block_event(header))); assert!(pool.ready_at(1).now_or_never().is_some()); @@ -903,7 +954,8 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(1), SOURCE, xt1.clone())) + .expect("1. Imported"); let noop_waker = futures::task::noop_waker(); let mut context = futures::task::Context::from_waker(&noop_waker); @@ -948,7 +1000,12 @@ fn import_notification_to_pool_maintain_works() { // Prepare the extrisic, push it to the pool and check that it was added. let xt = uxt(Alice, 0); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + block_on(pool.submit_one( + pool.api().block_id_to_hash(&BlockId::Number(0)).unwrap().unwrap(), + SOURCE, + xt.clone(), + )) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let mut import_stream = block_on_stream(client.import_notification_stream()); @@ -973,7 +1030,8 @@ fn pruning_a_transaction_should_remove_it_from_best_transaction() { let xt1 = ExtrinsicBuilder::new_include_data(Vec::new()).build(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(api.expect_hash_from_number(0), SOURCE, xt1.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt1.clone()], true); @@ -997,8 +1055,12 @@ fn stale_transactions_are_pruned() { let (pool, api, _guard) = maintained_pool(); xts.into_iter().for_each(|xt| { - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.into_unchecked_extrinsic())) - .expect("1. Imported"); + block_on(pool.submit_one( + api.expect_hash_from_number(0), + SOURCE, + xt.into_unchecked_extrinsic(), + )) + .expect("1. Imported"); }); assert_eq!(pool.status().ready, 0); assert_eq!(pool.status().future, 3); @@ -1038,8 +1100,9 @@ fn finalized_only_handled_correctly() { let (pool, api, _guard) = maintained_pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) - .expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt], true); @@ -1066,8 +1129,9 @@ fn best_block_after_finalized_handled_correctly() { let (pool, api, _guard) = maintained_pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) - .expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(api.expect_hash_from_number(0), SOURCE, xt.clone())) + .expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); let header = api.push_block(1, vec![xt], true); @@ -1096,11 +1160,12 @@ fn switching_fork_with_finalized_works() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); let from_alice_watcher; let from_bob_watcher; @@ -1109,12 +1174,13 @@ fn switching_fork_with_finalized_works() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); b1_header = header; @@ -1122,10 +1188,13 @@ fn switching_fork_with_finalized_works() { // block B2 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = pool.api().push_block_with_parent( + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent( a_header.hash(), vec![from_alice.clone(), from_bob.clone()], true, @@ -1174,11 +1243,12 @@ fn switching_fork_multiple_times_works() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); let from_alice_watcher; let from_bob_watcher; @@ -1187,12 +1257,13 @@ fn switching_fork_multiple_times_works() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); b1_header = header; @@ -1200,10 +1271,13 @@ fn switching_fork_multiple_times_works() { // block B2 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = pool.api().push_block_with_parent( + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. 
Imported"); + let header = api.push_block_with_parent( a_header.hash(), vec![from_alice.clone(), from_bob.clone()], true, @@ -1223,14 +1297,14 @@ fn switching_fork_multiple_times_works() { { // phase-1 - let event = block_event_with_retracted(b2_header.clone(), b1_header.hash(), pool.api()); + let event = block_event_with_retracted(b2_header.clone(), b1_header.hash(), api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); } { // phase-2 - let event = block_event_with_retracted(b1_header.clone(), b2_header.hash(), pool.api()); + let event = block_event_with_retracted(b1_header.clone(), b2_header.hash(), api); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 1); } @@ -1282,13 +1356,14 @@ fn two_blocks_delayed_finalization_works() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); let from_charlie = uxt(Charlie, 3); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); - pool.api().increment_nonce(Charlie.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); + api.increment_nonce(Charlie.into()); let from_alice_watcher; let from_bob_watcher; @@ -1299,12 +1374,13 @@ fn two_blocks_delayed_finalization_works() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); @@ -1313,12 +1389,13 @@ fn two_blocks_delayed_finalization_works() { // block C1 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); assert_eq!(pool.status().ready, 2); log::trace!(target: LOG_TARGET, ">> C1: {:?} {:?}", header.hash(), header); @@ -1327,12 +1404,13 @@ fn two_blocks_delayed_finalization_works() { // block D1 { - from_charlie_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(c1_header.hash(), vec![from_charlie.clone()], true); + from_charlie_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_charlie.clone(), + )) + .expect("1. 
Imported"); + let header = api.push_block_with_parent(c1_header.hash(), vec![from_charlie.clone()], true); assert_eq!(pool.status().ready, 3); log::trace!(target: LOG_TARGET, ">> D1: {:?} {:?}", header.hash(), header); @@ -1398,11 +1476,12 @@ fn delayed_finalization_does_not_retract() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); let from_alice_watcher; let from_bob_watcher; @@ -1411,12 +1490,13 @@ fn delayed_finalization_does_not_retract() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); @@ -1425,12 +1505,13 @@ fn delayed_finalization_does_not_retract() { // block C1 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); assert_eq!(pool.status().ready, 2); log::trace!(target: LOG_TARGET, ">> C1: {:?} {:?}", header.hash(), header); @@ -1493,11 +1574,12 @@ fn best_block_after_finalization_does_not_retract() { let a_header = api.push_block(1, vec![], true); let pool = create_basic_pool(api); + let api = pool.api(); let from_alice = uxt(Alice, 1); let from_bob = uxt(Bob, 2); - pool.api().increment_nonce(Alice.into()); - pool.api().increment_nonce(Bob.into()); + api.increment_nonce(Alice.into()); + api.increment_nonce(Bob.into()); let from_alice_watcher; let from_bob_watcher; @@ -1506,12 +1588,13 @@ fn best_block_after_finalization_does_not_retract() { // block B1 { - from_alice_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) - .expect("1. Imported"); - let header = - pool.api() - .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + from_alice_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_alice.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); log::trace!(target: LOG_TARGET, ">> B1: {:?} {:?}", header.hash(), header); @@ -1520,12 +1603,13 @@ fn best_block_after_finalization_does_not_retract() { // block C1 { - from_bob_watcher = - block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) - .expect("1. 
Imported"); - let header = - pool.api() - .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + from_bob_watcher = block_on(pool.submit_and_watch( + api.expect_hash_from_number(1), + SOURCE, + from_bob.clone(), + )) + .expect("1. Imported"); + let header = api.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); assert_eq!(pool.status().ready, 2); log::trace!(target: LOG_TARGET, ">> C1: {:?} {:?}", header.hash(), header); diff --git a/substrate/docs/Upgrading-2.0-to-3.0.md b/substrate/docs/Upgrading-2.0-to-3.0.md index 58066ce074de63c83ea1a1def1273e67d79be1b4..3f2a3e7c5be22ee738321c6589bb4ce644a6c9d4 100644 --- a/substrate/docs/Upgrading-2.0-to-3.0.md +++ b/substrate/docs/Upgrading-2.0-to-3.0.md @@ -261,6 +261,7 @@ impl pallet_tips::Config for Runtime { type TipCountdown = TipCountdown; type TipFindersFee = TipFindersFee; type TipReportDepositBase = TipReportDepositBase; + type MaxTipAmount = MaxTipAmount; type WeightInfo = pallet_tips::weights::SubstrateWeight<Runtime>; } ``` diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index 9549fac9fe2b629ce3968b80c8d2e424ae252dd5..4b99cd517968d8ddcf336cb5ccb022b611d57a5a 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -590,7 +590,6 @@ impl<T: Config> Pallet<T> { if authorities.is_empty() { log::warn!(target: LOG_TARGET, "Ignoring empty epoch change."); - return } @@ -664,7 +663,7 @@ impl<T: Config> Pallet<T> { let next_randomness = NextRandomness::<T>::get(); let next_epoch = NextEpochDescriptor { - authorities: next_authorities.to_vec(), + authorities: next_authorities.into_inner(), randomness: next_randomness, }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); @@ -700,7 +699,7 @@ impl<T: Config> Pallet<T> { epoch_index: EpochIndex::<T>::get(), start_slot: Self::current_epoch_start(), duration: T::EpochDuration::get(), - authorities: Self::authorities().to_vec(), + authorities: Self::authorities().into_inner(), randomness: Self::randomness(), config: EpochConfig::<T>::get() .expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), @@ -725,7 +724,7 @@ impl<T: Config> Pallet<T> { epoch_index: next_epoch_index, start_slot, duration: T::EpochDuration::get(), - authorities: NextAuthorities::<T>::get().to_vec(), + authorities: NextAuthorities::<T>::get().into_inner(), randomness: NextRandomness::<T>::get(), config: NextEpochConfig::<T>::get().unwrap_or_else(|| { EpochConfig::<T>::get().expect( @@ -778,7 +777,7 @@ impl<T: Config> Pallet<T> { // we use the same values as genesis because we haven't collected any // randomness yet. 
let next = NextEpochDescriptor { - authorities: Self::authorities().to_vec(), + authorities: Self::authorities().into_inner(), randomness: Self::randomness(), }; diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index ae0c3e3873c5023f25c6ab6c6d3f1e4bdceb6d39..ec4e6fd972708a643d8daccbcf811866966fd37b 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -95,7 +95,7 @@ fn first_block_epoch_zero_start() { let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData( sp_consensus_babe::digests::NextEpochDescriptor { - authorities: Babe::authorities().to_vec(), + authorities: Babe::authorities().into_inner(), randomness: Babe::randomness(), }, ); diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index c64a35672c7f728b6c528ebb1a99646a6c631c75..c099fc48b7a3bceb94e8b026386c84aed4edf818 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -291,6 +291,14 @@ pub mod pallet { BountyCanceled { index: BountyIndex }, /// A bounty expiry is extended. BountyExtended { index: BountyIndex }, + /// A bounty is approved. + BountyApproved { index: BountyIndex }, + /// A bounty curator is proposed. + CuratorProposed { bounty_id: BountyIndex, curator: T::AccountId }, + /// A bounty curator is unassigned. + CuratorUnassigned { bounty_id: BountyIndex }, + /// A bounty curator is accepted. + CuratorAccepted { bounty_id: BountyIndex, curator: T::AccountId }, } /// Number of bounty proposals that have been made. @@ -375,10 +383,12 @@ pub mod pallet { Ok(()) })?; + + Self::deposit_event(Event::<T, I>::BountyApproved { index: bounty_id }); Ok(()) } - /// Assign a curator to a funded bounty. + /// Propose a curator to a funded bounty. /// /// May only be called from `T::SpendOrigin`. /// @@ -408,9 +418,11 @@ pub mod pallet { ensure!(fee < bounty.value, Error::<T, I>::InvalidFee); - bounty.status = BountyStatus::CuratorProposed { curator }; + bounty.status = BountyStatus::CuratorProposed { curator: curator.clone() }; bounty.fee = fee; + Self::deposit_event(Event::<T, I>::CuratorProposed { bounty_id, curator }); + Ok(()) })?; Ok(()) @@ -508,6 +520,8 @@ pub mod pallet { bounty.status = BountyStatus::Funded; Ok(()) })?; + + Self::deposit_event(Event::<T, I>::CuratorUnassigned { bounty_id }); Ok(()) } @@ -542,6 +556,10 @@ pub mod pallet { bounty.status = BountyStatus::Active { curator: curator.clone(), update_due }; + Self::deposit_event(Event::<T, I>::CuratorAccepted { + bounty_id, + curator: signer, + }); Ok(()) }, _ => Err(Error::<T, I>::UnexpectedStatus.into()), diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 3e3d6f37884c7a757285a79a22e79065b69fc5f7..2714620731201b225485805e67a5ccc431217a2c 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -328,6 +328,32 @@ impl<T: Config, const TEST_ALL_STEPS: bool> OnRuntimeUpgrade for Migration<T, TE ); Ok(Default::default()) } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> { + if !TEST_ALL_STEPS { + return Ok(()) + } + + log::info!(target: LOG_TARGET, "=== POST UPGRADE CHECKS ==="); + + // Ensure that the hashing algorithm is correct for each storage map. 
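+ // A single probe per map is enough here: if the hasher declared on a storage alias did not match the on-disk layout, the key decoded by `iter_keys` would not re-hash to an existing entry and the `expect` below would trip.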
+ if let Some(hash) = crate::CodeInfoOf::<T>::iter_keys().next() { + crate::CodeInfoOf::<T>::get(hash).expect("CodeInfo exists for hash; qed"); + } + if let Some(hash) = crate::PristineCode::<T>::iter_keys().next() { + crate::PristineCode::<T>::get(hash).expect("PristineCode exists for hash; qed"); + } + if let Some(account_id) = crate::ContractInfoOf::<T>::iter_keys().next() { + crate::ContractInfoOf::<T>::get(account_id) + .expect("ContractInfo exists for account_id; qed"); + } + if let Some(nonce) = crate::DeletionQueue::<T>::iter_keys().next() { + crate::DeletionQueue::<T>::get(nonce).expect("DeletionQueue exists for nonce; qed"); + } + + Ok(()) + } } /// The result of running the migration. diff --git a/substrate/frame/contracts/src/migration/v12.rs b/substrate/frame/contracts/src/migration/v12.rs index eb045aa42e9d70d1245384768ae2db62627757e1..4ddc57584b30eb74c0ed18f7da2989d3db2ff53c 100644 --- a/substrate/frame/contracts/src/migration/v12.rs +++ b/substrate/frame/contracts/src/migration/v12.rs @@ -96,7 +96,7 @@ where #[storage_alias] pub type CodeInfoOf<T: Config, OldCurrency> = - StorageMap<Pallet<T>, Twox64Concat, CodeHash<T>, CodeInfo<T, OldCurrency>>; + StorageMap<Pallet<T>, Identity, CodeHash<T>, CodeInfo<T, OldCurrency>>; #[storage_alias] pub type PristineCode<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, Vec<u8>>; diff --git a/substrate/frame/contracts/src/migration/v14.rs b/substrate/frame/contracts/src/migration/v14.rs index efb49dff4f10ac2b8a27ebae1f58541b6e7f793b..94534d05fdf889d05227149efd57ede584ecb013 100644 --- a/substrate/frame/contracts/src/migration/v14.rs +++ b/substrate/frame/contracts/src/migration/v14.rs @@ -70,7 +70,7 @@ mod old { #[storage_alias] pub type CodeInfoOf<T: Config, OldCurrency> = - StorageMap<Pallet<T>, Twox64Concat, CodeHash<T>, CodeInfo<T, OldCurrency>>; + StorageMap<Pallet<T>, Identity, CodeHash<T>, CodeInfo<T, OldCurrency>>; } #[cfg(feature = "runtime-benchmarks")] diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs index e4a21a4e1d9b8a1a82c7b45f96bba22264383ed5..b4aa17726b8d28869110f44acbd6d21895da5218 100644 --- a/substrate/frame/democracy/src/benchmarking.rs +++ b/substrate/frame/democracy/src/benchmarking.rs @@ -25,7 +25,6 @@ use frame_support::{ traits::{Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable}, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use sp_core::H256; use sp_runtime::{traits::Bounded, BoundedVec}; use crate::Pallet as Democracy; @@ -46,7 +45,7 @@ fn make_proposal<T: Config>(n: u32) -> BoundedCallOf<T> { <T as Config>::Preimages::bound(call).unwrap() } -fn add_proposal<T: Config>(n: u32) -> Result<H256, &'static str> { +fn add_proposal<T: Config>(n: u32) -> Result<T::Hash, &'static str> { let other = funded_account::<T>("proposer", n); let value = T::MinimumDeposit::get(); let proposal = make_proposal::<T>(n); @@ -55,7 +54,7 @@ fn add_proposal<T: Config>(n: u32) -> Result<H256, &'static str> { } // add a referendum with a metadata. -fn add_referendum<T: Config>(n: u32) -> (ReferendumIndex, H256, PreimageHash) { +fn add_referendum<T: Config>(n: u32) -> (ReferendumIndex, T::Hash, T::Hash) { let vote_threshold = VoteThreshold::SimpleMajority; let proposal = make_proposal::<T>(n); let hash = proposal.hash(); @@ -85,7 +84,7 @@ fn assert_has_event<T: Config>(generic_event: <T as Config>::RuntimeEvent) { } // note a new preimage. 
-fn note_preimage<T: Config>() -> PreimageHash { +fn note_preimage<T: Config>() -> T::Hash { use core::sync::atomic::{AtomicU8, Ordering}; use sp_std::borrow::Cow; // note a new preimage on every function invoke. diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index e538d31c6ad0312e9a81d155786fac16ab403c54..089556191cd14eee735d12976ba6b0ed471a4eac 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -159,9 +159,8 @@ use frame_support::{ traits::{ defensive_prelude::*, schedule::{v3::Named as ScheduleNamed, DispatchTime}, - Bounded, Currency, EnsureOrigin, Get, Hash as PreimageHash, LockIdentifier, - LockableCurrency, OnUnbalanced, QueryPreimage, ReservableCurrency, StorePreimage, - WithdrawReasons, + Bounded, Currency, EnsureOrigin, Get, LockIdentifier, LockableCurrency, OnUnbalanced, + QueryPreimage, ReservableCurrency, StorePreimage, WithdrawReasons, }, weights::Weight, }; @@ -203,7 +202,7 @@ type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency< <T as frame_system::Config>::AccountId, >>::NegativeImbalance; pub type CallOf<T> = <T as frame_system::Config>::RuntimeCall; -pub type BoundedCallOf<T> = Bounded<CallOf<T>>; +pub type BoundedCallOf<T> = Bounded<CallOf<T>, <T as frame_system::Config>::Hashing>; type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source; #[frame_support::pallet] @@ -211,7 +210,6 @@ pub mod pallet { use super::{DispatchResult, *}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use sp_core::H256; /// The current storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); @@ -226,10 +224,15 @@ pub mod pallet { type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>; /// The Scheduler. - type Scheduler: ScheduleNamed<BlockNumberFor<Self>, CallOf<Self>, Self::PalletsOrigin>; + type Scheduler: ScheduleNamed< + BlockNumberFor<Self>, + CallOf<Self>, + Self::PalletsOrigin, + Hasher = Self::Hashing, + >; /// The Preimage provider. - type Preimages: QueryPreimage + StorePreimage; + type Preimages: QueryPreimage<H = Self::Hashing> + StorePreimage; /// Currency type for this pallet. type Currency: ReservableCurrency<Self::AccountId> @@ -421,22 +424,22 @@ pub mod pallet { pub type Blacklist<T: Config> = StorageMap< _, Identity, - H256, + T::Hash, (BlockNumberFor<T>, BoundedVec<T::AccountId, T::MaxBlacklisted>), >; /// Record of all proposals that have been subject to emergency cancellation. #[pallet::storage] - pub type Cancellations<T: Config> = StorageMap<_, Identity, H256, bool, ValueQuery>; + pub type Cancellations<T: Config> = StorageMap<_, Identity, T::Hash, bool, ValueQuery>; /// General information concerning any proposal or referendum. - /// The `PreimageHash` refers to the preimage of the `Preimages` provider which can be a JSON + /// The `Hash` refers to the preimage of the `Preimages` provider which can be a JSON /// dump or IPFS hash of a JSON file. /// /// Consider a garbage collection for a metadata of finished referendums to `unrequest` (remove) /// large preimages. #[pallet::storage] - pub type MetadataOf<T: Config> = StorageMap<_, Blake2_128Concat, MetadataOwner, PreimageHash>; + pub type MetadataOf<T: Config> = StorageMap<_, Blake2_128Concat, MetadataOwner, T::Hash>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] @@ -476,9 +479,9 @@ pub mod pallet { /// An account has cancelled a previous delegation operation. 
Undelegated { account: T::AccountId }, /// An external proposal has been vetoed. - Vetoed { who: T::AccountId, proposal_hash: H256, until: BlockNumberFor<T> }, + Vetoed { who: T::AccountId, proposal_hash: T::Hash, until: BlockNumberFor<T> }, /// A proposal_hash has been blacklisted permanently. - Blacklisted { proposal_hash: H256 }, + Blacklisted { proposal_hash: T::Hash }, /// An account has voted in a referendum Voted { voter: T::AccountId, ref_index: ReferendumIndex, vote: AccountVote<BalanceOf<T>> }, /// An account has seconded a proposal @@ -490,14 +493,14 @@ /// Metadata owner. owner: MetadataOwner, /// Preimage hash. - hash: PreimageHash, + hash: T::Hash, }, /// Metadata for a proposal or a referendum has been cleared. MetadataCleared { /// Metadata owner. owner: MetadataOwner, /// Preimage hash. - hash: PreimageHash, + hash: T::Hash, }, /// Metadata has been transferred to new owner. MetadataTransferred { @@ -506,7 +509,7 @@ /// New metadata owner. owner: MetadataOwner, /// Preimage hash. - hash: PreimageHash, + hash: T::Hash, }, } @@ -775,7 +778,7 @@ #[pallet::weight(T::WeightInfo::fast_track())] pub fn fast_track( origin: OriginFor<T>, - proposal_hash: H256, + proposal_hash: T::Hash, voting_period: BlockNumberFor<T>, delay: BlockNumberFor<T>, ) -> DispatchResult { @@ -827,7 +830,7 @@ /// Weight: `O(V + log(V))` where V is number of `existing vetoers` #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::veto_external())] - pub fn veto_external(origin: OriginFor<T>, proposal_hash: H256) -> DispatchResult { + pub fn veto_external(origin: OriginFor<T>, proposal_hash: T::Hash) -> DispatchResult { let who = T::VetoOrigin::ensure_origin(origin)?; if let Some((ext_proposal, _)) = NextExternal::<T>::get() { @@ -1042,7 +1045,7 @@ #[pallet::weight((T::WeightInfo::blacklist(), DispatchClass::Operational))] pub fn blacklist( origin: OriginFor<T>, - proposal_hash: H256, + proposal_hash: T::Hash, maybe_ref_index: Option<ReferendumIndex>, ) -> DispatchResult { T::BlacklistOrigin::ensure_origin(origin)?; @@ -1139,7 +1142,7 @@ pub fn set_metadata( origin: OriginFor<T>, owner: MetadataOwner, - maybe_hash: Option<PreimageHash>, + maybe_hash: Option<T::Hash>, ) -> DispatchResult { match owner { MetadataOwner::External => { @@ -1173,15 +1176,16 @@ } pub trait EncodeInto: Encode { - fn encode_into<T: AsMut<[u8]> + Default>(&self) -> T { + fn encode_into<T: AsMut<[u8]> + Default, H: sp_core::Hasher>(&self) -> T { let mut t = T::default(); self.using_encoded(|data| { if data.len() <= t.as_mut().len() { t.as_mut()[0..data.len()].copy_from_slice(data); } else { - // encoded self is too big to fit into a T. hash it and use the first bytes of that - // instead. - let hash = sp_io::hashing::blake2_256(data); + // encoded self is too big to fit into a T. + // hash it and use the first bytes of that instead. + let hash = H::hash(&data); + let hash = hash.as_ref(); let l = t.as_mut().len().min(hash.len()); t.as_mut()[0..l].copy_from_slice(&hash[0..l]); } @@ -1610,7 +1614,7 @@ impl<T: Config> Pallet<T> { // Earliest it can be scheduled for is next block. 
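+ // The scheduler id built by `encode_into` below now hashes with the runtime's own `T::Hashing` whenever the encoded `(DEMOCRACY_ID, index)` tuple overflows the fixed-size id, rather than always falling back to blake2-256.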
let when = now.saturating_add(status.delay.max(One::one())); if T::Scheduler::schedule_named( - (DEMOCRACY_ID, index).encode_into(), + (DEMOCRACY_ID, index).encode_into::<_, T::Hashing>(), DispatchTime::At(when), None, 63, diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index 9b885fb5915b7a3d92f28d7a7220f5c5ddf7d7f5..ebccf32e342377a56999c49b6cdd6fa95504b943 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -277,7 +277,7 @@ fn tally(r: ReferendumIndex) -> Tally<u64> { } /// note a new preimage without registering. -fn note_preimage(who: u64) -> PreimageHash { +fn note_preimage(who: u64) -> <Test as frame_system::Config>::Hash { use std::sync::atomic::{AtomicU8, Ordering}; // note a new preimage on every function invoke. static COUNTER: AtomicU8 = AtomicU8::new(0); diff --git a/substrate/frame/democracy/src/tests/metadata.rs b/substrate/frame/democracy/src/tests/metadata.rs index 5a36d80b72637dbae4d931016df11484f2228256..1b6d66a8bc4486c555a5f0e1241f76280b33eeb7 100644 --- a/substrate/frame/democracy/src/tests/metadata.rs +++ b/substrate/frame/democracy/src/tests/metadata.rs @@ -22,9 +22,8 @@ use super::*; #[test] fn set_external_metadata_works() { new_test_ext().execute_with(|| { - use frame_support::traits::Hash as PreimageHash; // invalid preimage hash. - let invalid_hash: PreimageHash = [1u8; 32].into(); + let invalid_hash: <Test as frame_system::Config>::Hash = [1u8; 32].into(); // metadata owner is an external proposal. let owner = MetadataOwner::External; // fails to set metadata if an external proposal does not exist. @@ -83,9 +82,8 @@ fn clear_metadata_works() { #[test] fn set_proposal_metadata_works() { new_test_ext().execute_with(|| { - use frame_support::traits::Hash as PreimageHash; // invalid preimage hash. - let invalid_hash: PreimageHash = [1u8; 32].into(); + let invalid_hash: <Test as frame_system::Config>::Hash = [1u8; 32].into(); // create an external proposal. assert_ok!(propose_set_balance(1, 2, 5)); // metadata owner is a public proposal. diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 8b6e0827c715834de9101dd1f061c9d362b1705c..05f9b24f8f9c53f833b5de3504645c40c1df6a6d 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -2365,7 +2365,7 @@ mod tests { assert_eq!(MultiPhase::desired_targets().unwrap(), 2); // mine seq_phragmen solution with 2 iters. - let (solution, witness) = MultiPhase::mine_solution().unwrap(); + let (solution, witness, _) = MultiPhase::mine_solution().unwrap(); // ensure this solution is valid. assert!(MultiPhase::queued_solution().is_none()); @@ -2647,7 +2647,7 @@ mod tests { // set the solution balancing to get the desired score. crate::mock::Balancing::set(Some(BalancingConfig { iterations: 2, tolerance: 0 })); - let (solution, _) = MultiPhase::mine_solution().unwrap(); + let (solution, _, _) = MultiPhase::mine_solution().unwrap(); // Default solution's score. assert!(matches!(solution.score, ElectionScore { minimal_stake: 50, .. 
})); diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs index a5fe8ce55582064078882ff125e3f945ac1b895c..7e4b029ff8c80cff6a57c0868e335d11aa4a2cc6 100644 --- a/substrate/frame/election-provider-multi-phase/src/signed.rs +++ b/substrate/frame/election-provider-multi-phase/src/signed.rs @@ -1348,7 +1348,7 @@ mod tests { roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); - let (raw, witness) = MultiPhase::mine_solution().unwrap(); + let (raw, witness, _) = MultiPhase::mine_solution().unwrap(); let solution_weight = <Runtime as MinerConfig>::solution_weight( witness.voters, witness.targets, diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index f1c9e92a704e926bc5e2ccf1e18ec590f1c69fd6..e3d0ded97515b9cbac011956afebbc5d4f15e5d6 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -108,6 +108,27 @@ impl From<FeasibilityError> for MinerError { } } +/// Reports the trimming result of a mined solution +#[derive(Debug, Clone)] +pub struct TrimmingStatus { + weight: usize, + length: usize, +} + +impl TrimmingStatus { + pub fn is_trimmed(&self) -> bool { + self.weight > 0 || self.length > 0 + } + + pub fn trimmed_weight(&self) -> usize { + self.weight + } + + pub fn trimmed_length(&self) -> usize { + self.length + } +} + /// Save a given call into OCW storage. fn save_solution<T: Config>(call: &Call<T>) -> Result<(), MinerError> { log!(debug, "saving a call to the offchain storage."); @@ -162,16 +183,21 @@ impl<T: Config> Pallet<T> { /// /// The Npos Solver type, `S`, must have the same AccountId and Error type as the /// [`crate::Config::Solver`] in order to create a unified return type. - pub fn mine_solution( - ) -> Result<(RawSolution<SolutionOf<T::MinerConfig>>, SolutionOrSnapshotSize), MinerError> { + pub fn mine_solution() -> Result< + (RawSolution<SolutionOf<T::MinerConfig>>, SolutionOrSnapshotSize, TrimmingStatus), + MinerError, + > { let RoundSnapshot { voters, targets } = Self::snapshot().ok_or(MinerError::SnapshotUnAvailable)?; let desired_targets = Self::desired_targets().ok_or(MinerError::SnapshotUnAvailable)?; - let (solution, score, size) = Miner::<T::MinerConfig>::mine_solution_with_snapshot::< - T::Solver, - >(voters, targets, desired_targets)?; + let (solution, score, size, is_trimmed) = + Miner::<T::MinerConfig>::mine_solution_with_snapshot::<T::Solver>( + voters, + targets, + desired_targets, + )?; let round = Self::round(); - Ok((RawSolution { solution, score, round }, size)) + Ok((RawSolution { solution, score, round }, size, is_trimmed)) } /// Attempt to restore a solution from cache. Otherwise, compute it fresh. Either way, submit @@ -232,7 +258,7 @@ impl<T: Config> Pallet<T> { /// Mine a new solution as a call. Performs all checks. pub fn mine_checked_call() -> Result<Call<T>, MinerError> { // get the solution, with a load of checks to ensure if submitted, IT IS ABSOLUTELY VALID. - let (raw_solution, witness) = Self::mine_and_check()?; + let (raw_solution, witness, _) = Self::mine_and_check()?; let score = raw_solution.score; let call: Call<T> = Call::submit_unsigned { raw_solution: Box::new(raw_solution), witness }; @@ -282,11 +308,13 @@ impl<T: Config> Pallet<T> { /// If you want an unchecked solution, use [`Pallet::mine_solution`]. 
/// If you want a checked solution and submit it at the same time, use /// [`Pallet::mine_check_save_submit`]. - pub fn mine_and_check( - ) -> Result<(RawSolution<SolutionOf<T::MinerConfig>>, SolutionOrSnapshotSize), MinerError> { - let (raw_solution, witness) = Self::mine_solution()?; + pub fn mine_and_check() -> Result< + (RawSolution<SolutionOf<T::MinerConfig>>, SolutionOrSnapshotSize, TrimmingStatus), + MinerError, + > { + let (raw_solution, witness, is_trimmed) = Self::mine_solution()?; Self::basic_checks(&raw_solution, "mined")?; - Ok((raw_solution, witness)) + Ok((raw_solution, witness, is_trimmed)) } /// Checks if an execution of the offchain worker is permitted at the given block number, or @@ -408,7 +436,7 @@ impl<T: MinerConfig> Miner<T> { voters: Vec<(T::AccountId, VoteWeight, BoundedVec<T::AccountId, T::MaxVotesPerVoter>)>, targets: Vec<T::AccountId>, desired_targets: u32, - ) -> Result<(SolutionOf<T>, ElectionScore, SolutionOrSnapshotSize), MinerError> + ) -> Result<(SolutionOf<T>, ElectionScore, SolutionOrSnapshotSize, TrimmingStatus), MinerError> where S: NposSolver<AccountId = T::AccountId>, { @@ -436,7 +464,8 @@ impl<T: MinerConfig> Miner<T> { voters: Vec<(T::AccountId, VoteWeight, BoundedVec<T::AccountId, T::MaxVotesPerVoter>)>, targets: Vec<T::AccountId>, desired_targets: u32, - ) -> Result<(SolutionOf<T>, ElectionScore, SolutionOrSnapshotSize), MinerError> { + ) -> Result<(SolutionOf<T>, ElectionScore, SolutionOrSnapshotSize, TrimmingStatus), MinerError> + { // now make some helper closures. let cache = helpers::generate_voter_cache::<T>(&voters); let voter_index = helpers::voter_index_fn::<T>(&cache); @@ -495,13 +524,13 @@ impl<T: MinerConfig> Miner<T> { // trim assignments list for weight and length. let size = SolutionOrSnapshotSize { voters: voters.len() as u32, targets: targets.len() as u32 }; - Self::trim_assignments_weight( + let weight_trimmed = Self::trim_assignments_weight( desired_targets, size, T::MaxWeight::get(), &mut index_assignments, ); - Self::trim_assignments_length( + let length_trimmed = Self::trim_assignments_length( T::MaxLength::get(), &mut index_assignments, &encoded_size_of, @@ -513,7 +542,9 @@ impl<T: MinerConfig> Miner<T> { // re-calc score. let score = solution.clone().score(stake_of, voter_at, target_at)?; - Ok((solution, score, size)) + let is_trimmed = TrimmingStatus { weight: weight_trimmed, length: length_trimmed }; + + Ok((solution, score, size, is_trimmed)) } /// Greedily reduce the size of the solution to fit into the block w.r.t length. @@ -534,7 +565,7 @@ max_allowed_length: u32, assignments: &mut Vec<IndexAssignmentOf<T>>, encoded_size_of: impl Fn(&[IndexAssignmentOf<T>]) -> Result<usize, sp_npos_elections::Error>, - ) -> Result<(), MinerError> { + ) -> Result<usize, MinerError> { // Perform a binary search for the max subset which can fit into the allowed // length. Having discovered that, we can truncate efficiently. let max_allowed_length: usize = max_allowed_length.saturated_into(); @@ -543,7 +574,7 @@ // not much we can do if assignments are already empty. if high == low { - return Ok(()) + return Ok(0) } while high - low > 1 { @@ -577,16 +608,18 @@ // after this point, we never error. // check before edit. 
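+ // Record how many assignments the truncation below will drop; the caller folds this count into the returned `TrimmingStatus`.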
+ let remove = assignments.len().saturating_sub(maximum_allowed_voters); + log_no_system!( debug, "from {} assignments, truncating to {} for length, removing {}", assignments.len(), maximum_allowed_voters, - assignments.len().saturating_sub(maximum_allowed_voters), + remove ); assignments.truncate(maximum_allowed_voters); - Ok(()) + Ok(remove) } /// Greedily reduce the size of the solution to fit into the block w.r.t. weight. @@ -609,7 +642,7 @@ impl<T: MinerConfig> Miner<T> { size: SolutionOrSnapshotSize, max_weight: Weight, assignments: &mut Vec<IndexAssignmentOf<T>>, - ) { + ) -> usize { let maximum_allowed_voters = Self::maximum_voter_for_weight(desired_targets, size, max_weight); let removing: usize = @@ -622,6 +655,8 @@ impl<T: MinerConfig> Miner<T> { removing, ); assignments.truncate(maximum_allowed_voters as usize); + + removing } /// Find the maximum `len` that a solution can have in order to fit into the block weight. @@ -1230,7 +1265,7 @@ mod tests { assert_eq!(MultiPhase::desired_targets().unwrap(), 2); // mine seq_phragmen solution with 2 iters. - let (solution, witness) = MultiPhase::mine_solution().unwrap(); + let (solution, witness, _) = MultiPhase::mine_solution().unwrap(); // ensure this solution is valid. assert!(MultiPhase::queued_solution().is_none()); @@ -1268,7 +1303,7 @@ mod tests { roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); - let (raw, witness) = MultiPhase::mine_solution().unwrap(); + let (raw, witness, t) = MultiPhase::mine_solution().unwrap(); let solution_weight = <Runtime as MinerConfig>::solution_weight( witness.voters, witness.targets, @@ -1278,11 +1313,12 @@ mod tests { // default solution will have 5 edges (5 * 5 + 10) assert_eq!(solution_weight, Weight::from_parts(35, 0)); assert_eq!(raw.solution.voter_count(), 5); + assert_eq!(t.trimmed_weight(), 0); // now reduce the max weight <MinerMaxWeight>::set(Weight::from_parts(25, u64::MAX)); - let (raw, witness) = MultiPhase::mine_solution().unwrap(); + let (raw, witness, t) = MultiPhase::mine_solution().unwrap(); let solution_weight = <Runtime as MinerConfig>::solution_weight( witness.voters, witness.targets, @@ -1292,6 +1328,7 @@ mod tests { // default solution will have 5 edges (5 * 5 + 10) assert_eq!(solution_weight, Weight::from_parts(25, 0)); assert_eq!(raw.solution.voter_count(), 3); + assert_eq!(t.trimmed_weight(), 2); }) } @@ -1303,7 +1340,7 @@ mod tests { assert!(MultiPhase::current_phase().is_unsigned()); // Force the number of winners to be bigger to fail - let (mut solution, _) = MultiPhase::mine_solution().unwrap(); + let (mut solution, _, _) = MultiPhase::mine_solution().unwrap(); solution.solution.votes1[0].1 = 4; assert_eq!( @@ -1342,7 +1379,7 @@ mod tests { let RoundSnapshot { voters, targets } = MultiPhase::snapshot().unwrap(); let desired_targets = MultiPhase::desired_targets().unwrap(); - let (raw, score, witness) = + let (raw, score, witness, _) = Miner::<Runtime>::prepare_election_result_with_snapshot( result, voters.clone(), @@ -1371,7 +1408,7 @@ mod tests { }, ], }; - let (raw, score, _) = Miner::<Runtime>::prepare_election_result_with_snapshot( + let (raw, score, _, _) = Miner::<Runtime>::prepare_election_result_with_snapshot( result, voters.clone(), targets.clone(), @@ -1400,7 +1437,7 @@ mod tests { }, ], }; - let (raw, score, witness) = + let (raw, score, witness, _) = Miner::<Runtime>::prepare_election_result_with_snapshot( result, voters.clone(), @@ -1769,7 +1806,7 @@ mod tests { let solution_clone = solution.clone(); // when - 
Miner::<Runtime>::trim_assignments_length( + let trimmed_len = Miner::<Runtime>::trim_assignments_length( encoded_len, &mut assignments, encoded_size_of, @@ -1779,6 +1816,7 @@ mod tests { // then let solution = SolutionOf::<Runtime>::try_from(assignments.as_slice()).unwrap(); assert_eq!(solution, solution_clone); + assert_eq!(trimmed_len, 0); }); } @@ -1794,7 +1832,7 @@ mod tests { let solution_clone = solution.clone(); // when - Miner::<Runtime>::trim_assignments_length( + let trimmed_len = Miner::<Runtime>::trim_assignments_length( encoded_len as u32 - 1, &mut assignments, encoded_size_of, @@ -1805,6 +1843,7 @@ mod tests { let solution = SolutionOf::<Runtime>::try_from(assignments.as_slice()).unwrap(); assert_ne!(solution, solution_clone); assert!(solution.encoded_size() < encoded_len); + assert_eq!(trimmed_len, 1); }); } diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 2a0e707ac4148c81777a99014fb907a23ee0ca67..95d1c8aa609495f567e23a0650365c924bad040b 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -129,18 +129,16 @@ pub mod pallet { if let Some(pending_change) = <PendingChange<T>>::get() { // emit signal if we're at the block that scheduled the change if block_number == pending_change.scheduled_at { + let next_authorities = pending_change.next_authorities.to_vec(); if let Some(median) = pending_change.forced { Self::deposit_log(ConsensusLog::ForcedChange( median, - ScheduledChange { - delay: pending_change.delay, - next_authorities: pending_change.next_authorities.to_vec(), - }, + ScheduledChange { delay: pending_change.delay, next_authorities }, )) } else { Self::deposit_log(ConsensusLog::ScheduledChange(ScheduledChange { delay: pending_change.delay, - next_authorities: pending_change.next_authorities.to_vec(), + next_authorities, })); } } @@ -149,7 +147,7 @@ pub mod pallet { if block_number == pending_change.scheduled_at + pending_change.delay { Self::set_grandpa_authorities(&pending_change.next_authorities); Self::deposit_event(Event::NewAuthorities { - authority_set: pending_change.next_authorities.to_vec(), + authority_set: pending_change.next_authorities.into_inner(), }); <PendingChange<T>>::kill(); } diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index 4dda0207e62c816882b0e4ffcc38476452c274ea..e344bdfe2d8feff5a25ed686d7301f462fa41189 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -49,8 +49,8 @@ use frame_support::{ ensure, pallet_prelude::Get, traits::{ - Consideration, Currency, Defensive, FetchResult, Footprint, Hash as PreimageHash, - PreimageProvider, PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, + Consideration, Currency, Defensive, FetchResult, Footprint, PreimageProvider, + PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, }, BoundedSlice, BoundedVec, }; @@ -531,7 +531,9 @@ impl<T: Config> PreimageRecipient<T::Hash> for Pallet<T> { } } -impl<T: Config<Hash = PreimageHash>> QueryPreimage for Pallet<T> { +impl<T: Config> QueryPreimage for Pallet<T> { + type H = T::Hashing; + fn len(hash: &T::Hash) -> Option<u32> { Pallet::<T>::len(hash) } @@ -555,7 +557,7 @@ impl<T: Config<Hash = PreimageHash>> QueryPreimage for Pallet<T> { } } -impl<T: Config<Hash = PreimageHash>> StorePreimage for Pallet<T> { +impl<T: Config> StorePreimage for Pallet<T> { const MAX_LENGTH: usize = MAX_SIZE as usize; fn note(bytes: Cow<[u8]>) -> Result<T::Hash, DispatchError> { diff --git 
a/substrate/frame/preimage/src/tests.rs b/substrate/frame/preimage/src/tests.rs index a473a0ae8e25f51aeb5d825eae497bba68cfc4b2..7609ec83e9039a66ce22b1fbcd4c49a591c86187 100644 --- a/substrate/frame/preimage/src/tests.rs +++ b/substrate/frame/preimage/src/tests.rs @@ -24,25 +24,32 @@ use crate::mock::*; use frame_support::{ assert_err, assert_noop, assert_ok, assert_storage_noop, - traits::{fungible::InspectHold, Bounded, BoundedInline, Hash as PreimageHash}, + traits::{fungible::InspectHold, Bounded, BoundedInline}, StorageNoopGuard, }; -use sp_core::{blake2_256, H256}; use sp_runtime::{bounded_vec, TokenError}; /// Returns one `Inline`, `Lookup` and `Legacy` item each with different data and hash. -pub fn make_bounded_values() -> (Bounded<Vec<u8>>, Bounded<Vec<u8>>, Bounded<Vec<u8>>) { +pub fn make_bounded_values() -> ( + Bounded<Vec<u8>, <Test as frame_system::Config>::Hashing>, + Bounded<Vec<u8>, <Test as frame_system::Config>::Hashing>, + Bounded<Vec<u8>, <Test as frame_system::Config>::Hashing>, +) { let data: BoundedInline = bounded_vec![1]; - let inline = Bounded::<Vec<u8>>::Inline(data); + let inline = Bounded::<Vec<u8>, <Test as frame_system::Config>::Hashing>::Inline(data); let data = vec![1, 2]; - let hash: H256 = blake2_256(&data[..]).into(); + let hash = <Test as frame_system::Config>::Hashing::hash(&data[..]).into(); let len = data.len() as u32; - let lookup = Bounded::<Vec<u8>>::unrequested(hash, len); + let lookup = + Bounded::<Vec<u8>, <Test as frame_system::Config>::Hashing>::unrequested(hash, len); let data = vec![1, 2, 3]; - let hash: H256 = blake2_256(&data[..]).into(); - let legacy = Bounded::<Vec<u8>>::Legacy { hash, dummy: Default::default() }; + let hash = <Test as frame_system::Config>::Hashing::hash(&data[..]).into(); + let legacy = Bounded::<Vec<u8>, <Test as frame_system::Config>::Hashing>::Legacy { + hash, + dummy: Default::default(), + }; (inline, lookup, legacy) } @@ -303,7 +310,7 @@ fn query_and_store_preimage_workflow() { let bound = Preimage::bound(data.clone()).unwrap(); let (len, hash) = (bound.len().unwrap(), bound.hash()); - assert_eq!(hash, blake2_256(&encoded).into()); + assert_eq!(hash, <Test as frame_system::Config>::Hashing::hash(&encoded).into()); assert_eq!(bound.len(), Some(len)); assert!(bound.lookup_needed(), "Should not be Inlined"); assert_eq!(bound.lookup_len(), Some(len)); @@ -364,7 +371,7 @@ fn query_preimage_request_works() { new_test_ext().execute_with(|| { let _guard = StorageNoopGuard::default(); let data: Vec<u8> = vec![1; 10]; - let hash: PreimageHash = blake2_256(&data[..]).into(); + let hash = <Test as frame_system::Config>::Hashing::hash(&data[..]).into(); // Request the preimage. <Preimage as QueryPreimage>::request(&hash); @@ -454,7 +461,7 @@ fn store_preimage_basic_works() { // Cleanup. <Preimage as StorePreimage>::unnote(&bound.hash()); - let data_hash = blake2_256(&data); + let data_hash = <Test as frame_system::Config>::Hashing::hash(&data); <Preimage as StorePreimage>::unnote(&data_hash.into()); // No storage changes remain. Checked by `StorageNoopGuard`. 
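The updated `make_bounded_values` helper above illustrates the migration pattern for hasher-generic preimages. A condensed sketch building the three `Bounded` variants against an explicit `BlakeTwo256` (assuming `frame-support` and `sp-runtime` as dependencies; a pallet would use `<T as frame_system::Config>::Hashing` instead of naming the hasher):

```rust
use frame_support::traits::{Bounded, BoundedInline};
use sp_runtime::{
    bounded_vec,
    traits::{BlakeTwo256, Hash},
};

fn main() {
    // Inline: the encoded value itself, capped at 128 bytes; no lookup needed.
    let data: BoundedInline = bounded_vec![1];
    let inline = Bounded::<Vec<u8>, BlakeTwo256>::Inline(data);
    assert!(!inline.lookup_needed());

    // Lookup: a hash plus an upper bound on the preimage length. The hash now
    // comes from the chosen hasher instead of a hard-coded `blake2_256`.
    let data = vec![1, 2];
    let hash = BlakeTwo256::hash(&data[..]);
    let lookup = Bounded::<Vec<u8>, BlakeTwo256>::unrequested(hash, data.len() as u32);
    assert_eq!(lookup.lookup_hash(), Some(hash));

    // Legacy: hash-only, kept solely for transitioning old state.
    let legacy = Bounded::<Vec<u8>, BlakeTwo256>::Legacy { hash, dummy: Default::default() };
    assert_eq!(legacy.len(), None);
}
```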
diff --git a/substrate/frame/referenda/src/benchmarking.rs b/substrate/frame/referenda/src/benchmarking.rs index e884a0bb6ec921edf3f3ff93818482941ee1abea..47d43cc0600c076b6fce0e541e0f78c97649921e 100644 --- a/substrate/frame/referenda/src/benchmarking.rs +++ b/substrate/frame/referenda/src/benchmarking.rs @@ -25,7 +25,7 @@ use frame_benchmarking::v1::{ }; use frame_support::{ assert_ok, - traits::{Bounded, Currency, EnsureOrigin, EnsureOriginWithArg, UnfilteredDispatchable}, + traits::{Currency, EnsureOrigin, EnsureOriginWithArg, UnfilteredDispatchable}, }; use frame_system::RawOrigin; use sp_runtime::traits::Bounded as ArithBounded; @@ -42,7 +42,7 @@ fn funded_account<T: Config<I>, I: 'static>(name: &'static str, index: u32) -> T caller } -fn dummy_call<T: Config<I>, I: 'static>() -> Bounded<<T as Config<I>>::RuntimeCall> { +fn dummy_call<T: Config<I>, I: 'static>() -> BoundedCallOf<T, I> { let inner = frame_system::Call::remark { remark: vec![] }; let call = <T as Config<I>>::RuntimeCall::from(inner); T::Preimages::bound(call).unwrap() diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index d4dbbf8a3c998e845165fb7a3d2fa7132c1824d4..be21375f526f034d072318afefca1f99d0e99b3d 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -73,8 +73,8 @@ use frame_support::{ v3::{Anon as ScheduleAnon, Named as ScheduleNamed}, DispatchTime, }, - Currency, Hash as PreimageHash, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, - Polling, QueryPreimage, ReservableCurrency, StorePreimage, VoteTally, + Currency, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, QueryPreimage, + ReservableCurrency, StorePreimage, VoteTally, }, BoundedVec, }; @@ -163,8 +163,17 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The Scheduler. - type Scheduler: ScheduleAnon<BlockNumberFor<Self>, CallOf<Self, I>, PalletsOriginOf<Self>> - + ScheduleNamed<BlockNumberFor<Self>, CallOf<Self, I>, PalletsOriginOf<Self>>; + type Scheduler: ScheduleAnon< + BlockNumberFor<Self>, + CallOf<Self, I>, + PalletsOriginOf<Self>, + Hasher = Self::Hashing, + > + ScheduleNamed< + BlockNumberFor<Self>, + CallOf<Self, I>, + PalletsOriginOf<Self>, + Hasher = Self::Hashing, + >; /// Currency type for this pallet. type Currency: ReservableCurrency<Self::AccountId>; // Origins and unbalances. @@ -226,7 +235,7 @@ pub mod pallet { >; /// The preimage provider. - type Preimages: QueryPreimage + StorePreimage; + type Preimages: QueryPreimage<H = Self::Hashing> + StorePreimage; } /// The next free referendum index, aka the number of referenda started so far. @@ -257,14 +266,14 @@ pub mod pallet { StorageMap<_, Twox64Concat, TrackIdOf<T, I>, u32, ValueQuery>; /// The metadata is a general information concerning the referendum. - /// The `PreimageHash` refers to the preimage of the `Preimages` provider which can be a JSON + /// The `Hash` refers to the preimage of the `Preimages` provider which can be a JSON /// dump or IPFS hash of a JSON file. /// /// Consider a garbage collection for a metadata of finished referendums to `unrequest` (remove) /// large preimages. #[pallet::storage] pub type MetadataOf<T: Config<I>, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, ReferendumIndex, PreimageHash>; + StorageMap<_, Blake2_128Concat, ReferendumIndex, T::Hash>; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] @@ -376,14 +385,14 @@ pub mod pallet { /// Index of the referendum. 
index: ReferendumIndex, /// Preimage hash. - hash: PreimageHash, + hash: T::Hash, }, /// Metadata for a referendum has been cleared. MetadataCleared { /// Index of the referendum. index: ReferendumIndex, /// Preimage hash. - hash: PreimageHash, + hash: T::Hash, }, } @@ -691,7 +700,7 @@ pub mod pallet { pub fn set_metadata( origin: OriginFor<T>, index: ReferendumIndex, - maybe_hash: Option<PreimageHash>, + maybe_hash: Option<T::Hash>, ) -> DispatchResult { let who = ensure_signed(origin)?; if let Some(hash) = maybe_hash { diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index c23fa4609d4340dc92e712a439ab07b8b4ab7605..dce91f7dbfd780fbbac5d53afd0d58249853713a 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -473,7 +473,7 @@ impl RefState { } /// note a new preimage without registering. -pub fn note_preimage(who: u64) -> PreimageHash { +pub fn note_preimage(who: u64) -> <Test as frame_system::Config>::Hash { use std::sync::atomic::{AtomicU8, Ordering}; // note a new preimage on every function invoke. static COUNTER: AtomicU8 = AtomicU8::new(0); diff --git a/substrate/frame/referenda/src/tests.rs b/substrate/frame/referenda/src/tests.rs index d748c524605a839b5d9f5b4804cf0d799452bc0c..8f51136de0bfdb48df1a43e47dd0de63f7a0e21d 100644 --- a/substrate/frame/referenda/src/tests.rs +++ b/substrate/frame/referenda/src/tests.rs @@ -599,9 +599,8 @@ fn curve_handles_all_inputs() { #[test] fn set_metadata_works() { ExtBuilder::default().build_and_execute(|| { - use frame_support::traits::Hash as PreimageHash; // invalid preimage hash. - let invalid_hash: PreimageHash = [1u8; 32].into(); + let invalid_hash: <Test as frame_system::Config>::Hash = [1u8; 32].into(); // fails to set metadata for a finished referendum. 
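The `Scheduler` bound in the referenda `Config` above relies on associated-type equality constraints (`Hasher = Self::Hashing`) to force both scheduling traits to agree with the runtime's hasher at compile time. A toy, compilable sketch of that pattern; every trait and type name here is hypothetical:

```rust
// Toy model of the bound `type Scheduler: ScheduleAnon<..., Hasher = Self::Hashing>
// + ScheduleNamed<..., Hasher = Self::Hashing>`; all names are hypothetical.
trait Hasher {
    type Out;
}

struct Blake2;
impl Hasher for Blake2 {
    type Out = [u8; 32];
}

trait ScheduleAnon {
    type Hasher: Hasher;
}
trait ScheduleNamed {
    type Hasher: Hasher;
}

// Equality constraints on the associated types make the compiler reject any
// scheduler whose hasher differs from the runtime's `Hashing`.
trait Config {
    type Hashing: Hasher;
    type Scheduler: ScheduleAnon<Hasher = Self::Hashing>
        + ScheduleNamed<Hasher = Self::Hashing>;
}

struct MyScheduler;
impl ScheduleAnon for MyScheduler {
    type Hasher = Blake2;
}
impl ScheduleNamed for MyScheduler {
    type Hasher = Blake2;
}

struct Runtime;
impl Config for Runtime {
    type Hashing = Blake2;
    // Compiles only because `MyScheduler`'s hashers match `Blake2`.
    type Scheduler = MyScheduler;
}

fn main() {}
```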
assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), diff --git a/substrate/frame/referenda/src/types.rs b/substrate/frame/referenda/src/types.rs index ba89383888a7d7062267875095bbe439c01bb36b..8d6a13ef27c981b3996425b18062e2ef04a2f68b 100644 --- a/substrate/frame/referenda/src/types.rs +++ b/substrate/frame/referenda/src/types.rs @@ -34,7 +34,8 @@ pub type NegativeImbalanceOf<T, I> = <<T as Config<I>>::Currency as Currency< <T as frame_system::Config>::AccountId, >>::NegativeImbalance; pub type CallOf<T, I> = <T as Config<I>>::RuntimeCall; -pub type BoundedCallOf<T, I> = Bounded<<T as Config<I>>::RuntimeCall>; +pub type BoundedCallOf<T, I> = + Bounded<<T as Config<I>>::RuntimeCall, <T as frame_system::Config>::Hashing>; pub type VotesOf<T, I> = <T as Config<I>>::Votes; pub type TallyOf<T, I> = <T as Config<I>>::Tally; pub type PalletsOriginOf<T> = diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index 341a19cdb230223536621db424f0e300399279e8..cc86a17973780870970a7b604b4fd5d30cffe263 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -82,13 +82,13 @@ fn make_task<T: Config>( Scheduled { maybe_id, priority, call, maybe_periodic, origin, _phantom: PhantomData } } -fn bounded<T: Config>(len: u32) -> Option<Bounded<<T as Config>::RuntimeCall>> { +fn bounded<T: Config>(len: u32) -> Option<BoundedCallOf<T>> { let call = <<T as Config>::RuntimeCall>::from(SystemCall::remark { remark: vec![0; len as usize] }); T::Preimages::bound(call).ok() } -fn make_call<T: Config>(maybe_lookup_len: Option<u32>) -> Bounded<<T as Config>::RuntimeCall> { +fn make_call<T: Config>(maybe_lookup_len: Option<u32>) -> BoundedCallOf<T> { let bound = BoundedInline::bound() as u32; let mut len = match maybe_lookup_len { Some(len) => len.min(T::Preimages::MAX_LENGTH as u32 - 2).max(bound) - 3, diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index 3da4aa03caeb040bea5c3fc2ac6e63d1ca958e1e..532ffe7bd71bc5012767687d6a545962965df7f9 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -91,8 +91,8 @@ use frame_support::{ ensure, traits::{ schedule::{self, DispatchTime, MaybeHashed}, - Bounded, CallerTrait, EnsureOrigin, Get, Hash as PreimageHash, IsType, OriginTrait, - PalletInfoAccess, PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage, + Bounded, CallerTrait, EnsureOrigin, Get, IsType, OriginTrait, PalletInfoAccess, + PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage, }, weights::{Weight, WeightMeter}, }; @@ -119,6 +119,9 @@ pub type TaskAddress<BlockNumber> = (BlockNumber, u32); pub type CallOrHashOf<T> = MaybeHashed<<T as Config>::RuntimeCall, <T as frame_system::Config>::Hash>; +pub type BoundedCallOf<T> = + Bounded<<T as Config>::RuntimeCall, <T as frame_system::Config>::Hashing>; + #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] #[derive(Clone, RuntimeDebug, Encode, Decode)] struct ScheduledV1<Call, BlockNumber> { @@ -165,7 +168,7 @@ pub type ScheduledV3Of<T> = ScheduledV3< pub type ScheduledOf<T> = Scheduled< TaskName, - Bounded<<T as Config>::RuntimeCall>, + BoundedCallOf<T>, BlockNumberFor<T>, <T as Config>::PalletsOrigin, <T as frame_system::Config>::AccountId, @@ -254,7 +257,7 @@ pub mod pallet { type WeightInfo: WeightInfo; /// The preimage provider with which we look up call hashes to get the call. 
- type Preimages: QueryPreimage + StorePreimage; + type Preimages: QueryPreimage<H = Self::Hashing> + StorePreimage; } #[pallet::storage] @@ -440,7 +443,7 @@ pub mod pallet { } } -impl<T: Config<Hash = PreimageHash>> Pallet<T> { +impl<T: Config> Pallet<T> { /// Migrate storage format from V1 to V4. /// /// Returns the weight consumed by this migration. @@ -627,7 +630,7 @@ impl<T: Config<Hash = PreimageHash>> Pallet<T> { >(&bounded) { log::error!( - "Dropping undecodable call {}: {:?}", + "Dropping undecodable call {:?}: {:?}", &h, &err ); @@ -695,7 +698,7 @@ impl<T: Config> Pallet<T> { Option< Scheduled< TaskName, - Bounded<<T as Config>::RuntimeCall>, + BoundedCallOf<T>, BlockNumberFor<T>, OldOrigin, T::AccountId, @@ -797,7 +800,7 @@ impl<T: Config> Pallet<T> { maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<<T as Config>::RuntimeCall>, + call: BoundedCallOf<T>, ) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> { let when = Self::resolve_time(when)?; @@ -886,7 +889,7 @@ impl<T: Config> Pallet<T> { maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<<T as Config>::RuntimeCall>, + call: BoundedCallOf<T>, ) -> Result<TaskAddress<BlockNumberFor<T>>, DispatchError> { // ensure id it is unique if Lookup::<T>::contains_key(&id) { @@ -1191,8 +1194,8 @@ impl<T: Config> Pallet<T> { } } -impl<T: Config<Hash = PreimageHash>> - schedule::v2::Anon<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin> for Pallet<T> +impl<T: Config> schedule::v2::Anon<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin> + for Pallet<T> { type Address = TaskAddress<BlockNumberFor<T>>; type Hash = T::Hash; @@ -1225,8 +1228,8 @@ impl<T: Config<Hash = PreimageHash>> } } -impl<T: Config<Hash = PreimageHash>> - schedule::v2::Named<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin> for Pallet<T> +impl<T: Config> schedule::v2::Named<BlockNumberFor<T>, <T as Config>::RuntimeCall, T::PalletsOrigin> + for Pallet<T> { type Address = TaskAddress<BlockNumberFor<T>>; type Hash = T::Hash; @@ -1270,13 +1273,14 @@ impl<T: Config> schedule::v3::Anon<BlockNumberFor<T>, <T as Config>::RuntimeCall for Pallet<T> { type Address = TaskAddress<BlockNumberFor<T>>; + type Hasher = T::Hashing; fn schedule( when: DispatchTime<BlockNumberFor<T>>, maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<<T as Config>::RuntimeCall>, + call: BoundedCallOf<T>, ) -> Result<Self::Address, DispatchError> { Self::do_schedule(when, maybe_periodic, priority, origin, call) } @@ -1308,6 +1312,7 @@ impl<T: Config> schedule::v3::Named<BlockNumberFor<T>, <T as Config>::RuntimeCal for Pallet<T> { type Address = TaskAddress<BlockNumberFor<T>>; + type Hasher = T::Hashing; fn schedule_named( id: TaskName, @@ -1315,7 +1320,7 @@ impl<T: Config> schedule::v3::Named<BlockNumberFor<T>, <T as Config>::RuntimeCal maybe_periodic: Option<schedule::Period<BlockNumberFor<T>>>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: Bounded<<T as Config>::RuntimeCall>, + call: BoundedCallOf<T>, ) -> Result<Self::Address, DispatchError> { Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call) } diff --git a/substrate/frame/scheduler/src/migration.rs b/substrate/frame/scheduler/src/migration.rs index 
06259768f0aa1be7ef836284c3aecf8e5070b117..9c8b0da03fc0b6368b2734c07db164a4ccc3b683 100644 --- a/substrate/frame/scheduler/src/migration.rs +++ b/substrate/frame/scheduler/src/migration.rs @@ -83,7 +83,7 @@ pub mod v3 { /// Migrate the scheduler pallet from V3 to V4. pub struct MigrateToV4<T>(sp_std::marker::PhantomData<T>); - impl<T: Config<Hash = PreimageHash>> OnRuntimeUpgrade for MigrateToV4<T> { + impl<T: Config> OnRuntimeUpgrade for MigrateToV4<T> { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> { ensure!(StorageVersion::get::<Pallet<T>>() == 3, "Can only upgrade from version 3"); diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs index 25a60732e060bcb535a1bed46fcb9cecd6041931..1bf8b3e5f3a03bd2c9d233a24351d1ae2d923c45 100644 --- a/substrate/frame/scheduler/src/tests.rs +++ b/substrate/frame/scheduler/src/tests.rs @@ -1019,7 +1019,7 @@ fn test_migrate_origin() { new_test_ext().execute_with(|| { for i in 0..3u64 { let k = i.twox_64_concat(); - let old: Vec<Option<Scheduled<[u8; 32], Bounded<RuntimeCall>, u64, u32, u64>>> = vec![ + let old: Vec<Option<Scheduled<[u8; 32], BoundedCallOf<Test>, u64, u32, u64>>> = vec![ Some(Scheduled { maybe_id: None, priority: i as u8 + 10, diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 9957cf1cff85c821b50d132d92762eac4a70b7ac..466ceca42961d100f19cb1085e3f1814b47c334f 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1417,57 +1417,15 @@ pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration -/// for the pallet. -/// -/// Item is defined as either an enum or a struct. It needs to be public and implement the -/// trait `GenesisBuild` with [`#[pallet::genesis_build]`](`macro@genesis_build`). The type -/// generics are constrained to be either none, or `T` or `T: Config`. -/// -/// E.g: -/// -/// ```ignore -/// #[pallet::genesis_config] -/// pub struct GenesisConfig<T: Config> { -/// _myfield: BalanceOf<T>, -/// } -/// ``` +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::genesis_config`. #[proc_macro_attribute] pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` -/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the pallet's -/// initial state. -/// -/// The impl must be defined as: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl<T: Config> GenesisBuild<T> for GenesisConfig<$maybe_generics> { -/// fn build(&self) { $expr } -/// } -/// ``` -/// -/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild<T>` on -/// type `GenesisConfig` with generics none or `T`. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::genesis_build] -/// impl<T: Config> GenesisBuild<T> for GenesisConfig { -/// fn build(&self) {} -/// } -/// ``` -/// -/// ## Macro expansion -/// -/// The macro will add the following attribute: -/// * `#[cfg(feature = "std")]` -/// -/// The macro will implement `sp_runtime::BuildStorage`. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::genesis_build`. 
#[proc_macro_attribute] pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() diff --git a/substrate/frame/support/procedural/src/no_bound/default.rs b/substrate/frame/support/procedural/src/no_bound/default.rs index da05f19e0f817d3b1a3c8709bee87dd39f67277d..35d0eaeecf5616731a70ed9e6f3825dcd895da9c 100644 --- a/substrate/frame/support/procedural/src/no_bound/default.rs +++ b/substrate/frame/support/procedural/src/no_bound/default.rs @@ -66,15 +66,12 @@ pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::To .collect::<Vec<_>>(); match &*default_variants { - [] => { - return syn::Error::new( - name.clone().span(), - // writing this as a regular string breaks rustfmt for some reason - r#"no default declared, make a variant default by placing `#[default]` above it"#, - ) - .into_compile_error() - .into() - }, + [] => return syn::Error::new( + name.clone().span(), + "no default declared, make a variant default by placing `#[default]` above it", + ) + .into_compile_error() + .into(), // only one variant with the #[default] attribute set [default_variant] => { let variant_attrs = default_variant diff --git a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs index 15ddfcf1d49d4e81c26fe96d70589c4fa431fe0c..248e83469435c96960f34ddccd55c71ebf650566 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -38,7 +38,7 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { #[cfg(feature = "std")] impl<#type_impl_gen> #frame_support::sp_runtime::BuildStorage for #gen_cfg_ident<#gen_cfg_use_gen> #where_clause { - fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> std::result::Result<(), std::string::String> { + fn assimilate_storage(&self, storage: &mut #frame_support::sp_runtime::Storage) -> std::result::Result<(), std::string::String> { #frame_support::__private::BasicExternalities::execute_with_storage(storage, || { self.build(); Ok(()) diff --git a/substrate/frame/support/procedural/src/pallet/parse/helper.rs b/substrate/frame/support/procedural/src/pallet/parse/helper.rs index 1e6e83d7eeba7e1b63b18693ee1fce5cce72dc76..bfa19d8ddc39be343cb8f17f7c63db67d57de944 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/helper.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/helper.rs @@ -494,7 +494,7 @@ pub fn check_type_def_gen( /// return the instance if found for `GenesisBuild` /// return None for BuildGenesisConfig pub fn check_genesis_builder_usage(type_: &syn::Path) -> syn::Result<Option<InstanceUsage>> { - let expected = "expected `GenesisBuild<T>` or `GenesisBuild<T, I>`"; + let expected = "expected `BuildGenesisConfig` (or the deprecated `GenesisBuild<T>` or `GenesisBuild<T, I>`)"; pub struct Checker(Option<InstanceUsage>); impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> { diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index a7106780e0215069af1d6191e5d3ad79fef7170d..8c4b3de49b5d5e6357a3864a9e5d8001c19ffebd 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2188,10 +2188,80 @@ pub mod pallet_macros { pub use frame_support_procedural::{ call_index, compact, composite_enum, config, constant, disable_frame_system_supertrait_check, error, event, extra_constants, 
generate_deposit, - generate_store, genesis_build, genesis_config, getter, hooks, import_section, inherent, - no_default, no_default_bounds, origin, pallet_section, storage, storage_prefix, - storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, + generate_store, getter, hooks, import_section, inherent, no_default, no_default_bounds, + origin, pallet_section, storage, storage_prefix, storage_version, type_value, unbounded, + validate_unsigned, weight, whitelist_storage, }; + + /// Allows you to define the genesis configuration for the pallet. + /// + /// Item is defined as either an enum or a struct. It needs to be public and implement the + /// trait [`frame_support::traits::BuildGenesisConfig`]. + /// + /// See [`genesis_build`] for an example. + pub use frame_support_procedural::genesis_config; + + /// Allows you to define how the state of your pallet at genesis is built. This + /// takes as input the `GenesisConfig` type (as `self`) and constructs the pallet's initial + /// state. + /// + /// The fields of the `GenesisConfig` can in turn be populated by the chain-spec. + /// + /// ## Example: + /// + /// ``` + /// #[frame_support::pallet] + /// pub mod pallet { + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// # use frame_support::traits::BuildGenesisConfig; + /// #[pallet::genesis_config] + /// #[derive(frame_support::DefaultNoBound)] + /// pub struct GenesisConfig<T: Config> { + /// foo: Vec<T::AccountId> + /// } + /// + /// #[pallet::genesis_build] + /// impl<T: Config> BuildGenesisConfig for GenesisConfig<T> { + /// fn build(&self) { + /// // use &self to access fields. + /// let foo = &self.foo; + /// todo!() + /// } + /// } + /// } + /// ``` + /// + /// ## Former Usage + /// + /// Prior to <https://github.com/paritytech/substrate/pull/14306>, the following syntax was used. + /// This is deprecated and will soon be removed. 
+ /// + /// ``` + /// #[frame_support::pallet] + /// pub mod pallet { + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// # use frame_support::traits::GenesisBuild; + /// #[pallet::genesis_config] + /// #[derive(frame_support::DefaultNoBound)] + /// pub struct GenesisConfig<T: Config> { + /// foo: Vec<T::AccountId> + /// } + /// + /// #[pallet::genesis_build] + /// impl<T: Config> GenesisBuild<T> for GenesisConfig<T> { + /// fn build(&self) { + /// todo!() + /// } + /// } + /// } + /// ``` + pub use frame_support_procedural::genesis_build; } #[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] diff --git a/substrate/frame/support/src/storage/storage_noop_guard.rs b/substrate/frame/support/src/storage/storage_noop_guard.rs index d00e6e18ecc484e3139ea27e25e3d580871c8c3c..c4d40fa99a35cef9a05d55b71f27de1717a72cee 100644 --- a/substrate/frame/support/src/storage/storage_noop_guard.rs +++ b/substrate/frame/support/src/storage/storage_noop_guard.rs @@ -37,15 +37,38 @@ /// }); /// ``` #[must_use] -pub struct StorageNoopGuard(sp_std::vec::Vec<u8>); +pub struct StorageNoopGuard<'a> { + storage_root: sp_std::vec::Vec<u8>, + error_message: &'a str, +} -impl Default for StorageNoopGuard { +impl<'a> Default for StorageNoopGuard<'a> { fn default() -> Self { - Self(sp_io::storage::root(sp_runtime::StateVersion::V1)) + Self { + storage_root: sp_io::storage::root(sp_runtime::StateVersion::V1), + error_message: "`StorageNoopGuard` detected an attempted storage change.", + } + } +} + +impl<'a> StorageNoopGuard<'a> { + /// Alias to `default()`. + pub fn new() -> Self { + Self::default() + } + + /// Creates a new [`StorageNoopGuard`] with a custom error message. + pub fn from_error_message(error_message: &'a str) -> Self { + Self { storage_root: sp_io::storage::root(sp_runtime::StateVersion::V1), error_message } + } + + /// Sets a custom error message for a [`StorageNoopGuard`]. + pub fn set_error_message(&mut self, error_message: &'a str) { + self.error_message = error_message; } } -impl Drop for StorageNoopGuard { +impl<'a> Drop for StorageNoopGuard<'a> { fn drop(&mut self) { // No need to double panic, eg. inside a test assertion failure. 
if sp_std::thread::panicking() { @@ -53,8 +76,9 @@ impl Drop for StorageNoopGuard { } assert_eq!( sp_io::storage::root(sp_runtime::StateVersion::V1), - self.0, - "StorageNoopGuard detected wrongful storage changes.", + self.storage_root, + "{}", + self.error_message, ); } } @@ -65,7 +89,7 @@ mod tests { use sp_io::TestExternalities; #[test] - #[should_panic(expected = "StorageNoopGuard detected wrongful storage changes.")] + #[should_panic(expected = "`StorageNoopGuard` detected an attempted storage change.")] fn storage_noop_guard_panics_on_changed() { TestExternalities::default().execute_with(|| { let _guard = StorageNoopGuard::default(); @@ -83,7 +107,7 @@ mod tests { } #[test] - #[should_panic(expected = "StorageNoopGuard detected wrongful storage changes.")] + #[should_panic(expected = "`StorageNoopGuard` detected an attempted storage change.")] fn storage_noop_guard_panics_on_early_drop() { TestExternalities::default().execute_with(|| { let guard = StorageNoopGuard::default(); @@ -111,4 +135,34 @@ mod tests { panic!("Something else"); }); } + + #[test] + #[should_panic(expected = "`StorageNoopGuard` found unexpected storage changes.")] + fn storage_noop_guard_panics_created_from_error_message() { + TestExternalities::default().execute_with(|| { + let _guard = StorageNoopGuard::from_error_message( + "`StorageNoopGuard` found unexpected storage changes.", + ); + frame_support::storage::unhashed::put(b"key", b"value"); + }); + } + + #[test] + #[should_panic(expected = "`StorageNoopGuard` found unexpected storage changes.")] + fn storage_noop_guard_panics_with_set_error_message() { + TestExternalities::default().execute_with(|| { + let mut guard = StorageNoopGuard::default(); + guard.set_error_message("`StorageNoopGuard` found unexpected storage changes."); + frame_support::storage::unhashed::put(b"key", b"value"); + }); + } + + #[test] + #[should_panic(expected = "`StorageNoopGuard` detected an attempted storage change.")] + fn storage_noop_guard_panics_new_alias() { + TestExternalities::default().execute_with(|| { + let _guard = StorageNoopGuard::new(); + frame_support::storage::unhashed::put(b"key", b"value"); + }); + } } diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 10ae5d83b5ac958fca06be636d38bb0fe4020cb4..2179ee38f8481e19ce2ba3ccc83a16415a8b25c3 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -107,7 +107,7 @@ mod voting; pub use voting::{ClassCountOf, PollStatus, Polling, VoteTally}; mod preimages; -pub use preimages::{Bounded, BoundedInline, FetchResult, Hash, QueryPreimage, StorePreimage}; +pub use preimages::{Bounded, BoundedInline, FetchResult, QueryPreimage, StorePreimage}; mod messages; pub use messages::{ diff --git a/substrate/frame/support/src/traits/preimages.rs b/substrate/frame/support/src/traits/preimages.rs index bf08a286dd7cfb617e3271256e06fbb9a27e15c0..647af029c16dce07f998e34c30cb076f6a670d4c 100644 --- a/substrate/frame/support/src/traits/preimages.rs +++ b/substrate/frame/support/src/traits/preimages.rs @@ -15,16 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Stuff for dealing with 32-byte hashed preimages. +//! Stuff for dealing with hashed preimages. 
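Stepping back to the `StorageNoopGuard` extension above: the panic message is now configurable at construction time or afterwards, which makes failing storage-neutrality checks much easier to attribute. A short usage sketch (the message strings are illustrative), assuming `frame-support` and `sp-io` as dependencies:

```rust
use frame_support::StorageNoopGuard;
use sp_io::TestExternalities;

fn main() {
    TestExternalities::default().execute_with(|| {
        // `new()` is an alias for `default()` and keeps the generic message.
        {
            let _guard = StorageNoopGuard::new();
            // ... code that must not touch storage ...
        } // dropped here; storage is unchanged, so no panic.

        // A custom message points the eventual panic at the right caller.
        let mut guard = StorageNoopGuard::from_error_message(
            "`my_migration_check` unexpectedly changed storage.",
        );
        // The message can also be swapped while the guard is alive.
        guard.set_error_message("`my_migration_check`, step 2, changed storage.");
        drop(guard); // still a no-op, so this passes too.
    });
}
```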
use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_core::{RuntimeDebug, H256}; -use sp_io::hashing::blake2_256; -use sp_runtime::{traits::ConstU32, DispatchError}; +use sp_core::RuntimeDebug; +use sp_runtime::{ + traits::{ConstU32, Hash}, + DispatchError, +}; use sp_std::borrow::Cow; -pub type Hash = H256; pub type BoundedInline = crate::BoundedVec<u8, ConstU32<128>>; /// The maximum we expect a single legacy hash lookup to be. @@ -32,29 +33,29 @@ const MAX_LEGACY_LEN: u32 = 1_000_000; #[derive(Encode, Decode, MaxEncodedLen, Clone, Eq, PartialEq, TypeInfo, RuntimeDebug)] #[codec(mel_bound())] -pub enum Bounded<T> { - /// A Blake2 256 hash with no preimage length. We - /// do not support creation of this except for transitioning from legacy state. - /// In the future we will make this a pure `Dummy` item storing only the final `dummy` field. - Legacy { hash: Hash, dummy: sp_std::marker::PhantomData<T> }, +pub enum Bounded<T, H: Hash> { + /// A hash with no preimage length. We do not support creation of this except + /// for transitioning from legacy state. In the future we will make this a pure + /// `Dummy` item storing only the final `dummy` field. + Legacy { hash: H::Output, dummy: sp_std::marker::PhantomData<T> }, /// A bounded `Call`. Its encoding must be at most 128 bytes. Inline(BoundedInline), - /// A Blake2-256 hash of the call together with an upper limit for its size. - Lookup { hash: Hash, len: u32 }, + /// A hash of the call together with an upper limit for its size. + Lookup { hash: H::Output, len: u32 }, } -impl<T> Bounded<T> { +impl<T, H: Hash> Bounded<T, H> { /// Casts the wrapped type into something that encodes alike. /// /// # Examples /// ``` - /// use frame_support::traits::Bounded; + /// use frame_support::{traits::Bounded, sp_runtime::traits::BlakeTwo256}; /// /// // Transmute from `String` to `&str`. - /// let x: Bounded<String> = Bounded::Inline(Default::default()); - /// let _: Bounded<&str> = x.transmute(); + /// let x: Bounded<String, BlakeTwo256> = Bounded::Inline(Default::default()); + /// let _: Bounded<&str, BlakeTwo256> = x.transmute(); /// ``` - pub fn transmute<S: Encode>(self) -> Bounded<S> + pub fn transmute<S: Encode>(self) -> Bounded<S, H> where T: Encode + EncodeLike<S>, { @@ -69,18 +70,18 @@ impl<T> Bounded<T> { /// Returns the hash of the preimage. /// /// The hash is re-calculated every time if the preimage is inlined. - pub fn hash(&self) -> Hash { + pub fn hash(&self) -> H::Output { use Bounded::*; match self { Lookup { hash, .. } | Legacy { hash, .. } => *hash, - Inline(x) => blake2_256(x.as_ref()).into(), + Inline(x) => <H as Hash>::hash(x.as_ref()), } } /// Returns the hash to lookup the preimage. /// /// If this is a `Bounded::Inline`, `None` is returned as no lookup is required. - pub fn lookup_hash(&self) -> Option<Hash> { + pub fn lookup_hash(&self) -> Option<H::Output> { use Bounded::*; match self { Lookup { hash, .. } | Legacy { hash, .. } => Some(*hash), @@ -115,13 +116,13 @@ impl<T> Bounded<T> { } /// Constructs a `Lookup` bounded item. - pub fn unrequested(hash: Hash, len: u32) -> Self { + pub fn unrequested(hash: H::Output, len: u32) -> Self { Self::Lookup { hash, len } } /// Constructs a `Legacy` bounded item.
#[deprecated = "This API is only for transitioning to Scheduler v3 API"] - pub fn from_legacy_hash(hash: impl Into<Hash>) -> Self { + pub fn from_legacy_hash(hash: impl Into<H::Output>) -> Self { Self::Legacy { hash: hash.into(), dummy: sp_std::marker::PhantomData } } } @@ -130,24 +131,27 @@ pub type FetchResult = Result<Cow<'static, [u8]>, DispatchError>; /// A interface for looking up preimages from their hash on chain. pub trait QueryPreimage { + /// The hasher used in the runtime. + type H: Hash; + /// Returns whether a preimage exists for a given hash and if so its length. - fn len(hash: &Hash) -> Option<u32>; + fn len(hash: &<Self::H as sp_core::Hasher>::Out) -> Option<u32>; /// Returns the preimage for a given hash. If given, `len` must be the size of the preimage. - fn fetch(hash: &Hash, len: Option<u32>) -> FetchResult; + fn fetch(hash: &<Self::H as sp_core::Hasher>::Out, len: Option<u32>) -> FetchResult; /// Returns whether a preimage request exists for a given hash. - fn is_requested(hash: &Hash) -> bool; + fn is_requested(hash: &<Self::H as sp_core::Hasher>::Out) -> bool; /// Request that someone report a preimage. Providers use this to optimise the economics for /// preimage reporting. - fn request(hash: &Hash); + fn request(hash: &<Self::H as sp_core::Hasher>::Out); /// Cancel a previous preimage request. - fn unrequest(hash: &Hash); + fn unrequest(hash: &<Self::H as sp_core::Hasher>::Out); /// Request that the data required for decoding the given `bounded` value is made available. - fn hold<T>(bounded: &Bounded<T>) { + fn hold<T>(bounded: &Bounded<T, Self::H>) { use Bounded::*; match bounded { Inline(..) => {}, @@ -157,7 +161,7 @@ pub trait QueryPreimage { /// No longer request that the data required for decoding the given `bounded` value is made /// available. - fn drop<T>(bounded: &Bounded<T>) { + fn drop<T>(bounded: &Bounded<T, Self::H>) { use Bounded::*; match bounded { Inline(..) => {}, @@ -167,7 +171,7 @@ pub trait QueryPreimage { /// Check to see if all data required for the given `bounded` value is available for its /// decoding. - fn have<T>(bounded: &Bounded<T>) -> bool { + fn have<T>(bounded: &Bounded<T, Self::H>) -> bool { use Bounded::*; match bounded { Inline(..) => true, @@ -180,7 +184,7 @@ pub trait QueryPreimage { /// It also directly requests the given `hash` using [`Self::request`]. /// /// This may not be `peek`-able or `realize`-able. - fn pick<T>(hash: Hash, len: u32) -> Bounded<T> { + fn pick<T>(hash: <Self::H as sp_core::Hasher>::Out, len: u32) -> Bounded<T, Self::H> { Self::request(&hash); Bounded::Lookup { hash, len } } @@ -190,7 +194,7 @@ pub trait QueryPreimage { /// /// NOTE: This does not remove any data needed for realization. If you will no longer use the /// `bounded`, call `realize` instead or call `drop` afterwards. - fn peek<T: Decode>(bounded: &Bounded<T>) -> Result<(T, Option<u32>), DispatchError> { + fn peek<T: Decode>(bounded: &Bounded<T, Self::H>) -> Result<(T, Option<u32>), DispatchError> { use Bounded::*; match bounded { Inline(data) => T::decode(&mut &data[..]).ok().map(|x| (x, None)), @@ -209,7 +213,9 @@ pub trait QueryPreimage { /// Convert the given `bounded` value back into its original instance. If successful, /// `drop` any data backing it. This will not break the realisability of independently /// created instances of `Bounded` which happen to have identical data. 
- fn realize<T: Decode>(bounded: &Bounded<T>) -> Result<(T, Option<u32>), DispatchError> { + fn realize<T: Decode>( + bounded: &Bounded<T, Self::H>, + ) -> Result<(T, Option<u32>), DispatchError> { let r = Self::peek(bounded)?; Self::drop(bounded); Ok(r) @@ -230,11 +236,11 @@ pub trait StorePreimage: QueryPreimage { /// Request and attempt to store the bytes of a preimage on chain. /// /// May return `DispatchError::Exhausted` if the preimage is just too big. - fn note(bytes: Cow<[u8]>) -> Result<Hash, DispatchError>; + fn note(bytes: Cow<[u8]>) -> Result<<Self::H as sp_core::Hasher>::Out, DispatchError>; /// Attempt to clear a previously noted preimage. Exactly the same as `unrequest` but is /// provided for symmetry. - fn unnote(hash: &Hash) { + fn unnote(hash: &<Self::H as sp_core::Hasher>::Out) { Self::unrequest(hash) } @@ -244,7 +250,7 @@ pub trait StorePreimage: QueryPreimage { /// /// NOTE: Once this API is used, you should use either `drop` or `realize`. /// The value is also noted using [`Self::note`]. - fn bound<T: Encode>(t: T) -> Result<Bounded<T>, DispatchError> { + fn bound<T: Encode>(t: T) -> Result<Bounded<T, Self::H>, DispatchError> { let data = t.encode(); let len = data.len() as u32; Ok(match BoundedInline::try_from(data) { @@ -255,22 +261,24 @@ pub trait StorePreimage: QueryPreimage { } impl QueryPreimage for () { - fn len(_: &Hash) -> Option<u32> { + type H = sp_runtime::traits::BlakeTwo256; + + fn len(_: &sp_core::H256) -> Option<u32> { None } - fn fetch(_: &Hash, _: Option<u32>) -> FetchResult { + fn fetch(_: &sp_core::H256, _: Option<u32>) -> FetchResult { Err(DispatchError::Unavailable) } - fn is_requested(_: &Hash) -> bool { + fn is_requested(_: &sp_core::H256) -> bool { false } - fn request(_: &Hash) {} - fn unrequest(_: &Hash) {} + fn request(_: &sp_core::H256) {} + fn unrequest(_: &sp_core::H256) {} } impl StorePreimage for () { const MAX_LENGTH: usize = 0; - fn note(_: Cow<[u8]>) -> Result<Hash, DispatchError> { + fn note(_: Cow<[u8]>) -> Result<sp_core::H256, DispatchError> { Err(DispatchError::Exhausted) } } @@ -279,22 +287,22 @@ impl StorePreimage for () { mod tests { use super::*; use crate::BoundedVec; - use sp_runtime::bounded_vec; + use sp_runtime::{bounded_vec, traits::BlakeTwo256}; #[test] fn bounded_size_is_correct() { - assert_eq!(<Bounded<Vec<u8>> as MaxEncodedLen>::max_encoded_len(), 131); + assert_eq!(<Bounded<Vec<u8>, BlakeTwo256> as MaxEncodedLen>::max_encoded_len(), 131); } #[test] fn bounded_basic_works() { let data: BoundedVec<u8, _> = bounded_vec![b'a', b'b', b'c']; let len = data.len() as u32; - let hash = blake2_256(&data).into(); + let hash = BlakeTwo256::hash(&data).into(); // Inline works { - let bound: Bounded<Vec<u8>> = Bounded::Inline(data.clone()); + let bound: Bounded<Vec<u8>, BlakeTwo256> = Bounded::Inline(data.clone()); assert_eq!(bound.hash(), hash); assert_eq!(bound.len(), Some(len)); assert!(!bound.lookup_needed()); @@ -302,7 +310,8 @@ mod tests { } // Legacy works { - let bound: Bounded<Vec<u8>> = Bounded::Legacy { hash, dummy: Default::default() }; + let bound: Bounded<Vec<u8>, BlakeTwo256> = + Bounded::Legacy { hash, dummy: Default::default() }; assert_eq!(bound.hash(), hash); assert_eq!(bound.len(), None); assert!(bound.lookup_needed()); @@ -310,7 +319,8 @@ mod tests { } // Lookup works { - let bound: Bounded<Vec<u8>> = Bounded::Lookup { hash, len: data.len() as u32 }; + let bound: Bounded<Vec<u8>, BlakeTwo256> = + Bounded::Lookup { hash, len: data.len() as u32 }; assert_eq!(bound.hash(), hash); assert_eq!(bound.len(), 
Some(len)); assert!(bound.lookup_needed()); @@ -323,8 +333,8 @@ mod tests { let data: BoundedVec<u8, _> = bounded_vec![b'a', b'b', b'c']; // Transmute a `String` into a `&str`. - let x: Bounded<String> = Bounded::Inline(data.clone()); - let y: Bounded<&str> = x.transmute(); + let x: Bounded<String, BlakeTwo256> = Bounded::Inline(data.clone()); + let y: Bounded<&str, BlakeTwo256> = x.transmute(); assert_eq!(y, Bounded::Inline(data)); } } diff --git a/substrate/frame/support/src/traits/schedule.rs b/substrate/frame/support/src/traits/schedule.rs index 74a5951142d522861b568e28023a791a493ac4e6..7a7d1357da1e7dba9588866d07d734f2a8fedda8 100644 --- a/substrate/frame/support/src/traits/schedule.rs +++ b/substrate/frame/support/src/traits/schedule.rs @@ -387,6 +387,8 @@ pub mod v3 { pub trait Anon<BlockNumber, Call, Origin> { /// An address which can be used for removing a scheduled task. type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + Debug + TypeInfo; + /// The hasher used in the runtime. + type Hasher: sp_runtime::traits::Hash; /// Schedule a dispatch to happen at the beginning of some block in the future. /// @@ -396,7 +398,7 @@ pub mod v3 { maybe_periodic: Option<Period<BlockNumber>>, priority: Priority, origin: Origin, - call: Bounded<Call>, + call: Bounded<Call, Self::Hasher>, ) -> Result<Self::Address, DispatchError>; /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, @@ -434,6 +436,8 @@ pub mod v3 { pub trait Named<BlockNumber, Call, Origin> { /// An address which can be used for removing a scheduled task. type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + sp_std::fmt::Debug; + /// The hasher used in the runtime. + type Hasher: sp_runtime::traits::Hash; /// Schedule a dispatch to happen at the beginning of some block in the future. /// @@ -446,7 +450,7 @@ pub mod v3 { maybe_periodic: Option<Period<BlockNumber>>, priority: Priority, origin: Origin, - call: Bounded<Call>, + call: Bounded<Call, Self::Hasher>, ) -> Result<Self::Address, DispatchError>; /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances diff --git a/substrate/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr b/substrate/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr index e3dd0b7aa1d3434f873240154758bf73a59adf25..b54a23c91b4ee966cea5b0509de24ada95f41b1a 100644 --- a/substrate/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr @@ -1,4 +1,4 @@ -error: Invalid genesis builder: expected `GenesisBuild<T>` or `GenesisBuild<T, I>` +error: Invalid genesis builder: expected `BuildGenesisConfig` (or the deprecated `GenesisBuild<T>` or `GenesisBuild<T, I>`) --> tests/pallet_ui/genesis_invalid_generic.rs:36:7 | 36 | impl GenesisBuild for GenesisConfig {} diff --git a/substrate/frame/tips/src/lib.rs b/substrate/frame/tips/src/lib.rs index 6e8f72e0540e63bd88560fc13b778d34be740e2e..8764486d5f4fde2aafb20fa7daf919f1f1349a18 100644 --- a/substrate/frame/tips/src/lib.rs +++ b/substrate/frame/tips/src/lib.rs @@ -154,6 +154,10 @@ pub mod pallet { #[pallet::constant] type TipReportDepositBase: Get<BalanceOf<Self, I>>; + /// The maximum amount for a single tip. + #[pallet::constant] + type MaxTipAmount: Get<BalanceOf<Self, I>>; + /// Origin from which tippers must come. /// /// `ContainsLengthBound::max_len` must be cost free (i.e. 
no storage read or heavy @@ -208,6 +212,8 @@ pub mod pallet { AlreadyKnown, /// The tip hash is unknown. UnknownTip, + /// The tip given was too generous. + MaxTipAmountExceeded, /// The account attempting to retract the tip is not the finder of the tip. NotFinder, /// The tip cannot be claimed/closed because there are not enough tippers yet. @@ -336,10 +342,13 @@ pub mod pallet { let tipper = ensure_signed(origin)?; let who = T::Lookup::lookup(who)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); + + ensure!(T::MaxTipAmount::get() >= tip_value, Error::<T, I>::MaxTipAmountExceeded); + let reason_hash = T::Hashing::hash(&reason[..]); ensure!(!Reasons::<T, I>::contains_key(&reason_hash), Error::<T, I>::AlreadyKnown); - let hash = T::Hashing::hash_of(&(&reason_hash, &who)); + let hash = T::Hashing::hash_of(&(&reason_hash, &who)); Reasons::<T, I>::insert(&reason_hash, &reason); Self::deposit_event(Event::NewTip { tip_hash: hash }); let tips = vec![(tipper.clone(), tip_value)]; @@ -387,7 +396,10 @@ pub mod pallet { let tipper = ensure_signed(origin)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); + ensure!(T::MaxTipAmount::get() >= tip_value, Error::<T, I>::MaxTipAmountExceeded); + let mut tip = Tips::<T, I>::get(hash).ok_or(Error::<T, I>::UnknownTip)?; + if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { Self::deposit_event(Event::TipClosing { tip_hash: hash }); } diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs index a700892d42703f3560d1b6cabc1e1e7641cb1ddc..9cb90c3798018f1700dc0cb6baa5a2d407131e98 100644 --- a/substrate/frame/tips/src/tests.rs +++ b/substrate/frame/tips/src/tests.rs @@ -172,6 +172,7 @@ impl Config for Test { type TipFindersFee = TipFindersFee; type TipReportDepositBase = ConstU64<1>; type DataDepositPerByte = ConstU64<1>; + type MaxTipAmount = ConstU64<10_000_000>; type RuntimeEvent = RuntimeEvent; type WeightInfo = (); } @@ -183,6 +184,7 @@ impl Config<Instance1> for Test { type TipFindersFee = TipFindersFee; type TipReportDepositBase = ConstU64<1>; type DataDepositPerByte = ConstU64<1>; + type MaxTipAmount = ConstU64<10_000_000>; type RuntimeEvent = RuntimeEvent; type WeightInfo = (); } @@ -396,6 +398,23 @@ fn tip_median_calculation_works() { }); } +#[test] +fn tip_large_should_fail() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 0)); + let h = tip_hash(); + assert_noop!( + Tips::tip( + RuntimeOrigin::signed(12), + h, + <<Test as Config>::MaxTipAmount as Get<u64>>::get() + 1 + ), + Error::<Test>::MaxTipAmountExceeded + ); + }); +} + #[test] fn tip_changing_works() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/whitelist/src/benchmarking.rs b/substrate/frame/whitelist/src/benchmarking.rs index 1982f5eb8738e2928946ba62d066bc46a710a854..9d356f09a9d2ec411c6af5ae73940741319ab590 100644 --- a/substrate/frame/whitelist/src/benchmarking.rs +++ b/substrate/frame/whitelist/src/benchmarking.rs @@ -79,7 +79,7 @@ benchmarks! { let call_weight = call.get_dispatch_info().weight; let encoded_call = call.encode(); let call_encoded_len = encoded_call.len() as u32; - let call_hash = call.blake2_256().into(); + let call_hash = T::Hashing::hash_of(&call); Pallet::<T>::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); @@ -106,7 +106,7 @@ benchmarks! 
{ let remark = sp_std::vec![1u8; n as usize]; let call: <T as Config>::RuntimeCall = frame_system::Call::remark { remark }.into(); - let call_hash = call.blake2_256().into(); + let call_hash = T::Hashing::hash_of(&call); Pallet::<T>::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); diff --git a/substrate/frame/whitelist/src/lib.rs b/substrate/frame/whitelist/src/lib.rs index decf010b06757ff7ed03f101100d038078b5e67a..44551abd10715e0e4dd23d9667b2786f8ae850a1 100644 --- a/substrate/frame/whitelist/src/lib.rs +++ b/substrate/frame/whitelist/src/lib.rs @@ -44,12 +44,11 @@ use codec::{DecodeLimit, Encode, FullCodec}; use frame_support::{ dispatch::{GetDispatchInfo, PostDispatchInfo}, ensure, - traits::{Hash as PreimageHash, QueryPreimage, StorePreimage}, + traits::{QueryPreimage, StorePreimage}, weights::Weight, - Hashable, }; use scale_info::TypeInfo; -use sp_runtime::traits::Dispatchable; +use sp_runtime::traits::{Dispatchable, Hash}; use sp_std::prelude::*; pub use pallet::*; @@ -81,7 +80,7 @@ pub mod pallet { type DispatchWhitelistedOrigin: EnsureOrigin<Self::RuntimeOrigin>; /// The handler of pre-images. - type Preimages: QueryPreimage + StorePreimage; + type Preimages: QueryPreimage<H = Self::Hashing> + StorePreimage; /// The weight information for this pallet. type WeightInfo: WeightInfo; @@ -93,9 +92,9 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event<T: Config> { - CallWhitelisted { call_hash: PreimageHash }, - WhitelistedCallRemoved { call_hash: PreimageHash }, - WhitelistedCallDispatched { call_hash: PreimageHash, result: DispatchResultWithPostInfo }, + CallWhitelisted { call_hash: T::Hash }, + WhitelistedCallRemoved { call_hash: T::Hash }, + WhitelistedCallDispatched { call_hash: T::Hash, result: DispatchResultWithPostInfo }, } #[pallet::error] @@ -113,14 +112,13 @@ pub mod pallet { } #[pallet::storage] - pub type WhitelistedCall<T: Config> = - StorageMap<_, Twox64Concat, PreimageHash, (), OptionQuery>; + pub type WhitelistedCall<T: Config> = StorageMap<_, Twox64Concat, T::Hash, (), OptionQuery>; #[pallet::call] impl<T: Config> Pallet<T> { #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::whitelist_call())] - pub fn whitelist_call(origin: OriginFor<T>, call_hash: PreimageHash) -> DispatchResult { + pub fn whitelist_call(origin: OriginFor<T>, call_hash: T::Hash) -> DispatchResult { T::WhitelistOrigin::ensure_origin(origin)?; ensure!( @@ -138,10 +136,7 @@ pub mod pallet { #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::remove_whitelisted_call())] - pub fn remove_whitelisted_call( - origin: OriginFor<T>, - call_hash: PreimageHash, - ) -> DispatchResult { + pub fn remove_whitelisted_call(origin: OriginFor<T>, call_hash: T::Hash) -> DispatchResult { T::WhitelistOrigin::ensure_origin(origin)?; WhitelistedCall::<T>::take(call_hash).ok_or(Error::<T>::CallIsNotWhitelisted)?; @@ -160,7 +155,7 @@ pub mod pallet { )] pub fn dispatch_whitelisted_call( origin: OriginFor<T>, - call_hash: PreimageHash, + call_hash: T::Hash, call_encoded_len: u32, call_weight_witness: Weight, ) -> DispatchResultWithPostInfo { @@ -206,7 +201,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { T::DispatchWhitelistedOrigin::ensure_origin(origin)?; - let call_hash = call.blake2_256().into(); + let call_hash = T::Hashing::hash_of(&call).into(); ensure!( WhitelistedCall::<T>::contains_key(call_hash), @@ -227,10 +222,7 @@ impl<T: Config> Pallet<T> { /// Clean whitelisting/preimage and dispatch call. 
diff --git a/substrate/primitives/consensus/slots/src/lib.rs b/substrate/primitives/consensus/slots/src/lib.rs
index 30bb42e2c7589094147f6ba2732e8c587c106302..a299ce395ea4fa79dd95bd2b9b9446e537983dd1 100644
--- a/substrate/primitives/consensus/slots/src/lib.rs
+++ b/substrate/primitives/consensus/slots/src/lib.rs
@@ -24,7 +24,7 @@ use scale_info::TypeInfo;
 use sp_timestamp::Timestamp;
 
 /// Unit type wrapper that represents a slot.
-#[derive(Debug, Encode, MaxEncodedLen, Decode, Eq, Clone, Copy, Default, Ord, TypeInfo)]
+#[derive(Debug, Encode, MaxEncodedLen, Decode, Eq, Clone, Copy, Default, Ord, Hash, TypeInfo)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 pub struct Slot(u64);
 
@@ -44,6 +44,26 @@ impl core::ops::Add for Slot {
 	}
 }
 
+impl core::ops::Sub for Slot {
+	type Output = Self;
+
+	fn sub(self, other: Self) -> Self {
+		Self(self.0 - other.0)
+	}
+}
+
+impl core::ops::AddAssign for Slot {
+	fn add_assign(&mut self, rhs: Self) {
+		self.0 += rhs.0
+	}
+}
+
+impl core::ops::SubAssign for Slot {
+	fn sub_assign(&mut self, rhs: Self) {
+		self.0 -= rhs.0
+	}
+}
+
 impl core::ops::Add<u64> for Slot {
 	type Output = Self;
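The new `Sub`, `AddAssign` and `SubAssign` impls mirror the existing `Add`: plain `u64` arithmetic underneath, so subtraction can panic on underflow like built-in integers (the crate's `saturating_sub` remains the checked alternative). A short usage sketch:

```rust
use sp_consensus_slots::Slot;

fn main() {
	let mut slot = Slot::from(10u64);

	// New in this patch: in-place slot arithmetic...
	slot += Slot::from(5u64);
	assert_eq!(u64::from(slot), 15);
	slot -= Slot::from(3u64);
	assert_eq!(u64::from(slot), 12);

	// ...and plain subtraction between slots.
	let gap = slot - Slot::from(2u64);
	assert_eq!(u64::from(gap), 10);
}
```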
diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml
index 727d46cb8f53de54ab14704580614e2b41a34ec5..4d279c7b703dd44977287b41f8451400a768d073 100644
--- a/substrate/test-utils/runtime/Cargo.toml
+++ b/substrate/test-utils/runtime/Cargo.toml
@@ -107,6 +107,3 @@ std = [
 ]
 # Special feature to disable logging
 disable-logging = [ "sp-api/disable-logging" ]
-
-#Enabling this flag will disable GenesisBuilder API implementation in runtime.
-disable-genesis-builder = []
diff --git a/substrate/test-utils/runtime/build.rs b/substrate/test-utils/runtime/build.rs
index 230606635f7dc5a4fa452dc7f5dfd021747a6002..dd79ce2c5ae842ad7fc1c759a14fa8dedd200a1a 100644
--- a/substrate/test-utils/runtime/build.rs
+++ b/substrate/test-utils/runtime/build.rs
@@ -15,8 +15,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-const BUILD_NO_GENESIS_BUILDER_SUPPORT_ENV: &str = "BUILD_NO_GENESIS_BUILDER_SUPPORT";
-
 fn main() {
 	#[cfg(feature = "std")]
 	{
@@ -31,19 +29,6 @@ fn main() {
 			.build();
 	}
 
-	#[cfg(feature = "std")]
-	if std::env::var(BUILD_NO_GENESIS_BUILDER_SUPPORT_ENV).is_ok() {
-		substrate_wasm_builder::WasmBuilder::new()
-			.with_current_project()
-			.export_heap_base()
-			.append_to_rust_flags("-Clink-arg=-zstack-size=1048576")
-			.set_file_name("wasm_binary_no_genesis_builder")
-			.import_memory()
-			.enable_feature("disable-genesis-builder")
-			.build();
-	}
-	println!("cargo:rerun-if-env-changed={}", BUILD_NO_GENESIS_BUILDER_SUPPORT_ENV);
-
 	#[cfg(feature = "std")]
 	{
 		substrate_wasm_builder::WasmBuilder::new()
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index b116c8556815f3eef73e08252faab9d9f586ff24..687790f2ffaf8bfe13a4bb58075efc3da750114a 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -26,11 +26,10 @@ pub mod genesismap;
 pub mod substrate_test_pallet;
 
 use codec::{Decode, Encode};
-#[cfg(not(feature = "disable-genesis-builder"))]
-use frame_support::genesis_builder_helper::{build_config, create_default_config};
 use frame_support::{
 	construct_runtime,
 	dispatch::DispatchClass,
+	genesis_builder_helper::{build_config, create_default_config},
 	parameter_types,
 	traits::{ConstU32, ConstU64},
 	weights::{
@@ -722,7 +721,6 @@ impl_runtime_apis! {
 		}
 	}
 
-	#[cfg(not(feature = "disable-genesis-builder"))]
 	impl sp_genesis_builder::GenesisBuilder<Block> for Runtime {
 		fn create_default_config() -> Vec<u8> {
 			create_default_config::<RuntimeGenesisConfig>()
@@ -1203,7 +1201,6 @@ mod tests {
 		})
 	}
 
-	#[cfg(not(feature = "disable-genesis-builder"))]
 	mod genesis_builder_tests {
 		use super::*;
 		use crate::genesismap::GenesisStorageBuilder;
diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs
index 7b529200440279a59a74bdb9d3dbf315a21d9261..8c8345b06bd32e4efbe2f721ddebe2cc2f2f9c4e 100644
--- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs
+++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs
@@ -22,6 +22,7 @@ use codec::Encode;
 use futures::future::ready;
 use parking_lot::RwLock;
+use sc_transaction_pool::ChainApi;
 use sp_blockchain::{CachedHeaderMetadata, TreeRoute};
 use sp_runtime::{
 	generic::{self, BlockId},
@@ -237,9 +238,14 @@ impl TestApi {
 	) -> Result<sp_blockchain::TreeRoute<Block>, Error> {
 		sp_blockchain::tree_route(self, from, to)
 	}
+
+	/// Helper function for mapping block number to hash. Use if mapping shall not fail.
+	pub fn expect_hash_from_number(&self, n: BlockNumber) -> Hash {
+		self.block_id_to_hash(&BlockId::Number(n)).unwrap().unwrap()
+	}
 }
 
-impl sc_transaction_pool::ChainApi for TestApi {
+impl ChainApi for TestApi {
 	type Block = Block;
 	type Error = Error;
 	type ValidationFuture = futures::future::Ready<Result<TransactionValidity, Error>>;
@@ -247,13 +253,13 @@ impl sc_transaction_pool::ChainApi for TestApi {
 
 	fn validate_transaction(
 		&self,
-		at: &BlockId<Self::Block>,
+		at: <Self::Block as BlockT>::Hash,
 		_source: TransactionSource,
 		uxt: <Self::Block as BlockT>::Extrinsic,
 	) -> Self::ValidationFuture {
 		self.validation_requests.write().push(uxt.clone());
 
-		match self.block_id_to_number(at) {
+		match self.block_id_to_number(&BlockId::Hash(at)) {
 			Ok(Some(number)) => {
 				let found_best = self
 					.chain
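With `ChainApi::validate_transaction` now taking a block hash instead of `&BlockId`, callers resolve the hash once via the new `expect_hash_from_number` helper. A sketch of the call-site pattern, assuming `TestApi::empty()` seeds a genesis block at number 0 as in the current test utilities:

```rust
use sc_transaction_pool::ChainApi;
use sp_runtime::generic::BlockId;
use substrate_test_runtime_transaction_pool::TestApi;

fn main() {
	// Assumed: an empty TestApi still contains a genesis block at number 0.
	let api = TestApi::empty();

	// Before: callers passed `&BlockId::number(0)` straight into
	// `validate_transaction`. Now they resolve the hash once up front:
	let at = api.expect_hash_from_number(0);

	// ...which is just the infallible form of the underlying lookup.
	assert_eq!(Some(at), api.block_id_to_hash(&BlockId::Number(0)).unwrap());
}
```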
diff --git a/substrate/utils/frame/rpc/system/src/lib.rs b/substrate/utils/frame/rpc/system/src/lib.rs
index 1eff71e3390a38af2f1cd0d2ba417783a86ee5a2..f467a87989048d9cca418ba04c57d363cc05162d 100644
--- a/substrate/utils/frame/rpc/system/src/lib.rs
+++ b/substrate/utils/frame/rpc/system/src/lib.rs
@@ -219,7 +219,6 @@ mod tests {
 	use jsonrpsee::{core::Error as JsonRpseeError, types::error::CallError};
 	use sc_transaction_pool::BasicPool;
 	use sp_runtime::{
-		generic::BlockId,
 		transaction_validity::{InvalidTransaction, TransactionValidityError},
 		ApplyExtrinsicResult,
 	};
@@ -245,11 +244,12 @@ mod tests {
 			};
 			t.into_unchecked_extrinsic()
 		};
+		let hash_of_block0 = client.info().genesis_hash;
 		// Populate the pool
 		let ext0 = new_transaction(0);
-		block_on(pool.submit_one(&BlockId::number(0), source, ext0)).unwrap();
+		block_on(pool.submit_one(hash_of_block0, source, ext0)).unwrap();
 		let ext1 = new_transaction(1);
-		block_on(pool.submit_one(&BlockId::number(0), source, ext1)).unwrap();
+		block_on(pool.submit_one(hash_of_block0, source, ext1)).unwrap();
 
 		let accounts = System::new(client, pool, DenyUnsafe::Yes);
diff --git a/substrate/utils/prometheus/src/lib.rs b/substrate/utils/prometheus/src/lib.rs
index 581666635ab54698d0426bc80007e245ee8568fe..ed1f9137aec4efad97218b2340e49436a05fd577 100644
--- a/substrate/utils/prometheus/src/lib.rs
+++ b/substrate/utils/prometheus/src/lib.rs
@@ -111,10 +111,16 @@ async fn init_prometheus_with_listener(
 			}
 		});
 
-	let server = Server::builder(listener).serve(service);
+	let (signal, on_exit) = tokio::sync::oneshot::channel::<()>();
+	let server = Server::builder(listener).serve(service).with_graceful_shutdown(async {
+		let _ = on_exit.await;
+	});
 
 	let result = server.await.map_err(Into::into);
 
+	// Gracefully shutdown server, otherwise the server does not stop if it has open connections
+	let _ = signal.send(());
+
 	result
 }
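Previously the hyper server future only resolved on error, so open connections could keep the Prometheus endpoint alive after its caller was done with it; `with_graceful_shutdown` plus a oneshot channel gives the caller an explicit stop signal. A standalone hyper 0.14 sketch of the same pattern; the port and handler body are illustrative:

```rust
use std::convert::Infallible;

use hyper::{
	service::{make_service_fn, service_fn},
	Body, Response, Server,
};

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
	// Trivial stand-in for the metrics handler.
	let make_svc = make_service_fn(|_conn| async {
		Ok::<_, Infallible>(service_fn(|_req| async {
			Ok::<_, Infallible>(Response::new(Body::from("# metrics")))
		}))
	});

	let (signal, on_exit) = tokio::sync::oneshot::channel::<()>();
	let server = Server::bind(&([127, 0, 0, 1], 9615).into())
		.serve(make_svc)
		// Resolve either on error *or* once `signal` fires, so open
		// connections no longer keep the server alive forever.
		.with_graceful_shutdown(async {
			let _ = on_exit.await;
		});

	// Whoever owns `signal` stops the server with `let _ = signal.send(());`.
	// Dropping the sender, as done here, completes `on_exit` the same way.
	drop(signal);
	server.await
}
```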