diff --git a/.config/lychee.toml b/.config/lychee.toml
index 733b77ec0cff9e616ecc0f851d9a3ed5e8574636..1de9fcd559dd9ea14fb603b7740efebcb893ed93 100644
--- a/.config/lychee.toml
+++ b/.config/lychee.toml
@@ -32,12 +32,10 @@ exclude = [
   "https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp",
   "https://github.com/paritytech/substrate/frame/fast-unstake",
   "https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs",
+  "https://polkadot-try-runtime-node.parity-chains.parity.io/",
   "https://polkadot.network/the-path-of-a-parachain-block/",
-  "https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results",
   "https://research.web3.foundation/en/latest/polkadot/NPoS/3.%20Balancing.html",
   "https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model",
-  "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html",
-  "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html#-6.-practical-results",
   "https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology",
   "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html",
   "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model",
@@ -53,5 +51,6 @@ exclude = [
   "https://www.reddit.com/r/rust/comments/3spfh1/does_collect_allocate_more_than_once_while/",
   # 403 rate limited:
   "https://etherscan.io/block/11090290",
+  "https://subscan.io/",
   "https://substrate.stackexchange.com/.*",
 ]
diff --git a/.config/zepter.yaml b/.config/zepter.yaml
index f701392d16b15aab8351b730efa13f3abffe2406..9b3bd9d618c14e41f1dbf420aff3fee1677e2830 100644
--- a/.config/zepter.yaml
+++ b/.config/zepter.yaml
@@ -25,9 +25,13 @@ workflows:
       '--show-path',
       '--quiet',
     ]
-  # Same as `check`, but with the `--fix` flag.
+  # The umbrella crate uses more features, so we need to check those too:
+  check_umbrella:
+    - [ $check.0, '--features=serde,experimental,with-tracing,tuples-96', '-p=polkadot-sdk' ]
+  # Same as `check_*`, but with the `--fix` flag.
   default:
     - [ $check.0, '--fix' ]
+    - [ $check_umbrella.0, '--fix' ]

 # Will be displayed when any workflow fails:
 help:
diff --git a/.forklift/config.toml b/.forklift/config.toml
new file mode 100644
index 0000000000000000000000000000000000000000..ab3b2729a46d4e54dc77df1175d4ebe79eda46d0
--- /dev/null
+++ b/.forklift/config.toml
@@ -0,0 +1,33 @@
+[compression]
+type = "zstd"
+
+[compression.zstd]
+compressionLevel = 3
+
+[general]
+jobNameVariable = "CI_JOB_NAME"
+jobsBlackList = []
+logLevel = "warn"
+threadsCount = 6
+
+[cache]
+extraEnv = ["RUNTIME_METADATA_HASH"]
+
+[metrics]
+enabled = true
+pushEndpoint = "placeholder"
+
+[metrics.extraLabels]
+environment = "production"
+job_name = "$CI_JOB_NAME"
+project_name = "$CI_PROJECT_PATH"
+
+[storage]
+type = "s3"
+
+[storage.s3]
+accessKeyId = "placeholder"
+bucketName = "placeholder"
+concurrency = 10
+endpointUrl = "placeholder"
+secretAccessKey = "placeholder"
diff --git a/.github/scripts/deny-git-deps.py b/.github/scripts/deny-git-deps.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b831c9347f75bdc3c74c80d3af652c37e7ae459
--- /dev/null
+++ b/.github/scripts/deny-git-deps.py
@@ -0,0 +1,40 @@
+"""
+Script to deny Git dependencies in the Cargo workspace. Can be passed one optional argument for the
+root folder. If not provided, it will use the cwd.
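+The script exits non-zero on the first Git dependency that is not allow-listed in
+KNOWN_BAD_GIT_DEPS below.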
+
+## Usage
+    python3 .github/scripts/deny-git-deps.py polkadot-sdk
+"""
+
+import os
+import sys
+
+from cargo_workspace import Workspace, DependencyLocation
+
+KNOWN_BAD_GIT_DEPS = {
+    'simple-mermaid': ['xcm-docs'],
+    # Fix in
+    'bandersnatch_vrfs': ['sp-core'],
+}
+
+root = sys.argv[1] if len(sys.argv) > 1 else os.getcwd()
+workspace = Workspace.from_path(root)
+
+def check_dep(dep, used_by):
+    if dep.location != DependencyLocation.GIT:
+        return
+
+    if used_by in KNOWN_BAD_GIT_DEPS.get(dep.name, []):
+        print(f'🤨 Ignoring git dependency {dep.name} in {used_by}')
+    else:
+        print(f'🚫 Found git dependency {dep.name} in {used_by}')
+        sys.exit(1)
+
+# Check the workspace dependencies that can be inherited:
+for dep in workspace.dependencies:
+    check_dep(dep, "workspace")
+
+# And the dependencies of each crate:
+for crate in workspace.crates:
+    for dep in crate.dependencies:
+        check_dep(dep, crate.name)
diff --git a/.github/workflows/build-and-attach-release-runtimes.yml b/.github/workflows/build-and-attach-release-runtimes.yml
deleted file mode 100644
index 680a9ecffd312dba61c2eaee3a3e2e6a9d5b136c..0000000000000000000000000000000000000000
--- a/.github/workflows/build-and-attach-release-runtimes.yml
+++ /dev/null
@@ -1,65 +0,0 @@
-name: Build and Attach Runtimes to Releases/RC
-
-on:
-  release:
-    types:
-      - published
-
-env:
-  PROFILE: production
-
-jobs:
-  build_and_upload:
-    strategy:
-      matrix:
-        runtime:
-          - { name: westend, package: westend-runtime, path: polkadot/runtime/westend }
-          - { name: rococo, package: rococo-runtime, path: polkadot/runtime/rococo }
-          - { name: asset-hub-rococo, package: asset-hub-rococo-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-rococo }
-          - { name: asset-hub-westend, package: asset-hub-westend-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-westend }
-          - { name: bridge-hub-rococo, package: bridge-hub-rococo-runtime, path: cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo }
-          - { name: contracts-rococo, package: contracts-rococo-runtime, path: cumulus/parachains/runtimes/contracts/contracts-rococo }
-          - { name: collectives-westend, package: collectives-westend-runtime, path: cumulus/parachains/runtimes/collectives/collectives-westend }
-          - { name: glutton-westend, package: glutton-westend-runtime, path: cumulus/parachains/runtimes/glutton/glutton-westend }
-        build_config:
-          # Release build has logging disabled and no dev features
-          - { type: on-chain-release, opts: --features on-chain-release-build }
-          # Debug build has logging enabled and developer features
-          - { type: dev-debug-build, opts: --features try-runtime }
-
-    runs-on: ubuntu-22.04
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-
-      - name: Build ${{ matrix.runtime.name }} ${{ matrix.build_config.type }}
-        id: srtool_build
-        uses: chevdor/srtool-actions@v0.9.2
-        env:
-          BUILD_OPTS: ${{ matrix.build_config.opts }}
-        with:
-          chain: ${{ matrix.runtime.name }}
-          package: ${{ matrix.runtime.package }}
-          runtime_dir: ${{ matrix.runtime.path }}
-          profile: ${{ env.PROFILE }}
-
-      - name: Set up paths and runtime names
-        id: setup
-        run: |
-          RUNTIME_BLOB_NAME=$(echo ${{ matrix.runtime.package }} | sed 's/-/_/g').compact.compressed.wasm
-          PREFIX=${{ matrix.build_config.type == 'dev-debug-build' && 'DEV_DEBUG_BUILD__' || '' }}
-
-          echo "RUNTIME_BLOB_NAME=$RUNTIME_BLOB_NAME" >> $GITHUB_ENV
-          echo "ASSET_PATH=./${{ matrix.runtime.path }}/target/srtool/${{ env.PROFILE }}/wbuild/${{ matrix.runtime.package }}/$RUNTIME_BLOB_NAME" >> $GITHUB_ENV
-          echo "ASSET_NAME=$PREFIX$RUNTIME_BLOB_NAME" >> $GITHUB_ENV
-
-      - name: Upload Runtime to Release
-        uses: actions/upload-release-asset@v1
-        with:
-          upload_url: ${{ github.event.release.upload_url }}
-          asset_path: ${{ env.ASSET_PATH }}
-          asset_name: ${{ env.ASSET_NAME }}
-          asset_content_type: application/octet-stream
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/check-changed-files.yml b/.github/workflows/check-changed-files.yml
new file mode 100644
index 0000000000000000000000000000000000000000..657c05cd047db40af642886eb2710e7324dd40ce
--- /dev/null
+++ b/.github/workflows/check-changed-files.yml
@@ -0,0 +1,57 @@
+# Reusable workflow to perform checks and generate conditions for other workflows.
+# Currently it checks if any Rust (build-related) file is changed
+# and if the current (caller) workflow file is changed.
+# Example:
+#
+# jobs:
+#   changes:
+#     permissions:
+#       pull-requests: read
+#     uses: ./.github/workflows/check-changed-files.yml
+#   some-job:
+#     needs: changes
+#     if: ${{ needs.changes.outputs.rust }}
+#   .......
+
+name: Check changed files
+
+on:
+  workflow_call:
+    # Map the workflow outputs to job outputs
+    outputs:
+      rust:
+        value: ${{ jobs.changes.outputs.rust }}
+        description: 'true if any of the build-related OR current (caller) workflow files have changed'
+      current-workflow:
+        value: ${{ jobs.changes.outputs.current-workflow }}
+        description: 'true if current (caller) workflow file has changed'
+
+jobs:
+  changes:
+    runs-on: ubuntu-latest
+    permissions:
+      pull-requests: read
+    outputs:
+      # true if any build-related file OR the current (caller) workflow file is changed
+      rust: ${{ steps.filter.outputs.rust == 'true' || steps.filter.outputs.current-workflow == 'true' }}
+      current-workflow: ${{ steps.filter.outputs.current-workflow }}
+    steps:
+      - id: current-file
+        run: echo "current-workflow-file=$(echo ${{ github.workflow_ref }} | sed -nE "s/.*(\.github\/workflows\/[a-zA-Z0-9_-]*\.y[a]?ml)@refs.*/\1/p")" >> $GITHUB_OUTPUT
+      - run: echo "${{ steps.current-file.outputs.current-workflow-file }}"
+      # For pull requests it's not necessary to check out the code
+      - id: filter
+        uses: dorny/paths-filter@v3
+        with:
+          predicate-quantifier: 'every'
+          # current-workflow - check if the current (caller) workflow file is changed
+          # rust - check if any Rust (build-related) file is changed
+          filters: |
+            current-workflow:
+              - '${{ steps.current-file.outputs.current-workflow-file }}'
+            rust:
+              - '**/*'
+              - '!.github/**/*'
+              - '!prdoc/**/*'
+              - '!docs/**/*'
+      #
\ No newline at end of file
diff --git a/.github/workflows/check-features.yml b/.github/workflows/check-features.yml
index 53d6ac6b4dbfd7e3ccf1ca09ad9e1e70a49a9ff9..d34b3d52c5332b61d9a90dc03de938f154de5c7e 100644
--- a/.github/workflows/check-features.yml
+++ b/.github/workflows/check-features.yml
@@ -13,7 +13,7 @@ jobs:
       - name: Check
         uses: hack-ink/cargo-featalign-action@bea88a864d6ca7d0c53c26f1391ce1d431dc7f34 # v0.1.1
         with:
-          crate: substrate/bin/node/runtime
+          crate: templates/parachain/runtime/
           features: std,runtime-benchmarks,try-runtime
           ignore: sc-executor
           default-std: true
diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml
index c32b6fcf89e06bb56cefc0517e1dcab1d1ef0f37..3bc95305f7467ebbede90526eadb156b89b1e7f9 100644
--- a/.github/workflows/check-licenses.yml
+++ b/.github/workflows/check-licenses.yml
@@ -10,6 +10,7 @@ permissions:
 jobs:
   check-licenses:
     runs-on: ubuntu-latest
+    timeout-minutes: 10
     env:
       LICENSES: "'Apache-2.0' 'GPL-3.0-only' 'GPL-3.0-or-later WITH Classpath-exception-2.0'"
       NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml
deleted file mode 100644
index 2b8a66db35b3adacea4f131a881103d48e3704ae..0000000000000000000000000000000000000000
--- a/.github/workflows/check-markdown.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-name: Check Markdown
-
-on:
-  pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
-  merge_group:
-
-permissions:
-  packages: read
-
-jobs:
-  lint-markdown:
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-
-      - uses: actions/setup-node@v4.0.1
-        with:
-          node-version: "18.x"
-          registry-url: "https://npm.pkg.github.com"
-          scope: "@paritytech"
-
-      - name: Install tooling
-        run: |
-          npm install -g markdownlint-cli
-          markdownlint --version
-
-      - name: Check Markdown
-        env:
-          CONFIG: .github/.markdownlint.yaml
-        run: |
-          echo "Checking markdown formatting. More info: docs/contributor/markdown_linting.md"
-          markdownlint --config "$CONFIG" --ignore target .
diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml
new file mode 100644
index 0000000000000000000000000000000000000000..984e264d0d1d1d9c8ac1730c51950194cce56276
--- /dev/null
+++ b/.github/workflows/check-runtime-migration.yml
@@ -0,0 +1,122 @@
+name: check-runtime-migration
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+  merge_group:
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+env:
+  FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }}
+  FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }}
+  FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }}
+  FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }}
+  FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }}
+
+jobs:
+  set-image:
+    # GitHub Actions allows using 'env' in a container context.
+    # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322
+    # This workaround sets the container image for each job using 'set-image' job output.
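+    # `.github/env` is expected to hold a single `IMAGE=<registry/image:tag>` line (an assumption
+    # about its format); appending it to $GITHUB_OUTPUT exposes it as the `IMAGE` step output below.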
+    runs-on: ubuntu-latest
+    outputs:
+      IMAGE: ${{ steps.set_image.outputs.IMAGE }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - id: set_image
+        run: cat .github/env >> $GITHUB_OUTPUT
+  # rococo and westend are disabled for now (no access to parity-chains.parity.io)
+  check-runtime-migration:
+    runs-on: arc-runners-polkadot-sdk-beefy
+    timeout-minutes: 30
+    needs: [set-image]
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    strategy:
+      fail-fast: false
+      matrix:
+        network: [
+          # westend,
+          # rococo,
+          asset-hub-westend,
+          asset-hub-rococo,
+          bridge-hub-westend,
+          bridge-hub-rococo,
+          contracts-rococo,
+          collectives-westend,
+          coretime-rococo,
+        ]
+        include:
+          # - network: westend
+          #   package: westend-runtime
+          #   wasm: westend_runtime.compact.compressed.wasm
+          #   uri: "wss://westend-try-runtime-node.parity-chains.parity.io:443"
+          #   subcommand_extra_args: "--no-weight-warnings"
+          #   command_extra_args: ""
+          # - network: rococo
+          #   package: rococo-runtime
+          #   wasm: rococo_runtime.compact.compressed.wasm
+          #   uri: "wss://rococo-try-runtime-node.parity-chains.parity.io:443"
+          #   subcommand_extra_args: "--no-weight-warnings"
+          #   command_extra_args: ""
+          - network: asset-hub-westend
+            package: asset-hub-westend-runtime
+            wasm: asset_hub_westend_runtime.compact.compressed.wasm
+            uri: "wss://westend-asset-hub-rpc.polkadot.io:443"
+            subcommand_extra_args: ""
+            command_extra_args: ""
+          - network: "asset-hub-rococo"
+            package: "asset-hub-rococo-runtime"
+            wasm: "asset_hub_rococo_runtime.compact.compressed.wasm"
+            uri: "wss://rococo-asset-hub-rpc.polkadot.io:443"
+            subcommand_extra_args: ""
+            command_extra_args: ""
+          - network: "bridge-hub-westend"
+            package: "bridge-hub-westend-runtime"
+            wasm: "bridge_hub_westend_runtime.compact.compressed.wasm"
+            uri: "wss://westend-bridge-hub-rpc.polkadot.io:443"
+          - network: "bridge-hub-rococo"
+            package: "bridge-hub-rococo-runtime"
+            wasm: "bridge_hub_rococo_runtime.compact.compressed.wasm"
+            uri: "wss://rococo-bridge-hub-rpc.polkadot.io:443"
+          - network: "contracts-rococo"
+            package: "contracts-rococo-runtime"
+            wasm: "contracts_rococo_runtime.compact.compressed.wasm"
+            uri: "wss://rococo-contracts-rpc.polkadot.io:443"
+          - network: "collectives-westend"
+            package: "collectives-westend-runtime"
+            wasm: "collectives_westend_runtime.compact.compressed.wasm"
+            uri: "wss://westend-collectives-rpc.polkadot.io:443"
+            command_extra_args: "--disable-spec-name-check"
+          - network: "coretime-rococo"
+            package: "coretime-rococo-runtime"
+            wasm: "coretime_rococo_runtime.compact.compressed.wasm"
+            uri: "wss://rococo-coretime-rpc.polkadot.io:443"
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: script
+        run: |
+          echo "Running ${{ matrix.network }} runtime migration check"
+          export RUST_LOG=remote-ext=debug,runtime=debug
+
+          echo "---------- Downloading try-runtime CLI ----------"
+          curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.4/try-runtime-x86_64-unknown-linux-musl -o try-runtime
+          chmod +x ./try-runtime
+          echo "Using try-runtime-cli version:"
+          ./try-runtime --version
+
+          echo "---------- Building ${{ matrix.package }} runtime ----------"
+          time forklift cargo build --release --locked -p ${{ matrix.package }} --features try-runtime
+
+          echo "---------- Executing on-runtime-upgrade for ${{ matrix.network }} ----------"
+          time ./try-runtime ${{ matrix.command_extra_args }} \
+            --runtime ./target/release/wbuild/${{ matrix.package }}/${{ matrix.wasm }} \
+            on-runtime-upgrade --disable-spec-version-check --checks=all ${{ matrix.subcommand_extra_args }} live --uri ${{ matrix.uri }}
+          sleep 5
diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml
index f0e076e8a1683a0cb5a46aa9010ec9bd3d1bc898..04c63f4192b29ca1773d1018698b2abe6a666e1c 100644
--- a/.github/workflows/check-semver.yml
+++ b/.github/workflows/check-semver.yml
@@ -38,6 +38,7 @@ jobs:
       run: |
         export CARGO_TARGET_DIR=target
         export RUSTFLAGS='-A warnings -A missing_docs'
+        export SKIP_WASM_BUILD=1
        if ! parity-publish --color always prdoc --since old --validate prdoc/pr_$PR.prdoc --toolchain nightly-2024-03-01 -v; then
          cat <
-      python3 .github/scripts/check-workspace.py .
-      --exclude
-      "substrate/frame/contracts/fixtures/build"
-      "substrate/frame/contracts/fixtures/contracts/common"
diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c4382d1b9b40ee6226a5014b13fa3eafb2d1d883
--- /dev/null
+++ b/.github/workflows/checks-quick.yml
@@ -0,0 +1,149 @@
+# Checks that don't require heavy lifting, like formatting, linting, etc.
+name: quick-checks
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+  merge_group:
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+permissions: {}
+
+jobs:
+  set-image:
+    # GitHub Actions allows using 'env' in a container context.
+    # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322
+    # This workaround sets the container image for each job using 'set-image' job output.
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    outputs:
+      IMAGE: ${{ steps.set_image.outputs.IMAGE }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - id: set_image
+        run: cat .github/env >> $GITHUB_OUTPUT
+  fmt:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    needs: [set-image]
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    steps:
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - name: Cargo fmt
+        run: cargo +nightly fmt --all -- --check
+  check-dependency-rules:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    steps:
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - name: check dependency rules
+        run: |
+          cd substrate/
+          ../.gitlab/ensure-deps.sh
+  check-rust-feature-propagation:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    needs: [set-image]
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    steps:
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - name: run zepter
+        run: zepter run check
+  test-rust-features:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    needs: [set-image]
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    steps:
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - name: run rust features
+        run: bash .gitlab/rust-features.sh .
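+  # The checks above can be reproduced locally with the same commands, e.g.
+  # `cargo +nightly fmt --all -- --check`, `zepter run check` and
+  # `bash .gitlab/rust-features.sh .` from the repository root.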
+  check-toml-format:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    needs: [set-image]
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    steps:
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - name: check toml format
+        run: |
+          taplo format --check --config .config/taplo.toml
+          echo 'Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues'
+  check-workspace:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    steps:
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.0 (22. Sep 2023)
+      - name: install python deps
+        run: |
+          sudo apt-get update && sudo apt-get install -y python3-pip python3
+          pip3 install toml "cargo-workspace>=1.2.6"
+      - name: check integrity
+        run: >
+          python3 .github/scripts/check-workspace.py .
+          --exclude
+          "substrate/frame/contracts/fixtures/build"
+          "substrate/frame/contracts/fixtures/contracts/common"
+      - name: deny git deps
+        run: python3 .github/scripts/deny-git-deps.py .
+  check-markdown:
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - name: Setup Node.js
+        uses: actions/setup-node@v4.0.1
+        with:
+          node-version: "18.x"
+          registry-url: "https://npm.pkg.github.com"
+          scope: "@paritytech"
+      - name: Install tooling
+        run: |
+          npm install -g markdownlint-cli
+          markdownlint --version
+      - name: Check Markdown
+        env:
+          CONFIG: .github/.markdownlint.yaml
+        run: |
+          echo "Checking markdown formatting. More info: docs/contributor/markdown_linting.md"
+          markdownlint --config "$CONFIG" --ignore target .
+  check-umbrella:
+    runs-on: arc-runners-polkadot-sdk
+    timeout-minutes: 10
+    needs: [set-image]
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    steps:
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.0 (22. Sep 2023)
+      - name: install python deps
+        run: |
+          sudo apt-get update && sudo apt-get install -y python3-pip python3
+          pip3 install "cargo-workspace>=1.2.4" toml
+      - name: check umbrella correctness
+        run: |
+          python3 scripts/generate-umbrella.py --sdk . --version 0.1.0
+          cargo +nightly fmt --all
+
+          if [ -n "$(git status --porcelain)" ]; then
+            cat <
 Cargo.temp
           mv Cargo.temp ./templates/${{ matrix.template }}/Cargo.toml
-
-          toml get Cargo.toml 'workspace.lints' --output-toml >> ./templates/${{ matrix.template }}/Cargo.toml
-
-          toml get Cargo.toml 'workspace.dependencies' --output-toml >> ./templates/${{ matrix.template }}/Cargo.toml
         working-directory: polkadot-sdk
       - name: Print the result Cargo.tomls for debugging
         if: runner.debug == '1'
@@ -120,6 +116,18 @@ jobs:
       - name: Copy over the new changes
         run: |
           cp -r polkadot-sdk/templates/${{ matrix.template }}/* "${{ env.template-path }}/"
+      - name: Copy over required workspace dependencies
+        run: |
+          echo -e "\n[workspace.dependencies]" >> Cargo.toml
+          set +e
+          # If a workspace dependency is required..
+          while cargo tree --depth 1 --prefix none --no-dedupe 2>&1 | grep 'was not found in `workspace.dependencies`'; do
+            # Get its name..
+            missing_dep=$(cargo tree --depth 1 --prefix none --no-dedupe 2>&1 | grep 'was not found in `workspace.dependencies`' | sed -E 's/(.*)`dependency.(.*)` was not found in `workspace.dependencies`/\2/')
+            # And copy the dependency from the monorepo.
+            toml get ../polkadot-sdk/Cargo.toml 'workspace.dependencies' --output-toml | grep "^${missing_dep} = " >> Cargo.toml
+          done;
+        working-directory: "${{ env.template-path }}"
   # 3. Verify the build. Push the changes or create a PR.
@@ -148,12 +156,12 @@ jobs:
           token: ${{ steps.app_token.outputs.token }}
           add-paths: |
             ./*
-          title: "[Don't merge] Update the ${{ matrix.template }} template"
+          title: "[Don't merge] Update the ${{ matrix.template }} template to ${{ github.event.inputs.crate_release_version }}"
           body: "The template has NOT been successfully built and needs to be inspected."
-          branch: "update-template/${{ github.event_name }}"
+          branch: "update-template/${{ github.event.inputs.crate_release_version }}"
       - name: Push changes
         run: |
           git add -A .
-          git commit --allow-empty -m "Update template triggered by ${{ github.event_name }}"
+          git commit --allow-empty -m "Update to ${{ github.event.inputs.crate_release_version }} triggered by ${{ github.event_name }}"
           git push
         working-directory: "${{ env.template-path }}"
diff --git a/.github/workflows/check-publish.yml b/.github/workflows/publish-check-crates.yml
similarity index 100%
rename from .github/workflows/check-publish.yml
rename to .github/workflows/publish-check-crates.yml
diff --git a/.github/workflows/claim-crates.yml b/.github/workflows/publish-claim-crates.yml
similarity index 100%
rename from .github/workflows/claim-crates.yml
rename to .github/workflows/publish-claim-crates.yml
diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/publish-subsystem-benchmarks.yml
similarity index 100%
rename from .github/workflows/subsystem-benchmarks.yml
rename to .github/workflows/publish-subsystem-benchmarks.yml
diff --git a/.github/workflows/quick-checks.yml b/.github/workflows/quick-checks.yml
deleted file mode 100644
index 7bf1983a1f69c4de2d6611f0023216eaf2f55466..0000000000000000000000000000000000000000
--- a/.github/workflows/quick-checks.yml
+++ /dev/null
@@ -1,81 +0,0 @@
-# Checks that doesn't require heavy lifting, like formatting, linting, etc.
-name: quick-checks
-
-on:
-  pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
-  merge_group:
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  set-image:
-    # GitHub Actions allows using 'env' in a container context.
-    # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322
-    # This workaround sets the container image for each job using 'set-image' job output.
-    runs-on: arc-runners-polkadot-sdk-default
-    timeout-minutes: 10
-    outputs:
-      IMAGE: ${{ steps.set_image.outputs.IMAGE }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - id: set_image
-        run: cat .github/env >> $GITHUB_OUTPUT
-  fmt:
-    runs-on: arc-runners-polkadot-sdk-default
-    timeout-minutes: 10
-    needs: [set-image]
-    container:
-      image: ${{ needs.set-image.outputs.IMAGE }}
-    steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - name: Cargo fmt
-        run: cargo +nightly fmt --all -- --check
-  check-dependency-rules:
-    runs-on: arc-runners-polkadot-sdk-default
-    timeout-minutes: 10
-    needs: [set-image]
-    container:
-      image: ${{ needs.set-image.outputs.IMAGE }}
-    steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - name: check dependency rules
-        run: |
-          cd substrate/
-          ../.gitlab/ensure-deps.sh
-  check-rust-feature-propagation:
-    runs-on: arc-runners-polkadot-sdk-default
-    # runs-on: ubuntu-latest
-    timeout-minutes: 10
-    needs: [set-image]
-    container:
-      image: ${{ needs.set-image.outputs.IMAGE }}
-    steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - name: run zepter
-        run: zepter run check
-  test-rust-features:
-    runs-on: arc-runners-polkadot-sdk-default
-    # runs-on: ubuntu-latest
-    timeout-minutes: 10
-    needs: [set-image]
-    container:
-      image: ${{ needs.set-image.outputs.IMAGE }}
-    steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - name: run rust features
-        run: bash .gitlab/rust-features.sh .
-  check-toml-format:
-    runs-on: arc-runners-polkadot-sdk-default
-    timeout-minutes: 10
-    needs: [set-image]
-    container:
-      image: ${{ needs.set-image.outputs.IMAGE }}
-    steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - name: check toml format
-        run: |
-          taplo format --check --config .config/taplo.toml
-          echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues"
diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml
index a9e521051d04079a49af76c78681af8ae52ae5da..f39eb4c1716ebea4fb3207ea1a2ecc8227037448 100644
--- a/.github/workflows/release-30_publish_release_draft.yml
+++ b/.github/workflows/release-30_publish_release_draft.yml
@@ -23,13 +23,44 @@ jobs:
           echo "stable=$RUST_STABLE_VERSION" >> $GITHUB_OUTPUT

   build-runtimes:
-    uses: "./.github/workflows/srtool.yml"
+    uses: "./.github/workflows/release-srtool.yml"
     with:
       excluded_runtimes: "substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template"

+  build-binaries:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        binary: [ frame-omni-bencher, chain-spec-builder ]
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0
+
+      - name: Install protobuf-compiler
+        run: |
+          sudo apt update
+          sudo apt install -y protobuf-compiler
+
+      - name: Build ${{ matrix.binary }} binary
+        run: |
+          if [[ ${{ matrix.binary }} =~ chain-spec-builder ]]; then
+            cargo build --locked --profile=production -p staging-${{ matrix.binary }} --bin ${{ matrix.binary }}
+            target/production/${{ matrix.binary }} -h
+          else
+            cargo build --locked --profile=production -p ${{ matrix.binary }}
+            target/production/${{ matrix.binary }} --version
+          fi
+
+      - name: Upload ${{ matrix.binary }} binary
+        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+        with:
+          name: ${{ matrix.binary }}
+          path: target/production/${{ matrix.binary }}
+
+
   publish-release-draft:
     runs-on: ubuntu-latest
-    needs: [get-rust-versions, build-runtimes]
+    needs: [ get-rust-versions, build-runtimes ]
     outputs:
       release_url: ${{ steps.create-release.outputs.html_url }}
       asset_upload_url: ${{ steps.create-release.outputs.upload_url }}
@@ -37,15 +68,15 @@ jobs:
       - name: Checkout
         uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0

+      - name: Download artifacts
+        uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
+
       - name: Prepare tooling
         run: |
           URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb
           wget $URL -O tera.deb
           sudo dpkg -i tera.deb

-      - name: Download artifacts
-        uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
-
       - name: Prepare draft
         id: draft
         env:
@@ -129,6 +160,30 @@ jobs:
           asset_name: ${{ matrix.chain }}_runtime-v${{ env.SPEC }}.compact.compressed.wasm
           asset_content_type: application/wasm

+  publish-binaries:
+    needs: [ publish-release-draft, build-binaries ]
+    continue-on-error: true
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        binary: [frame-omni-bencher, chain-spec-builder]
+
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
+        with:
+          name: ${{ matrix.binary }}
+
+      - name: Upload ${{ matrix.binary }} binary
+        uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 #v1.0.2
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.publish-release-draft.outputs.asset_upload_url }}
+          asset_path: ${{ github.workspace}}/${{ matrix.binary }}
+          asset_name: ${{ matrix.binary }}
+          asset_content_type: application/octet-stream
+
   post_to_matrix:
     runs-on: ubuntu-latest
     needs: publish-release-draft
diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml
index 67e93ee96574de1f1e3e29f1bf6d90085865100d..4679f58578f7906b75e4a3d6d623ebc1d55df40d 100644
--- a/.github/workflows/release-50_publish-docker.yml
+++ b/.github/workflows/release-50_publish-docker.yml
@@ -27,6 +27,7 @@ on:
         options:
           - polkadot
           - polkadot-parachain
+          - chain-spec-builder

       release_id:
         description: |
@@ -74,7 +75,7 @@ env:
 jobs:
   fetch-artifacts: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build
-    if: ${{ inputs.binary == 'polkadot-parachain' || inputs.image_type == 'rc' }}
+    if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }}
     runs-on: ubuntu-latest
     steps:
@@ -97,7 +98,7 @@ jobs:
       - name: Fetch rc artifacts or release artifacts from s3 based on version
         #this step runs only if the workflow is triggered manually
-        if: ${{ env.EVENT_NAME == 'workflow_dispatch' }}
+        if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary != 'chain-spec-builder'}}
         run: |
           . ./.github/scripts/common/lib.sh
@@ -106,15 +107,22 @@ jobs:
           fetch_release_artifacts_from_s3

-      - name: Cache the artifacts
-        uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3
+      - name: Fetch chain-spec-builder rc artifacts or release artifacts based on release id
+        #this step runs only if the workflow is triggered manually and only for chain-spec-builder
+        if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary == 'chain-spec-builder' }}
+        run: |
+          . ./.github/scripts/common/lib.sh
+          RELEASE_ID=$(check_release_id "${{ inputs.release_id }}")
+          fetch_release_artifacts
+
+      - name: Upload artifacts
+        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
         with:
-          key: artifacts-${{ env.BINARY }}-${{ github.sha }}
-          path: |
-            ./release-artifacts/${{ env.BINARY }}/**/*
+          name: release-artifacts
+          path: release-artifacts/${{ env.BINARY }}/**/*

   build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build
-    if: ${{ inputs.binary == 'polkadot-parachain' || inputs.image_type == 'rc' }}
+    if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }}
     runs-on: ubuntu-latest
     needs: fetch-artifacts
     environment: release
@@ -123,26 +131,23 @@ jobs:
       - name: Checkout sources
         uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1

-      - name: Get artifacts from cache
-        uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3
-        with:
-          key: artifacts-${{ env.BINARY }}-${{ github.sha }}
-          fail-on-cache-miss: true
-          path: |
-            ./release-artifacts/${{ env.BINARY }}/**/*
+      - name: Download artifacts
+        uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4

       - name: Check sha256 ${{ env.BINARY }}
-        working-directory: ./release-artifacts/${{ env.BINARY }}
+        if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }}
+        working-directory: release-artifacts
         run: |
-          . ../../.github/scripts/common/lib.sh
+          . ../.github/scripts/common/lib.sh
           echo "Checking binary $BINARY"
           check_sha256 $BINARY && echo "OK" || echo "ERR"

       - name: Check GPG ${{ env.BINARY }}
-        working-directory: ./release-artifacts/${{ env.BINARY }}
+        if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'polkadot' }}
+        working-directory: release-artifacts
         run: |
-          . ../../.github/scripts/common/lib.sh
+          . ../.github/scripts/common/lib.sh
           import_gpg_keys
           check_gpg $BINARY
@@ -164,20 +169,21 @@ jobs:
           echo "No tag, doing without"

       - name: Fetch release tags
-        working-directory: ./release-artifacts/${{ env.BINARY }}
+        working-directory: release-artifacts
         if: ${{ env.IMAGE_TYPE == 'release'}}
         id: fetch_release_refs
         run: |
           chmod a+rx $BINARY
-          VERSION=$(./$BINARY --version | awk '{ print $2 }' )
+          [[ $BINARY != 'chain-spec-builder' ]] && VERSION=$(./$BINARY --version | awk '{ print $2 }' )
+
           release=$( echo $VERSION | cut -f1 -d- )
           echo "tag=latest" >> $GITHUB_OUTPUT
           echo "release=${release}" >> $GITHUB_OUTPUT

-      - name: Build Injected Container image for polkadot rc
-        if: ${{ env.BINARY == 'polkadot' }}
+      - name: Build Injected Container image for polkadot rc or chain-spec-builder
+        if: ${{ env.BINARY == 'polkadot' || env.BINARY == 'chain-spec-builder' }}
         env:
-          ARTIFACTS_FOLDER: ./release-artifacts
+          ARTIFACTS_FOLDER: release-artifacts
           IMAGE_NAME: ${{ env.BINARY }}
           OWNER: ${{ env.DOCKER_OWNER }}
           TAGS: ${{ join(steps.fetch_rc_refs.outputs.*, ',') || join(steps.fetch_release_refs.outputs.*, ',') }}
@@ -189,7 +195,7 @@ jobs:
       - name: Build Injected Container image for polkadot-parachain
         if: ${{ env.BINARY == 'polkadot-parachain' }}
         env:
-          ARTIFACTS_FOLDER: ./release-artifacts
+          ARTIFACTS_FOLDER: release-artifacts
           IMAGE_NAME: ${{ env.BINARY }}
           OWNER: ${{ env.DOCKER_OWNER }}
           DOCKERFILE: docker/dockerfiles/polkadot-parachain/polkadot-parachain_injected.Dockerfile
@@ -219,7 +225,11 @@ jobs:
           RELEASE_TAG: ${{ steps.fetch_rc_refs.outputs.release || steps.fetch_release_refs.outputs.release }}
         run: |
           echo "Checking tag ${RELEASE_TAG} for image ${REGISTRY}/${DOCKER_OWNER}/${BINARY}"
-          $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} --version
+          if [[ ${BINARY} == 'chain-spec-builder' ]]; then
+            $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG}
+          else
+            $ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} --version
+          fi

   fetch-latest-debian-package-version: # this job will be triggered for polkadot release build
     if: ${{ inputs.binary == 'polkadot' && inputs.image_type == 'release' }}
diff --git a/.github/workflows/check-runtimes.yml b/.github/workflows/release-check-runtimes.yml
similarity index 100%
rename from .github/workflows/check-runtimes.yml
rename to .github/workflows/release-check-runtimes.yml
diff --git a/.github/workflows/srtool.yml b/.github/workflows/release-srtool.yml
similarity index 100%
rename from .github/workflows/srtool.yml
rename to .github/workflows/release-srtool.yml
diff --git a/.github/workflows/test-github-actions.yml b/.github/workflows/test-github-actions.yml
deleted file mode 100644
index e35ee09948634e37d4d601bea262140c87ff6c98..0000000000000000000000000000000000000000
--- a/.github/workflows/test-github-actions.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-name: test-github-actions
-
-on:
-  pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
-  merge_group:
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
-
-jobs:
-  set-image:
-    # GitHub Actions allows using 'env' in a container context.
-    # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322
-    # This workaround sets the container image for each job using 'set-image' job output.
-    runs-on: ubuntu-latest
-    outputs:
-      IMAGE: ${{ steps.set_image.outputs.IMAGE }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - id: set_image
-        run: cat .github/env >> $GITHUB_OUTPUT
-  test-linux-stable-int:
-    runs-on: arc-runners-polkadot-sdk-beefy
-    timeout-minutes: 30
-    needs: [set-image]
-    container:
-      image: ${{ needs.set-image.outputs.IMAGE }}
-    env:
-      RUSTFLAGS: "-C debug-assertions -D warnings"
-      RUST_BACKTRACE: 1
-      WASM_BUILD_NO_COLOR: 1
-      WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
-      # Ensure we run the UI tests.
-      RUN_UI_TESTS: 1
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: script
-        run: WASM_BUILD_NO_COLOR=1 time cargo test -p staging-node-cli --release --locked -- --ignored
-  quick-benchmarks:
-    runs-on: arc-runners-polkadot-sdk-beefy
-    timeout-minutes: 30
-    needs: [set-image]
-    container:
-      image: ${{ needs.set-image.outputs.IMAGE }}
-    env:
-      RUSTFLAGS: "-C debug-assertions -D warnings"
-      RUST_BACKTRACE: "full"
-      WASM_BUILD_NO_COLOR: 1
-      WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: script
-        run: time cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet
diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5fdfabc437fe721339af817f12aea1a58a6c0346
--- /dev/null
+++ b/.github/workflows/tests-linux-stable.yml
@@ -0,0 +1,81 @@
+# GHA for test-linux-stable-int, test-linux-stable, test-linux-stable-oldkernel
+name: tests linux stable
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+  merge_group:
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+env:
+  FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }}
+  FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }}
+  FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }}
+  FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }}
+  FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }}
+
+jobs:
+
+  changes:
+    permissions:
+      pull-requests: read
+    uses: ./.github/workflows/check-changed-files.yml
+
+  set-image:
+    # GitHub Actions allows using 'env' in a container context.
+    # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322
+    # This workaround sets the container image for each job using 'set-image' job output.
+    needs: changes
+    if: ${{ needs.changes.outputs.rust }}
+    runs-on: ubuntu-latest
+    outputs:
+      IMAGE: ${{ steps.set_image.outputs.IMAGE }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - id: set_image
+        run: cat .github/env >> $GITHUB_OUTPUT
+
+  test-linux-stable-int:
+    needs: [set-image, changes]
+    if: ${{ needs.changes.outputs.rust }}
+    runs-on: arc-runners-polkadot-sdk-beefy
+    timeout-minutes: 30
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    env:
+      RUSTFLAGS: "-C debug-assertions -D warnings"
+      RUST_BACKTRACE: 1
+      WASM_BUILD_NO_COLOR: 1
+      WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
+      # Ensure we run the UI tests.
+      RUN_UI_TESTS: 1
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: script
+        run: WASM_BUILD_NO_COLOR=1 time forklift cargo test -p staging-node-cli --release --locked -- --ignored
+
+  # https://github.com/paritytech/ci_cd/issues/864
+  test-linux-stable-runtime-benchmarks:
+    needs: [set-image, changes]
+    if: ${{ needs.changes.outputs.rust }}
+    runs-on: arc-runners-polkadot-sdk-beefy
+    timeout-minutes: 30
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    env:
+      RUST_TOOLCHAIN: stable
+      # Enable debug assertions since we are running optimized builds for testing
+      # but still want to have debug assertions.
+      RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: script
+        run: time forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 0000000000000000000000000000000000000000..293acadc4e6a892fea9aa7fa3686cd821606992a
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,98 @@
+name: tests
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+  merge_group:
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+env:
+  FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }}
+  FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }}
+  FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }}
+  FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }}
+  FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }}
+
+jobs:
+
+  changes:
+    permissions:
+      pull-requests: read
+    uses: ./.github/workflows/check-changed-files.yml
+
+  set-image:
+    # GitHub Actions allows using 'env' in a container context.
+    # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322
+    # This workaround sets the container image for each job using 'set-image' job output.
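+    # The test jobs below call cargo through `forklift`, which caches build artifacts remotely;
+    # the cache backend is configured via the FORKLIFT_* variables above and .forklift/config.toml.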
+    runs-on: ubuntu-latest
+    outputs:
+      IMAGE: ${{ steps.set_image.outputs.IMAGE }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - id: set_image
+        run: cat .github/env >> $GITHUB_OUTPUT
+
+  quick-benchmarks:
+    needs: [set-image, changes]
+    if: ${{ needs.changes.outputs.rust }}
+    runs-on: arc-runners-polkadot-sdk-beefy
+    timeout-minutes: 30
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    env:
+      RUSTFLAGS: "-C debug-assertions -D warnings"
+      RUST_BACKTRACE: "full"
+      WASM_BUILD_NO_COLOR: 1
+      WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: script
+        run: time forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet
+
+  # cf https://github.com/paritytech/polkadot-sdk/issues/1652
+  test-syscalls:
+    needs: [set-image, changes]
+    if: ${{ needs.changes.outputs.rust }}
+    runs-on: arc-runners-polkadot-sdk-beefy
+    timeout-minutes: 30
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    continue-on-error: true # this rarely triggers in practice
+    env:
+      SKIP_WASM_BUILD: 1
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: script
+        run: |
+          forklift cargo build --locked --profile production --target x86_64-unknown-linux-musl --bin polkadot-execute-worker --bin polkadot-prepare-worker
+          cd polkadot/scripts/list-syscalls
+          ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-execute-worker --only-used-syscalls | diff -u execute-worker-syscalls -
+          ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-prepare-worker --only-used-syscalls | diff -u prepare-worker-syscalls -
+      # todo:
+      #   after_script:
+      #     - if [[ "$CI_JOB_STATUS" == "failed" ]]; then
+      #         printf "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n";
+      #       fi
+
+  cargo-check-all-benches:
+    needs: [set-image, changes]
+    if: ${{ needs.changes.outputs.rust }}
+    runs-on: arc-runners-polkadot-sdk-beefy
+    timeout-minutes: 30
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    env:
+      SKIP_WASM_BUILD: 1
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: script
+        run: time forklift cargo check --all --benches
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 5e57dd86f14166e695f1c64b6b5aee56529a4781..73a8c52c448f72d12e510d65b2f7ff38469856f0 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -120,7 +120,7 @@ default:
 .forklift-cache:
   before_script:
     - mkdir ~/.forklift
-    - cp $FL_FORKLIFT_CONFIG ~/.forklift/config.toml
+    - cp .forklift/config.toml ~/.forklift/config.toml
     - >
       if [ "$FORKLIFT_BYPASS" != "true" ]; then
         echo "FORKLIFT_BYPASS not set";
diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml
index 6fb8a97fe95821886c416d97224fb21fd0f2897b..5c1a667a313ce391f0c790881d87f48ebd79d073 100644
--- a/.gitlab/pipeline/check.yml
+++ b/.gitlab/pipeline/check.yml
@@ -146,96 +146,6 @@ check-runtime-migration-rococo:
     URI: "wss://rococo-try-runtime-node.parity-chains.parity.io:443"
     SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings"

-# Check runtime migrations for Parity managed asset hub chains
-check-runtime-migration-asset-hub-westend:
-  stage: check
-  extends:
-    - .docker-env
-    - .test-pr-refs
-    - .check-runtime-migration
-  variables:
-    NETWORK: "asset-hub-westend"
-    PACKAGE: "asset-hub-westend-runtime"
-    WASM: "asset_hub_westend_runtime.compact.compressed.wasm"
-    URI: "wss://westend-asset-hub-rpc.polkadot.io:443"
-
-check-runtime-migration-asset-hub-rococo:
-  stage: check
-  extends:
-    - .docker-env
-    - .test-pr-refs
-    - .check-runtime-migration
-  variables:
-    NETWORK: "asset-hub-rococo"
-    PACKAGE: "asset-hub-rococo-runtime"
-    WASM: "asset_hub_rococo_runtime.compact.compressed.wasm"
-    URI: "wss://rococo-asset-hub-rpc.polkadot.io:443"
-
-# Check runtime migrations for Parity managed bridge hub chains
-check-runtime-migration-bridge-hub-westend:
-  stage: check
-  extends:
-    - .docker-env
-    - .test-pr-refs
-    - .check-runtime-migration
-  variables:
-    NETWORK: "bridge-hub-westend"
-    PACKAGE: "bridge-hub-westend-runtime"
-    WASM: "bridge_hub_westend_runtime.compact.compressed.wasm"
-    URI: "wss://westend-bridge-hub-rpc.polkadot.io:443"
-
-check-runtime-migration-bridge-hub-rococo:
-  stage: check
-  extends:
-    - .docker-env
-    - .test-pr-refs
-    - .check-runtime-migration
-  variables:
-    NETWORK: "bridge-hub-rococo"
-    PACKAGE: "bridge-hub-rococo-runtime"
-    WASM: "bridge_hub_rococo_runtime.compact.compressed.wasm"
-    URI: "wss://rococo-bridge-hub-rpc.polkadot.io:443"
-
-# Check runtime migrations for Parity managed contract chains
-check-runtime-migration-contracts-rococo:
-  stage: check
-  extends:
-    - .docker-env
-    - .test-pr-refs
-    - .check-runtime-migration
-  variables:
-    NETWORK: "contracts-rococo"
-    PACKAGE: "contracts-rococo-runtime"
-    WASM: "contracts_rococo_runtime.compact.compressed.wasm"
-    URI: "wss://rococo-contracts-rpc.polkadot.io:443"
-
-# Check runtime migrations for Parity managed collectives chains
-check-runtime-migration-collectives-westend:
-  stage: check
-  extends:
-    - .docker-env
-    - .test-pr-refs
-    - .check-runtime-migration
-  variables:
-    NETWORK: "collectives-westend"
-    PACKAGE: "collectives-westend-runtime"
-    WASM: "collectives_westend_runtime.compact.compressed.wasm"
-    URI: "wss://westend-collectives-rpc.polkadot.io:443"
-    COMMAND_EXTRA_ARGS: "--disable-spec-name-check"
-
-# Check runtime migrations for Parity managed coretime chain
-check-runtime-migration-coretime-rococo:
-  stage: check
-  extends:
-    - .docker-env
-    - .test-pr-refs
-    - .check-runtime-migration
-  variables:
-    NETWORK: "coretime-rococo"
-    PACKAGE: "coretime-rococo-runtime"
-    WASM: "coretime_rococo_runtime.compact.compressed.wasm"
-    URI: "wss://rococo-coretime-rpc.polkadot.io:443"
-
 find-fail-ci-phrase:
   stage: check
   variables:
diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml
index 68712610ad2361601af3763485d1ab3e6c158682..44cd1933a9cfa0b3cbff384ff4184a36bf864021 100644
--- a/.gitlab/pipeline/publish.yml
+++ b/.gitlab/pipeline/publish.yml
@@ -76,6 +76,8 @@ publish-subsystem-benchmarks:
       artifacts: true
     - job: subsystem-benchmark-approval-voting
       artifacts: true
+    - job: subsystem-benchmark-statement-distribution
+      artifacts: true
     - job: publish-rustdoc
       artifacts: false
   script:
@@ -119,6 +121,8 @@ trigger_workflow:
       artifacts: true
     - job: subsystem-benchmark-approval-voting
       artifacts: true
+    - job: subsystem-benchmark-statement-distribution
+      artifacts: true
   script:
     - echo "Triggering workflow"
    - >
@@ -129,7 +133,7 @@ trigger_workflow:
         curl -q -X POST \
           -H "Accept: application/vnd.github.v3+json" \
           -H "Authorization: token $GITHUB_TOKEN" \
-          https://api.github.com/repos/paritytech/${CI_PROJECT_NAME}/actions/workflows/subsystem-benchmarks.yml/dispatches \
+          https://api.github.com/repos/paritytech/${CI_PROJECT_NAME}/actions/workflows/publish-subsystem-benchmarks.yml/dispatches \
          -d "{\"ref\":\"refs/heads/master\",\"inputs\":{\"benchmark-data-dir-path\":\"$benchmark_dir\",\"output-file-path\":\"$benchmark_name\"}}";
         sleep 300;
       done
diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml
index 796e4d65310481fef4e831a42497044a9ccaae5a..d171a8a19426c959f776aa0780f4373c4b23b4e6 100644
--- a/.gitlab/pipeline/test.yml
+++ b/.gitlab/pipeline/test.yml
@@ -24,7 +24,7 @@
 #
 #
-#
+#
 codecov-start:
   stage: test
   when: manual
@@ -53,11 +53,11 @@ codecov-finish:
   extends:
     - .kubernetes-env
    - .common-refs
-    - .pipeline-stopper-artifacts
+    - .pipeline-stopper-artifacts
   needs:
     - test-linux-stable-codecov
   script:
-    - !reference [.codecov-check, script]
+    - !reference [.codecov-check, script]
     - codecovcli -v create-report-results -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --git-service github
     - codecovcli -v get-report-results -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --git-service github
     - codecovcli -v send-notifications -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --git-service github
@@ -78,14 +78,14 @@ test-linux-stable-codecov:
     RUST_TOOLCHAIN: stable
     RUSTFLAGS: "-Cdebug-assertions=y -Cinstrument-coverage"
     LLVM_PROFILE_FILE: "target/coverage/cargo-test-${CI_NODE_INDEX}-%p-%m.profraw"
-    CARGO_INCREMENTAL: 0
-    FORKLIFT_BYPASS: "true"
+    CARGO_INCREMENTAL: 0
+    FORKLIFT_BYPASS: "true"
   parallel: 2
   script:
     # tools
-    - !reference [.codecov-check, script]
+    - !reference [.codecov-check, script]
     - rustup component add llvm-tools-preview
-    - mkdir -p target/coverage/result/
+    - mkdir -p target/coverage/result/
     # Place real test call here
     - >
       time cargo nextest run -p polkadot \
@@ -102,15 +102,15 @@ test-linux-stable-codecov:
         -t lcov \
         --branch \
         -o target/coverage/result/report-${CI_NODE_INDEX}.lcov
-    - ls -l target/coverage/result/
+    - ls -l target/coverage/result/
     - >
       if [ "$CI_COMMIT_REF_NAME" != "master" ]; then
        codecovcli -v do-upload -f target/coverage/result/report-${CI_NODE_INDEX}.lcov --disable-search -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --pr ${CI_COMMIT_REF_NAME} --git-service github;
       else
        codecovcli -v do-upload -f target/coverage/result/report-${CI_NODE_INDEX}.lcov --disable-search -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --git-service github;
       fi
-
-  #
+
+  #
 test-linux-stable:
   stage: test
@@ -128,8 +128,6 @@ test-linux-stable:
   script:
     # Build all but only execute 'runtime' tests.
     - echo "Node index - ${CI_NODE_INDEX}. Total amount - ${CI_NODE_TOTAL}"
-    # add experimental to features after https://github.com/paritytech/substrate/pull/14502 is merged
-    # "upgrade_version_checks_should_work" is currently failing
     - >
       time cargo nextest run \
         --workspace \
@@ -254,18 +252,6 @@ test-rustdoc:
   script:
     - time cargo doc --workspace --all-features --no-deps

-cargo-check-all-benches:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-  # DAG
-  needs:
-    - job: cargo-hfuzz
-      artifacts: false
-  script:
-    - time cargo check --all --benches
-
 test-node-metrics:
   stage: test
   extends:
@@ -505,6 +491,17 @@ check-tracing:
     - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features
     - time cargo test --locked --manifest-path ./substrate/primitives/tracing/Cargo.toml --no-default-features --features=with-tracing

+# Check that `westend-runtime` compiles with the `metadata-hash` feature enabled.
+check-metadata-hash:
+  stage: test
+  extends:
+    - .docker-env
+    - .common-refs
+    - .run-immediately
+    - .pipeline-stopper-artifacts
+  script:
+    - time cargo build --locked -p westend-runtime --features metadata-hash
+
 # more information about this job can be found here:
 # https://github.com/paritytech/substrate/pull/3778
 test-full-crypto-feature:
@@ -598,26 +595,6 @@ cargo-hfuzz:
     - for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); do cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; done

-# cf https://github.com/paritytech/polkadot-sdk/issues/1652
-test-syscalls:
-  stage: test
-  extends:
-    - .docker-env
-    - .common-refs
-    - .run-immediately
-  variables:
-    SKIP_WASM_BUILD: 1
-  script:
-    - cargo build --locked --profile production --target x86_64-unknown-linux-musl --bin polkadot-execute-worker --bin polkadot-prepare-worker
-    - cd polkadot/scripts/list-syscalls
-    - ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-execute-worker --only-used-syscalls | diff -u execute-worker-syscalls -
-    - ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-prepare-worker --only-used-syscalls | diff -u prepare-worker-syscalls -
-  after_script:
-    - if [[ "$CI_JOB_STATUS" == "failed" ]]; then
-      printf "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n";
-      fi
-  allow_failure: false # this rarely triggers in practice
-
 .subsystem-benchmark-template:
   stage: test
   artifacts:
@@ -653,3 +630,10 @@ subsystem-benchmark-approval-voting:
   script:
     - cargo bench -p polkadot-node-core-approval-voting --bench approval-voting-regression-bench --features subsystem-benchmarks
   allow_failure: true
+
+subsystem-benchmark-statement-distribution:
+  extends:
+    - .subsystem-benchmark-template
+  script:
+    - cargo bench -p polkadot-statement-distribution --bench statement-distribution-regression-bench --features subsystem-benchmarks
+  allow_failure: true
diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml
index 52948e1eb719d9f8669523d9762f5662fd1b6e96..7897e55e291bd3cae1f3df42d1fcf5c811ff52f2 100644
--- a/.gitlab/pipeline/zombienet.yml
+++ b/.gitlab/pipeline/zombienet.yml
@@ -1,8 +1,9 @@
 .zombienet-refs:
   extends: .build-refs
   variables:
-    ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.99"
+    ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.105"
     PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics"
+    DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs"

 include:
   # substrate tests
diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml
index c473f5c5fed755bfcceeeceea30a93c1d0c3403d..a7f321505bacf99df202c1469e7a75b4f0b30ba4 100644
--- a/.gitlab/pipeline/zombienet/cumulus.yml
+++ b/.gitlab/pipeline/zombienet/cumulus.yml
@@ -15,7 +15,6 @@
     - echo "${COL_IMAGE}"
     - echo "${GH_DIR}"
     - echo "${LOCAL_DIR}"
-    - export DEBUG=zombie
     - export RELAY_IMAGE=${POLKADOT_IMAGE}
     - export COL_IMAGE=${COL_IMAGE}
diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index 38c5332f309703dab881d1df88709fc4fe95e49c..b158cbe0b5aa3a50d490129dab87c9e7d6769b35 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -10,7 +10,6 @@
     - if [[ $CI_COMMIT_REF_NAME == *"gh-readonly-queue"* ]]; then export DOCKER_IMAGES_VERSION="${CI_COMMIT_SHORT_SHA}"; fi
     - export PIPELINE_IMAGE_TAG=${DOCKER_IMAGES_VERSION}
     - export BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" # from build-linux-stable job
-    - export DEBUG=zombie,zombie::network-node
     - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG}
     - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG}
     - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}"
@@ -184,6 +183,22 @@ zombienet-polkadot-functional-0012-spam-statement-distribution-requests:
       --local-dir="${LOCAL_DIR}/functional"
       --test="0012-spam-statement-distribution-requests.zndsl"

+zombienet-polkadot-functional-0013-systematic-chunk-recovery:
+  extends:
+    - .zombienet-polkadot-common
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --local-dir="${LOCAL_DIR}/functional"
+      --test="0013-systematic-chunk-recovery.zndsl"
+
+zombienet-polkadot-functional-0014-chunk-fetching-network-compatibility:
+  extends:
+    - .zombienet-polkadot-common
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --local-dir="${LOCAL_DIR}/functional"
+      --test="0014-chunk-fetching-network-compatibility.zndsl"
+
 zombienet-polkadot-smoke-0001-parachains-smoke-test:
   extends:
     - .zombienet-polkadot-common
diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml
index 8a627c454f9f3853f04694827e1484571f5444a9..2013ffd571cf388ba154f3965c166765f40baaf6 100644
--- a/.gitlab/pipeline/zombienet/substrate.yml
+++ b/.gitlab/pipeline/zombienet/substrate.yml
@@ -13,7 +13,6 @@
     - echo "${ZOMBIENET_IMAGE}"
     - echo "${GH_DIR}"
     - echo "${LOCAL_DIR}"
-    - export DEBUG=zombie,zombie::network-node
     - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${SUBSTRATE_IMAGE}":${SUBSTRATE_IMAGE_TAG}
     - echo "${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
   stage: zombienet
diff --git a/Cargo.lock b/Cargo.lock
index 96ca31f11d5994b344edd2207f8bcacca4e8bd0a..cf4b82b701116d8fdf0e1c5cf1f38099abb18eab 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -794,6 +794,7 @@ dependencies = [
  "cumulus-primitives-utility",
  "frame-benchmarking",
  "frame-executive",
+ "frame-metadata-hash-extension",
  "frame-support",
  "frame-system",
  "frame-system-benchmarking",
@@ -816,7 +817,6 @@ dependencies = [
  "pallet-nfts-runtime-api",
  "pallet-proxy",
  "pallet-session",
- "pallet-state-trie-migration",
  "pallet-timestamp",
  "pallet-transaction-payment",
  "pallet-transaction-payment-rpc-runtime-api",
@@ -881,6 +881,7 @@ dependencies = [
  "cumulus-pallet-parachain-system",
  "cumulus-pallet-xcmp-queue",
  "emulated-integration-tests-common",
+ "frame-metadata-hash-extension",
  "frame-support",
  "frame-system",
  "pallet-asset-conversion",
@@ -926,6 +927,7 @@ dependencies = [
  "cumulus-primitives-utility",
  "frame-benchmarking",
  "frame-executive",
+ "frame-metadata-hash-extension",
  "frame-support",
  "frame-system",
  "frame-system-benchmarking",
@@ -2134,6 +2136,7 @@ dependencies = [
  "substrate-wasm-builder",
  "testnet-parachains-constants",
  "tuplex",
+ "xcm-fee-payment-runtime-api",
 ]

 [[package]]
@@ -2294,6 +2297,7 @@ dependencies = [
  "testnet-parachains-constants",
  "tuplex",
  "westend-runtime-constants",
+ "xcm-fee-payment-runtime-api",
 ]

 [[package]]
@@ -2903,6 +2907,7 @@ dependencies = [
  "substrate-wasm-builder",
  "testnet-parachains-constants",
  "westend-runtime-constants",
+ "xcm-fee-payment-runtime-api",
 ]

 [[package]]
@@ -3149,6 +3154,7 @@ dependencies = [
  "staging-xcm-executor",
  "substrate-wasm-builder",
  "testnet-parachains-constants",
+ "xcm-fee-payment-runtime-api",
 ]

 [[package]]
@@ -3245,6 +3251,7 @@ dependencies = [
  "staging-xcm-executor",
  "substrate-wasm-builder",
  "testnet-parachains-constants",
+ "xcm-fee-payment-runtime-api",
 ]

 [[package]]
@@ -3309,6 +3316,7 @@ dependencies = [
  "substrate-wasm-builder",
  "testnet-parachains-constants",
  "westend-runtime-constants",
+ "xcm-fee-payment-runtime-api",
 ]

 [[package]]
@@ -5337,18 +5345,6 @@ dependencies = [
  "futures",
 ]

-[[package]]
-name = "expander"
-version = "0.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a718c0675c555c5f976fff4ea9e2c150fa06cefa201cadef87cfbf9324075881"
-dependencies = [
- "blake3",
- "fs-err",
- "proc-macro2 1.0.85",
- "quote 1.0.36",
-]
-
 [[package]]
 name = "expander"
 version = "2.1.0"
@@ -5413,9 +5409,9 @@ dependencies = [

 [[package]]
 name = "fatality"
-version = "0.0.6"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ad875162843b0d046276327afe0136e9ed3a23d5a754210fb6f1f33610d39ab"
+checksum = "ec6f82451ff7f0568c6181287189126d492b5654e30a788add08027b6363d019"
 dependencies = [
  "fatality-proc-macro",
  "thiserror",
@@ -5423,17 +5419,16 @@ dependencies = [

 [[package]]
 name = "fatality-proc-macro"
-version = "0.0.6"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f5aa1e3ae159e592ad222dc90c5acbad632b527779ba88486abe92782ab268bd"
+checksum = "eb42427514b063d97ce21d5199f36c0c307d981434a6be32582bc79fe5bd2303"
 dependencies = [
- "expander 0.0.4",
- "indexmap 1.9.3",
- "proc-macro-crate 1.3.1",
+ "expander",
+ "indexmap 2.2.6",
+ "proc-macro-crate 3.1.0",
  "proc-macro2 1.0.85",
  "quote 1.0.36",
- "syn 1.0.109",
- "thiserror",
+ "syn 2.0.66",
 ]

 [[package]]
@@ -5834,6 +5829,27 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "frame-metadata-hash-extension"
+version = "0.1.0"
+dependencies = [
+ "array-bytes",
+ "docify",
+ "frame-metadata",
+ "frame-support",
+ "frame-system",
+ "log",
+ "merkleized-metadata",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-api",
+ "sp-runtime",
+ "sp-tracing 16.0.0",
+ "sp-transaction-pool",
+ "substrate-test-runtime-client",
+ "substrate-wasm-builder",
+]
+
 [[package]]
 name = "frame-omni-bencher"
 version = "0.1.0"
@@ -5922,7 +5938,7 @@ dependencies = [
  "Inflector",
  "cfg-expr",
  "derive-syn-parse 0.2.0",
- "expander 2.1.0",
+ "expander",
  "frame-support-procedural-tools",
  "itertools 0.11.0",
  "macro_magic",
@@ -7342,124 +7358,15 @@ checksum = "c33070833c9ee02266356de0c43f723152bd38bd96ddf52c82b3af10c9138b28"
 name = "kitchensink-runtime"
 version = "3.0.0-dev"
 dependencies = [
- "frame-benchmarking",
- "frame-benchmarking-pallet-pov",
- "frame-election-provider-support",
- "frame-executive",
- "frame-support",
- "frame-system",
- "frame-system-benchmarking",
- "frame-system-rpc-runtime-api",
- "frame-try-runtime",
 "log",
 "node-primitives",
- "pallet-alliance",
- "pallet-asset-conversion",
- "pallet-asset-conversion-ops",
- "pallet-asset-conversion-tx-payment",
- "pallet-asset-rate",
- "pallet-asset-tx-payment",
- "pallet-assets",
- "pallet-authority-discovery",
- "pallet-authorship",
- "pallet-babe",
- "pallet-bags-list",
- "pallet-balances",
- "pallet-beefy",
- "pallet-beefy-mmr",
- "pallet-bounties",
- "pallet-broker",
- "pallet-child-bounties",
- "pallet-collective",
- "pallet-contracts",
- "pallet-conviction-voting",
- "pallet-core-fellowship",
- "pallet-democracy",
- "pallet-election-provider-multi-phase",
- "pallet-election-provider-support-benchmarking",
- "pallet-elections-phragmen",
 "pallet-example-mbm",
 "pallet-example-tasks",
- "pallet-fast-unstake",
- "pallet-glutton",
- "pallet-grandpa",
- "pallet-identity",
- "pallet-im-online",
- "pallet-indices",
- "pallet-insecure-randomness-collective-flip",
- "pallet-lottery",
- "pallet-membership",
- "pallet-message-queue",
- "pallet-migrations",
- "pallet-mixnet",
- "pallet-mmr",
- "pallet-multisig",
- "pallet-nft-fractionalization",
- "pallet-nfts",
- "pallet-nfts-runtime-api",
- "pallet-nis",
- "pallet-nomination-pools",
- "pallet-nomination-pools-benchmarking",
- "pallet-nomination-pools-runtime-api",
- "pallet-offences",
- "pallet-offences-benchmarking",
- "pallet-parameters",
- "pallet-preimage",
- "pallet-proxy",
- "pallet-ranked-collective",
- "pallet-recovery",
- "pallet-referenda",
- "pallet-remark",
- "pallet-root-testing",
- "pallet-safe-mode",
- "pallet-salary",
- "pallet-scheduler",
- "pallet-session",
- "pallet-session-benchmarking",
- "pallet-skip-feeless-payment",
- "pallet-society",
- "pallet-stake-tracker",
- "pallet-staking",
- "pallet-staking-reward-curve",
- "pallet-staking-runtime-api",
- "pallet-state-trie-migration",
- "pallet-statement",
- "pallet-sudo",
- "pallet-timestamp",
- "pallet-tips",
- "pallet-transaction-payment",
- "pallet-transaction-payment-rpc-runtime-api",
- "pallet-transaction-storage",
- "pallet-treasury",
- "pallet-tx-pause",
-
"pallet-uniques", - "pallet-utility", - "pallet-vesting", - "pallet-whitelist", "parity-scale-codec", + "polkadot-sdk", "primitive-types", "scale-info", "serde_json", - "sp-api", - "sp-authority-discovery", - "sp-block-builder", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-consensus-grandpa", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-io", - "sp-mixnet", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-staking", - "sp-statement-store", - "sp-std 14.0.0", - "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", "static_assertions", "substrate-wasm-builder", ] @@ -8168,8 +8075,9 @@ dependencies = [ [[package]] name = "litep2p" -version = "0.3.0" -source = "git+https://github.com/paritytech/litep2p?rev=e03a6023882db111beeb24d8c0ceaac0721d3f0f#e03a6023882db111beeb24d8c0ceaac0721d3f0f" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f02542ae3a94b4c4ffa37dc56388c923e286afa3bf65452e3984b50b2a2f316" dependencies = [ "async-trait", "bs58 0.4.0", @@ -8181,7 +8089,7 @@ dependencies = [ "hex-literal", "indexmap 2.2.6", "libc", - "mockall", + "mockall 0.12.1", "multiaddr", "multihash 0.17.0", "network-interface", @@ -8438,6 +8346,20 @@ dependencies = [ "hash-db", ] +[[package]] +name = "merkleized-metadata" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f313fcff1d2a4bcaa2deeaa00bf7530d77d5f7bd0467a117dde2e29a75a7a17a" +dependencies = [ + "array-bytes", + "blake3", + "frame-metadata", + "parity-scale-codec", + "scale-decode", + "scale-info", +] + [[package]] name = "merlin" version = "3.0.0" @@ -8501,7 +8423,7 @@ dependencies = [ "pallet-minimal-template", "polkadot-sdk-docs", "polkadot-sdk-frame", - "simple-mermaid", + "simple-mermaid 0.1.1", ] [[package]] @@ -8650,11 +8572,26 @@ dependencies = [ "downcast", "fragile", "lazy_static", - "mockall_derive", + "mockall_derive 0.11.4", "predicates 2.1.5", "predicates-tree", ] +[[package]] +name = "mockall" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "lazy_static", + "mockall_derive 0.12.1", + "predicates 3.1.0", + "predicates-tree", +] + [[package]] name = "mockall_derive" version = "0.11.4" @@ -8667,6 +8604,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "mockall_derive" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" +dependencies = [ + "cfg-if", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "multiaddr" version = "0.17.1" @@ -8971,12 +8920,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ "bitflags 2.5.0", "cfg-if", + "cfg_aliases", "libc", ] @@ -9059,10 +9009,12 @@ dependencies = [ "sc-sync-state-rpc", "sc-transaction-pool-api", "sp-api", + "sp-application-crypto", "sp-block-builder", "sp-blockchain", "sp-consensus", "sp-consensus-babe", + "sp-consensus-beefy", "sp-keystore", "sp-runtime", "sp-statement-store", @@ -9097,6 +9049,7 @@ dependencies = [ name = "node-testing" version = "3.0.0-dev" dependencies 
= [ + "frame-metadata-hash-extension", "frame-system", "fs_extra", "futures", @@ -9442,7 +9395,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1344346d5af32c95bbddea91b18a88cc83eac394192d20ef2fc4c40a74332355" dependencies = [ - "expander 2.1.0", + "expander", "indexmap 2.2.6", "itertools 0.11.0", "petgraph", @@ -9976,6 +9929,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-std 14.0.0", + "sp-tracing 16.0.0", ] [[package]] @@ -10222,6 +10176,29 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-delegated-staking" +version = "1.0.0" +dependencies = [ + "frame-election-provider-support", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-nomination-pools", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std 14.0.0", + "sp-tracing 16.0.0", + "substrate-test-utils", +] + [[package]] name = "pallet-democracy" version = "28.0.0" @@ -10882,6 +10859,7 @@ dependencies = [ "frame-system", "pallet-bags-list", "pallet-balances", + "pallet-delegated-staking", "pallet-nomination-pools", "pallet-staking", "pallet-staking-reward-curve", @@ -10922,7 +10900,32 @@ dependencies = [ ] [[package]] -name = "pallet-nomination-pools-test-staking" +name = "pallet-nomination-pools-test-delegate-stake" +version = "1.0.0" +dependencies = [ + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-bags-list", + "pallet-balances", + "pallet-delegated-staking", + "pallet-nomination-pools", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std 14.0.0", + "sp-tracing 16.0.0", +] + +[[package]] +name = "pallet-nomination-pools-test-transfer-stake" version = "1.0.0" dependencies = [ "frame-election-provider-support", @@ -11899,6 +11902,7 @@ dependencies = [ "docify", "frame-benchmarking", "frame-executive", + "frame-metadata-hash-extension", "frame-support", "frame-system", "frame-system-benchmarking", @@ -12402,6 +12406,7 @@ dependencies = [ "staging-xcm-executor", "substrate-wasm-builder", "testnet-parachains-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -12502,6 +12507,7 @@ dependencies = [ "substrate-wasm-builder", "testnet-parachains-constants", "westend-runtime-constants", + "xcm-fee-payment-runtime-api", ] [[package]] @@ -12670,7 +12676,7 @@ version = "6.0.0" dependencies = [ "assert_cmd", "color-eyre", - "nix 0.27.1", + "nix 0.28.0", "polkadot-cli", "polkadot-core-primitives", "polkadot-node-core-pvf", @@ -12761,6 +12767,7 @@ dependencies = [ "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand 0.8.5", + "rstest", "sc-network", "schnellru", "sp-core", @@ -12777,7 +12784,6 @@ version = "7.0.0" dependencies = [ "assert_matches", "async-trait", - "env_logger 0.11.3", "fatality", "futures", "futures-timer", @@ -12793,24 +12799,36 @@ dependencies = [ "polkadot-primitives-test-helpers", "polkadot-subsystem-bench", "rand 0.8.5", + "rstest", "sc-network", "schnellru", "sp-application-crypto", "sp-core", "sp-keyring", + "sp-tracing 16.0.0", "thiserror", "tokio", "tracing-gum", ] [[package]] -name = "polkadot-cli" -version = "7.0.0" +name = "polkadot-ckb-merkle-mountain-range" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a4b44320e5f7ce2c18227537a3032ae5b2c476a7e8eddba45333e1011fc31b92" dependencies = [ "cfg-if", - "clap 4.5.4", - "frame-benchmarking-cli", - "futures", + "itertools 0.10.5", +] + +[[package]] +name = "polkadot-cli" +version = "7.0.0" +dependencies = [ + "cfg-if", + "clap 4.5.4", + "frame-benchmarking-cli", + "futures", "log", "polkadot-node-metrics", "polkadot-node-primitives", @@ -12851,6 +12869,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sc-keystore", "sc-network", "sp-core", @@ -12914,6 +12933,7 @@ dependencies = [ "parity-scale-codec", "polkadot-node-primitives", "polkadot-primitives", + "quickcheck", "reed-solomon-novelpoly", "sp-core", "sp-trie", @@ -13247,7 +13267,6 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", - "rstest", "sc-keystore", "sp-application-crypto", "sp-core", @@ -13354,7 +13373,7 @@ dependencies = [ "futures", "landlock", "libc", - "nix 0.27.1", + "nix 0.28.0", "parity-scale-codec", "polkadot-parachain-primitives", "polkadot-primitives", @@ -13379,7 +13398,7 @@ dependencies = [ "cfg-if", "cpu-time", "libc", - "nix 0.27.1", + "nix 0.28.0", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-parachain-primitives", @@ -13395,7 +13414,7 @@ dependencies = [ "cfg-if", "criterion", "libc", - "nix 0.27.1", + "nix 0.28.0", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-primitives", @@ -13561,6 +13580,7 @@ dependencies = [ "async-trait", "bitvec", "derive_more", + "fatality", "futures", "orchestra", "polkadot-node-jaeger", @@ -13603,6 +13623,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "pin-project", + "polkadot-erasure-coding", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", @@ -13688,7 +13709,7 @@ dependencies = [ "hex-literal", "jsonrpsee", "log", - "nix 0.27.1", + "nix 0.28.0", "pallet-transaction-payment", "pallet-transaction-payment-rpc", "pallet-transaction-payment-rpc-runtime-api", @@ -13824,10 +13845,12 @@ dependencies = [ "sc-sync-state-rpc", "sc-transaction-pool-api", "sp-api", + "sp-application-crypto", "sp-block-builder", "sp-blockchain", "sp-consensus", "sp-consensus-babe", + "sp-consensus-beefy", "sp-keystore", "sp-runtime", "substrate-frame-rpc-system", @@ -13958,8 +13981,394 @@ dependencies = [ "sp-tracing 16.0.0", "staging-xcm", "staging-xcm-executor", - "static_assertions", - "thousands", + "static_assertions", + "thousands", +] + +[[package]] +name = "polkadot-sdk" +version = "0.1.0" +dependencies = [ + "asset-test-utils", + "assets-common", + "binary-merkle-tree", + "bp-asset-hub-rococo", + "bp-asset-hub-westend", + "bp-bridge-hub-cumulus", + "bp-bridge-hub-kusama", + "bp-bridge-hub-polkadot", + "bp-bridge-hub-rococo", + "bp-bridge-hub-westend", + "bp-header-chain", + "bp-kusama", + "bp-messages", + "bp-parachains", + "bp-polkadot", + "bp-polkadot-bulletin", + "bp-polkadot-core", + "bp-relayers", + "bp-rococo", + "bp-runtime", + "bp-test-utils", + "bp-westend", + "bp-xcm-bridge-hub", + "bp-xcm-bridge-hub-router", + "bridge-hub-common", + "bridge-hub-test-utils", + "bridge-runtime-common", + "cumulus-client-cli", + "cumulus-client-collator", + "cumulus-client-consensus-aura", + "cumulus-client-consensus-common", + "cumulus-client-consensus-proposer", + "cumulus-client-consensus-relay-chain", + "cumulus-client-network", + "cumulus-client-parachain-inherent", + "cumulus-client-pov-recovery", + "cumulus-client-service", + 
"cumulus-pallet-aura-ext", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-parachain-system-proc-macro", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-solo-to-para", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-ping", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-timestamp", + "cumulus-primitives-utility", + "cumulus-relay-chain-inprocess-interface", + "cumulus-relay-chain-interface", + "cumulus-relay-chain-minimal-node", + "cumulus-relay-chain-rpc-interface", + "cumulus-test-relay-sproof-builder", + "emulated-integration-tests-common", + "fork-tree", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-benchmarking-pallet-pov", + "frame-election-provider-solution-type", + "frame-election-provider-support", + "frame-executive", + "frame-metadata-hash-extension", + "frame-remote-externalities", + "frame-support", + "frame-support-procedural", + "frame-support-procedural-tools", + "frame-support-procedural-tools-derive", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "generate-bags", + "mmr-gadget", + "mmr-rpc", + "pallet-alliance", + "pallet-asset-conversion", + "pallet-asset-conversion-ops", + "pallet-asset-conversion-tx-payment", + "pallet-asset-rate", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-atomic-swap", + "pallet-aura", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-bags-list", + "pallet-balances", + "pallet-beefy", + "pallet-beefy-mmr", + "pallet-bounties", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-bridge-relayers", + "pallet-broker", + "pallet-child-bounties", + "pallet-collator-selection", + "pallet-collective", + "pallet-collective-content", + "pallet-contracts", + "pallet-contracts-mock-network", + "pallet-contracts-proc-macro", + "pallet-contracts-uapi", + "pallet-conviction-voting", + "pallet-core-fellowship", + "pallet-delegated-staking", + "pallet-democracy", + "pallet-dev-mode", + "pallet-election-provider-multi-phase", + "pallet-election-provider-support-benchmarking", + "pallet-elections-phragmen", + "pallet-fast-unstake", + "pallet-glutton", + "pallet-grandpa", + "pallet-identity", + "pallet-im-online", + "pallet-indices", + "pallet-insecure-randomness-collective-flip", + "pallet-lottery", + "pallet-membership", + "pallet-message-queue", + "pallet-migrations", + "pallet-mixnet", + "pallet-mmr", + "pallet-multisig", + "pallet-nft-fractionalization", + "pallet-nfts", + "pallet-nfts-runtime-api", + "pallet-nis", + "pallet-node-authorization", + "pallet-nomination-pools", + "pallet-nomination-pools-benchmarking", + "pallet-nomination-pools-runtime-api", + "pallet-offences", + "pallet-offences-benchmarking", + "pallet-paged-list", + "pallet-parameters", + "pallet-preimage", + "pallet-proxy", + "pallet-ranked-collective", + "pallet-recovery", + "pallet-referenda", + "pallet-remark", + "pallet-root-offences", + "pallet-root-testing", + "pallet-safe-mode", + "pallet-salary", + "pallet-scheduler", + "pallet-scored-pool", + "pallet-session", + "pallet-session-benchmarking", + "pallet-skip-feeless-payment", + "pallet-society", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-staking-reward-fn", + "pallet-staking-runtime-api", + "pallet-state-trie-migration", + 
"pallet-statement", + "pallet-sudo", + "pallet-timestamp", + "pallet-tips", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-transaction-storage", + "pallet-treasury", + "pallet-tx-pause", + "pallet-uniques", + "pallet-utility", + "pallet-vesting", + "pallet-whitelist", + "pallet-xcm", + "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub", + "pallet-xcm-bridge-hub-router", + "parachains-common", + "parachains-runtimes-test-utils", + "polkadot-approval-distribution", + "polkadot-availability-bitfield-distribution", + "polkadot-availability-distribution", + "polkadot-availability-recovery", + "polkadot-cli", + "polkadot-collator-protocol", + "polkadot-core-primitives", + "polkadot-dispute-distribution", + "polkadot-erasure-coding", + "polkadot-gossip-support", + "polkadot-network-bridge", + "polkadot-node-collation-generation", + "polkadot-node-core-approval-voting", + "polkadot-node-core-av-store", + "polkadot-node-core-backing", + "polkadot-node-core-bitfield-signing", + "polkadot-node-core-candidate-validation", + "polkadot-node-core-chain-api", + "polkadot-node-core-chain-selection", + "polkadot-node-core-dispute-coordinator", + "polkadot-node-core-parachains-inherent", + "polkadot-node-core-prospective-parachains", + "polkadot-node-core-provisioner", + "polkadot-node-core-pvf", + "polkadot-node-core-pvf-checker", + "polkadot-node-core-pvf-common", + "polkadot-node-core-pvf-execute-worker", + "polkadot-node-core-pvf-prepare-worker", + "polkadot-node-core-runtime-api", + "polkadot-node-jaeger", + "polkadot-node-metrics", + "polkadot-node-network-protocol", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-types", + "polkadot-node-subsystem-util", + "polkadot-overseer", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-rpc", + "polkadot-runtime-common", + "polkadot-runtime-metrics", + "polkadot-runtime-parachains", + "polkadot-sdk-frame", + "polkadot-service", + "polkadot-statement-distribution", + "polkadot-statement-table", + "rococo-runtime-constants", + "sc-allocator", + "sc-authority-discovery", + "sc-basic-authorship", + "sc-block-builder", + "sc-chain-spec", + "sc-chain-spec-derive", + "sc-cli", + "sc-client-api", + "sc-client-db", + "sc-consensus", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-consensus-babe-rpc", + "sc-consensus-beefy", + "sc-consensus-beefy-rpc", + "sc-consensus-epochs", + "sc-consensus-grandpa", + "sc-consensus-grandpa-rpc", + "sc-consensus-manual-seal", + "sc-consensus-pow", + "sc-consensus-slots", + "sc-executor", + "sc-executor-common", + "sc-executor-polkavm", + "sc-executor-wasmtime", + "sc-informant", + "sc-keystore", + "sc-mixnet", + "sc-network", + "sc-network-common", + "sc-network-gossip", + "sc-network-light", + "sc-network-statement", + "sc-network-sync", + "sc-network-transactions", + "sc-network-types", + "sc-offchain", + "sc-proposer-metrics", + "sc-rpc", + "sc-rpc-api", + "sc-rpc-server", + "sc-rpc-spec-v2", + "sc-service", + "sc-state-db", + "sc-statement-store", + "sc-storage-monitor", + "sc-sync-state-rpc", + "sc-sysinfo", + "sc-telemetry", + "sc-tracing", + "sc-tracing-proc-macro", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sc-utils", + "slot-range-helper", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-ethereum", + "snowbridge-outbound-queue-merkle-tree", + "snowbridge-outbound-queue-runtime-api", + "snowbridge-pallet-ethereum-client", + 
"snowbridge-pallet-ethereum-client-fixtures", + "snowbridge-pallet-inbound-queue", + "snowbridge-pallet-inbound-queue-fixtures", + "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-system", + "snowbridge-router-primitives", + "snowbridge-runtime-common", + "snowbridge-runtime-test-common", + "snowbridge-system-runtime-api", + "sp-api", + "sp-api-proc-macro", + "sp-application-crypto", + "sp-arithmetic", + "sp-authority-discovery", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-aura", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-consensus-grandpa", + "sp-consensus-pow", + "sp-consensus-slots", + "sp-core", + "sp-core-hashing", + "sp-core-hashing-proc-macro", + "sp-crypto-ec-utils 0.10.0", + "sp-crypto-hashing", + "sp-crypto-hashing-proc-macro", + "sp-database", + "sp-debug-derive 14.0.0", + "sp-externalities 0.25.0", + "sp-genesis-builder", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-keystore", + "sp-maybe-compressed-blob", + "sp-metadata-ir", + "sp-mixnet", + "sp-mmr-primitives", + "sp-npos-elections", + "sp-offchain", + "sp-panic-handler", + "sp-rpc", + "sp-runtime", + "sp-runtime-interface 24.0.0", + "sp-runtime-interface-proc-macro 17.0.0", + "sp-session", + "sp-staking", + "sp-state-machine", + "sp-statement-store", + "sp-std 14.0.0", + "sp-storage 19.0.0", + "sp-timestamp", + "sp-tracing 16.0.0", + "sp-transaction-pool", + "sp-transaction-storage-proof", + "sp-trie", + "sp-version", + "sp-version-proc-macro", + "sp-wasm-interface 20.0.0", + "sp-weights", + "staging-chain-spec-builder", + "staging-node-inspect", + "staging-parachain-info", + "staging-tracking-allocator", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "subkey", + "substrate-bip39", + "substrate-build-script-utils", + "substrate-frame-rpc-support", + "substrate-frame-rpc-system", + "substrate-prometheus-endpoint", + "substrate-rpc-client", + "substrate-state-trie-migration-rpc", + "substrate-wasm-builder", + "testnet-parachains-constants", + "tracing-gum", + "tracing-gum-proc-macro", + "westend-runtime-constants", + "xcm-emulator", + "xcm-fee-payment-runtime-api", + "xcm-procedural", + "xcm-simulator", ] [[package]] @@ -13973,6 +14382,7 @@ dependencies = [ "cumulus-primitives-storage-weight-reclaim", "docify", "frame-executive", + "frame-metadata-hash-extension", "frame-support", "frame-system", "kitchensink-runtime", @@ -13999,6 +14409,7 @@ dependencies = [ "pallet-uniques", "pallet-utility", "parity-scale-codec", + "polkadot-sdk", "polkadot-sdk-frame", "sc-cli", "sc-client-db", @@ -14014,7 +14425,7 @@ dependencies = [ "sc-rpc-api", "sc-service", "scale-info", - "simple-mermaid", + "simple-mermaid 0.1.1", "sp-api", "sp-arithmetic", "sp-core", @@ -14029,6 +14440,7 @@ dependencies = [ "staging-xcm", "subkey", "substrate-wasm-builder", + "xcm-docs", ] [[package]] @@ -14074,6 +14486,7 @@ dependencies = [ "env_logger 0.11.3", "frame-benchmarking", "frame-benchmarking-cli", + "frame-metadata-hash-extension", "frame-support", "frame-system", "frame-system-rpc-runtime-api", @@ -14211,6 +14624,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", "rand_chacha 0.3.1", "sc-keystore", "sc-network", @@ -14275,6 +14689,7 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-statement-distribution", "prometheus", "pyroscope", "pyroscope_pprofrs", @@ -14299,6 +14714,7 @@ dependencies = [ "sp-keystore", "sp-runtime", 
"sp-timestamp", + "strum 0.24.1", "substrate-prometheus-endpoint", "tokio", "tracing-gum", @@ -15603,6 +16019,7 @@ dependencies = [ "jsonpath_lib", "log", "num-traits", + "parking_lot 0.12.3", "serde_json", "sp-runtime", "substrate-prometheus-endpoint", @@ -15845,6 +16262,7 @@ dependencies = [ "bitvec", "frame-benchmarking", "frame-executive", + "frame-metadata-hash-extension", "frame-remote-externalities", "frame-support", "frame-system", @@ -16451,10 +16869,12 @@ name = "sc-chain-spec" version = "28.0.0" dependencies = [ "array-bytes", + "clap 4.5.4", "docify", "log", "memmap2 0.9.4", "parity-scale-codec", + "regex", "sc-chain-spec-derive", "sc-client-api", "sc-executor", @@ -16599,7 +17019,7 @@ dependencies = [ "futures", "futures-timer", "log", - "mockall", + "mockall 0.11.4", "parking_lot 0.12.3", "sc-client-api", "sc-network-types", @@ -16781,6 +17201,7 @@ dependencies = [ "sc-rpc", "serde", "serde_json", + "sp-application-crypto", "sp-consensus-beefy", "sp-core", "sp-runtime", @@ -17124,7 +17545,7 @@ dependencies = [ "linked_hash_set", "litep2p", "log", - "mockall", + "mockall 0.11.4", "multistream-select", "once_cell", "parity-scale-codec", @@ -17262,7 +17683,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "mockall", + "mockall 0.11.4", "parity-scale-codec", "prost 0.12.6", "prost-build 0.12.6", @@ -17346,12 +17767,15 @@ name = "sc-network-types" version = "0.10.0" dependencies = [ "bs58 0.5.1", + "ed25519-dalek 2.1.1", "libp2p-identity", "litep2p", "multiaddr", "multihash 0.17.0", + "quickcheck", "rand 0.8.5", "thiserror", + "zeroize", ] [[package]] @@ -17846,6 +18270,29 @@ dependencies = [ "tokio-test", ] +[[package]] +name = "scale-bits" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57b1e7f6b65ed1f04e79a85a57d755ad56d76fdf1e9bddcc9ae14f71fcdcf54" +dependencies = [ + "parity-scale-codec", + "scale-type-resolver", +] + +[[package]] +name = "scale-decode" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b12ebca36cec2a3f983c46295b282b35e5f8496346fb859a8776dad5389e5389" +dependencies = [ + "derive_more", + "parity-scale-codec", + "scale-bits", + "scale-type-resolver", + "smallvec", +] + [[package]] name = "scale-info" version = "2.11.3" @@ -17872,6 +18319,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "scale-type-resolver" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0cded6518aa0bd6c1be2b88ac81bf7044992f0f154bfbabd5ad34f43512abcb" + [[package]] name = "schannel" version = "0.1.23" @@ -17981,9 +18434,9 @@ dependencies = [ [[package]] name = "sctp-proto" -version = "0.1.7" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f64cef148d3295c730c3cb340b0b252a4d570b1c7d4bf0808f88540b0a888bc" +checksum = "b6220f78bb44c15f326b0596113305f6101097a18755d53727a575c97e09fb24" dependencies = [ "bytes", "crc", @@ -18477,6 +18930,11 @@ dependencies = [ "bitflags 2.5.0", ] +[[package]] +name = "simple-mermaid" +version = "0.1.0" +source = "git+https://github.com/kianenigma/simple-mermaid.git?branch=main#e48b187bcfd5cc75111acd9d241f1bd36604344b" + [[package]] name = "simple-mermaid" version = "0.1.1" @@ -19138,7 +19596,7 @@ dependencies = [ "Inflector", "assert_matches", "blake2 0.10.6", - "expander 2.1.0", + "expander", "proc-macro-crate 3.1.0", "proc-macro2 1.0.85", "quote 1.0.36", @@ -19489,7 +19947,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = 
"0.10.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f66e693a6befef0956a3129254fbe568247c9c57" +source = "git+https://github.com/paritytech/polkadot-sdk#6f228e7d220bb14c113dcc27c931590737f9d0ab" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -19549,7 +20007,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f66e693a6befef0956a3129254fbe568247c9c57" +source = "git+https://github.com/paritytech/polkadot-sdk#6f228e7d220bb14c113dcc27c931590737f9d0ab" dependencies = [ "proc-macro2 1.0.85", "quote 1.0.36", @@ -19568,7 +20026,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f66e693a6befef0956a3129254fbe568247c9c57" +source = "git+https://github.com/paritytech/polkadot-sdk#6f228e7d220bb14c113dcc27c931590737f9d0ab" dependencies = [ "environmental", "parity-scale-codec", @@ -19677,9 +20135,9 @@ name = "sp-mmr-primitives" version = "26.0.0" dependencies = [ "array-bytes", - "ckb-merkle-mountain-range", "log", "parity-scale-codec", + "polkadot-ckb-merkle-mountain-range", "scale-info", "serde", "sp-api", @@ -19758,7 +20216,7 @@ dependencies = [ "scale-info", "serde", "serde_json", - "simple-mermaid", + "simple-mermaid 0.1.1", "sp-api", "sp-application-crypto", "sp-arithmetic", @@ -19799,7 +20257,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f66e693a6befef0956a3129254fbe568247c9c57" +source = "git+https://github.com/paritytech/polkadot-sdk#6f228e7d220bb14c113dcc27c931590737f9d0ab" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -19820,7 +20278,7 @@ name = "sp-runtime-interface-proc-macro" version = "17.0.0" dependencies = [ "Inflector", - "expander 2.1.0", + "expander", "proc-macro-crate 3.1.0", "proc-macro2 1.0.85", "quote 1.0.36", @@ -19830,10 +20288,10 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f66e693a6befef0956a3129254fbe568247c9c57" +source = "git+https://github.com/paritytech/polkadot-sdk#6f228e7d220bb14c113dcc27c931590737f9d0ab" dependencies = [ "Inflector", - "expander 2.1.0", + "expander", "proc-macro-crate 3.1.0", "proc-macro2 1.0.85", "quote 1.0.36", @@ -19956,7 +20414,7 @@ version = "14.0.0" [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f66e693a6befef0956a3129254fbe568247c9c57" +source = "git+https://github.com/paritytech/polkadot-sdk#6f228e7d220bb14c113dcc27c931590737f9d0ab" [[package]] name = "sp-storage" @@ -19972,7 +20430,7 @@ dependencies = [ [[package]] name = "sp-storage" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f66e693a6befef0956a3129254fbe568247c9c57" +source = "git+https://github.com/paritytech/polkadot-sdk#6f228e7d220bb14c113dcc27c931590737f9d0ab" dependencies = [ "impl-serde", "parity-scale-codec", @@ -20017,7 +20475,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f66e693a6befef0956a3129254fbe568247c9c57" +source = "git+https://github.com/paritytech/polkadot-sdk#6f228e7d220bb14c113dcc27c931590737f9d0ab" dependencies = [ "parity-scale-codec", "tracing", @@ -20114,7 +20572,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "20.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk#f66e693a6befef0956a3129254fbe568247c9c57" +source = "git+https://github.com/paritytech/polkadot-sdk#6f228e7d220bb14c113dcc27c931590737f9d0ab" dependencies = [ "impl-trait-for-tuples", "log", @@ -20223,7 +20681,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "staging-chain-spec-builder" -version = "3.0.0" +version = "1.6.0" dependencies = [ "clap 4.5.4", "log", @@ -20241,102 +20699,26 @@ dependencies = [ "clap 4.5.4", "clap_complete", "criterion", - "frame-benchmarking", - "frame-benchmarking-cli", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", "futures", "jsonrpsee", "kitchensink-runtime", "log", - "mmr-gadget", - "nix 0.27.1", + "nix 0.28.0", "node-primitives", "node-rpc", "node-testing", - "pallet-asset-conversion-tx-payment", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-balances", - "pallet-contracts", - "pallet-glutton", - "pallet-im-online", - "pallet-root-testing", - "pallet-skip-feeless-payment", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-treasury", "parity-scale-codec", "platforms", + "polkadot-sdk", "rand 0.8.5", "regex", - "sc-authority-discovery", - "sc-basic-authorship", - "sc-block-builder", - "sc-chain-spec", - "sc-cli", - "sc-client-api", - "sc-client-db", - "sc-consensus", - "sc-consensus-babe", - "sc-consensus-beefy", - "sc-consensus-epochs", - "sc-consensus-grandpa", - "sc-consensus-slots", - "sc-executor", - "sc-keystore", - "sc-mixnet", - "sc-network", - "sc-network-common", - "sc-network-statement", - "sc-network-sync", - "sc-offchain", - "sc-rpc", - "sc-service", "sc-service-test", - "sc-statement-store", - "sc-storage-monitor", - "sc-sync-state-rpc", - "sc-sysinfo", - "sc-telemetry", - "sc-transaction-pool", - "sc-transaction-pool-api", "scale-info", "serde", "serde_json", "soketto", - "sp-api", - "sp-application-crypto", - "sp-authority-discovery", - "sp-blockchain", - "sp-consensus", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-consensus-grandpa", - "sp-core", - "sp-crypto-hashing", - "sp-externalities 0.25.0", - "sp-genesis-builder", - "sp-inherents", - "sp-io", - "sp-keyring", - "sp-keystore", - "sp-mixnet", - "sp-mmr-primitives", - "sp-runtime", - "sp-state-machine", - "sp-statement-store", - "sp-timestamp", - "sp-tracing 16.0.0", - "sp-transaction-storage-proof", - "sp-trie", "staging-node-inspect", - "substrate-build-script-utils", "substrate-cli-test-utils", - "substrate-frame-cli", - "substrate-rpc-client", "tempfile", "tokio", "tokio-util", @@ -20485,17 +20867,17 @@ dependencies = [ [[package]] name = "str0m" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3f10d3f68e60168d81110410428a435dbde28cc5525f5f7c6fdec92dbdc2800" +checksum = "6706347e49b13373f7ddfafad47df7583ed52083d6fc8a594eb2c80497ef959d" dependencies = [ "combine", "crc", + "fastrand 2.1.0", "hmac 0.12.1", "once_cell", "openssl", "openssl-sys", - "rand 0.8.5", "sctp-proto", "serde", "sha-1 0.10.1", @@ -20633,7 +21015,7 @@ version = "0.1.0" dependencies = [ "assert_cmd", "futures", - "nix 0.27.1", + "nix 0.28.0", "node-primitives", "regex", "sc-cli", @@ -20644,18 +21026,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "substrate-frame-cli" -version = "32.0.0" -dependencies = [ - "clap 4.5.4", - "frame-support", - "frame-system", - "sc-cli", - "sp-core", - "sp-runtime", -] - [[package]] name = "substrate-frame-rpc-support" version = "29.0.0" @@ 
-20813,6 +21183,7 @@ version = "2.0.0" dependencies = [ "array-bytes", "frame-executive", + "frame-metadata-hash-extension", "frame-support", "frame-system", "frame-system-rpc-runtime-api", @@ -20904,13 +21275,22 @@ dependencies = [ name = "substrate-wasm-builder" version = "17.0.0" dependencies = [ + "array-bytes", "build-helper", "cargo_metadata", "console", "filetime", + "frame-metadata", + "merkleized-metadata", + "parity-scale-codec", "parity-wasm", "polkavm-linker", + "sc-executor", + "sp-core", + "sp-io", "sp-maybe-compressed-blob", + "sp-tracing 16.0.0", + "sp-version", "strum 0.26.2", "tempfile", "toml 0.8.13", @@ -21196,6 +21576,28 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" +[[package]] +name = "test-log" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dffced63c2b5c7be278154d76b479f9f9920ed34e7574201407f0b14e2bbb93" +dependencies = [ + "env_logger 0.11.3", + "test-log-macros", + "tracing-subscriber", +] + +[[package]] +name = "test-log-macros" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "test-parachain-adder" version = "1.0.0" @@ -21793,7 +22195,7 @@ name = "tracing-gum-proc-macro" version = "5.0.0" dependencies = [ "assert_matches", - "expander 2.1.0", + "expander", "proc-macro-crate 3.1.0", "proc-macro2 1.0.85", "quote 1.0.36", @@ -22828,6 +23230,7 @@ dependencies = [ "frame-benchmarking", "frame-election-provider-support", "frame-executive", + "frame-metadata-hash-extension", "frame-remote-externalities", "frame-support", "frame-system", @@ -22846,6 +23249,7 @@ dependencies = [ "pallet-beefy-mmr", "pallet-collective", "pallet-conviction-voting", + "pallet-delegated-staking", "pallet-democracy", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", @@ -23382,6 +23786,31 @@ dependencies = [ "rustix 0.38.34", ] +[[package]] +name = "xcm-docs" +version = "0.1.0" +dependencies = [ + "docify", + "pallet-balances", + "pallet-message-queue", + "pallet-xcm", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", + "polkadot-sdk-frame", + "scale-info", + "simple-mermaid 0.1.0", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "test-log", + "xcm-simulator", +] + [[package]] name = "xcm-emulator" version = "0.5.0" @@ -23480,12 +23909,16 @@ name = "xcm-simulator" version = "7.0.0" dependencies = [ "frame-support", + "frame-system", "parity-scale-codec", "paste", "polkadot-core-primitives", "polkadot-parachain-primitives", + "polkadot-primitives", "polkadot-runtime-parachains", + "scale-info", "sp-io", + "sp-runtime", "sp-std 14.0.0", "staging-xcm", "staging-xcm-builder", diff --git a/Cargo.toml b/Cargo.toml index c936cacd46a4ed1a6835ac2ea59688c5a99c7c8a..1b695d5572c901ee65717166d8e3ee801aa8b731 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -218,6 +218,7 @@ members = [ "polkadot/utils/generate-bags", "polkadot/utils/remote-ext-tests/bags-list", "polkadot/xcm", + "polkadot/xcm/docs", "polkadot/xcm/pallet-xcm", "polkadot/xcm/pallet-xcm-benchmarks", "polkadot/xcm/procedural", @@ -328,6 +329,7 @@ members = [ "substrate/frame/contracts/uapi", 
"substrate/frame/conviction-voting", "substrate/frame/core-fellowship", + "substrate/frame/delegated-staking", "substrate/frame/democracy", "substrate/frame/election-provider-multi-phase", "substrate/frame/election-provider-multi-phase/test-staking-e2e", @@ -359,6 +361,7 @@ members = [ "substrate/frame/membership", "substrate/frame/merkle-mountain-range", "substrate/frame/message-queue", + "substrate/frame/metadata-hash-extension", "substrate/frame/migrations", "substrate/frame/mixnet", "substrate/frame/multisig", @@ -371,7 +374,8 @@ members = [ "substrate/frame/nomination-pools/benchmarking", "substrate/frame/nomination-pools/fuzzer", "substrate/frame/nomination-pools/runtime-api", - "substrate/frame/nomination-pools/test-staking", + "substrate/frame/nomination-pools/test-delegate-stake", + "substrate/frame/nomination-pools/test-transfer-stake", "substrate/frame/offences", "substrate/frame/offences/benchmarking", "substrate/frame/paged-list", @@ -501,7 +505,6 @@ members = [ "substrate/utils/build-script-utils", "substrate/utils/fork-tree", "substrate/utils/frame/benchmarking-cli", - "substrate/utils/frame/frame-utilities-cli", "substrate/utils/frame/generate-bags", "substrate/utils/frame/generate-bags/node-runtime", "substrate/utils/frame/omni-bencher", @@ -513,21 +516,23 @@ members = [ "substrate/utils/prometheus", "substrate/utils/substrate-bip39", "substrate/utils/wasm-builder", - "templates/minimal", "templates/minimal/node", "templates/minimal/pallets/template", "templates/minimal/runtime", - + "templates/parachain/node", + "templates/parachain/pallets/template", + "templates/parachain/runtime", "templates/solochain/node", "templates/solochain/pallets/template", "templates/solochain/runtime", + "umbrella", +] - "templates/parachain/node", - "templates/parachain/pallets/template", - "templates/parachain/runtime", +default-members = [ + "polkadot", + "substrate/bin/node/cli", ] -default-members = ["polkadot", "substrate/bin/node/cli"] [workspace.lints.rust] suspicious_double_ref_op = { level = "allow", priority = 2 } diff --git a/README.md b/README.md index 63743a456f4c8f8561bbeee8c59d63b88d352285..773481732520e2a8c9217e72e426a9bda781fb6f 100644 --- a/README.md +++ b/README.md @@ -1,81 +1,83 @@ -> NOTE: We have recently made significant changes to our repository structure. In order to streamline our development -process and foster better contributions, we have merged three separate repositories Cumulus, Substrate and Polkadot into -this repository. Read more about the changes [ -here](https://polkadot-public.notion.site/Polkadot-SDK-FAQ-fbc4cecc2c46443fb37b9eeec2f0d85f). -# Polkadot SDK +
+ +![SDK Logo](./docs/images/Polkadot_Logo_Horizontal_Pink_White.png#gh-dark-mode-only) +![SDK Logo](./docs/images/Polkadot_Logo_Horizontal_Pink_Black.png#gh-light-mode-only) -![](https://cms.polkadot.network/content/images/2021/06/1-xPcVR_fkITd0ssKBvJ3GMw.png) +# Polkadot SDK -[![StackExchange](https://img.shields.io/badge/StackExchange-Community%20&%20Support-222222?logo=stackexchange)](https://substrate.stackexchange.com/) +![GitHub stars](https://img.shields.io/github/stars/paritytech/polkadot-sdk)  ![GitHub +forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk) -The Polkadot SDK repository provides all the resources needed to start building on the Polkadot network, a multi-chain -blockchain platform that enables different blockchains to interoperate and share information in a secure and scalable -way. The Polkadot SDK comprises three main pieces of software: + +[![StackExchange](https://img.shields.io/badge/StackExchange-Community%20&%20Support-222222?logo=stackexchange)](https://substrate.stackexchange.com/)  ![GitHub contributors](https://img.shields.io/github/contributors/paritytech/polkadot-sdk)  ![GitHub commit activity](https://img.shields.io/github/commit-activity/m/paritytech/polkadot-sdk) -## [Polkadot](./polkadot/) -[![PolkadotForum](https://img.shields.io/badge/Polkadot_Forum-e6007a?logo=polkadot)](https://forum.polkadot.network/) -[![Polkadot-license](https://img.shields.io/badge/License-GPL3-blue)](./polkadot/LICENSE) +![GitHub lines of code](https://tokei.rs/b1/github/paritytech/polkadot-sdk)   +![GitHub last commit](https://img.shields.io/github/last-commit/paritytech/polkadot-sdk) -Implementation of a node for the https://polkadot.network in Rust, using the Substrate framework. This directory -currently contains runtimes for the Westend and Rococo test networks. Polkadot, Kusama and their system chain runtimes -are located in the [`runtimes`](https://github.com/polkadot-fellows/runtimes/) repository maintained by -[the Polkadot Technical Fellowship](https://polkadot-fellows.github.io/dashboard/#/overview). +> The Polkadot SDK repository provides all the components needed to start building on the +> [Polkadot](https://polkadot.network) network, a multi-chain blockchain platform that enables +> different blockchains to interoperate and share information in a secure and scalable way. -## [Substrate](./substrate/) - [![SubstrateRustDocs](https://img.shields.io/badge/Rust_Docs-Substrate-24CC85?logo=rust)](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/substrate/index.html) - [![Substrate-license](https://img.shields.io/badge/License-GPL3%2FApache2.0-blue)](./substrate/README.md#LICENSE) +
-Substrate is the primary blockchain SDK used by developers to create the parachains that make up the Polkadot network. -Additionally, it allows for the development of self-sovereign blockchains that operate completely independently of -Polkadot. +## 📚 Documentation -## [Cumulus](./cumulus/) -[![CumulusRustDocs](https://img.shields.io/badge/Rust_Docs-Cumulus-222222?logo=rust)](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html) -[![Cumulus-license](https://img.shields.io/badge/License-GPL3-blue)](./cumulus/LICENSE) +* [🦀 rust-docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html) + * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) + to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM +* Other Resources: + * [Polkadot Wiki -> Build](https://wiki.polkadot.network/docs/build-guide) -Cumulus is a set of tools for writing Substrate-based Polkadot parachains. +## 🚀 Releases -## Releases +> [!NOTE] +> Our release process is still Work-In-Progress and may not yet reflect the aspired outline +> here. -> [!NOTE] -> Our release process is still Work-In-Progress and may not yet reflect the aspired outline here. +The Polkadot-SDK has two release channels: `stable` and `nightly`. Production software is advised to +only use `stable`. `nightly` is meant for tinkerers to try out the latest features. The detailed +release process is described in [RELEASE.md](docs/RELEASE.md). -The Polkadot-SDK has two release channels: `stable` and `nightly`. Production software is advised to only use `stable`. -`nightly` is meant for tinkerers to try out the latest features. The detailed release process is described in -[RELEASE.md](docs/RELEASE.md). +### 😌 Stable -### Stable +`stable` releases have a support duration of **three months**. In this period, the release will not +have any breaking changes. It will receive bug fixes, security fixes, performance fixes and new +non-breaking features on a **two week** cadence. -`stable` releases have a support duration of **three months**. In this period, the release will not have any breaking -changes. It will receive bug fixes, security fixes, performance fixes and new non-breaking features on a **two week** -cadence. +### 🤠 Nightly -### Nightly +`nightly` releases are released every night from the `master` branch, potentially with breaking +changes. They have pre-release version numbers in the format `major.0.0-nightlyYYMMDD`. -`nightly` releases are released every night from the `master` branch, potentially with breaking changes. They have -pre-release version numbers in the format `major.0.0-nightlyYYMMDD`. +## 🔐 Security -## Upstream Dependencies +The security policy and procedures can be found in +[docs/contributor/SECURITY.md](./docs/contributor/SECURITY.md). -Below are the primary upstream dependencies utilized in this project: +## 🤍 Contributing & Code of Conduct -- [`parity-scale-codec`](https://crates.io/crates/parity-scale-codec) -- [`parity-db`](https://crates.io/crates/parity-db) -- [`parity-common`](https://github.com/paritytech/parity-common) -- [`trie`](https://github.com/paritytech/trie) +Ensure you follow our [contribution guidelines](./docs/contributor/CONTRIBUTING.md). In every +interaction and contribution, this project adheres to the [Contributor Covenant Code of +Conduct](./docs/contributor/CODE_OF_CONDUCT.md). -## Security +### 👾 Ready to Contribute? 
-The security policy and procedures can be found in [docs/contributor/SECURITY.md](./docs/contributor/SECURITY.md). +Take a look at the issues labeled with [`mentor`](https://github.com/paritytech/polkadot-sdk/labels/C1-mentor) +(or alternatively [this](https://mentor.tasty.limo/) page, created by one of the maintainers) to get started! +We always recognize valuable contributions by proposing an on-chain tip to the Polkadot network as a token of our +appreciation. -## Contributing & Code of Conduct +## Polkadot Fellowship -Ensure you follow our [contribution guidelines](./docs/contributor/CONTRIBUTING.md). In every interaction and -contribution, this project adheres to the [Contributor Covenant Code of Conduct](./docs/contributor/CODE_OF_CONDUCT.md). +Development in this repo usually goes hand in hand with the `fellowship` organization. In short, +this repository provides all the SDK pieces needed to build both Polkadot and its parachains, but +the actual Polkadot runtime lives in the `fellowship/runtimes` repository. Read more about the +fellowship, this separation, and the RFC process +[here](https://polkadot-fellows.github.io/dashboard/). -## Additional Resources +## History -- For monitoring upcoming changes and current proposals related to the technical implementation of the Polkadot network, - visit the [`Requests for Comment (RFC)`](https://github.com/polkadot-fellows/RFCs) repository. While it's maintained - by the Polkadot Fellowship, the RFC process welcomes contributions from everyone. +This repository is the amalgamation of three separate repositories that used to make up the Polkadot SDK, +namely Substrate, Polkadot and Cumulus. Read more about the merge and its history +[here](https://polkadot-public.notion.site/Polkadot-SDK-FAQ-fbc4cecc2c46443fb37b9eeec2f0d85f). 
diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 74049031afe63cf0d2bc95193541a2b1303a1bbf..783009a8c890768bcc85dafec14dc3da9e8da573 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -11,7 +11,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/bin/runtime-common/src/messages_benchmarking.rs b/bridges/bin/runtime-common/src/messages_benchmarking.rs index 0c7a9ad1a83d6a83e0c9fe1f5e77ba2c4cefc17d..74494f7908045fac601b4c3f64a456ad12dacd6f 100644 --- a/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ b/bridges/bin/runtime-common/src/messages_benchmarking.rs @@ -271,7 +271,7 @@ pub fn generate_xcm_builder_bridge_message_sample( move |expected_message_size| -> MessagePayload { // For XCM bridge hubs, it is the message that // will be pushed further to some XCM queue (XCMP/UMP) - let location = xcm::VersionedInteriorLocation::V4(destination.clone()); + let location = xcm::VersionedInteriorLocation::from(destination.clone()); let location_encoded_size = location.encoded_size(); // we don't need to be super-precise with `expected_size` here @@ -294,16 +294,13 @@ pub fn generate_xcm_builder_bridge_message_sample( expected_message_size, location_encoded_size, xcm_size, xcm_data_size, ); - let xcm = xcm::VersionedXcm::<()>::V4( - vec![Instruction::<()>::ExpectPallet { - index: 0, - name: vec![42; xcm_data_size], - module_name: vec![], - crate_major: 0, - min_crate_minor: 0, - }] - .into(), - ); + let xcm = xcm::VersionedXcm::<()>::from(Xcm(vec![Instruction::<()>::ExpectPallet { + index: 0, + name: vec![42; xcm_data_size], + module_name: vec![], + crate_major: 0, + min_crate_minor: 0, + }])); // this is the `BridgeMessage` from polkadot xcm builder, but it has no constructor // or public fields, so just tuple diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index e323f1edfc71da8c84fe8cabb977da85ce4d303e..f49474667896049cfd6aff4bf4a4b0d9d6e73c95 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -148,7 +148,6 @@ impl frame_system::Config for TestRuntime { type AccountId = ThisChainAccountId; type Block = ThisChainBlock; type AccountData = pallet_balances::AccountData<ThisChainBalance>; - type BlockHashCount = ConstU32<250>; } impl pallet_utility::Config for TestRuntime { diff --git 
a/bridges/chains/chain-asset-hub-westend/Cargo.toml b/bridges/chains/chain-asset-hub-westend/Cargo.toml index 1c08ee28e417cb50ce9ef9ded5f17163e1bb30d4..4b3ed052f1382d0c7f076ad5152c861f60d8bef1 100644 --- a/bridges/chains/chain-asset-hub-westend/Cargo.toml +++ b/bridges/chains/chain-asset-hub-westend/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate Dependencies diff --git a/bridges/chains/chain-polkadot-bulletin/Cargo.toml b/bridges/chains/chain-polkadot-bulletin/Cargo.toml index 2db16a00e92492e3a167458343a88a24c2186748..700247b7055a891bec2d4a40bfd126720a0d952c 100644 --- a/bridges/chains/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/chains/chain-polkadot-bulletin/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Bridge Dependencies diff --git a/bridges/docs/running-relayer.md b/bridges/docs/running-relayer.md index 710810a476e4df5e4b80fde31f9576be5ad26391..594cbc35a106b1ebd8cc1e9e9c0542c759489197 100644 --- a/bridges/docs/running-relayer.md +++ b/bridges/docs/running-relayer.md @@ -139,7 +139,7 @@ your transactions that are **validated** on top of block, where it is active get becomes expired when the block with the number you have specified during registration is "mined". It is the `validTill` parameter of the `register` call (see below). After that `validTill` block, you may unregister and get your reserved funds back. There's also an intermediate point between those blocks - it is the `validTill - LEASE`, -where `LEASE` is the the chain constant, controlled by the governance. Initially it is set to `300` blocks. +where `LEASE` is the chain constant, controlled by the governance. Initially it is set to `300` blocks. All your transactions, **validated** between the `validTill - LEASE` and `validTill` blocks do not get the priority boost. Also, it is forbidden to specify `validTill` such that the `validTill - currentBlock` is less than the `LEASE`. @@ -156,7 +156,7 @@ than the `LEASE`. | 700 | Inactive | Your message delivery transactions are not boosted | | 701 | Inactive | Your message delivery transactions are not boosted | | ... | Inactive | Your message delivery transactions are not boosted | -| 1000 | Expired | Your may submit a tx with the `deregister` call | +| 1000 | Expired | You may submit a tx with the `deregister` call | @@ -230,7 +230,7 @@ your relayer account. Then: - set the `bridgedChainId` to `bhpd`; -- check the both variants of the `owner` field: `ThisChain` is used to pay for message delivery transactions +- check both variants of the `owner` field: `ThisChain` is used to pay for message delivery transactions and `BridgedChain` is used to pay for message confirmation transactions. If check shows that you have some rewards, you can craft the claim transaction, with similar parameters. 
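The registration window that `running-relayer.md` walks through above reduces to simple block arithmetic. Below is a minimal Rust sketch of that lifecycle, assuming the initial `LEASE = 300` value named in the doc; the `RegistrationState` enum and the `registration_state` helper are invented for this illustration and are not the relayers pallet's actual API.

```rust
/// Illustrative only: models the relayer registration states described in
/// `running-relayer.md`. Not the relayers pallet's real types.
#[derive(Debug, PartialEq)]
enum RegistrationState {
    /// Message delivery transactions get the priority boost.
    Active,
    /// Not yet expired, but inside the `LEASE` tail
    /// (`validTill - LEASE ..= validTill - 1`): no priority boost.
    Inactive,
    /// `validTill` has been reached; reserved funds may be reclaimed via `deregister`.
    Expired,
}

/// Initial value of the chain constant, per the doc; governance may change it.
const LEASE: u32 = 300;

fn registration_state(current_block: u32, valid_till: u32) -> RegistrationState {
    if current_block >= valid_till {
        RegistrationState::Expired
    } else if current_block >= valid_till.saturating_sub(LEASE) {
        RegistrationState::Inactive
    } else {
        RegistrationState::Active
    }
}

fn main() {
    // Mirrors the table in the doc for `validTill = 1000`:
    assert_eq!(registration_state(699, 1000), RegistrationState::Active);
    assert_eq!(registration_state(700, 1000), RegistrationState::Inactive);
    assert_eq!(registration_state(1000, 1000), RegistrationState::Expired);
}
```

Note that the boundary block `validTill - LEASE` is itself already inactive, matching the table in the doc, and registering with less than `LEASE` blocks of headroom is rejected outright.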
diff --git a/bridges/modules/beefy/Cargo.toml b/bridges/modules/beefy/Cargo.toml index 438f32fb146042f2704deb3092381a2b5cc68394..e36bbb615f23a20d4ef4a4f4ea8418e752d5b01f 100644 --- a/bridges/modules/beefy/Cargo.toml +++ b/bridges/modules/beefy/Cargo.toml @@ -12,7 +12,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true } diff --git a/bridges/modules/beefy/src/lib.rs b/bridges/modules/beefy/src/lib.rs index 27c83921021bb4299b18cbf2d3216427f8c89ccc..ccddcde920f694a87ab22042f44d84707982eefe 100644 --- a/bridges/modules/beefy/src/lib.rs +++ b/bridges/modules/beefy/src/lib.rs @@ -316,7 +316,7 @@ pub mod pallet { /// Pallet owner has the right to halt all pallet operations and then resume it. If it is /// `None`, then there are no direct ways to halt/resume pallet operations, but other /// runtime methods may still be used to do that (i.e. `democracy::referendum` to update halt - /// flag directly or calling `halt_operations`). + /// flag directly or calling `set_operating_mode`). #[pallet::storage] pub type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index 0db1827211a05f715cd1aed0db93da0f52c9d67c..0ca6b67503511976ea9122f64e3c2e515e971177 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -13,7 +13,7 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/modules/grandpa/README.md b/bridges/modules/grandpa/README.md index 4a3099b8afc654bfced296aaa0ead4a5d113eb7f..df63f4aa639f21186932bde6b2a475b3873a6bf7 100644 --- a/bridges/modules/grandpa/README.md +++ b/bridges/modules/grandpa/README.md @@ -87,7 +87,7 @@ It'd be better for anyone (for chain and for submitters) to reject all transacti already known headers to the pallet. This way, we leave block space to other useful transactions and we don't charge concurrent submitters for their honest actions. -To deal with that, we have a [signed extension](./src/call_ext) that may be added to the runtime. +To deal with that, we have a [signed extension](./src/call_ext.rs) that may be added to the runtime. It does exactly what is required - rejects all transactions with already known headers. The submitter pays nothing for such transactions - they're simply removed from the transaction pool, when the block is built. 
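The check performed by that signed extension can be pictured with a small sketch. This is not the pallet's real implementation (the actual extension inspects the `submit_finality_proof` call against pallet storage); the types and names below are hypothetical.

```rust
use std::collections::HashSet;

/// Hypothetical view of what the pallet already knows about the bridged chain.
struct HeaderChainView {
    best_finalized_number: u32,
    imported_header_hashes: HashSet<[u8; 32]>,
}

impl HeaderChainView {
    /// Pool-level check: submissions that bring an already-known header are
    /// rejected here, so they are dropped before inclusion and the submitter
    /// pays nothing for them.
    fn is_useful_submission(&self, number: u32, hash: [u8; 32]) -> bool {
        number > self.best_finalized_number && !self.imported_header_hashes.contains(&hash)
    }
}

fn main() {
    let view = HeaderChainView {
        best_finalized_number: 100,
        imported_header_hashes: HashSet::from([[1u8; 32]]),
    };
    // A header we already have, or an older one, is filtered out for free.
    assert!(!view.is_useful_submission(100, [1u8; 32]));
    // A genuinely new header passes on to regular validation.
    assert!(view.is_useful_submission(101, [2u8; 32]));
}
```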
diff --git a/bridges/modules/grandpa/src/benchmarking.rs b/bridges/modules/grandpa/src/benchmarking.rs index 11033373ce478fa9fefb613a1377449bb77daf1d..fb7354e05c06eeeab17363d5671cd8d636ec48f3 100644 --- a/bridges/modules/grandpa/src/benchmarking.rs +++ b/bridges/modules/grandpa/src/benchmarking.rs @@ -70,11 +70,12 @@ const MAX_VOTE_ANCESTRIES_RANGE_END: u32 = // the same with validators - if there are too many validators, let's run benchmarks on a subrange fn precommits_range_end, I: 'static>() -> u32 { let max_bridged_authorities = T::BridgedChain::MAX_AUTHORITIES_COUNT; - if max_bridged_authorities > 128 { + let max_bridged_authorities = if max_bridged_authorities > 128 { sp_std::cmp::max(128, max_bridged_authorities / 5) } else { max_bridged_authorities }; + required_justification_precommits(max_bridged_authorities) } @@ -138,5 +139,19 @@ benchmarks_instance_pallet! { assert!(!>::contains_key(genesis_header.hash())); } + force_set_pallet_state { + let set_id = 100; + let authorities = accounts(T::BridgedChain::MAX_AUTHORITIES_COUNT as u16) + .iter() + .map(|id| (AuthorityId::from(*id), 1)) + .collect::>(); + let (header, _) = prepare_benchmark_data::(1, 1); + let expected_hash = header.hash(); + }: force_set_pallet_state(RawOrigin::Root, set_id, authorities, Box::new(header)) + verify { + assert_eq!(>::get().unwrap().1, expected_hash); + assert_eq!(>::get().set_id, set_id); + } + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) } diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs index 98fbeaa30bbac4c6bade6dc4b7d2f97d53940c6b..f08eb4c5d1ab5ae231afc388dacb0699d58fbc46 100644 --- a/bridges/modules/grandpa/src/call_ext.rs +++ b/bridges/modules/grandpa/src/call_ext.rs @@ -18,12 +18,8 @@ use crate::{ weights::WeightInfo, BestFinalized, BridgedBlockNumber, BridgedHeader, Config, CurrentAuthoritySet, Error, FreeHeadersRemaining, Pallet, }; -use bp_header_chain::{ - justification::GrandpaJustification, max_expected_submit_finality_proof_arguments_size, - ChainWithGrandpa, GrandpaConsensusLogReader, -}; +use bp_header_chain::{justification::GrandpaJustification, submit_finality_proof_limits_extras}; use bp_runtime::{BlockNumberOf, Chain, OwnedBridgeModule}; -use codec::Encode; use frame_support::{ dispatch::CallableCallFor, traits::{Get, IsSubType}, @@ -303,53 +299,31 @@ pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( current_set_id: Option, is_free_execution_expected: bool, ) -> SubmitFinalityProofInfo> { - let block_number = *finality_target.number(); - - // the `submit_finality_proof` call will reject justifications with invalid, duplicate, - // unknown and extra signatures. It'll also reject justifications with less than necessary - // signatures. So we do not care about extra weight because of additional signatures here. - let precommits_len = justification.commit.precommits.len().saturated_into(); - let required_precommits = precommits_len; + // check if the call exceeds limits. In other words - whether some extra size or weight is + // included in the call + let extras = + submit_finality_proof_limits_extras::(finality_target, justification); // We do care about extra weight because of more-than-expected headers in the votes // ancestries. But we have problems computing extra weight for additional headers (weight of // additional header is too small, so that our benchmarks aren't detecting that).
So if there // are more than expected headers in votes ancestries, we will treat the whole call weight // as an extra weight. - let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); - let extra_weight = - if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY { - T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) - } else { - Weight::zero() - }; - - // check if the `finality_target` is a mandatory header. If so, we are ready to refund larger - // size - let is_mandatory_finality_target = - GrandpaConsensusLogReader::>::find_scheduled_change( - finality_target.digest(), - ) - .is_some(); - - // we can estimate extra call size easily, without any additional significant overhead - let actual_call_size: u32 = finality_target - .encoded_size() - .saturating_add(justification.encoded_size()) - .saturated_into(); - let max_expected_call_size = max_expected_submit_finality_proof_arguments_size::( - is_mandatory_finality_target, - required_precommits, - ); - let extra_size = actual_call_size.saturating_sub(max_expected_call_size); + let extra_weight = if extras.is_weight_limit_exceeded { + let precommits_len = justification.commit.precommits.len().saturated_into(); + let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); + T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) + } else { + Weight::zero() + }; SubmitFinalityProofInfo { - block_number, + block_number: *finality_target.number(), current_set_id, - is_mandatory: is_mandatory_finality_target, + is_mandatory: extras.is_mandatory_finality_target, is_free_execution_expected, extra_weight, - extra_size, + extra_size: extras.extra_size, } } diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index a927882aaaa27210c4777fa2e99a109b4d8b500b..3b77f676870e1a28b8367f1b14d24c9ca83ece4a 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -44,7 +44,7 @@ use bp_header_chain::{ }; use bp_runtime::{BlockNumberOf, HashOf, HasherOf, HeaderId, HeaderOf, OwnedBridgeModule}; use frame_support::{dispatch::PostDispatchInfo, ensure, DefaultNoBound}; -use sp_consensus_grandpa::SetId; +use sp_consensus_grandpa::{AuthorityList, SetId}; use sp_runtime::{ traits::{Header as HeaderT, Zero}, SaturatedConversion, @@ -360,6 +360,42 @@ pub mod pallet { Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee }) } + + /// Set current authorities set and best finalized bridged header to given values + /// (almost) without any checks. This call can fail only if: + /// + /// - the call origin is not a root or a pallet owner; + /// + /// - there are too many authorities in the new set. + /// + /// No other checks are made. Previously imported headers stay in the storage and + /// are still accessible after the call. + #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::force_set_pallet_state())] + pub fn force_set_pallet_state( + origin: OriginFor, + new_current_set_id: SetId, + new_authorities: AuthorityList, + new_best_header: Box>, + ) -> DispatchResult { + Self::ensure_owner_or_root(origin)?; + + // save new authorities set. It only fails if there are too many authorities + // in the new set + save_authorities_set::( + CurrentAuthoritySet::::get().set_id, + new_current_set_id, + new_authorities, + )?; + + // save new best header. 
It may be older than the best header that is already + // known to the pallet - it changes nothing (except for the fact that previously + // imported headers may still be used to prove something) + let new_best_header_hash = new_best_header.hash(); + insert_header::(*new_best_header, new_best_header_hash); + + Ok(()) + } } /// Number of free header submissions that we may yet accept in the current block. @@ -423,7 +459,7 @@ pub mod pallet { /// Pallet owner has a right to halt all pallet operations and then resume it. If it is /// `None`, then there are no direct ways to halt/resume pallet operations, but other /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). + /// flag directly or call the `set_operating_mode`). #[pallet::storage] pub type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; @@ -592,33 +628,45 @@ pub mod pallet { // GRANDPA only includes a `delay` for forced changes, so this isn't valid. ensure!(change.delay == Zero::zero(), >::UnsupportedScheduledChange); - // TODO [#788]: Stop manually increasing the `set_id` here. - let next_authorities = StoredAuthoritySet:: { - authorities: change - .next_authorities - .try_into() - .map_err(|_| Error::::TooManyAuthoritiesInSet)?, - set_id: current_set_id + 1, - }; - // Since our header schedules a change and we know the delay is 0, it must also enact // the change. - >::put(&next_authorities); - - log::info!( - target: LOG_TARGET, - "Transitioned from authority set {} to {}! New authorities are: {:?}", + // TODO [#788]: Stop manually increasing the `set_id` here. + return save_authorities_set::( current_set_id, current_set_id + 1, - next_authorities, + change.next_authorities, ); - - return Ok(Some(next_authorities.into())) }; Ok(None) } + /// Save new authorities set. + pub(crate) fn save_authorities_set, I: 'static>( + old_current_set_id: SetId, + new_current_set_id: SetId, + new_authorities: AuthorityList, + ) -> Result, DispatchError> { + let next_authorities = StoredAuthoritySet:: { + authorities: new_authorities + .try_into() + .map_err(|_| Error::::TooManyAuthoritiesInSet)?, + set_id: new_current_set_id, + }; + + >::put(&next_authorities); + + log::info!( + target: LOG_TARGET, + "Transitioned from authority set {} to {}! New authorities are: {:?}", + old_current_set_id, + new_current_set_id, + next_authorities, + ); + + Ok(Some(next_authorities.into())) + } + /// Verify a GRANDPA justification (finality proof) for a given header. /// /// Will use the GRANDPA current authorities known to the pallet. 
@@ -1700,4 +1748,98 @@ mod tests { assert_eq!(FreeHeadersRemaining::::get(), Some(0)); }) } + + #[test] + fn force_set_pallet_state_works() { + run_test(|| { + let header25 = test_header(25); + let header50 = test_header(50); + let ok_new_set_id = 100; + let ok_new_authorities = authority_list(); + let bad_new_set_id = 100; + let bad_new_authorities: Vec<_> = std::iter::repeat((ALICE.into(), 1)) + .take(MAX_BRIDGED_AUTHORITIES as usize + 1) + .collect(); + + // initialize and import several headers + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(30)); + + // wrong origin => error + assert_noop!( + Pallet::::force_set_pallet_state( + RuntimeOrigin::signed(1), + ok_new_set_id, + ok_new_authorities.clone(), + Box::new(header50.clone()), + ), + DispatchError::BadOrigin, + ); + + // too many authorities in the set => error + assert_noop!( + Pallet::::force_set_pallet_state( + RuntimeOrigin::root(), + bad_new_set_id, + bad_new_authorities.clone(), + Box::new(header50.clone()), + ), + Error::::TooManyAuthoritiesInSet, + ); + + // force import header 50 => ok + assert_ok!(Pallet::::force_set_pallet_state( + RuntimeOrigin::root(), + ok_new_set_id, + ok_new_authorities.clone(), + Box::new(header50.clone()), + ),); + + // force import header 25 after 50 => ok + assert_ok!(Pallet::::force_set_pallet_state( + RuntimeOrigin::root(), + ok_new_set_id, + ok_new_authorities.clone(), + Box::new(header25.clone()), + ),); + + // we may import better headers + assert_noop!(submit_finality_proof(20), Error::::OldHeader); + assert_ok!(submit_finality_proof_with_set_id(26, ok_new_set_id)); + + // we can even reimport header #50. It **will cause** some issues during pruning + // (see below) + assert_ok!(submit_finality_proof_with_set_id(50, ok_new_set_id)); + + // and all headers are available. Even though there are 4 headers, the ring + // buffer thinks that there are 5, because we've imported header #50 twice + assert!(GrandpaChainHeaders::::finalized_header_state_root( + test_header(30).hash() + ) + .is_some()); + assert!(GrandpaChainHeaders::::finalized_header_state_root( + test_header(50).hash() + ) + .is_some()); + assert!(GrandpaChainHeaders::::finalized_header_state_root( + test_header(25).hash() + ) + .is_some()); + assert!(GrandpaChainHeaders::::finalized_header_state_root( + test_header(26).hash() + ) + .is_some()); + + // next header import will prune header 30 + assert_ok!(submit_finality_proof_with_set_id(70, ok_new_set_id)); + // next header import will prune header 50 + assert_ok!(submit_finality_proof_with_set_id(80, ok_new_set_id)); + // next header import will prune header 25 + assert_ok!(submit_finality_proof_with_set_id(90, ok_new_set_id)); + // next header import will prune header 26 + assert_ok!(submit_finality_proof_with_set_id(100, ok_new_set_id)); + // next header import will prune header 50 again. But it is fine + assert_ok!(submit_finality_proof_with_set_id(110, ok_new_set_id)); + }); + } } diff --git a/bridges/modules/grandpa/src/weights.rs b/bridges/modules/grandpa/src/weights.rs index a75e7b5a8e4ada8ce880a040492c904d8035642c..9719bc9c022e2f082307ddf49fc02eb70f4c3497 100644 --- a/bridges/modules/grandpa/src/weights.rs +++ b/bridges/modules/grandpa/src/weights.rs @@ -51,6 +51,7 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bridge_grandpa.
pub trait WeightInfo { fn submit_finality_proof(p: u32, v: u32) -> Weight; + fn force_set_pallet_state() -> Weight; } /// Weights for `pallet_bridge_grandpa` that are generated using one of the Bridge testnets. @@ -109,6 +110,30 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } + + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: + /// Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: + /// Some(4), added: 499, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHashes` + /// (r:1 w:1) Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), + /// `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:0 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), + /// added: 531, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 + /// w:2) Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: + /// Some(68), added: 1553, mode: `MaxEncodedLen`) + fn force_set_pallet_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `452` + // Estimated: `51735` + // Minimum execution time: 62_232_000 picoseconds. + Weight::from_parts(78_755_000, 0) + .saturating_add(Weight::from_parts(0, 51735)) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(6)) + } } // For backwards compatibility and tests @@ -164,4 +189,28 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } + + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: + /// Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: + /// Some(4), added: 499, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHashes` + /// (r:1 w:1) Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), + /// `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:0 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), + /// added: 531, mode: `MaxEncodedLen`) Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 + /// w:2) Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: + /// Some(68), added: 1553, mode: `MaxEncodedLen`) + fn force_set_pallet_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `452` + // Estimated: `51735` + // Minimum execution time: 62_232_000 picoseconds. 
+ Weight::from_parts(78_755_000, 0) + .saturating_add(Weight::from_parts(0, 51735)) + .saturating_add(RocksDbWeight::get().reads(3)) + .saturating_add(RocksDbWeight::get().writes(6)) + } } diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index df5b92db7402bd048b1afca9c13cfb1dddc74863..71c86ccc0361708684d0a93166f858118dbf0d92 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } num-traits = { version = "0.2", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/modules/messages/README.md b/bridges/modules/messages/README.md index fe62305748cd1d6030a7a8085bff29f24ee4dbc5..c06b96b857dea1cdf7fdaed81e70d66aff116064 100644 --- a/bridges/modules/messages/README.md +++ b/bridges/modules/messages/README.md @@ -187,11 +187,13 @@ There may be a special account in every runtime where the messages module is dep owner', is like a module-level sudo account - he's able to halt and resume all module operations without requiring runtime upgrade. Calls that are related to this account are: - `fn set_owner()`: current module owner may call it to transfer "ownership" to another account; -- `fn halt_operations()`: the module owner (or sudo account) may call this function to stop all module operations. After - this call, all message-related transactions will be rejected until further `resume_operations` call'. This call may be - used when something extraordinary happens with the bridge; -- `fn resume_operations()`: module owner may call this function to resume bridge operations. The module will resume its - regular operations after this call. +- `fn set_operating_mode()`: the module owner (or sudo account) may call this function to pause/resume + pallet operations. Owner may halt the pallet by calling this method with + `MessagesOperatingMode::Basic(BasicOperatingMode::Halted)` argument - all message-related + transactions will be rejected. Owner may then resume pallet operations by passing the + `MessagesOperatingMode::Basic(BasicOperatingMode::Normal)` argument. There's also + `MessagesOperatingMode::RejectingOutboundMessages` pallet mode, where it still accepts all incoming + messages, but all outbound messages are rejected. If pallet owner is not defined, the governance may be used to make those calls. diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs index bc00db9eba5ba12dbdaa0de7008f293a727a7ef5..e31a4542056cb30466f236d0dc9957c053a03f66 100644 --- a/bridges/modules/messages/src/lib.rs +++ b/bridges/modules/messages/src/lib.rs @@ -573,7 +573,7 @@ pub mod pallet { /// Pallet owner has a right to halt all pallet operations and then resume it. If it is /// `None`, then there are no direct ways to halt/resume pallet operations, but other /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). + /// flag directly or call the `set_operating_mode`). 
#[pallet::storage] #[pallet::getter(fn module_owner)] pub type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId>; diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index 35213be0674a8c8d31de79afb720fbf457f3445a..d3152f8d0a4aa9b6dc1c726441c5e139e08de162 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/modules/parachains/src/lib.rs b/bridges/modules/parachains/src/lib.rs index 61e04aed3770dcaa9cb611dc754aad21325e1b39..d323aef3b22070d1db1e4709fe0dad8bf0360caf 100644 --- a/bridges/modules/parachains/src/lib.rs +++ b/bridges/modules/parachains/src/lib.rs @@ -260,7 +260,7 @@ pub mod pallet { /// Pallet owner has a right to halt all pallet operations and then resume them. If it is /// `None`, then there are no direct ways to halt/resume pallet operations, but other /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). + /// flag directly or call the `set_operating_mode`). #[pallet::storage] pub type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index e2b7aca92249c19096bf129be6fab1be08a5357a..08e1438d4f1946fb41f614b0e94c0ce6f1611fd5 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 06f2a339bed9d07b5615ca047177228d6585cfc2..b80240c974de9f5874e2825f5506885fea11ef3a 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } diff --git a/bridges/modules/xcm-bridge-hub/Cargo.toml b/bridges/modules/xcm-bridge-hub/Cargo.toml index 4483a3790900f975030e8c820f9b42442a747262..9b22770061a9a9ffd981f186de9231d7ff41cde9 100644 --- a/bridges/modules/xcm-bridge-hub/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = 
["derive"] } diff --git a/bridges/primitives/beefy/Cargo.toml b/bridges/primitives/beefy/Cargo.toml index 2a13685207cc633fe18633525231866511afd01e..bd68076ca48fc8ccc7bb8f48611083c0930731f7 100644 --- a/bridges/primitives/beefy/Cargo.toml +++ b/bridges/primitives/beefy/Cargo.toml @@ -12,7 +12,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } serde = { default-features = false, features = ["alloc", "derive"], workspace = true } diff --git a/bridges/primitives/beefy/src/lib.rs b/bridges/primitives/beefy/src/lib.rs index 0441781e79a66f785b985047ad56da70c0f13d49..2494706818ef14c11a8193983164e65a510a80e6 100644 --- a/bridges/primitives/beefy/src/lib.rs +++ b/bridges/primitives/beefy/src/lib.rs @@ -22,7 +22,7 @@ pub use binary_merkle_tree::merkle_root; pub use pallet_beefy_mmr::BeefyEcdsaToEthereum; pub use pallet_mmr::{ - primitives::{DataOrHash as MmrDataOrHash, Proof as MmrProof}, + primitives::{DataOrHash as MmrDataOrHash, LeafProof as MmrProof}, verify_leaves_proof as verify_mmr_leaves_proof, }; pub use sp_consensus_beefy::{ diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index f7a61a9ff32bd42f4199859834b6296aeaa18f4a..def1f7ad4dfefb14c3f8459a3d2960c3890ddcf8 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index ad496012c6a3f95d636a2c1ae52fcb5f4ec5434d..af2afb65a26a7f206fdbfcf22e20cb5100a8c95f 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -24,8 +24,8 @@ use crate::justification::{ GrandpaJustification, JustificationVerificationContext, JustificationVerificationError, }; use bp_runtime::{ - BasicOperatingMode, Chain, HashOf, HasherOf, HeaderOf, RawStorageProof, StorageProofChecker, - StorageProofError, UnderlyingChainProvider, + BasicOperatingMode, BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf, RawStorageProof, + StorageProofChecker, StorageProofError, UnderlyingChainProvider, }; use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug}; @@ -35,7 +35,7 @@ use serde::{Deserialize, Serialize}; use sp_consensus_grandpa::{ AuthorityList, ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID, }; -use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug}; +use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug, SaturatedConversion}; use sp_std::{boxed::Box, vec::Vec}; pub mod justification; @@ -325,6 +325,68 @@ where const AVERAGE_HEADER_SIZE: u32 = ::AVERAGE_HEADER_SIZE; } +/// Result of checking maximal expected submit finality proof 
call weight and size. +#[derive(Debug)] +pub struct SubmitFinalityProofCallExtras { + /// If true, the call weight is larger than what we have assumed. + /// + /// We have some assumptions about headers and justifications of the bridged chain. + /// We know that if our assumptions are correct, then the call must not have the + /// weight above some limit. The fee paid for weight above that limit is never refunded. + pub is_weight_limit_exceeded: bool, + /// Extra size (in bytes) that we assume is included in the call. + /// + /// We have some assumptions about headers and justifications of the bridged chain. + /// We know that if our assumptions are correct, then the call must not have the + /// size above some limit. The fee paid for bytes above that limit is never refunded. + pub extra_size: u32, + /// A flag that is true if the header is the mandatory header that enacts a new + /// authorities set. + pub is_mandatory_finality_target: bool, +} + +/// Checks whether the given `header` and its finality `proof` fit the maximal expected +/// call limits (size and weight). The submission may be refunded sometimes (see pallet +/// configuration for details), but it should fit some limits. If the call has some extra +/// weight and/or size included, though, we won't refund it, or the refund will only be partial. +pub fn submit_finality_proof_limits_extras( + header: &C::Header, + proof: &justification::GrandpaJustification, +) -> SubmitFinalityProofCallExtras { + // the `submit_finality_proof` call will reject justifications with invalid, duplicate, + // unknown and extra signatures. It'll also reject justifications with less than necessary + // signatures. So we do not care about extra weight because of additional signatures here. + let precommits_len = proof.commit.precommits.len().saturated_into(); + let required_precommits = precommits_len; + + // the weight check is simple - we assume that there are no more than the `limit` + // headers in the ancestry proof + let votes_ancestries_len: u32 = proof.votes_ancestries.len().saturated_into(); + let is_weight_limit_exceeded = + votes_ancestries_len > C::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; + + // check if the `finality_target` is a mandatory header. If so, we are ready to refund larger + // size + let is_mandatory_finality_target = + GrandpaConsensusLogReader::>::find_scheduled_change(header.digest()) + .is_some(); + + // we can estimate extra call size easily, without any additional significant overhead + let actual_call_size: u32 = + header.encoded_size().saturating_add(proof.encoded_size()).saturated_into(); + let max_expected_call_size = max_expected_submit_finality_proof_arguments_size::( + is_mandatory_finality_target, + required_precommits, + ); + let extra_size = actual_call_size.saturating_sub(max_expected_call_size); + + SubmitFinalityProofCallExtras { + is_weight_limit_exceeded, + extra_size, + is_mandatory_finality_target, + } +} + /// Returns maximal expected size of `submit_finality_proof` call arguments.
pub fn max_expected_submit_finality_proof_arguments_size( is_mandatory_finality_target: bool, diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index d41acfb9d32863d14e56e095755791a420fd3ce6..20337873c2e6abac5872807adf67557be60a46e8 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index 2e7000b86a5e4ba21ccadf74b1c2d5374db5c545..a6e71876cefbb3963ef1923469d641281cda00dc 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2" scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index 53b1e574cb1997e556f17b7f21f6a28d9eb84400..d4b2f503e9e2ca92c095649f8aa36741d02c8037 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index 1be7f1dc6ebd38061e98865b45d4f85d8f3f7448..5081dddce1e61eccbae540f665257e122d777dd6 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Bridge Dependencies diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 9a9b0291687d19ac0b5698ce3ef6a591da9f86c9..ac65ad538b4988c71e59d081cba46d47ebdc7c39 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = 
false } hash-db = { version = "0.16.0", default-features = false } impl-trait-for-tuples = "0.2.2" log = { workspace = true } diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index d314c38683cdbc8b40cfda3a14c64f91854e5d7f..99f5ee0d1aee4528f64028bbb4ce089cfb6f4c44 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -15,7 +15,7 @@ bp-header-chain = { path = "../header-chain", default-features = false } bp-parachains = { path = "../parachains", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } ed25519-dalek = { version = "2.1", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index 94eece16d5797eb23dd12af5b8b5aeb7d283d862..b94e722024562e526c33d2bf1efe9b89f1a035aa 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive"] } # Substrate Dependencies diff --git a/bridges/relays/client-substrate/Cargo.toml b/bridges/relays/client-substrate/Cargo.toml index 2c98441fc3017e8bcd4a213b27d5f5d2fee3223d..cb7eae4f340c7375ad69b111f6b561c84bc57144 100644 --- a/bridges/relays/client-substrate/Cargo.toml +++ b/bridges/relays/client-substrate/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] async-std = { version = "1.9.0", features = ["attributes"] } async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["macros", "ws-client"] } log = { workspace = true } diff --git a/bridges/relays/client-substrate/src/client.rs b/bridges/relays/client-substrate/src/client.rs index afbda8599b2aa03c4ed4a7c33a93fdd429646516..2e7cb7455f76cceee1c63aae4efb4a5cfe9f2a69 100644 --- a/bridges/relays/client-substrate/src/client.rs +++ b/bridges/relays/client-substrate/src/client.rs @@ -77,7 +77,12 @@ pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: } /// Opaque justifications subscription type. -pub struct Subscription(pub(crate) Mutex>>); +pub struct Subscription( + pub(crate) Mutex>>, + // The following field is not explicitly used by the code. But when it is dropped, + // the background task receives a shutdown signal. + #[allow(dead_code)] pub(crate) futures::channel::oneshot::Sender<()>, +); /// Opaque GRANDPA authorities set.
pub type OpaqueGrandpaAuthoritiesSet = Vec; @@ -621,6 +626,7 @@ impl Client { e })??; + let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); let (tracker, subscription) = self .jsonrpsee_execute(move |client| async move { @@ -639,7 +645,7 @@ impl Client { self_clone, stall_timeout, tx_hash, - Subscription(Mutex::new(receiver)), + Subscription(Mutex::new(receiver), cancel_sender), ); Ok((tracker, subscription)) }) @@ -649,6 +655,7 @@ impl Client { "extrinsic".into(), subscription, sender, + cancel_receiver, )); Ok(tracker) } @@ -790,14 +797,16 @@ impl Client { Ok(FC::subscribe_justifications(&client).await?) }) .await?; + let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); self.data.read().await.tokio.spawn(Subscription::background_worker( C::NAME.into(), "justification".into(), subscription, sender, + cancel_receiver, )); - Ok(Subscription(Mutex::new(receiver))) + Ok(Subscription(Mutex::new(receiver), cancel_sender)) } /// Generates a proof of key ownership for the given authority in the given set. @@ -843,9 +852,17 @@ impl Client { impl Subscription { /// Consumes subscription and returns future statuses stream. pub fn into_stream(self) -> impl futures::Stream { - futures::stream::unfold(self, |this| async { + futures::stream::unfold(Some(self), |mut this| async move { + let Some(this) = this.take() else { return None }; let item = this.0.lock().await.next().await.unwrap_or(None); - item.map(|i| (i, this)) + match item { + Some(item) => Some((item, Some(this))), + None => { + // let's make it explicit here + let _ = this.1.send(()); + None + }, + } }) } @@ -860,19 +877,35 @@ impl Subscription { async fn background_worker( chain_name: String, item_type: String, - mut subscription: jsonrpsee::core::client::Subscription, + subscription: jsonrpsee::core::client::Subscription, mut sender: futures::channel::mpsc::Sender>, + cancel_receiver: futures::channel::oneshot::Receiver<()>, ) { + log::trace!( + target: "bridge", + "Starting background worker for {} {} subscription stream.", + chain_name, + item_type, + ); + + futures::pin_mut!(subscription, cancel_receiver); loop { - match subscription.next().await { - Some(Ok(item)) => + match futures::future::select(subscription.next(), &mut cancel_receiver).await { + futures::future::Either::Left((Some(Ok(item)), _)) => if sender.send(Some(item)).await.is_err() { + log::trace!( + target: "bridge", + "{} {} subscription stream: no listener. Stopping background worker.", + chain_name, + item_type, + ); + break }, - Some(Err(e)) => { + futures::future::Either::Left((Some(Err(e)), _)) => { log::trace!( target: "bridge", - "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted.", + "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted. Stopping background worker.", chain_name, item_type, e, @@ -880,16 +913,25 @@ impl Subscription { let _ = sender.send(None).await; break }, - None => { + futures::future::Either::Left((None, _)) => { log::trace!( target: "bridge", - "{} {} subscription stream has returned None. Stream needs to be restarted.", + "{} {} subscription stream has returned None. Stream needs to be restarted. 
Stopping background worker.", chain_name, item_type, ); let _ = sender.send(None).await; break }, + futures::future::Either::Right((_, _)) => { + log::trace!( + target: "bridge", + "{} {} subscription stream: listener has been dropped. Stopping background worker.", + chain_name, + item_type, + ); + break; + }, } } } diff --git a/bridges/relays/client-substrate/src/error.rs b/bridges/relays/client-substrate/src/error.rs index 0b446681818879d662ba9a71679a799519cf491b..2133c18887846b4f4360bdb6baa34799a24e6164 100644 --- a/bridges/relays/client-substrate/src/error.rs +++ b/bridges/relays/client-substrate/src/error.rs @@ -17,6 +17,7 @@ //! Substrate node RPC errors. use crate::SimpleRuntimeVersion; +use bp_header_chain::SubmitFinalityProofCallExtras; use bp_polkadot_core::parachains::ParaId; use jsonrpsee::core::ClientError as RpcError; use relay_utils::MaybeConnectionError; @@ -129,6 +130,12 @@ pub enum Error { /// Actual runtime version. actual: SimpleRuntimeVersion, }, + /// Finality proof submission exceeds size and/or weight limits. + #[error("Finality proof submission exceeds limits: {extras:?}")] + FinalityProofWeightLimitExceeded { + /// Finality proof submission extras. + extras: SubmitFinalityProofCallExtras, + }, /// Custom logic error. #[error("{0}")] Custom(String), diff --git a/bridges/relays/client-substrate/src/transaction_tracker.rs b/bridges/relays/client-substrate/src/transaction_tracker.rs index 00375768c45c27c23dfccb7730668108a6fab788..b181a945c2c15393daf821901b298e81214f85e3 100644 --- a/bridges/relays/client-substrate/src/transaction_tracker.rs +++ b/bridges/relays/client-substrate/src/transaction_tracker.rs @@ -306,12 +306,13 @@ mod tests { TrackedTransactionStatus>, InvalidationStatus>, )> { + let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (mut sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver)), + Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), ); let wait_for_stall_timeout = futures::future::pending(); @@ -428,12 +429,13 @@ mod tests { #[async_std::test] async fn lost_on_timeout_when_waiting_for_invalidation_status() { + let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (_sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver)), + Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), ); let wait_for_stall_timeout = futures::future::ready(()).shared(); diff --git a/bridges/relays/lib-substrate-relay/Cargo.toml b/bridges/relays/lib-substrate-relay/Cargo.toml index 3f657645b591c51ab74c5b6b64bfdd8ca04e7b18..077d1b1ff356a871364d45c1251aec0af7680cdd 100644 --- a/bridges/relays/lib-substrate-relay/Cargo.toml +++ b/bridges/relays/lib-substrate-relay/Cargo.toml @@ -14,7 +14,7 @@ workspace = true anyhow = "1.0" async-std = "1.9.0" async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" hex = "0.4" log = { workspace = true } diff --git a/bridges/relays/lib-substrate-relay/src/cli/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/mod.rs index 
0dd0d5474b3a517b7eb67b641a5d3b427cad2b84..270608bf6ed8e1500d10000173bd7945a31c8135 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/mod.rs @@ -125,14 +125,13 @@ impl PrometheusParams { None }; - let relay_version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown"); + let relay_version = relay_utils::initialize::RELAYER_VERSION + .lock() + .clone() + .unwrap_or_else(|| "unknown".to_string()); let relay_commit = SubstrateRelayBuildInfo::get_git_commit(); - relay_utils::metrics::MetricsParams::new( - metrics_address, - relay_version.into(), - relay_commit, - ) - .map_err(|e| anyhow::format_err!("{:?}", e)) + relay_utils::metrics::MetricsParams::new(metrics_address, relay_version, relay_commit) + .map_err(|e| anyhow::format_err!("{:?}", e)) } } diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs index b672bd4f9b86812c87532012debb89e0a601451f..943feba072e408d98360ece228fe8c5558181b69 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs @@ -26,9 +26,12 @@ use async_trait::async_trait; use sp_core::Pair; use structopt::StructOpt; +use bp_messages::MessageNonce; +use bp_runtime::HeaderIdProvider; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, ChainWithTransactions, }; +use relay_utils::UniqueSaturatedInto; /// Messages relaying params. #[derive(StructOpt)] @@ -48,6 +51,53 @@ pub struct RelayMessagesParams { prometheus_params: PrometheusParams, } +/// Messages range relaying params. +#[derive(StructOpt)] +pub struct RelayMessagesRangeParams { + /// Number of the source chain header that we will use to prepare a messages proof. + /// This header must be previously proved to the target chain. + #[structopt(long)] + at_source_block: u128, + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + /// Nonce (inclusive) of the first message to relay. + #[structopt(long)] + messages_start: MessageNonce, + /// Nonce (inclusive) of the last message to relay. + #[structopt(long)] + messages_end: MessageNonce, + /// Whether the outbound lane state proof should be included into transaction. + #[structopt(long)] + outbound_state_proof_required: bool, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + source_sign: SourceSigningParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, +} + +/// Messages delivery confirmation relaying params. +#[derive(StructOpt)] +pub struct RelayMessagesDeliveryConfirmationParams { + /// Number of the target chain header that we will use to prepare a messages + /// delivery proof. This header must be previously proved to the source chain. + #[structopt(long)] + at_target_block: u128, + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + source_sign: SourceSigningParams, + #[structopt(flatten)] + target: TargetConnectionParams, +} + /// Trait used for relaying messages between 2 chains. 
#[async_trait] pub trait MessagesRelayer: MessagesCliBridge @@ -86,4 +136,73 @@ where .await .map_err(|e| anyhow::format_err!("{}", e)) } + + /// Relay a consecutive range of messages. + async fn relay_messages_range(data: RelayMessagesRangeParams) -> anyhow::Result<()> { + let source_client = data.source.into_client::().await?; + let target_client = data.target.into_client::().await?; + let source_sign = data.source_sign.to_keypair::()?; + let source_transactions_mortality = data.source_sign.transactions_mortality()?; + let target_sign = data.target_sign.to_keypair::()?; + let target_transactions_mortality = data.target_sign.transactions_mortality()?; + + let at_source_block = source_client + .header_by_number(data.at_source_block.unique_saturated_into()) + .await + .map_err(|e| { + log::trace!( + target: "bridge", + "Failed to read {} header with number {}: {e:?}", + Self::Source::NAME, + data.at_source_block, + ); + anyhow::format_err!("The command has failed") + })? + .id(); + + crate::messages_lane::relay_messages_range::( + source_client, + target_client, + TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, + TransactionParams { signer: target_sign, mortality: target_transactions_mortality }, + at_source_block, + data.lane.into(), + data.messages_start..=data.messages_end, + data.outbound_state_proof_required, + ) + .await + } + + /// Relay a messages delivery confirmation. + async fn relay_messages_delivery_confirmation( + data: RelayMessagesDeliveryConfirmationParams, + ) -> anyhow::Result<()> { + let source_client = data.source.into_client::().await?; + let target_client = data.target.into_client::().await?; + let source_sign = data.source_sign.to_keypair::()?; + let source_transactions_mortality = data.source_sign.transactions_mortality()?; + + let at_target_block = target_client + .header_by_number(data.at_target_block.unique_saturated_into()) + .await + .map_err(|e| { + log::trace!( + target: "bridge", + "Failed to read {} header with number {}: {e:?}", + Self::Target::NAME, + data.at_target_block, + ); + anyhow::format_err!("The command has failed") + })? + .id(); + + crate::messages_lane::relay_messages_delivery_confirmation::( + source_client, + target_client, + TransactionParams { signer: source_sign, mortality: source_transactions_mortality }, + at_target_block, + data.lane.into(), + ) + .await + } } diff --git a/bridges/relays/lib-substrate-relay/src/finality/target.rs b/bridges/relays/lib-substrate-relay/src/finality/target.rs index 0874fa53549c59f413a2f3f0c4f3dbc582fe0090..52ab2462c62c4784b80bfbd128c11194a4f2edd4 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/target.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/target.rs @@ -137,6 +137,16 @@ impl TargetClient: Send { @@ -129,12 +115,11 @@ pub trait Engine: Send { ) -> Result; /// Checks whether the given `header` and its finality `proof` fit the maximal expected - /// call size limit. If result is `MaxExpectedCallSizeCheck::Exceeds { .. }`, this - /// submission won't be fully refunded and relayer will spend its own funds on that. - fn check_max_expected_call_size( + /// call limits (size and weight). + fn check_max_expected_call_limits( header: &C::Header, proof: &Self::FinalityProof, - ) -> MaxExpectedCallSizeCheck; + ) -> SubmitFinalityProofCallExtras; /// Prepare initialization data for the finality bridge pallet.
async fn prepare_initialization_data( @@ -245,22 +230,11 @@ impl Engine for Grandpa { }) } - fn check_max_expected_call_size( + fn check_max_expected_call_limits( header: &C::Header, proof: &Self::FinalityProof, - ) -> MaxExpectedCallSizeCheck { - let is_mandatory = Self::ConsensusLogReader::schedules_authorities_change(header.digest()); - let call_size: u32 = - header.encoded_size().saturating_add(proof.encoded_size()).saturated_into(); - let max_call_size = max_expected_submit_finality_proof_arguments_size::( - is_mandatory, - proof.commit.precommits.len().saturated_into(), - ); - if call_size > max_call_size { - MaxExpectedCallSizeCheck::Exceeds { call_size, max_call_size } - } else { - MaxExpectedCallSizeCheck::Ok - } + ) -> SubmitFinalityProofCallExtras { + bp_header_chain::submit_finality_proof_limits_extras::(header, proof) } /// Prepare initialization data for the GRANDPA verifier pallet. diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages_lane.rs index 58e9ded312dfea4813d1a5ddf843e76752bfc0cb..08550d19bae03aaf955c81800267cd80f9ce0f20 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_lane.rs @@ -46,7 +46,7 @@ use relay_utils::{ }; use sp_core::Pair; use sp_runtime::traits::Zero; -use std::{fmt::Debug, marker::PhantomData}; +use std::{fmt::Debug, marker::PhantomData, ops::RangeInclusive}; /// Substrate -> Substrate messages synchronization pipeline. pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { @@ -262,7 +262,7 @@ where source_client, params.lane_id, relayer_id_at_source, - params.target_transaction_params, + Some(params.target_transaction_params), params.source_to_target_headers_relay, ), { @@ -275,6 +275,87 @@ where .map_err(Into::into) } +/// Deliver range of Substrate-to-Substrate messages. No checks are made to ensure that transaction +/// will succeed. +pub async fn relay_messages_range( + source_client: Client, + target_client: Client, + source_transaction_params: TransactionParams>, + target_transaction_params: TransactionParams>, + at_source_block: HeaderIdOf, + lane_id: LaneId, + range: RangeInclusive, + outbound_state_proof_required: bool, +) -> anyhow::Result<()> +where + AccountIdOf: From< as Pair>::Public>, + AccountIdOf: From< as Pair>::Public>, + BalanceOf: TryFrom>, +{ + let relayer_id_at_source: AccountIdOf = + source_transaction_params.signer.public().into(); + messages_relay::relay_messages_range( + SubstrateMessagesSource::
<P>
::new( + source_client.clone(), + target_client.clone(), + lane_id, + source_transaction_params, + None, + ), + SubstrateMessagesTarget::
<P>
::new( + target_client, + source_client, + lane_id, + relayer_id_at_source, + Some(target_transaction_params), + None, + ), + at_source_block, + range, + outbound_state_proof_required, + ) + .await + .map_err(|_| anyhow::format_err!("The command has failed")) +} + +/// Relay messages delivery confirmation of Substrate-to-Substrate messages. +/// No checks are made to ensure that transaction will succeed. +pub async fn relay_messages_delivery_confirmation( + source_client: Client, + target_client: Client, + source_transaction_params: TransactionParams>, + at_target_block: HeaderIdOf, + lane_id: LaneId, +) -> anyhow::Result<()> +where + AccountIdOf: From< as Pair>::Public>, + AccountIdOf: From< as Pair>::Public>, + BalanceOf: TryFrom>, +{ + let relayer_id_at_source: AccountIdOf = + source_transaction_params.signer.public().into(); + messages_relay::relay_messages_delivery_confirmation( + SubstrateMessagesSource::
<P>::new( + source_client.clone(), + target_client.clone(), + lane_id, + source_transaction_params, + None, + ), + SubstrateMessagesTarget::<P>
::new( + target_client, + source_client, + lane_id, + relayer_id_at_source, + None, + None, + ), + at_target_block, + ) + .await + .map_err(|_| anyhow::format_err!("The command has failed")) +} + /// Different ways of building `receive_messages_proof` calls. pub trait ReceiveMessagesProofCallBuilder { /// Given messages proof, build call of `receive_messages_proof` function of bridge diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages_target.rs index 633b11f0b8028636fdb8c9c6b4f1ec5fa42ccf33..5ffb2b6c771e0fec2bf44640993abba3706cb0a5 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_target.rs @@ -40,8 +40,8 @@ use messages_relay::{ message_lane_loop::{NoncesSubmitArtifacts, TargetClient, TargetClientState}, }; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, CallOf, Client, Error as SubstrateError, HashOf, - TransactionEra, TransactionTracker, UnsignedTransaction, + AccountIdOf, AccountKeyPairOf, BalanceOf, CallOf, Chain, Client, Error as SubstrateError, + HashOf, TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; @@ -57,7 +57,7 @@ pub struct SubstrateMessagesTarget { source_client: Client, lane_id: LaneId, relayer_id_at_source: AccountIdOf, - transaction_params: TransactionParams>, + transaction_params: Option>>, source_to_target_headers_relay: Option>>, } @@ -68,7 +68,7 @@ impl SubstrateMessagesTarget
<P>
{ source_client: Client, lane_id: LaneId, relayer_id_at_source: AccountIdOf, - transaction_params: TransactionParams>, + transaction_params: Option>>, source_to_target_headers_relay: Option< Arc>, >, @@ -249,11 +249,18 @@ where None => messages_proof_call, }; - let transaction_params = self.transaction_params.clone(); + let transaction_params = self.transaction_params.clone().map(Ok).unwrap_or_else(|| { + // this error shall never happen in practice, so it not deserves + // a separate error variant + Err(SubstrateError::Custom(format!( + "Cannot sign transaction of {} chain", + P::TargetChain::NAME, + ))) + })?; let tx_tracker = self .target_client .submit_and_watch_signed_extrinsic( - &self.transaction_params.signer, + &transaction_params.signer, move |best_block_id, transaction_nonce| { Ok(UnsignedTransaction::new(final_call.into(), transaction_nonce) .era(TransactionEra::new(best_block_id, transaction_params.mortality))) diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs index 74f3a70c5e81bbc1d27162a74fb8dadab46a6d09..202f53ea4e4f50510f125f28da86de878125d581 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs @@ -16,9 +16,7 @@ //! On-demand Substrate -> Substrate header finality relay. -use crate::{ - finality::SubmitFinalityProofCallBuilder, finality_base::engine::MaxExpectedCallSizeCheck, -}; +use crate::finality::SubmitFinalityProofCallBuilder; use async_std::sync::{Arc, Mutex}; use async_trait::async_trait; @@ -156,22 +154,21 @@ impl OnDemandRelay( .await } +/// Relay range of messages. +pub async fn relay_messages_range( + source_client: impl MessageLaneSourceClient
<P>, + target_client: impl MessageLaneTargetClient<P>
, + at: SourceHeaderIdOf
<P>
, + range: RangeInclusive, + outbound_state_proof_required: bool, +) -> Result<(), ()> { + // compute cumulative dispatch weight of all messages in given range + let dispatch_weight = source_client + .generated_message_details(at.clone(), range.clone()) + .await + .map_err(|e| { + log::error!( + target: "bridge", + "Failed to get generated message details at {:?} for messages {:?}: {:?}", + at, + range, + e, + ); + })? + .values() + .fold(Weight::zero(), |total, details| total.saturating_add(details.dispatch_weight)); + // prepare messages proof + let (at, range, proof) = source_client + .prove_messages( + at.clone(), + range.clone(), + MessageProofParameters { outbound_state_proof_required, dispatch_weight }, + ) + .await + .map_err(|e| { + log::error!( + target: "bridge", + "Failed to generate messages proof at {:?} for messages {:?}: {:?}", + at, + range, + e, + ); + })?; + // submit messages proof to the target node + let tx_tracker = target_client + .submit_messages_proof(None, at, range.clone(), proof) + .await + .map_err(|e| { + log::error!( + target: "bridge", + "Failed to submit messages proof for messages {:?}: {:?}", + range, + e, + ); + })? + .tx_tracker; + + match tx_tracker.wait().await { + TrackedTransactionStatus::Finalized(_) => Ok(()), + TrackedTransactionStatus::Lost => { + log::error!("Transaction with messages {:?} is considered lost", range,); + Err(()) + }, + } +} + /// Message delivery race. struct MessageDeliveryRace
<P>(std::marker::PhantomData<P>
); diff --git a/bridges/relays/messages/src/message_race_receiving.rs b/bridges/relays/messages/src/message_race_receiving.rs index e6497a1b79eb70999f16cd0c1926ae9cf0ad5411..ac4149b22d7b124f9cf661e8a99de7dedc4a15fe 100644 --- a/bridges/relays/messages/src/message_race_receiving.rs +++ b/bridges/relays/messages/src/message_race_receiving.rs @@ -30,7 +30,7 @@ use crate::{ use async_trait::async_trait; use bp_messages::MessageNonce; use futures::stream::FusedStream; -use relay_utils::FailedClient; +use relay_utils::{FailedClient, TrackedTransactionStatus, TransactionTracker}; use std::{marker::PhantomData, ops::RangeInclusive}; /// Message receiving confirmations delivery strategy. @@ -69,6 +69,43 @@ pub async fn run( .await } +/// Relay messages delivery confirmation. +pub async fn relay_messages_delivery_confirmation( + source_client: impl MessageLaneSourceClient
<P>, + target_client: impl MessageLaneTargetClient<P>
, + at: TargetHeaderIdOf
<P>
, +) -> Result<(), ()> { + // prepare messages delivery proof + let (at, proof) = target_client.prove_messages_receiving(at.clone()).await.map_err(|e| { + log::error!( + target: "bridge", + "Failed to generate messages delivery proof at {:?}: {:?}", + at, + e, + ); + })?; + // submit messages delivery proof to the source node + let tx_tracker = + source_client + .submit_messages_receiving_proof(None, at, proof) + .await + .map_err(|e| { + log::error!( + target: "bridge", + "Failed to submit messages delivery proof: {:?}", + e, + ); + })?; + + match tx_tracker.wait().await { + TrackedTransactionStatus::Finalized(_) => Ok(()), + TrackedTransactionStatus::Lost => { + log::error!("Transaction with messages delivery proof is considered lost"); + Err(()) + }, + } +} + /// Messages receiving confirmations race. struct ReceivingConfirmationsRace
<P>(std::marker::PhantomData<P>
); diff --git a/bridges/relays/parachains/Cargo.toml b/bridges/relays/parachains/Cargo.toml index a73a2f5b31c1aca5d93bf0b871b79b5e279da6ff..8d38e4e6bd07c2420adcf233729c1bac9bb77c37 100644 --- a/bridges/relays/parachains/Cargo.toml +++ b/bridges/relays/parachains/Cargo.toml @@ -23,6 +23,6 @@ bp-polkadot-core = { path = "../../primitives/polkadot-core" } relay-substrate-client = { path = "../client-substrate" } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } relay-substrate-client = { path = "../client-substrate", features = ["test-helpers"] } sp-core = { path = "../../../substrate/primitives/core" } diff --git a/bridges/relays/utils/Cargo.toml b/bridges/relays/utils/Cargo.toml index ee56ebf9a956c6f4fdd06b554da279b56ed534dc..1264f582983f93c0f0b739f9d6c456df417f00ac 100644 --- a/bridges/relays/utils/Cargo.toml +++ b/bridges/relays/utils/Cargo.toml @@ -22,6 +22,7 @@ futures = "0.3.30" jsonpath_lib = "0.3" log = { workspace = true } num-traits = "0.2" +parking_lot = "0.12.1" serde_json = { workspace = true, default-features = true } sysinfo = "0.30" time = { version = "0.3", features = ["formatting", "local-offset", "std"] } diff --git a/bridges/relays/utils/src/initialize.rs b/bridges/relays/utils/src/initialize.rs index 8224c1803ad2f74ec694cd68c18a556dddc6d76f..64d710242710b722b3b5be67dc439f814cd7e9df 100644 --- a/bridges/relays/utils/src/initialize.rs +++ b/bridges/relays/utils/src/initialize.rs @@ -16,8 +16,13 @@ //! Relayer initialization functions. +use parking_lot::Mutex; use std::{cell::RefCell, fmt::Display, io::Write}; +/// Relayer version that is provided as metric. Must be set by a binary +/// (get it with `option_env!("CARGO_PKG_VERSION")` from a binary package code). +pub static RELAYER_VERSION: Mutex> = Mutex::new(None); + async_std::task_local! { pub(crate) static LOOP_NAME: RefCell = RefCell::new(String::default()); } diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index 0e15304ff11234d6c1871511fc8c3237830fe9a4..e60934e34740600e78b9973d66ce648751a55138 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { optional = true, workspace = true, default-features = true } serde_json = { optional = true, workspace = true, default-features = true } -codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } diff --git a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs index c1b9e19729bc47588ad179db0c9c280a05834bc1..6a5972ca7a142e4a5842d506fce5a87991e25f3a 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/lib.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/lib.rs @@ -104,6 +104,7 @@ pub mod pallet { #[pallet::error] pub enum Error { SkippedSyncCommitteePeriod, + SyncCommitteeUpdateRequired, /// Attested header is older than latest finalized header. 
IrrelevantUpdate, NotBootstrapped, @@ -138,41 +139,39 @@ pub mod pallet { /// Latest imported checkpoint root #[pallet::storage] #[pallet::getter(fn initial_checkpoint_root)] - pub(super) type InitialCheckpointRoot = StorageValue<_, H256, ValueQuery>; + pub type InitialCheckpointRoot = StorageValue<_, H256, ValueQuery>; /// Latest imported finalized block root #[pallet::storage] #[pallet::getter(fn latest_finalized_block_root)] - pub(super) type LatestFinalizedBlockRoot = StorageValue<_, H256, ValueQuery>; + pub type LatestFinalizedBlockRoot = StorageValue<_, H256, ValueQuery>; /// Beacon state by finalized block root #[pallet::storage] #[pallet::getter(fn finalized_beacon_state)] - pub(super) type FinalizedBeaconState = + pub type FinalizedBeaconState = StorageMap<_, Identity, H256, CompactBeaconState, OptionQuery>; /// Finalized Headers: Current position in ring buffer #[pallet::storage] - pub(crate) type FinalizedBeaconStateIndex = StorageValue<_, u32, ValueQuery>; + pub type FinalizedBeaconStateIndex = StorageValue<_, u32, ValueQuery>; /// Finalized Headers: Mapping of ring buffer index to a pruning candidate #[pallet::storage] - pub(crate) type FinalizedBeaconStateMapping = + pub type FinalizedBeaconStateMapping = StorageMap<_, Identity, u32, H256, ValueQuery>; #[pallet::storage] #[pallet::getter(fn validators_root)] - pub(super) type ValidatorsRoot = StorageValue<_, H256, ValueQuery>; + pub type ValidatorsRoot = StorageValue<_, H256, ValueQuery>; /// Sync committee for current period #[pallet::storage] - pub(super) type CurrentSyncCommittee = - StorageValue<_, SyncCommitteePrepared, ValueQuery>; + pub type CurrentSyncCommittee = StorageValue<_, SyncCommitteePrepared, ValueQuery>; /// Sync committee for next period #[pallet::storage] - pub(super) type NextSyncCommittee = - StorageValue<_, SyncCommitteePrepared, ValueQuery>; + pub type NextSyncCommittee = StorageValue<_, SyncCommitteePrepared, ValueQuery>; /// The current operating mode of the pallet. #[pallet::storage] @@ -320,6 +319,7 @@ pub mod pallet { // Verify update is relevant. let update_attested_period = compute_period(update.attested_header.slot); + let update_finalized_period = compute_period(update.finalized_header.slot); let update_has_next_sync_committee = !>::exists() && (update.next_sync_committee_update.is_some() && update_attested_period == store_period); @@ -395,6 +395,11 @@ pub mod pallet { ), Error::::InvalidSyncCommitteeMerkleProof ); + } else { + ensure!( + update_finalized_period == store_period, + Error::::SyncCommitteeUpdateRequired + ); } // Verify sync committee aggregate signature. 
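The guard added above is easier to read outside the pallet: a finality update whose finalized header crosses into the next sync committee period is rejected with the new `SyncCommitteeUpdateRequired` error unless the next sync committee has already been supplied. A minimal sketch of that check, assuming the standard beacon chain constants (32 slots per epoch, 256 epochs per sync committee period) and a free-standing helper in place of the pallet's `ensure!(..., Error::<T>::SyncCommitteeUpdateRequired)`:

```rust
// Sketch only: the constants and the stand-alone helper are assumptions,
// simplified from the pallet's in-storage bookkeeping.
const SLOTS_PER_EPOCH: u64 = 32;
const EPOCHS_PER_SYNC_COMMITTEE_PERIOD: u64 = 256;

/// Map a beacon slot to its sync committee period.
fn compute_period(slot: u64) -> u64 {
    slot / (SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
}

/// Reject updates that skip into a new period without a committee handover.
fn check_update_period(
    update_finalized_slot: u64,
    stored_finalized_slot: u64,
    has_next_sync_committee_update: bool,
) -> Result<(), &'static str> {
    let update_finalized_period = compute_period(update_finalized_slot);
    let store_period = compute_period(stored_finalized_slot);
    if !has_next_sync_committee_update && update_finalized_period != store_period {
        return Err("SyncCommitteeUpdateRequired");
    }
    Ok(())
}
```

This matches the reworked test that follows: submitting a header from the next period first fails, and succeeds only once the next sync committee update has been imported.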
diff --git a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs index 765958c128212ee279bf547eb864335c7921e2eb..da762dc2fd8071969a1f68c707a0145aa0803852 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/tests.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/tests.rs @@ -362,13 +362,14 @@ fn submit_update_with_sync_committee_in_current_period() { } #[test] -fn submit_update_in_next_period() { +fn reject_submit_update_in_next_period() { let checkpoint = Box::new(load_checkpoint_update_fixture()); let sync_committee_update = Box::new(load_sync_committee_update_fixture()); let update = Box::new(load_next_finalized_header_update_fixture()); let sync_committee_period = compute_period(sync_committee_update.finalized_header.slot); let next_sync_committee_period = compute_period(update.finalized_header.slot); assert_eq!(sync_committee_period + 1, next_sync_committee_period); + let next_sync_committee_update = Box::new(load_next_sync_committee_update_fixture()); new_tester().execute_with(|| { assert_ok!(EthereumBeaconClient::process_checkpoint_update(&checkpoint)); @@ -376,6 +377,17 @@ fn submit_update_in_next_period() { RuntimeOrigin::signed(1), sync_committee_update.clone() )); + // check an update in the next period is rejected + assert_err!( + EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone()), + Error::::SyncCommitteeUpdateRequired + ); + // submit update with next sync committee + assert_ok!(EthereumBeaconClient::submit( + RuntimeOrigin::signed(1), + next_sync_committee_update + )); + // check same header in the next period can now be submitted successfully assert_ok!(EthereumBeaconClient::submit(RuntimeOrigin::signed(1), update.clone())); let block_root: H256 = update.finalized_header.clone().hash_tree_root().unwrap(); assert!(>::contains_key(block_root)); diff --git a/bridges/snowbridge/pallets/ethereum-client/src/types.rs b/bridges/snowbridge/pallets/ethereum-client/src/types.rs index 8808f989754b240d207101cbeff6fe25fe74279a..92b9f77f739b062b89de2685db91e7f8355a2722 100644 --- a/bridges/snowbridge/pallets/ethereum-client/src/types.rs +++ b/bridges/snowbridge/pallets/ethereum-client/src/types.rs @@ -18,7 +18,7 @@ pub type NextSyncCommitteeUpdate = primitives::NextSyncCommitteeUpdate; pub use primitives::{AncestryProof, ExecutionProof}; /// FinalizedState ring buffer implementation -pub(crate) type FinalizedBeaconStateBuffer = RingBufferMapImpl< +pub type FinalizedBeaconStateBuffer = RingBufferMapImpl< u32, crate::MaxFinalizedHeadersToKeep, crate::FinalizedBeaconStateIndex, diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index 71d49e684e0b9379778f45743f6427a41796fd8a..d63398770f207051ebb5adb72f4f574c767e8770 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { optional = true, workspace = true, default-features = true } -codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs 
b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index c96c868bc26ef71984e70bc40863db73cf18b9c5..05481ca2f6b439a82ba0065075c879725f2c3818 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -43,10 +43,6 @@ frame_support::construct_runtime!( pub type Signature = MultiSignature; pub type AccountId = <::Signer as IdentifyAccount>::AccountId; -parameter_types! { - pub const BlockHashCount: u64 = 250; -} - type Balance = u128; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -60,7 +56,6 @@ impl frame_system::Config for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type Nonce = u64; diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 387491abae0f652d63c55d50279d4145dc03b18d..15c6c3a5b32b0fc2bd1a95fd842bab78f07a697a 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], workspace = true } -codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 2d58517c18b0c55ff96e612948c7151c5dfc6ba8..1b1a9905928f8b5ea8eaccc15d18813f87406494 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.1.5", package = "parity-scale-codec", default-features = false, features = ["derive"] } +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.7.0", default-features = false, features = ["derive"] } sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } diff --git a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml index fa49cc0f29a0a161838ab62a1210e33a79f32c59..b8d704f1cb92d570ea8e8b06cd00410bea7746bb 100644 --- a/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/runtime-api/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.1.5", package = "parity-scale-codec", features = ["derive"], default-features = false } +codec = { version = "3.6.12", package = "parity-scale-codec", features = ["derive"], default-features = false } sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } frame-support = { path = 
"../../../../../substrate/frame/support", default-features = false } diff --git a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs index 5eeeeead140018e81a8aaaf302d678e3a4321e08..d65a96e2702dfbc69e52a913964de230bcdeadd0 100644 --- a/bridges/snowbridge/pallets/outbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/outbound-queue/src/mock.rs @@ -33,10 +33,6 @@ frame_support::construct_runtime!( } ); -parameter_types! { - pub const BlockHashCount: u64 = 250; -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; @@ -48,7 +44,6 @@ impl frame_system::Config for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type PalletInfo = PalletInfo; type Nonce = u64; type Block = Block; diff --git a/bridges/snowbridge/pallets/system/Cargo.toml b/bridges/snowbridge/pallets/system/Cargo.toml index c1ee44214c84a81dd9c4727bf81be93d5375c50c..5bbbb1d9310da4c3617ec4b03ea63620c30feb20 100644 --- a/bridges/snowbridge/pallets/system/Cargo.toml +++ b/bridges/snowbridge/pallets/system/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } diff --git a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml index 4073dc0f71c2096edce47a0857cd70cf969affed..42df5edfb7b2d4e5abaf0e30850ecbd3ebd04b98 100644 --- a/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml +++ b/bridges/snowbridge/pallets/system/runtime-api/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } diff --git a/bridges/snowbridge/pallets/system/src/mock.rs b/bridges/snowbridge/pallets/system/src/mock.rs index 687072a49e2e5ca9e27d7f1be94ea89564c2275f..d7fc4152b371025687d2a36cbd49e628c88205fc 100644 --- a/bridges/snowbridge/pallets/system/src/mock.rs +++ b/bridges/snowbridge/pallets/system/src/mock.rs @@ -3,7 +3,7 @@ use crate as snowbridge_system; use frame_support::{ derive_impl, parameter_types, - traits::{tokens::fungible::Mutate, ConstU128, ConstU64, ConstU8}, + traits::{tokens::fungible::Mutate, ConstU128, ConstU8}, weights::IdentityFee, PalletId, }; @@ -106,7 +106,6 @@ impl frame_system::Config for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; type Nonce = u64; diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml b/bridges/snowbridge/primitives/beacon/Cargo.toml index 7d901bcdb04493ea3103150c7dfeff0a32814269..18123910c35b2e198ec03ca1aa01aef1ea0d96ca 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -14,7 +14,7 @@ workspace 
= true [dependencies] serde = { optional = true, features = ["derive"], workspace = true, default-features = true } hex = { version = "0.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } rlp = { version = "0.5", default-features = false } diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 9a299ad0ae92326a6d0bb0391baf81e6e5bad663..573ab6608e5f91c0333f5ee7288cb679d6c38fb6 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] serde = { optional = true, features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index d72cd2661733b516a6a9b8102782b4d8d5c40e36..fb0b6cbaf3c2fba82c709fbc84ca565c53e7505e 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] serde = { optional = true, features = ["derive"], workspace = true, default-features = true } serde-big-array = { optional = true, features = ["const-generics"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } ethbloom = { version = "0.13.0", default-features = false } ethereum-types = { version = "0.14.1", default-features = false, features = ["codec", "rlp", "serialize"] } diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index 361b539af3e350a29871e82a76265de072bbf779..1d3fc43909df46a20f6e030985c2a7eea189fdc4 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } log = { workspace = true } diff --git a/bridges/snowbridge/runtime/runtime-common/Cargo.toml b/bridges/snowbridge/runtime/runtime-common/Cargo.toml index 995475349e4cb62045c613596acaf659eabd7076..2372908b86ab5134f4ea0f8373ffe00cbcc2bd32 100644 --- a/bridges/snowbridge/runtime/runtime-common/Cargo.toml +++ b/bridges/snowbridge/runtime/runtime-common/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] log = { workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } 
frame-support = { path = "../../../../substrate/frame/support", default-features = false } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } sp-arithmetic = { path = "../../../../substrate/primitives/arithmetic", default-features = false } diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 7cbb38574034c0b4c7f8b0bf82866c0392c82a10..e19c682de4542994e19e20d0c194598fc8009db5 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -11,7 +11,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } # Substrate frame-support = { path = "../../../../substrate/frame/support", default-features = false } @@ -32,7 +32,7 @@ xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-feat xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../cumulus/pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../cumulus/pallets/parachain-system", default-features = false } pallet-collator-selection = { path = "../../../../cumulus/pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../../cumulus/parachains/pallets/parachain-info", default-features = false } parachains-runtimes-test-utils = { path = "../../../../cumulus/parachains/runtimes/test-utils", default-features = false } diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index 57a3e08502f2da8efb13c45e6982bba7353cf2ac..ef4a5597902fdc61caedd071f408f03a87a19ea0 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -175,11 +175,9 @@ function run_finality_relay() { RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ $relayer_path relay-headers rococo-to-bridge-hub-westend \ --only-free-headers \ - --source-host localhost \ - --source-port 9942 \ + --source-uri ws://localhost:9942 \ --source-version-mode Auto \ - --target-host localhost \ - --target-port 8945 \ + --target-uri ws://localhost:8945 \ --target-version-mode Auto \ --target-signer //Charlie \ --target-transactions-mortality 4& @@ -187,11 +185,9 @@ function run_finality_relay() { RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ $relayer_path relay-headers westend-to-bridge-hub-rococo \ --only-free-headers \ - --source-host localhost \ - --source-port 9945 \ + --source-uri ws://localhost:9945 \ --source-version-mode Auto \ - --target-host localhost \ - --target-port 8943 \ + --target-uri ws://localhost:8943 \ --target-version-mode Auto \ --target-signer //Charlie \ --target-transactions-mortality 4 @@ -203,11 +199,9 @@ function run_parachains_relay() { RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ $relayer_path relay-parachains rococo-to-bridge-hub-westend \ --only-free-headers \ - --source-host localhost \ - --source-port 9942 \ + --source-uri 
ws://localhost:9942 \ --source-version-mode Auto \ - --target-host localhost \ - --target-port 8945 \ + --target-uri ws://localhost:8945 \ --target-version-mode Auto \ --target-signer //Dave \ --target-transactions-mortality 4& @@ -215,11 +209,9 @@ function run_parachains_relay() { RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ $relayer_path relay-parachains westend-to-bridge-hub-rococo \ --only-free-headers \ - --source-host localhost \ - --source-port 9945 \ + --source-uri ws://localhost:9945 \ --source-version-mode Auto \ - --target-host localhost \ - --target-port 8943 \ + --target-uri ws://localhost:8943 \ --target-version-mode Auto \ --target-signer //Dave \ --target-transactions-mortality 4 @@ -230,13 +222,11 @@ function run_messages_relay() { RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ $relayer_path relay-messages bridge-hub-rococo-to-bridge-hub-westend \ - --source-host localhost \ - --source-port 8943 \ + --source-uri ws://localhost:8943 \ --source-version-mode Auto \ --source-signer //Eve \ --source-transactions-mortality 4 \ - --target-host localhost \ - --target-port 8945 \ + --target-uri ws://localhost:8945 \ --target-version-mode Auto \ --target-signer //Eve \ --target-transactions-mortality 4 \ @@ -244,13 +234,11 @@ function run_messages_relay() { RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ $relayer_path relay-messages bridge-hub-westend-to-bridge-hub-rococo \ - --source-host localhost \ - --source-port 8945 \ + --source-uri ws://localhost:8945 \ --source-version-mode Auto \ --source-signer //Ferdie \ --source-transactions-mortality 4 \ - --target-host localhost \ - --target-port 8943 \ + --target-uri ws://localhost:8943 \ --target-version-mode Auto \ --target-signer //Ferdie \ --target-transactions-mortality 4 \ diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 0b2edb593c405bef266fa7e389a996fe3a2fcec1..410ac8b983d96f0a38633ac0199208a4e249e49b 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.0.0" } +codec = { package = "parity-scale-codec", version = "3.6.12" } url = "2.4.0" # Substrate diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 42f7342d1a5381a6174c15909b815485eb7c8a7e..39cedf87a0cb1b6fb8296c1a3bdec1483170af38 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] parking_lot = "0.12.1" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.30" tracing = "0.1.25" diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 70dd67cb9a00b2e7b0baf04dbfdaaff0386104e5..547137b7306460d91c10c3222c27d70f68d6e15d 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.28" tracing = "0.1.37" schnellru = "0.2.1" diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 
fb4a85ad122637470a81371eb12352a14ac7c61e..3a7c6b57d6d931b8809e9be9fda4cf4e07e50b8c 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } dyn-clone = "1.0.16" futures = "0.3.28" log = { workspace = true, default-features = true } diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 1210975ef690c4d110a7f96b2e88e4d151edfa87..d4fc752872589fbfd361f5df49939874d208ab3d 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" parking_lot = "0.12.1" diff --git a/cumulus/client/parachain-inherent/Cargo.toml b/cumulus/client/parachain-inherent/Cargo.toml index 6e9adab1ffc9e2bf9de9d8101eb38e4da8212b27..85619e8403458c0bfa3dae6dadc688f2cb895731 100644 --- a/cumulus/client/parachain-inherent/Cargo.toml +++ b/cumulus/client/parachain-inherent/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } scale-info = { version = "2.11.1", features = ["derive"] } tracing = { version = "0.1.37" } diff --git a/cumulus/client/parachain-inherent/src/mock.rs b/cumulus/client/parachain-inherent/src/mock.rs index 22691006f93ed264e3e7d37b7b2120ea576e9da3..dfe4a66c3dc19665d413fed47434d803d6ed63af 100644 --- a/cumulus/client/parachain-inherent/src/mock.rs +++ b/cumulus/client/parachain-inherent/src/mock.rs @@ -28,6 +28,9 @@ use std::collections::BTreeMap; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; +/// Relay chain slot duration, in milliseconds. +pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; + /// Inherent data provider that supplies mocked validation data. /// /// This is useful when running a node that is not actually backed by any relay chain. @@ -43,10 +46,14 @@ use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; /// your parachain's configuration in order to mock the MQC heads properly. /// See [`MockXcmConfig`] for more information pub struct MockValidationDataInherentDataProvider { - /// The current block number of the local block chain (the parachain) + /// The current block number of the local block chain (the parachain). pub current_para_block: u32, + /// The parachain ID of the parachain for that the inherent data is created. + pub para_id: ParaId, + /// The current block head data of the local block chain (the parachain). + pub current_para_block_head: Option, /// The relay block in which this parachain appeared to start. This will be the relay block - /// number in para block #P1 + /// number in para block #P1. pub relay_offset: u32, /// The number of relay blocks that elapses between each parablock. Probably set this to 1 or 2 /// to simulate optimistic or realistic relay chain behavior. 
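Before the hunk that follows, it may help to see the mock arithmetic in isolation: the provider derives a relay parent number from the para block counters, and (newly) feeds the sproof builder a slot derived from that number and `RELAY_CHAIN_SLOT_DURATION_MILLIS`. A small sketch with the formulas copied from the diff; the free-standing function names are illustrative:

```rust
/// Relay chain slot duration, in milliseconds (the new constant in this diff).
const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;

/// Mocked relay parent number for the current para block.
fn mocked_relay_parent_number(
    relay_offset: u32,
    relay_blocks_per_para_block: u32,
    current_para_block: u32,
) -> u32 {
    relay_offset + relay_blocks_per_para_block * current_para_block
}

/// Slot handed to the sproof builder, exactly as computed in the new code.
fn mocked_current_slot(relay_parent_number: u32) -> u64 {
    (relay_parent_number / RELAY_CHAIN_SLOT_DURATION_MILLIS) as u64
}
```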
@@ -54,19 +61,21 @@ pub struct MockValidationDataInherentDataProvider { /// Number of parachain blocks per relay chain epoch /// Mock epoch is computed by dividing `current_para_block` by this value. pub para_blocks_per_relay_epoch: u32, - /// Function to mock BABE one epoch ago randomness + /// Function to mock BABE one epoch ago randomness. pub relay_randomness_config: R, /// XCM messages and associated configuration information. pub xcm_config: MockXcmConfig, /// Inbound downward XCM messages to be injected into the block. pub raw_downward_messages: Vec>, - // Inbound Horizontal messages sorted by channel + // Inbound Horizontal messages sorted by channel. pub raw_horizontal_messages: Vec<(ParaId, Vec)>, // Additional key-value pairs that should be injected. pub additional_key_values: Option, Vec)>>, } +/// Something that can generate randomness. pub trait GenerateRandomness { + /// Generate the randomness using the given `input`. fn generate_randomness(&self, input: I) -> relay_chain::Hash; } @@ -86,8 +95,6 @@ impl GenerateRandomness for () { /// parachain's storage, and the corresponding relay data mocked. #[derive(Default)] pub struct MockXcmConfig { - /// The parachain id of the parachain being mocked. - pub para_id: ParaId, /// The starting state of the dmq_mqc_head. pub starting_dmq_mqc_head: relay_chain::Hash, /// The starting state of each parachain's mqc head @@ -114,7 +121,6 @@ impl MockXcmConfig { pub fn new, C: StorageProvider>( client: &C, parent_block: B::Hash, - para_id: ParaId, parachain_system_name: ParachainSystemName, ) -> Self { let starting_dmq_mqc_head = client @@ -147,7 +153,7 @@ impl MockXcmConfig { }) .unwrap_or_default(); - Self { para_id, starting_dmq_mqc_head, starting_hrmp_mqc_heads } + Self { starting_dmq_mqc_head, starting_hrmp_mqc_heads } } } @@ -159,13 +165,15 @@ impl> InherentDataProvider &self, inherent_data: &mut InherentData, ) -> Result<(), sp_inherents::Error> { + // Use the "sproof" (spoof proof) builder to build valid mock state root and proof. + let mut sproof_builder = + RelayStateSproofBuilder { para_id: self.para_id, ..Default::default() }; + // Calculate the mocked relay block based on the current para block let relay_parent_number = self.relay_offset + self.relay_blocks_per_para_block * self.current_para_block; - - // Use the "sproof" (spoof proof) builder to build valid mock state root and proof. 
- let mut sproof_builder = - RelayStateSproofBuilder { para_id: self.xcm_config.para_id, ..Default::default() }; + sproof_builder.current_slot = + ((relay_parent_number / RELAY_CHAIN_SLOT_DURATION_MILLIS) as u64).into(); // Process the downward messages and set up the correct head let mut downward_messages = Vec::new(); @@ -217,6 +225,9 @@ impl> InherentDataProvider sproof_builder.additional_key_values = key_values.clone() } + // Inject current para block head, if any + sproof_builder.included_para_head = self.current_para_block_head.clone(); + let (relay_parent_storage_root, proof) = sproof_builder.into_state_root_and_proof(); inherent_data.put_data( diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index 571935620d6d90b3aa1157124a2bae6fd73a19c2..7afe7fae34bd799ed75d557ad5f0ca5067743f7f 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -10,7 +10,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" rand = "0.8.5" diff --git a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs index 2c635320ff4ae6f68f33bb9da5ca545098851f65..c41c543f04d1f0d3f3a9fe7356dc7ab3ed5e5f11 100644 --- a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs +++ b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs @@ -56,6 +56,7 @@ impl ActiveCandidateRecovery { candidate.receipt.clone(), candidate.session_index, None, + None, tx, ), "ActiveCandidateRecovery", diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 6df9847252fecf8e4bdcdec0ff9f830d6530668b..5962c68bba7a561b05a1710c2477b31964bbfe07 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -23,4 +23,4 @@ futures = "0.3.28" async-trait = "0.1.79" thiserror = { workspace = true } jsonrpsee-core = "0.22" -parity-scale-codec = "3.6.4" +parity-scale-codec = "3.6.12" diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index b84427c3a75a55e30d9867cbe8d45522a2d2f8c0..699393e2d48a7d6a0260246e9cef28b168c4a33a 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -285,5 +285,8 @@ fn build_request_response_protocol_receivers< let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config::<_, Network>(request_protocol_names); config.add_request_response_protocol(cfg); + let cfg = + Protocol::ChunkFetchingV2.get_outbound_only_config::<_, Network>(request_protocol_names); + config.add_request_response_protocol(cfg); (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) } diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 14981677289561040875b0951dbdbffb5854a439..2ec42ebca851e4d30a9ac17acd923f005956c287 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -32,7 +32,7 @@ tokio-util = { version = "0.7.8", features = ["compat"] } futures = "0.3.28" futures-timer = "3.0.2" -parity-scale-codec = "3.6.4" 
+parity-scale-codec = "3.6.12" jsonrpsee = { version = "0.22", features = ["ws-client"] } tracing = "0.1.37" async-trait = "0.1.79" diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index fe717596f9b307993f968ea0e7b58faef591dcf7..daff5ef8f482e82b80f341d5208cad51b23b7b1a 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 25ca2fe057baf2a3e9c313556961615a3b14c2f7..f30802fa5d82ecb93e8610e7c7bb17a2a83cacb4 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { workspace = true } -codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.0.0" } +codec = { default-features = false, features = ["derive"], package = "parity-scale-codec", version = "3.6.12" } rand = { version = "0.8.5", features = ["std_rng"], default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs index 4a440dfe1e92f26685415d260e0af9b1eb56a9a4..196184d62781e4a79968ed9834460e6bc39b0f98 100644 --- a/cumulus/pallets/collator-selection/src/mock.rs +++ b/cumulus/pallets/collator-selection/src/mock.rs @@ -46,7 +46,6 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const BlockHashCount: u64 = 250; pub const SS58Prefix: u8 = 42; } @@ -65,7 +64,6 @@ impl system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index b2b24aeed72ba28ea320a90b8e725ce983fa1a07..687cda164fb0bd3d4aefb9d6b51f6735ef3a43c3 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -14,7 +14,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/cumulus/pallets/dmp-queue/src/lib.rs b/cumulus/pallets/dmp-queue/src/lib.rs index 79cc4bc895ec29b87eb95e629e0a9624fd7f62a5..9b3ec684febab81fb535c52118cbadb64b61e984 100644 --- a/cumulus/pallets/dmp-queue/src/lib.rs +++ b/cumulus/pallets/dmp-queue/src/lib.rs @@ -21,6 +21,7 @@ //! from the runtime once `Completed` was emitted. #![cfg_attr(not(feature = "std"), no_std)] +#![allow(deprecated)] // The pallet itself is deprecated. 
use migration::*; pub use pallet::*; @@ -38,6 +39,9 @@ pub type MaxDmpMessageLenOf = <::DmpSink as frame_support::traits::HandleMessage>::MaxMessageLen; #[frame_support::pallet] +#[deprecated( + note = "`cumulus-pallet-dmp-queue` will be removed after November 2024. It can be removed once its lazy migration completed. See ." +)] pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, traits::HandleMessage, weights::WeightMeter}; diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 57e274db361de98533fd258c554abcff498060b3..1a6a19f2ab4a2523074bd02aa96053a0f7236140 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bytes = { version = "1.4.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } environmental = { version = "1.1.4", default-features = false } impl-trait-for-tuples = "0.2.1" log = { workspace = true } @@ -122,5 +122,3 @@ try-runtime = [ "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] - -parameterized-consensus-hook = [] diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index c8e7d1bb30f7166d50ab9aed8d1dfd0526e8f36e..bbb74a1b053886b90740562e077ce40804eb01fd 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -30,7 +30,7 @@ use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, - GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, MessageSendError, + GetChannelInfo, InboundDownwardMessage, InboundHrmpMessage, ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource, }; @@ -245,10 +245,6 @@ pub mod pallet { /// [`consensus_hook::ExpectParentIncluded`] here. This is only necessary in the case /// that collators aren't expected to have node versions that supply the included block /// in the relay-chain state proof. - /// - /// This config type is only available when the `parameterized-consensus-hook` crate feature - /// is activated. - #[cfg(feature = "parameterized-consensus-hook")] type ConsensusHook: ConsensusHook; } @@ -556,10 +552,8 @@ pub mod pallet { .expect("Invalid relay chain state proof"); // Update the desired maximum capacity according to the consensus hook. - #[cfg(feature = "parameterized-consensus-hook")] - let (consensus_hook_weight, capacity) = T::ConsensusHook::on_state_proof(&relay_state_proof); - #[cfg(not(feature = "parameterized-consensus-hook"))] - let (consensus_hook_weight, capacity) = ExpectParentIncluded::on_state_proof(&relay_state_proof); + let (consensus_hook_weight, capacity) = + T::ConsensusHook::on_state_proof(&relay_state_proof); total_weight += consensus_hook_weight; total_weight += Self::maybe_drop_included_ancestors(&relay_state_proof, capacity); // Deposit a log indicating the relay-parent storage root. @@ -749,14 +743,13 @@ pub mod pallet { /// The segment length is limited by the capacity returned from the [`ConsensusHook`] configured /// in the pallet. 
#[pallet::storage] - pub(super) type UnincludedSegment = - StorageValue<_, Vec>, ValueQuery>; + pub type UnincludedSegment = StorageValue<_, Vec>, ValueQuery>; /// Storage field that keeps track of bandwidth used by the unincluded segment along with the /// latest HRMP watermark. Used for limiting the acceptance of new blocks with /// respect to relay chain constraints. #[pallet::storage] - pub(super) type AggregatedUnincludedSegment = + pub type AggregatedUnincludedSegment = StorageValue<_, SegmentTracker, OptionQuery>; /// In case of a scheduled upgrade, this storage field contains the validation code to be @@ -766,7 +759,7 @@ pub mod pallet { /// [`:code`][sp_core::storage::well_known_keys::CODE] which will result the next block process /// with the new validation code. This concludes the upgrade process. #[pallet::storage] - pub(super) type PendingValidationCode = StorageValue<_, Vec, ValueQuery>; + pub type PendingValidationCode = StorageValue<_, Vec, ValueQuery>; /// Validation code that is set by the parachain and is to be communicated to collator and /// consequently the relay-chain. @@ -774,23 +767,23 @@ pub mod pallet { /// This will be cleared in `on_initialize` of each new block if no other pallet already set /// the value. #[pallet::storage] - pub(super) type NewValidationCode = StorageValue<_, Vec, OptionQuery>; + pub type NewValidationCode = StorageValue<_, Vec, OptionQuery>; /// The [`PersistedValidationData`] set for this block. /// This value is expected to be set only once per block and it's never stored /// in the trie. #[pallet::storage] - pub(super) type ValidationData = StorageValue<_, PersistedValidationData>; + pub type ValidationData = StorageValue<_, PersistedValidationData>; /// Were the validation data set to notify the relay chain? #[pallet::storage] - pub(super) type DidSetValidationCode = StorageValue<_, bool, ValueQuery>; + pub type DidSetValidationCode = StorageValue<_, bool, ValueQuery>; /// The relay chain block number associated with the last parachain block. /// /// This is updated in `on_finalize`. #[pallet::storage] - pub(super) type LastRelayChainBlockNumber = + pub type LastRelayChainBlockNumber = StorageValue<_, RelayChainBlockNumber, ValueQuery>; /// An option which indicates if the relay-chain restricts signalling a validation code upgrade. @@ -801,7 +794,7 @@ pub mod pallet { /// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is /// set after the inherent. #[pallet::storage] - pub(super) type UpgradeRestrictionSignal = + pub type UpgradeRestrictionSignal = StorageValue<_, Option, ValueQuery>; /// Optional upgrade go-ahead signal from the relay-chain. @@ -810,7 +803,7 @@ pub mod pallet { /// relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is /// set after the inherent. #[pallet::storage] - pub(super) type UpgradeGoAhead = + pub type UpgradeGoAhead = StorageValue<_, Option, ValueQuery>; /// The state proof for the last relay parent block. @@ -820,7 +813,7 @@ pub mod pallet { /// /// This data is also absent from the genesis. #[pallet::storage] - pub(super) type RelayStateProof = StorageValue<_, sp_trie::StorageProof>; + pub type RelayStateProof = StorageValue<_, sp_trie::StorageProof>; /// The snapshot of some state related to messaging relevant to the current parachain as per /// the relay parent. @@ -830,7 +823,7 @@ pub mod pallet { /// /// This data is also absent from the genesis. 
#[pallet::storage] - pub(super) type RelevantMessagingState = StorageValue<_, MessagingStateSnapshot>; + pub type RelevantMessagingState = StorageValue<_, MessagingStateSnapshot>; /// The parachain host configuration that was obtained from the relay parent. /// @@ -840,53 +833,51 @@ pub mod pallet { /// This data is also absent from the genesis. #[pallet::storage] #[pallet::disable_try_decode_storage] - pub(super) type HostConfiguration = StorageValue<_, AbridgedHostConfiguration>; + pub type HostConfiguration = StorageValue<_, AbridgedHostConfiguration>; /// The last downward message queue chain head we have observed. /// /// This value is loaded before and saved after processing inbound downward messages carried /// by the system inherent. #[pallet::storage] - pub(super) type LastDmqMqcHead = StorageValue<_, MessageQueueChain, ValueQuery>; + pub type LastDmqMqcHead = StorageValue<_, MessageQueueChain, ValueQuery>; /// The message queue chain heads we have observed per each channel incoming channel. /// /// This value is loaded before and saved after processing inbound downward messages carried /// by the system inherent. #[pallet::storage] - pub(super) type LastHrmpMqcHeads = + pub type LastHrmpMqcHeads = StorageValue<_, BTreeMap, ValueQuery>; /// Number of downward messages processed in a block. /// /// This will be cleared in `on_initialize` of each new block. #[pallet::storage] - pub(super) type ProcessedDownwardMessages = StorageValue<_, u32, ValueQuery>; + pub type ProcessedDownwardMessages = StorageValue<_, u32, ValueQuery>; /// HRMP watermark that was set in a block. /// /// This will be cleared in `on_initialize` of each new block. #[pallet::storage] - pub(super) type HrmpWatermark = - StorageValue<_, relay_chain::BlockNumber, ValueQuery>; + pub type HrmpWatermark = StorageValue<_, relay_chain::BlockNumber, ValueQuery>; /// HRMP messages that were sent in a block. /// /// This will be cleared in `on_initialize` of each new block. #[pallet::storage] - pub(super) type HrmpOutboundMessages = + pub type HrmpOutboundMessages = StorageValue<_, Vec, ValueQuery>; /// Upward messages that were sent in a block. /// /// This will be cleared in `on_initialize` of each new block. #[pallet::storage] - pub(super) type UpwardMessages = StorageValue<_, Vec, ValueQuery>; + pub type UpwardMessages = StorageValue<_, Vec, ValueQuery>; /// Upward messages that are still pending and not yet send to the relay chain. #[pallet::storage] - pub(super) type PendingUpwardMessages = - StorageValue<_, Vec, ValueQuery>; + pub type PendingUpwardMessages = StorageValue<_, Vec, ValueQuery>; /// Initialization value for the delivery fee factor for UMP. #[pallet::type_value] @@ -896,29 +887,29 @@ pub mod pallet { /// The factor to multiply the base delivery fee by for UMP. #[pallet::storage] - pub(super) type UpwardDeliveryFeeFactor = + pub type UpwardDeliveryFeeFactor = StorageValue<_, FixedU128, ValueQuery, UpwardInitialDeliveryFeeFactor>; /// The number of HRMP messages we observed in `on_initialize` and thus used that number for /// announcing the weight of `on_initialize` and `on_finalize`. #[pallet::storage] - pub(super) type AnnouncedHrmpMessagesPerCandidate = StorageValue<_, u32, ValueQuery>; + pub type AnnouncedHrmpMessagesPerCandidate = StorageValue<_, u32, ValueQuery>; /// The weight we reserve at the beginning of the block for processing XCMP messages. This /// overrides the amount set in the Config trait. 
#[pallet::storage] - pub(super) type ReservedXcmpWeightOverride = StorageValue<_, Weight>; + pub type ReservedXcmpWeightOverride = StorageValue<_, Weight>; /// The weight we reserve at the beginning of the block for processing DMP messages. This /// overrides the amount set in the Config trait. #[pallet::storage] - pub(super) type ReservedDmpWeightOverride = StorageValue<_, Weight>; + pub type ReservedDmpWeightOverride = StorageValue<_, Weight>; /// A custom head data that should be returned as result of `validate_block`. /// /// See `Pallet::set_custom_validation_head_data` for more information. #[pallet::storage] - pub(super) type CustomValidationHeadData = StorageValue<_, Vec, OptionQuery>; + pub type CustomValidationHeadData = StorageValue<_, Vec, OptionQuery>; #[pallet::inherent] impl ProvideInherent for Pallet { @@ -1022,6 +1013,13 @@ impl FeeTracker for Pallet { } } +impl ListChannelInfos for Pallet { + fn outgoing_channels() -> Vec { + let Some(state) = RelevantMessagingState::::get() else { return Vec::new() }; + state.egress_channels.into_iter().map(|(id, _)| id).collect() + } +} + impl GetChannelInfo for Pallet { fn get_channel_status(id: ParaId) -> ChannelStatus { // Note, that we are using `relevant_messaging_state` which may be from the previous @@ -1632,10 +1630,8 @@ impl polkadot_runtime_common::xcm_sender::EnsureForParachain for Pall } /// Something that can check the inherents of a block. -#[cfg_attr( - feature = "parameterized-consensus-hook", - deprecated = "consider switching to `cumulus-pallet-parachain-system::ConsensusHook`" -)] +#[deprecated(note = "This trait is deprecated and will be removed by September 2024. \ + Consider switching to `cumulus-pallet-parachain-system::ConsensusHook`")] pub trait CheckInherents { /// Check all inherents of the block. /// diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index fe89dfe68c67e4b5d9f9e046dcf2a780a292da0a..da904c0079a00a39dcca24c2d408d0bb381b2252 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -55,7 +55,6 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const BlockHashCount: u64 = 250; pub Version: RuntimeVersion = RuntimeVersion { spec_name: sp_version::create_runtime_str!("test"), impl_name: sp_version::create_runtime_str!("system-test"), @@ -74,7 +73,6 @@ parameter_types! 
{ #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; - type BlockHashCount = BlockHashCount; type Version = Version; type OnSetCode = ParachainSetCode; } @@ -122,7 +120,7 @@ impl pallet_message_queue::Config for Test { type Size = u32; type QueueChangeHandler = (); type QueuePausedQuery = (); - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MaxWeight; type IdleMaxServiceWeight = (); diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index ecab7a9a09311ae0f952a7842e9f1826b00adc69..956962fce157d3c5d090dd4bd8de78bba4d80405 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -80,6 +80,7 @@ environmental::environmental!(recorder: trait ProofSizeProvider); /// ensuring that the final storage root matches the storage root in the header of the block. In the /// end we return back the [`ValidationResult`] with all the required information for the validator. #[doc(hidden)] +#[allow(deprecated)] pub fn validate_block< B: BlockT, E: ExecuteBlock, @@ -186,6 +187,7 @@ where ) .expect("Invalid relay chain state proof"); + #[allow(deprecated)] let res = CI::check_inherents(&block, &relay_chain_proof); if !res.ok() { diff --git a/cumulus/pallets/session-benchmarking/Cargo.toml b/cumulus/pallets/session-benchmarking/Cargo.toml index 43fde4ea6009cce7d2dedceb815e2948beb80196..001c3d8aceac5a5db27aba87d666fdf83e92021c 100644 --- a/cumulus/pallets/session-benchmarking/Cargo.toml +++ b/cumulus/pallets/session-benchmarking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parity-scale-codec = { version = "3.6.4", default-features = false } +parity-scale-codec = { version = "3.6.12", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index 417038d7833c5ca4ec4acf681cba73d24ab45c5e..17b0fb2a01662d517a49d1bfd669ed071caf0ed7 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 9122e110fb92e51b0f85e3ba4136d30178d8ab7a..178d981702f2e6dc42d05556e20a86f50106b6ee 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -10,7 +10,7 @@ description = "Pallet for stuff specific to parachains' usage of XCM" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", 
default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../substrate/primitives/std", default-features = false } diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index e3530ef7bf0e1e093ddb85e639a7a3ff757ad2da..87602978521fc363539b2aeb81f5b485dbe409ea 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"], default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } @@ -48,7 +48,7 @@ pallet-balances = { path = "../../../substrate/frame/balances" } frame-support = { path = "../../../substrate/frame/support", features = ["experimental"] } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system", features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../parachain-system" } [features] default = ["std"] diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index cc785b66150e52731cdb03d2c41aaad0058d3ef9..5633f05f13bb81370a23512effefaf6ae1fb23fa 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -51,7 +51,7 @@ pub mod weights; pub use weights::WeightInfo; use bounded_collections::BoundedBTreeSet; -use codec::{Decode, DecodeLimit, Encode}; +use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use cumulus_primitives_core::{ relay_chain::BlockNumber as RelayBlockNumber, ChannelStatus, GetChannelInfo, MessageSendError, ParaId, XcmpMessageFormat, XcmpMessageHandler, XcmpMessageSource, @@ -59,7 +59,7 @@ use cumulus_primitives_core::{ use frame_support::{ defensive, defensive_assert, - traits::{EnqueueMessage, EnsureOrigin, Get, QueueFootprint, QueuePausedQuery}, + traits::{Defensive, EnqueueMessage, EnsureOrigin, Get, QueueFootprint, QueuePausedQuery}, weights::{Weight, WeightMeter}, BoundedVec, }; @@ -68,7 +68,7 @@ use polkadot_runtime_common::xcm_sender::PriceForMessageDelivery; use polkadot_runtime_parachains::FeeTracker; use scale_info::TypeInfo; use sp_core::MAX_POSSIBLE_ALLOCATION; -use sp_runtime::{FixedU128, RuntimeDebug, Saturating}; +use sp_runtime::{FixedU128, RuntimeDebug, Saturating, WeakBoundedVec}; use sp_std::prelude::*; use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm, WrapVersion, MAX_XCM_DECODE_DEPTH}; use xcm_builder::InspectMessageQueues; @@ -106,7 +106,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::storage_version(migration::STORAGE_VERSION)] - #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::config] @@ -133,6 +132,25 @@ pub mod pallet { #[pallet::constant] type MaxInboundSuspended: Get; + /// Maximal number of outbound XCMP channels that can have messages queued at the same time. + /// + /// If this is reached, then no further messages can be sent to channels that do not yet + /// have a message queued. This should be set to the expected maximum of outbound channels + /// which is determined by [`Self::ChannelInfo`]. It is important to set this large enough, + /// since otherwise the congestion control protocol will not work as intended and messages + /// may be dropped. 
This value increases the PoV and should therefore not be picked too + /// high. Governance needs to take care not to open more channels than this value. + #[pallet::constant] + type MaxActiveOutboundChannels: Get; + + /// The maximal page size for HRMP message pages. + /// + /// A lower limit can be set dynamically, but this is the hard limit for the worst-case PoV + /// benchmarking. The limit for the size of a message is slightly below this, since some + /// overhead is incurred for encoding the format. + #[pallet::constant] + type MaxPageSize: Get; + /// The origin that is allowed to resume or suspend the XCMP queue. type ControllerOrigin: EnsureOrigin; @@ -277,6 +295,10 @@ pub mod pallet { AlreadySuspended, /// The execution is already resumed. AlreadyResumed, + /// There are too many active outbound channels. + TooManyActiveOutboundChannels, + /// The message is too big. + TooBig, } /// The suspended inbound XCMP channels. All others are not suspended. #[pallet::storage] @@ -298,19 +320,28 @@ /// case of the need to send a high-priority signal message this block. /// The bool is true if there is a signal message waiting to be sent. #[pallet::storage] - pub(super) type OutboundXcmpStatus = - StorageValue<_, Vec, ValueQuery>; + pub(super) type OutboundXcmpStatus = StorageValue< + _, + BoundedVec, + ValueQuery, + >; - // The new way of doing it: /// The messages outbound in a given XCMP channel. #[pallet::storage] - pub(super) type OutboundXcmpMessages = - StorageDoubleMap<_, Blake2_128Concat, ParaId, Twox64Concat, u16, Vec, ValueQuery>; + pub(super) type OutboundXcmpMessages = StorageDoubleMap< + _, + Blake2_128Concat, + ParaId, + Twox64Concat, + u16, + WeakBoundedVec, + ValueQuery, + >; /// Any signal messages waiting to be sent. #[pallet::storage] pub(super) type SignalMessages = - StorageMap<_, Blake2_128Concat, ParaId, Vec, ValueQuery>; + StorageMap<_, Blake2_128Concat, ParaId, WeakBoundedVec, ValueQuery>; /// The configuration which controls the dynamics of the outbound queue. #[pallet::storage] @@ -332,15 +363,14 @@ pub mod pallet { StorageMap<_, Twox64Concat, ParaId, FixedU128, ValueQuery, InitialFactor>; } -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum OutboundState { Ok, Suspended, } /// Struct containing detailed information about the outbound channel. -#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(Debug))] +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, RuntimeDebug, MaxEncodedLen)] pub struct OutboundChannelDetails { /// The `ParaId` of the parachain that this channel is connected with. recipient: ParaId, @@ -376,7 +406,7 @@ impl OutboundChannelDetails { } } -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct QueueConfigData { /// The number of pages which must be in the queue for the other side to be told to suspend /// their sending.
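The interplay between `MaxPageSize` and the largest deliverable message described above can be checked with plain arithmetic: the usable payload per page is the page size minus the encoded `XcmpMessageFormat` prefix. A standalone sketch, using the 103 KiB page size this PR picks for the mock runtime and assuming a one-byte format prefix (both numbers are illustrative assumptions, not values fixed by this PR):

/// Usable payload per page: the page size minus the encoded format prefix.
fn max_message_len(max_page_size: u32, format_prefix_len: u32) -> u32 {
    max_page_size.saturating_sub(format_prefix_len)
}

fn main() {
    let max_page_size = 103 * 1024; // mirrors `MaxPageSize` in the mock runtime below
    let usable = max_message_len(max_page_size, 1);
    // Most on-chain HRMP channels accept messages up to 102400 bytes, so the
    // page must keep at least that much room after the prefix.
    assert!(usable >= 102_400);
    println!("usable payload per page: {usable} bytes");
}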
@@ -479,7 +509,10 @@ impl Pallet { { details } else { - all_channels.push(OutboundChannelDetails::new(recipient)); + all_channels.try_push(OutboundChannelDetails::new(recipient)).map_err(|e| { + log::error!("Failed to activate HRMP channel: {:?}", e); + MessageSendError::TooManyChannels + })?; all_channels .last_mut() .expect("can't be empty; a new element was just pushed; qed") @@ -504,7 +537,9 @@ impl Pallet { if page.len() + encoded_fragment.len() > max_message_size { return None } - page.extend_from_slice(&encoded_fragment[..]); + for frag in encoded_fragment.iter() { + page.try_push(*frag).ok()?; + } Some(page.len()) }, ) @@ -522,7 +557,10 @@ impl Pallet { new_page.extend_from_slice(&encoded_fragment[..]); let last_page_size = new_page.len(); let number_of_pages = (channel_details.last_index - channel_details.first_index) as u32; - >::insert(recipient, page_index, new_page); + let bounded_page = BoundedVec::::try_from(new_page) + .map_err(|_| MessageSendError::TooBig)?; + let bounded_page = WeakBoundedVec::force_from(bounded_page.into_inner(), None); + >::insert(recipient, page_index, bounded_page); >::put(all_channels); (number_of_pages, last_page_size) }; @@ -544,17 +582,24 @@ impl Pallet { /// Sends a signal to the `dest` chain over XCMP. This is guaranteed to be dispatched on this /// block. - fn send_signal(dest: ParaId, signal: ChannelSignal) { + fn send_signal(dest: ParaId, signal: ChannelSignal) -> Result<(), Error> { let mut s = >::get(); if let Some(details) = s.iter_mut().find(|item| item.recipient == dest) { details.signals_exist = true; } else { - s.push(OutboundChannelDetails::new(dest).with_signals()); + s.try_push(OutboundChannelDetails::new(dest).with_signals()) + .map_err(|_| Error::::TooManyActiveOutboundChannels)?; } - >::mutate(dest, |page| { - *page = (XcmpMessageFormat::Signals, signal).encode(); - }); + + let page = BoundedVec::::try_from( + (XcmpMessageFormat::Signals, signal).encode(), + ) + .map_err(|_| Error::::TooBig)?; + let page = WeakBoundedVec::force_from(page.into_inner(), None); + + >::insert(dest, page); >::put(s); + Ok(()) } fn suspend_channel(target: ParaId) { @@ -564,7 +609,9 @@ impl Pallet { defensive_assert!(ok, "WARNING: Attempt to suspend channel that was not Ok."); details.state = OutboundState::Suspended; } else { - s.push(OutboundChannelDetails::new(target).with_suspended_state()); + if s.try_push(OutboundChannelDetails::new(target).with_suspended_state()).is_err() { + defensive!("Cannot pause channel; too many outbound channels"); + } } }); } @@ -665,18 +712,25 @@ impl OnQueueChanged for Pallet { let suspended = suspended_channels.contains(¶); if suspended && fp.ready_pages <= resume_threshold { - Self::send_signal(para, ChannelSignal::Resume); - - suspended_channels.remove(¶); - >::put(suspended_channels); + if let Err(err) = Self::send_signal(para, ChannelSignal::Resume) { + log::error!("defensive: Could not send resumption signal to inbound channel of sibling {:?}: {:?}; channel remains suspended.", para, err); + } else { + suspended_channels.remove(¶); + >::put(suspended_channels); + } } else if !suspended && fp.ready_pages >= suspend_threshold { log::warn!("XCMP queue for sibling {:?} is full; suspending channel.", para); - Self::send_signal(para, ChannelSignal::Suspend); - if let Err(err) = suspended_channels.try_insert(para) { + if let Err(err) = Self::send_signal(para, ChannelSignal::Suspend) { + // It will retry if `drop_threshold` is not reached, but it could be too late. 
+ log::error!( + "defensive: Could not send suspension signal; future messages may be dropped: {:?}", err + ); + } else if let Err(err) = suspended_channels.try_insert(para) { log::error!("Too many channels suspended; cannot suspend sibling {:?}: {:?}; further messages may be dropped.", para, err); + } else { + >::put(suspended_channels); } - >::put(suspended_channels); } } } @@ -843,7 +897,7 @@ impl XcmpMessageSource for Pallet { // since it's so unlikely then for now we just drop it. defensive!("WARNING: oversize message in queue - dropping"); } else { - result.push((para_id, page)); + result.push((para_id, page.into_inner())); } let max_total_size = match T::ChannelInfo::get_channel_info(para_id) { @@ -891,7 +945,9 @@ impl XcmpMessageSource for Pallet { let pruned = old_statuses_len - statuses.len(); // removing an item from status implies a message being sent, so the result messages must // be no less than the pruned channels. - statuses.rotate_left(result.len().saturating_sub(pruned)); + let _ = statuses.try_rotate_left(result.len().saturating_sub(pruned)).defensive_proof( + "Could not store HRMP channels config. Some HRMP channels may be broken.", + ); >::put(statuses); diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index 1702cd70bc2fb7e09cf15fbe21294bdb76b96ef1..b64982a893029f51aeb689d94d54066165ac40a3 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -16,6 +16,8 @@ //! A module that is responsible for migration of storage. +pub mod v5; + use crate::{Config, OverweightIndex, Pallet, QueueConfig, QueueConfigData, DEFAULT_POV_SIZE}; use cumulus_primitives_core::XcmpMessageFormat; use frame_support::{ @@ -25,7 +27,7 @@ use frame_support::{ }; /// The in-code storage version. -pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); pub const LOG: &str = "runtime::xcmp-queue-migration"; diff --git a/cumulus/pallets/xcmp-queue/src/migration/v5.rs b/cumulus/pallets/xcmp-queue/src/migration/v5.rs new file mode 100644 index 0000000000000000000000000000000000000000..247adab7108fac8a278ee81827a9e65e27c320f0 --- /dev/null +++ b/cumulus/pallets/xcmp-queue/src/migration/v5.rs @@ -0,0 +1,108 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Migrates the storage to version 5. + +use crate::*; +use cumulus_primitives_core::ListChannelInfos; +use frame_support::{pallet_prelude::*, traits::UncheckedOnRuntimeUpgrade}; + +/// Configs needed to run the V5 migration. +pub trait V5Config: Config { + /// List all outbound channels with their target `ParaId` and maximum message size. + type ChannelList: ListChannelInfos; +} + +/// Ensures that the storage migrates cleanly to V5. 
+/// +/// The migration itself is a no-op, but it checks that none of the `BoundedVec`s would truncate on +/// the next decode after the upgrade was applied. +pub type MigrateV4ToV5 = frame_support::migrations::VersionedMigration< + 4, + 5, + unversioned::UncheckedMigrateV4ToV5, + Pallet, + ::DbWeight, +>; + +// V4 storage aliases +mod v4 { + use super::*; + + #[frame_support::storage_alias] + pub(super) type OutboundXcmpStatus = + StorageValue, Vec, ValueQuery>; + + #[frame_support::storage_alias] + pub(super) type OutboundXcmpMessages = StorageDoubleMap< + Pallet, + Blake2_128Concat, + ParaId, + Twox64Concat, + u16, + Vec, + ValueQuery, + >; + + #[frame_support::storage_alias] + pub(super) type SignalMessages = + StorageMap, Blake2_128Concat, ParaId, Vec, ValueQuery>; +} + +// Private module to hide the migration. +mod unversioned { + /// Please use [`MigrateV4ToV5`] instead. + pub struct UncheckedMigrateV4ToV5(core::marker::PhantomData); +} + +impl UncheckedOnRuntimeUpgrade for unversioned::UncheckedMigrateV4ToV5 { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + Default::default() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: Vec) -> Result<(), sp_runtime::DispatchError> { + // We don't need any front-run protection for this since channels are opened by governance. + ensure!( + v4::OutboundXcmpStatus::::get().len() as u32 <= T::MaxActiveOutboundChannels::get(), + "Too many outbound channels. Close some channels or increase `MaxActiveOutboundChannels`." + ); + + ensure!(T::MaxPageSize::get() >= 16, "Sanity check failed: MaxPageSize too small"); + + // Check whether any channel's max message size is too large. + let max_msg_len = T::MaxPageSize::get() - XcmpMessageFormat::max_encoded_len() as u32; + for channel in T::ChannelList::outgoing_channels() { + let info = T::ChannelInfo::get_channel_info(channel) + .expect("All listed channels must provide info"); + + if info.max_message_size > max_msg_len { + log::error!( + "Max message size for channel is too large. This means that the V5 \ + migration can be front-run and an attacker could place a large message just right \ + before the migration to make other messages un-decodable. Please either increase \ + `MaxPageSize` or decrease the `max_message_size` for this channel. Channel max: {}, \ + MaxPageSize: {}", + info.max_message_size, + max_msg_len + ); + return Err("Migration can be front-run".into()); + } + } + + Ok(()) + } +} diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index e258576aa3f6dd0342f712498654ef102fb6f321..e166a78ee822097ebadb613ed81dd344a6574bf0 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -52,7 +52,6 @@ frame_support::construct_runtime!( ); parameter_types!
{ - pub const BlockHashCount: u64 = 250; pub const SS58Prefix: u8 = 42; } @@ -73,7 +72,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -277,7 +275,11 @@ impl Config for Test { type ChannelInfo = MockedChannelInfo; type VersionWrapper = (); type XcmpQueue = EnqueueToLocalStorage>; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = SystemParachainAsSuperuser; type WeightInfo = (); @@ -319,10 +321,13 @@ impl GetChannelInfo for MockedChannelInfo { pub(crate) fn mk_page() -> Vec { let mut page = Vec::::new(); + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + for i in 0..100 { page.extend(match i % 2 { - 0 => v2_xcm().encode(), - 1 => v3_xcm().encode(), + 0 => versioned_xcm(older_xcm_version).encode(), + 1 => versioned_xcm(newer_xcm_version).encode(), // We cannot push an undecodable XCM here since it would break the decode stream. // This is expected and the whole reason to introduce `MaybeDoubleEncodedVersionedXcm` // instead. @@ -333,12 +338,9 @@ pub(crate) fn mk_page() -> Vec { page } -pub(crate) fn v2_xcm() -> VersionedXcm<()> { - let instr = xcm::v2::Instruction::<()>::ClearOrigin; - VersionedXcm::V2(xcm::v2::Xcm::<()>(vec![instr; 3])) -} - -pub(crate) fn v3_xcm() -> VersionedXcm<()> { - let instr = xcm::v3::Instruction::<()>::Trap(1); - VersionedXcm::V3(xcm::v3::Xcm::<()>(vec![instr; 3])) +pub(crate) fn versioned_xcm(version: XcmVersion) -> VersionedXcm<()> { + let instr = Instruction::<()>::Trap(1); + VersionedXcm::from(Xcm::<()>(vec![instr; 3])) + .into_version(version) + .expect("Version conversion should work") } diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index f48e9eec3ac05b85794a2f18fc279566ad613272..cdf41e27f0b27aa99a8fc73fd42f98b02ee0e48e 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -14,7 +14,7 @@ // limitations under the License. use super::{ - mock::{mk_page, v2_xcm, v3_xcm, EnqueuedMessages, HRMP_PARA_ID}, + mock::{mk_page, versioned_xcm, EnqueuedMessages, HRMP_PARA_ID}, *, }; use XcmpMessageFormat::*; @@ -520,7 +520,7 @@ fn hrmp_signals_are_prioritized() { }); // But a signal gets prioritized instead of the messages: - XcmpQueue::send_signal(sibling_para_id.into(), ChannelSignal::Suspend); + assert_ok!(XcmpQueue::send_signal(sibling_para_id.into(), ChannelSignal::Suspend)); let taken = XcmpQueue::take_outbound_messages(130); assert_eq!( @@ -536,8 +536,8 @@ fn hrmp_signals_are_prioritized() { #[test] fn maybe_double_encoded_versioned_xcm_works() { // pre conditions - assert_eq!(VersionedXcm::<()>::V2(Default::default()).encode(), &[2, 0]); assert_eq!(VersionedXcm::<()>::V3(Default::default()).encode(), &[3, 0]); + assert_eq!(VersionedXcm::<()>::V4(Default::default()).encode(), &[4, 0]); } // Now also testing a page instead of just concat messages. 
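The `[3, 0]` and `[4, 0]` byte strings asserted in `maybe_double_encoded_versioned_xcm_works` follow directly from SCALE encoding: the first byte is the `VersionedXcm` variant index, which is chosen to match the XCM version number, and the second is the compact-encoded length of the empty instruction vector. A small sketch of the same invariant, assuming the `xcm` and `parity-scale-codec` crates are available:

use codec::Encode;
use xcm::VersionedXcm;

fn main() {
    // An empty V4 message encodes to its variant index (4) followed by a
    // zero-length compact vector (0): exactly the `[4, 0]` asserted above.
    let msg: VersionedXcm<()> = VersionedXcm::V4(xcm::v4::Xcm(Vec::new()));
    assert_eq!(msg.encode(), vec![4u8, 0u8]);
}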
@@ -545,15 +545,18 @@ fn maybe_double_encoded_versioned_xcm_works() { fn maybe_double_encoded_versioned_xcm_decode_page_works() { let page = mk_page(); + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + // Now try to decode the page. let input = &mut &page[..]; for i in 0..100 { match (i % 2, VersionedXcm::<()>::decode(input)) { (0, Ok(xcm)) => { - assert_eq!(xcm, v2_xcm()); + assert_eq!(xcm, versioned_xcm(older_xcm_version)); }, (1, Ok(xcm)) => { - assert_eq!(xcm, v3_xcm()); + assert_eq!(xcm, versioned_xcm(newer_xcm_version)); }, unexpected => unreachable!("{:?}", unexpected), } @@ -568,14 +571,17 @@ fn take_first_concatenated_xcm_works() { let page = mk_page(); let input = &mut &page[..]; + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + for i in 0..100 { let xcm = XcmpQueue::take_first_concatenated_xcm(input, &mut WeightMeter::new()).unwrap(); match (i % 2, xcm) { (0, data) | (2, data) => { - assert_eq!(data, v2_xcm().encode()); + assert_eq!(data, versioned_xcm(older_xcm_version).encode()); }, (1, data) | (3, data) => { - assert_eq!(data, v3_xcm().encode()); + assert_eq!(data, versioned_xcm(newer_xcm_version).encode()); }, unexpected => unreachable!("{:?}", unexpected), } diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 66a705a40869ce6eb3fe2ac7a028ce68c784c18c..36cccd9b0b0dbad4a5f6c7240a69181ec7ffe5f3 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -11,14 +11,14 @@ "/dns/boot.stake.plus/tcp/34334/wss/p2p/12D3KooWAzSSZ7jLqMw1WPomYEKCYANQaKemXQ8BKoFvNEvfmdqR", "/dns/boot.metaspan.io/tcp/26052/p2p/12D3KooW9z9hKqe3mqYAp5UJMhZiCqhkTHyiR43fegnGmTJ3JAba", "/dns/boot.metaspan.io/tcp/26056/wss/p2p/12D3KooW9z9hKqe3mqYAp5UJMhZiCqhkTHyiR43fegnGmTJ3JAba", - "/dns/boot-cr.gatotech.network/tcp/33210/p2p/12D3KooWRMUYeWMPkadDG8baX9j1e95fspfp8MhPGym5BQza7Fm5", - "/dns/boot-cr.gatotech.network/tcp/35210/wss/p2p/12D3KooWRMUYeWMPkadDG8baX9j1e95fspfp8MhPGym5BQza7Fm5", + "/dns/boot.gatotech.network/tcp/33210/p2p/12D3KooWRMUYeWMPkadDG8baX9j1e95fspfp8MhPGym5BQza7Fm5", + "/dns/boot.gatotech.network/tcp/35210/wss/p2p/12D3KooWRMUYeWMPkadDG8baX9j1e95fspfp8MhPGym5BQza7Fm5", "/dns/statemine-bootnode.turboflakes.io/tcp/30320/p2p/12D3KooWN2Qqvp5wWgjbBMpbqhKgvSibSHfomP5VWVD9VCn3VrV4", "/dns/statemine-bootnode.turboflakes.io/tcp/30420/wss/p2p/12D3KooWN2Qqvp5wWgjbBMpbqhKgvSibSHfomP5VWVD9VCn3VrV4", "/dns/boot-node.helikon.io/tcp/10210/p2p/12D3KooWFXRQce3aMgZMn5SxvHtYH4PsR63TZLf8LrnBsEVTyzdr", "/dns/boot-node.helikon.io/tcp/10212/wss/p2p/12D3KooWFXRQce3aMgZMn5SxvHtYH4PsR63TZLf8LrnBsEVTyzdr", - "/dns/statemine.bootnode.amforc.com/tcp/30336/p2p/12D3KooWHmSyrBWsc6fdpq8HtCFWasmLVLYGKWA2a78m4xAHKyBq", - "/dns/statemine.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWHmSyrBWsc6fdpq8HtCFWasmLVLYGKWA2a78m4xAHKyBq", + "/dns/asset-hub-kusama.bootnode.amforc.com/tcp/30007/p2p/12D3KooWHy1CPndZYphwdVqMb295KPC6LRt17Ae3zNSr7evzeF5a", + "/dns/asset-hub-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWHy1CPndZYphwdVqMb295KPC6LRt17Ae3zNSr7evzeF5a", "/dns/statemine-boot-ng.dwellir.com/tcp/30343/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", "/dns/statemine-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", "/dns/statemine-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", diff --git 
a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 16caa52ba91376ed30187c89d249ff38ed1ea42a..f7f53f8d7246f42dda05901298831190e5d81802 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -11,14 +11,14 @@ "/dns/boot.stake.plus/tcp/35334/wss/p2p/12D3KooWFrQjYaPZSSLLxEVmoaHFcrF6VoY4awG4KRSLaqy3JCdQ", "/dns/boot.metaspan.io/tcp/16052/p2p/12D3KooWLwiJuvqQUB4kYaSjLenFKH9dWZhGZ4qi7pSb3sUYU651", "/dns/boot.metaspan.io/tcp/16056/wss/p2p/12D3KooWLwiJuvqQUB4kYaSjLenFKH9dWZhGZ4qi7pSb3sUYU651", - "/dns/boot-cr.gatotech.network/tcp/33110/p2p/12D3KooWKgwQfAeDoJARdtxFNNWfbYmcu6s4yUuSifnNoDgzHZgm", - "/dns/boot-cr.gatotech.network/tcp/35110/wss/p2p/12D3KooWKgwQfAeDoJARdtxFNNWfbYmcu6s4yUuSifnNoDgzHZgm", + "/dns/boot.gatotech.network/tcp/33110/p2p/12D3KooWKgwQfAeDoJARdtxFNNWfbYmcu6s4yUuSifnNoDgzHZgm", + "/dns/boot.gatotech.network/tcp/35110/wss/p2p/12D3KooWKgwQfAeDoJARdtxFNNWfbYmcu6s4yUuSifnNoDgzHZgm", "/dns/statemint-bootnode.turboflakes.io/tcp/30315/p2p/12D3KooWL8CyLww3m3pRySQGGYGNJhWDMqko3j5xi67ckP7hDUvo", "/dns/statemint-bootnode.turboflakes.io/tcp/30415/wss/p2p/12D3KooWL8CyLww3m3pRySQGGYGNJhWDMqko3j5xi67ckP7hDUvo", "/dns/boot-node.helikon.io/tcp/10220/p2p/12D3KooW9uybhguhDjVJc3U3kgZC3i8rWmAnSpbnJkmuR7C6ZsRW", "/dns/boot-node.helikon.io/tcp/10222/wss/p2p/12D3KooW9uybhguhDjVJc3U3kgZC3i8rWmAnSpbnJkmuR7C6ZsRW", - "/dns/statemint.bootnode.amforc.com/tcp/30341/p2p/12D3KooWByohP9FXn7ao8syS167qJsbFdpa7fY2Y24xbKtt3r7Ls", - "/dns/statemint.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWByohP9FXn7ao8syS167qJsbFdpa7fY2Y24xbKtt3r7Ls", + "/dns/asset-hub-polkadot.bootnode.amforc.com/tcp/30007/p2p/12D3KooWDLxPXYnSHjNwq9ibqgxuzRni5VViuGNSjNe3ueqVgqE3", + "/dns/asset-hub-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWDLxPXYnSHjNwq9ibqgxuzRni5VViuGNSjNe3ueqVgqE3", "/dns/statemint-boot-ng.dwellir.com/tcp/30344/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", "/dns/statemint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", "/dns/statemint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 3752213e702eb8d1ef86e042531cc1f6fdec2767..b4334bdfe1243eb4d6726ab3464d619aebac394a 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -13,14 +13,14 @@ "/dns/boot.stake.plus/tcp/33334/wss/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo", "/dns/boot.metaspan.io/tcp/36052/p2p/12D3KooWBCqfNb6Y39DXTr4UBWXyjuS3hcZM1qTbHhDXxF6HkAJJ", "/dns/boot.metaspan.io/tcp/36056/wss/p2p/12D3KooWBCqfNb6Y39DXTr4UBWXyjuS3hcZM1qTbHhDXxF6HkAJJ", - "/dns/boot-cr.gatotech.network/tcp/33310/p2p/12D3KooWMSW6hr8KcNBhGFN1bg8kYC76o67PnuDEbxRhxacW6dui", - "/dns/boot-cr.gatotech.network/tcp/35310/wss/p2p/12D3KooWMSW6hr8KcNBhGFN1bg8kYC76o67PnuDEbxRhxacW6dui", + "/dns/boot.gatotech.network/tcp/33310/p2p/12D3KooWMSW6hr8KcNBhGFN1bg8kYC76o67PnuDEbxRhxacW6dui", + "/dns/boot.gatotech.network/tcp/35310/wss/p2p/12D3KooWMSW6hr8KcNBhGFN1bg8kYC76o67PnuDEbxRhxacW6dui", "/dns/westmint-bootnode.turboflakes.io/tcp/30325/p2p/12D3KooWHU4qqSyqKdbXdrCTMXUJxxueaZjqpqSaQqYiFPw6XqEx", "/dns/westmint-bootnode.turboflakes.io/tcp/30425/wss/p2p/12D3KooWHU4qqSyqKdbXdrCTMXUJxxueaZjqpqSaQqYiFPw6XqEx", 
"/dns/boot-node.helikon.io/tcp/10200/p2p/12D3KooWMRY8wb7rMT81LLuivvsy6ahUxKHQgYJw4zm1hC1uYLxb", "/dns/boot-node.helikon.io/tcp/10202/wss/p2p/12D3KooWMRY8wb7rMT81LLuivvsy6ahUxKHQgYJw4zm1hC1uYLxb", - "/dns/westmint.bootnode.amforc.com/tcp/30339/p2p/12D3KooWNjKeaANaeZxBAPctmx8jugSYzuw4vnSCJmEDPB5mtRd6", - "/dns/westmint.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWNjKeaANaeZxBAPctmx8jugSYzuw4vnSCJmEDPB5mtRd6", + "/dns/asset-hub-westend.bootnode.amforc.com/tcp/30004/p2p/12D3KooWDfepM7kqUHMXdGqJw3ZmtvAcE2CjPcnYjT2tTfAw3ZBd", + "/dns/asset-hub-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWDfepM7kqUHMXdGqJw3ZmtvAcE2CjPcnYjT2tTfAw3ZBd", "/dns/westmint-boot-ng.dwellir.com/tcp/30345/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", "/dns/westmint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", "/dns/westmint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 6644ea41ab748548d3c6dcbd06951522672dbb53..2c63b52d78395167f5a60e1b96ffede355b3eddc 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -11,14 +11,14 @@ "/dns/boot.stake.plus/tcp/41334/wss/p2p/12D3KooWBzbs2jsXjG5dipktGPKaUm9XWvkmeJFsEAGkVt946Aa7", "/dns/boot.metaspan.io/tcp/26032/p2p/12D3KooWKfuSaZrLNz43PDgM4inMALXRHTSh2WBuqQtZRq8zmT1Z", "/dns/boot.metaspan.io/tcp/26036/wss/p2p/12D3KooWKfuSaZrLNz43PDgM4inMALXRHTSh2WBuqQtZRq8zmT1Z", - "/dns/boot-cr.gatotech.network/tcp/33230/p2p/12D3KooWFQFmg8UqAYLDNc2onySB6o5LLvpbx3eXZVqz9YFxAmXs", - "/dns/boot-cr.gatotech.network/tcp/35230/wss/p2p/12D3KooWFQFmg8UqAYLDNc2onySB6o5LLvpbx3eXZVqz9YFxAmXs", + "/dns/boot.gatotech.network/tcp/33230/p2p/12D3KooWFQFmg8UqAYLDNc2onySB6o5LLvpbx3eXZVqz9YFxAmXs", + "/dns/boot.gatotech.network/tcp/35230/wss/p2p/12D3KooWFQFmg8UqAYLDNc2onySB6o5LLvpbx3eXZVqz9YFxAmXs", "/dns/bridge-hub-kusama-bootnode.turboflakes.io/tcp/30615/p2p/12D3KooWE3dJXbwA5SQqbDNxHfj7BXJRcy2KiXWjJY4VUMKoa7S2", "/dns/bridge-hub-kusama-bootnode.turboflakes.io/tcp/30715/wss/p2p/12D3KooWE3dJXbwA5SQqbDNxHfj7BXJRcy2KiXWjJY4VUMKoa7S2", "/dns/boot-node.helikon.io/tcp/10250/p2p/12D3KooWDJLkhqQdXcVKWX7CqJHnpAY6PzrPc4ZG2CUWnARbmguy", "/dns/boot-node.helikon.io/tcp/10252/wss/p2p/12D3KooWDJLkhqQdXcVKWX7CqJHnpAY6PzrPc4ZG2CUWnARbmguy", - "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30337/p2p/12D3KooWGNeQJ5rXnEJkVUuQqwHd8aV5GkTAheaRoCaK8ZwW94id", - "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWGNeQJ5rXnEJkVUuQqwHd8aV5GkTAheaRoCaK8ZwW94id", + "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30010/p2p/12D3KooWNyTBwRvCz1Ey2SgC1f3MvymhiAyLEa3cL8kU5gFH3V7Z", + "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWNyTBwRvCz1Ey2SgC1f3MvymhiAyLEa3cL8kU5gFH3V7Z", "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/30337/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index c51c5eff89b86a0ce553bbcfc472f6fb5cf0d535..7d3ba8357037d7310c56ed609cbc43e0359ca2bc 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json 
+++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -9,8 +9,10 @@ "/dns/polkadot-bridge-hub-connect-a-1.polkadot.io/tcp/443/wss/p2p/12D3KooWG4ypDHLKGCv4BZ6PuaGUwQHKAH6p2D6arR2uQ1eiR1T3", "/dns/polkadot-bridge-hub-boot-ng.dwellir.com/tcp/30339/p2p/12D3KooWPZ38PL3PhRVcUVYDNn7nRcZF8MykmWWLBKeDV2yna1vV", "/dns/polkadot-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPZ38PL3PhRVcUVYDNn7nRcZF8MykmWWLBKeDV2yna1vV", - "/dns/boot-cr.gatotech.network/tcp/33130/p2p/12D3KooWCnFzfEdd7MwUNrrDv66FuS2DM5MGuiaB4y48XS7qNjF6", - "/dns/boot-cr.gatotech.network/tcp/35130/wss/p2p/12D3KooWCnFzfEdd7MwUNrrDv66FuS2DM5MGuiaB4y48XS7qNjF6", + "/dns/boot.gatotech.network/tcp/33130/p2p/12D3KooWCnFzfEdd7MwUNrrDv66FuS2DM5MGuiaB4y48XS7qNjF6", + "/dns/boot.gatotech.network/tcp/35130/wss/p2p/12D3KooWCnFzfEdd7MwUNrrDv66FuS2DM5MGuiaB4y48XS7qNjF6", + "/dns/boot.stake.plus/tcp/42333/p2p/12D3KooWEoTCu22Uab6prbfcD1FPpPZmfhkAVeMZQJ3fHnkCVmJz", + "/dns/boot.stake.plus/tcp/42334/wss/p2p/12D3KooWEoTCu22Uab6prbfcD1FPpPZmfhkAVeMZQJ3fHnkCVmJz", "/dns/bridge-hub-polkadot-bootnode.turboflakes.io/tcp/30610/p2p/12D3KooWNEgaQRQHJHvGDh8Rg4RyLmDCCz3yAf2gAdHZZJAUUD8Q", "/dns/bridge-hub-polkadot-bootnode.turboflakes.io/tcp/30710/wss/p2p/12D3KooWNEgaQRQHJHvGDh8Rg4RyLmDCCz3yAf2gAdHZZJAUUD8Q", "/dns/boot.metaspan.io/tcp/16032/p2p/12D3KooWQTfRnrK3FfbrotpSP5RVJbjBHVBSu8VSzhj9qcvjaqnZ", @@ -24,7 +26,9 @@ "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30517/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", - "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH" + "/dns/boot-polkadot-bridgehub.luckyfriday.io/tcp/443/wss/p2p/12D3KooWKf3mBXHjLbwtPqv1BdbQuwbFNcQQYxASS7iQ25264AXH", + "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp", + "/dns/bridge-hub-polkadot.bootnode.amforc.com/tcp/30010/p2p/12D3KooWGT5E56rAHfT5dY1pMLTrpAgV72yfDtD1Y5tPCHaTsifp" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 5140071ec44ca1956b01f608ce85ca2d6177a617..f98a046040f2226b5345d4d793e15ec3e24d6c43 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -11,8 +11,10 @@ "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/30338/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", - "/dns/boot-cr.gatotech.network/tcp/33330/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", - "/dns/boot-cr.gatotech.network/tcp/35330/wss/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", + "/dns/boot.stake.plus/tcp/40333/p2p/12D3KooWPGMsGPdGJx6HrByiKUyz91wgUHmjG5UXTmkJ9tUphAQn", + "/dns/boot.stake.plus/tcp/40334/wss/p2p/12D3KooWPGMsGPdGJx6HrByiKUyz91wgUHmjG5UXTmkJ9tUphAQn", + "/dns/boot.gatotech.network/tcp/33330/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", + 
"/dns/boot.gatotech.network/tcp/35330/wss/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", "/dns/bridge-hub-westend-bootnode.turboflakes.io/tcp/30620/p2p/12D3KooWLeExhPWCDUjcxCdzxTP5TpPbNBVG5t9MPvk1dZUM5naU", "/dns/bridge-hub-westend-bootnode.turboflakes.io/tcp/30720/wss/p2p/12D3KooWLeExhPWCDUjcxCdzxTP5TpPbNBVG5t9MPvk1dZUM5naU", "/dns/boot.metaspan.io/tcp/36032/p2p/12D3KooWPaLsu3buByBnGFQnp5UP4q1S652dGVft92TFeChizFir", @@ -25,7 +27,9 @@ "/dns/wbr13.rotko.net/tcp/34563/ws/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30523/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", - "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj" + "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", + "/dns/bridge-hub-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6", + "/dns/bridge-hub-westend.bootnode.amforc.com/tcp/30007/p2p/12D3KooWDSWod2gMtHxunXot538oEMw9p42pnPrpRELdsfYyT8R6" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index ce80e21ae625e813be0f71245bc454cb284455f3..a0d5ddff6ebb1d4808a48c5af16d3a60bb648ad8 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -11,14 +11,14 @@ "/dns/boot.stake.plus/tcp/37334/wss/p2p/12D3KooWRgFfEtwPo3xorKGYALRHRteKNgF37iN9q8xTLPYc34LA", "/dns/boot.metaspan.io/tcp/16072/p2p/12D3KooWJWTTu2t2yg5bFRH6tjEpfzKwZir5R9JRRjQpgFPXdDfp", "/dns/boot.metaspan.io/tcp/16076/wss/p2p/12D3KooWJWTTu2t2yg5bFRH6tjEpfzKwZir5R9JRRjQpgFPXdDfp", - "/dns/boot-cr.gatotech.network/tcp/33120/p2p/12D3KooWGZsa9tSeLQ1VeC996e1YsCPuyRYMipHQuXikPjcKcpVQ", - "/dns/boot-cr.gatotech.network/tcp/35120/wss/p2p/12D3KooWGZsa9tSeLQ1VeC996e1YsCPuyRYMipHQuXikPjcKcpVQ", + "/dns/boot.gatotech.network/tcp/33120/p2p/12D3KooWGZsa9tSeLQ1VeC996e1YsCPuyRYMipHQuXikPjcKcpVQ", + "/dns/boot.gatotech.network/tcp/35120/wss/p2p/12D3KooWGZsa9tSeLQ1VeC996e1YsCPuyRYMipHQuXikPjcKcpVQ", "/dns/collectives-polkadot-bootnode.turboflakes.io/tcp/30605/p2p/12D3KooWPyzM7eX64J4aG8uRfSARakDVtiEtthEM8FUjrLWAg2sC", "/dns/collectives-polkadot-bootnode.turboflakes.io/tcp/30705/wss/p2p/12D3KooWPyzM7eX64J4aG8uRfSARakDVtiEtthEM8FUjrLWAg2sC", "/dns/boot-node.helikon.io/tcp/10230/p2p/12D3KooWS8CBz4P5CBny9aBy2EQUvAExFo9PUVT57X8r3zWMFkXT", "/dns/boot-node.helikon.io/tcp/10232/wss/p2p/12D3KooWS8CBz4P5CBny9aBy2EQUvAExFo9PUVT57X8r3zWMFkXT", - "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30335/p2p/12D3KooWQeAjDnGkrPe5vtpfnB6ydZfWyMxyrXLkBFmA6o4k9aiU", - "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWQeAjDnGkrPe5vtpfnB6ydZfWyMxyrXLkBFmA6o4k9aiU", + "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30013/p2p/12D3KooWL6v6FHMtCP5VsiDbMHLRFiW6YBtv37BarpW3hLqnDski", + "/dns/collectives-polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWL6v6FHMtCP5VsiDbMHLRFiW6YBtv37BarpW3hLqnDski", "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/30341/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", 
"/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index fdd6348f02a9ce7b07c0483847a348c7b334f5c2..6182218d3670cfc661fe528d715bbf222c747900 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -13,14 +13,14 @@ "/dns/boot.stake.plus/tcp/38334/wss/p2p/12D3KooWQoVsFCfgu21iu6kdtQsU9T6dPn1wsyLn1U34yPerR6zQ", "/dns/boot.metaspan.io/tcp/36072/p2p/12D3KooWEf2QXWq5pAbFJLfbnexA7KYtRRDSPkqTP64n1KtdsdV2", "/dns/boot.metaspan.io/tcp/36076/wss/p2p/12D3KooWEf2QXWq5pAbFJLfbnexA7KYtRRDSPkqTP64n1KtdsdV2", - "/dns/boot-cr.gatotech.network/tcp/33320/p2p/12D3KooWMedtdBGiSn7HLZusHwafXkZAdmWD18ciGQBfS4X1fv9K", - "/dns/boot-cr.gatotech.network/tcp/35320/wss/p2p/12D3KooWMedtdBGiSn7HLZusHwafXkZAdmWD18ciGQBfS4X1fv9K", + "/dns/boot.gatotech.network/tcp/33320/p2p/12D3KooWMedtdBGiSn7HLZusHwafXkZAdmWD18ciGQBfS4X1fv9K", + "/dns/boot.gatotech.network/tcp/35320/wss/p2p/12D3KooWMedtdBGiSn7HLZusHwafXkZAdmWD18ciGQBfS4X1fv9K", "/dns/collectives-westend-bootnode.turboflakes.io/tcp/30600/p2p/12D3KooWAe9CFXp6je3TAPQJE135KRemTLSqEqQBZMFwJontrThZ", "/dns/collectives-westend-bootnode.turboflakes.io/tcp/30700/wss/p2p/12D3KooWAe9CFXp6je3TAPQJE135KRemTLSqEqQBZMFwJontrThZ", "/dns/boot-node.helikon.io/tcp/10260/p2p/12D3KooWMzfnt29VAmrJHQcJU6Vfn4RsMbqPqgyWHqt9VTTAbSrL", "/dns/boot-node.helikon.io/tcp/10262/wss/p2p/12D3KooWMzfnt29VAmrJHQcJU6Vfn4RsMbqPqgyWHqt9VTTAbSrL", - "/dns/collectives-westend.bootnode.amforc.com/tcp/30340/p2p/12D3KooWERPzUhHau6o2XZRUi3tn7544rYiaHL418Nw5t8fYWP1F", - "/dns/collectives-westend.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWERPzUhHau6o2XZRUi3tn7544rYiaHL418Nw5t8fYWP1F", + "/dns/collectives-westend.bootnode.amforc.com/tcp/30010/p2p/12D3KooWRfefWRo1AAB8LCJhVr8DDe9CvBmmKUzJpjd2RGk82pnL", + "/dns/collectives-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWRfefWRo1AAB8LCJhVr8DDe9CvBmmKUzJpjd2RGk82pnL", "/dns/collectives-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWMAgVm1PnsLVfxoDLCbYv1DgnN6tjcRQbrq8xhbwo4whE", "/dns/collectives-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWMAgVm1PnsLVfxoDLCbYv1DgnN6tjcRQbrq8xhbwo4whE", "/dns/westend-collectives-boot-ng.dwellir.com/tcp/30340/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", diff --git a/cumulus/parachains/chain-specs/coretime-kusama.json b/cumulus/parachains/chain-specs/coretime-kusama.json index c22daf54db24fb167ea92e8dfb6597b029201c71..f9310d6c7cc6e14f54b4cc6620bece8b626725d1 100644 --- a/cumulus/parachains/chain-specs/coretime-kusama.json +++ b/cumulus/parachains/chain-specs/coretime-kusama.json @@ -8,7 +8,25 @@ "/dns/kusama-coretime-connect-a-0.polkadot.io/tcp/443/wss/p2p/12D3KooWR7Biy6nPgQFhk2eYP62pAkcFA6he9RUFURTDh7ewTjpo", "/dns/kusama-coretime-connect-a-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAGFiMZDF9RxdacrkenzGdo8nhfSe9EXofHc5mHeJ9vGX", "/dns/boot.metaspan.io/tcp/33024/p2p/12D3KooWPmwMhG54ixDv2b3sCfYEJ1DWDrjaduBCBwqFFdqvVsmS", - "/dns/boot.metaspan.io/tcp/33026/wss/p2p/12D3KooWPmwMhG54ixDv2b3sCfYEJ1DWDrjaduBCBwqFFdqvVsmS" + "/dns/boot.metaspan.io/tcp/33026/wss/p2p/12D3KooWPmwMhG54ixDv2b3sCfYEJ1DWDrjaduBCBwqFFdqvVsmS", + "/dns/boot.stake.plus/tcp/47333/p2p/12D3KooWKKKoyywqdkkpZzCzVWt5VXEk5PbS9tUm635L5ohyf8bU", + "/dns/boot.stake.plus/tcp/47334/wss/p2p/12D3KooWKKKoyywqdkkpZzCzVWt5VXEk5PbS9tUm635L5ohyf8bU", + 
"/dns/coretime-kusama-boot-ng.dwellir.com/tcp/30358/p2p/12D3KooWSoPisbYQTAj79Dtsxx1qAiEFTouvXCfNJ1A3SQWQzuct", + "/dns/coretime-kusama-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWSoPisbYQTAj79Dtsxx1qAiEFTouvXCfNJ1A3SQWQzuct", + "/dns/boot.gatotech.network/tcp/33250/p2p/12D3KooWMpgcWr5pb7em7rWaQV4J6P2kn3YCjCeP1ESMsJPffn1a", + "/dns/boot.gatotech.network/tcp/35250/wss/p2p/12D3KooWMpgcWr5pb7em7rWaQV4J6P2kn3YCjCeP1ESMsJPffn1a", + "/dns/kcore16.rotko.net/tcp/33726/p2p/12D3KooWCyPSkk5cq2eEdw1qHizfa6UT4QggSarCEtcvNXpnod8B", + "/dns/kcore16.rotko.net/tcp/35726/wss/p2p/12D3KooWCyPSkk5cq2eEdw1qHizfa6UT4QggSarCEtcvNXpnod8B", + "/dns/coretime-kusama-bootnode.turboflakes.io/tcp/30660/p2p/12D3KooWHTr9GLvJEnGYKCu3FHC3DwqBiFg9MQUWsjPCP4YH5xyf", + "/dns/coretime-kusama-bootnode.turboflakes.io/tcp/30760/wss/p2p/12D3KooWHTr9GLvJEnGYKCu3FHC3DwqBiFg9MQUWsjPCP4YH5xyf", + "/dns/coretime-kusama.bootnodes.polkadotters.com/tcp/30371/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/coretime-kusama.bootnodes.polkadotters.com/tcp/30373/wss/p2p/12D3KooWHy7TAuK6EoVij2tfaeh3KkaEJxhTmumbEom3HfRnSEsp", + "/dns/boot-node.helikon.io/tcp/7420/p2p/12D3KooWK4eKFpYftyuLdBdXrkdJXHKt7KZcNLb92Ufkvo17B9T2", + "/dns/boot-node.helikon.io/tcp/7422/wss/p2p/12D3KooWK4eKFpYftyuLdBdXrkdJXHKt7KZcNLb92Ufkvo17B9T2", + "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL", + "/dns/coretime-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWFzW9AgxNfkVNCepVByS7URDCRDAA5p3XzBLVptqZvWoL", + "/dns/coretime-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P", + "/dns/coretime-kusama.bootnode.amforc.com/tcp/30013/p2p/12D3KooWPrgxrrumrANp6Bp2SMEwMQHPHDbPzA1HbcrakZrbFi5P" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index ab2e97fdbf41d43dfc4d75e30358cdaefd21a630..ca723aacd881cbe56dbedc123b062465c68ec40b 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -5,6 +5,8 @@ "bootNodes": [ "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", + "/dns/coretime-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWK7Zj1mCPg6h3eMp7v6akJ1o6AocRr59NLusDwBXQgrhw", + "/dns/coretime-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWK7Zj1mCPg6h3eMp7v6akJ1o6AocRr59NLusDwBXQgrhw", "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", @@ -14,7 +16,21 @@ "/dns/boot-node.helikon.io/tcp/9420/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", "/dns/boot-node.helikon.io/tcp/9422/wss/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", "/dns/coretime-westend-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWHewSFwJueRprNZNfkncdjud9DrGzvP1qfmgPd7VK66gw", - "/dns/coretime-westend-boot-ng.dwellir.com/tcp/30356/p2p/12D3KooWHewSFwJueRprNZNfkncdjud9DrGzvP1qfmgPd7VK66gw" + 
"/dns/coretime-westend-boot-ng.dwellir.com/tcp/30356/p2p/12D3KooWHewSFwJueRprNZNfkncdjud9DrGzvP1qfmgPd7VK66gw", + "/dns/boot.stake.plus/tcp/45333/p2p/12D3KooWEFQapPJXNyZMt892qXZ8YgDuHWt2vhLeRvny98oUjEto", + "/dns/boot.stake.plus/tcp/45334/wss/p2p/12D3KooWEFQapPJXNyZMt892qXZ8YgDuHWt2vhLeRvny98oUjEto", + "/dns/coretime-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWK7Zj1mCPg6h3eMp7v6akJ1o6AocRr59NLusDwBXQgrhw", + "/dns/coretime-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWK7Zj1mCPg6h3eMp7v6akJ1o6AocRr59NLusDwBXQgrhw", + "/dns/ibp-boot-westend-coretime.luckyfriday.io/tcp/443/wss/p2p/12D3KooWBzfzNhvyRVTb9KtNYpkRf26yTRHorBZR2LmYhH5ArCey", + "/dns/ibp-boot-westend-coretime.luckyfriday.io/tcp/30340/p2p/12D3KooWBzfzNhvyRVTb9KtNYpkRf26yTRHorBZR2LmYhH5ArCey", + "/dns/wcore16.rotko.net/tcp/33736/p2p/12D3KooWFmGg7EGzxGDawuJ9EfyEznCrZfMJgGa4eHpMWjcJmg85", + "/dns/wcore16.rotko.net/tcp/35736/wss/p2p/12D3KooWFmGg7EGzxGDawuJ9EfyEznCrZfMJgGa4eHpMWjcJmg85", + "/dns/boot.gatotech.network/tcp/33350/p2p/12D3KooWN6FJDaZvWbtX1pSc6UdHgyF2UZtYxPp3UkXQZa8ko7uS", + "/dns/boot.gatotech.network/tcp/35350/wss/p2p/12D3KooWN6FJDaZvWbtX1pSc6UdHgyF2UZtYxPp3UkXQZa8ko7uS", + "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30358/wss/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf", + "/dns/coretime-westend.bootnodes.polkadotters.com/tcp/30356/p2p/12D3KooWDc9T2vQ8rHvX7hAt9eLWktD9Q89NDTcLm5STkuNbzUGf", + "/dns/coretime-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd", + "/dns/coretime-westend.bootnode.amforc.com/tcp/30013/p2p/12D3KooWG9a9H9An96E3kgXL1sirHta117iuacJXnJRaUywkMiSd" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-kusama.json b/cumulus/parachains/chain-specs/people-kusama.json index 518a7be751509aeeb3973f654b371e3498355949..00a38b675def73cee48e38e084938e158932bbe2 100644 --- a/cumulus/parachains/chain-specs/people-kusama.json +++ b/cumulus/parachains/chain-specs/people-kusama.json @@ -6,7 +6,9 @@ "/dns/kusama-people-connect-0.polkadot.io/tcp/30334/p2p/12D3KooWQaqG5TNmDfRWrtH7tMsN7YeqwVkSfoZT4GkemSzezNi1", "/dns/kusama-people-connect-1.polkadot.io/tcp/30334/p2p/12D3KooWKhYoQH9LdSyvY3SVZY9gFf6ZV1bFh6317TRehUP3r5fm", "/dns/kusama-people-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWQaqG5TNmDfRWrtH7tMsN7YeqwVkSfoZT4GkemSzezNi1", - "/dns/kusama-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWKhYoQH9LdSyvY3SVZY9gFf6ZV1bFh6317TRehUP3r5fm" + "/dns/kusama-people-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWKhYoQH9LdSyvY3SVZY9gFf6ZV1bFh6317TRehUP3r5fm", + "/dns/people-kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA", + "/dns/people-kusama.bootnode.amforc.com/tcp/30004/p2p/12D3KooWPjzgKZe5jdG6TY4gwcFq8QxyyhqsYbQo6N29pwGePWLA" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index 93b8c064113f01c133ef301056ff79ade6dd032a..8bfbb33264150f5d28fa80a93128f9708998db53 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -21,10 +21,12 @@ "/dns/boot.stake.plus/tcp/46334/wss/p2p/12D3KooWLNWUF4H5WE3dy2rPB56gVcR48XY2rHwEaZ6pGTK6HYFi", "/dns/boot.gatotech.network/tcp/33340/p2p/12D3KooWHwURYtEHpexfrZa8k8hVgVi5FTFr4N8HBnn9kPDsWfgA", "/dns/boot.gatotech.network/tcp/35340/wss/p2p/12D3KooWHwURYtEHpexfrZa8k8hVgVi5FTFr4N8HBnn9kPDsWfgA", - 
"/dns/people-westend.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWQrMQFAXxJJJCtVr8nViBR6EDsuT1RyqU3eoCMebRQxTf", - "/dns/people-westend.bootnode.amforc.com/tcp/30346/p2p/12D3KooWQrMQFAXxJJJCtVr8nViBR6EDsuT1RyqU3eoCMebRQxTf", + "/dns/people-westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWE1btdwDhNpApg8BEe2QwJxdVDtz6a6BRhgTeUh9HMhWs", + "/dns/people-westend.bootnode.amforc.com/tcp/30016/p2p/12D3KooWE1btdwDhNpApg8BEe2QwJxdVDtz6a6BRhgTeUh9HMhWs", "/dns/people-westend-bootnode.turboflakes.io/tcp/30650/p2p/12D3KooWQEhmZg3uMkuxVUx3jbsD84zEX4dUKtvHfmCoBWMhybKW", "/dns/people-westend-bootnode.turboflakes.io/tcp/30750/wss/p2p/12D3KooWQEhmZg3uMkuxVUx3jbsD84zEX4dUKtvHfmCoBWMhybKW", + "/dns/wppl16.rotko.net/tcp/33766/p2p/12D3KooWHwUXBUo2WRMUBwPLC2ttVbnEk1KvDyESYAeKcNoCn7WS", + "/dns/wppl16.rotko.net/tcp/35766/wss/p2p/12D3KooWHwUXBUo2WRMUBwPLC2ttVbnEk1KvDyESYAeKcNoCn7WS", "/dns/people-westend-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX", "/dns/people-westend-boot-ng.dwellir.com/tcp/30355/p2p/12D3KooWBdCpCabhgBpLn67LWcXE2JJCCTMhuJHrfDNiTiCCr3KX" ], diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index fa16205d0fd1adee1491cf3abb92a55829b16a44..2b943b6dca55989a891895b4abb3195970978b06 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -13,7 +13,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"], default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index d70fdfeb7095a2dbd6f5fd9a53caeb1272476743..ed9c5c483fa74282306d20c20a6c6313bd940f5a 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -222,7 +222,6 @@ mod tests { ); parameter_types! 
{ - pub const BlockHashCount: u64 = 250; pub BlockLength: limits::BlockLength = limits::BlockLength::max(2 * 1024); pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const MaxReserves: u32 = 50; @@ -240,7 +239,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type BlockLength = BlockLength; type BlockWeights = (); type DbWeight = (); diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 8c44cce7d922dff0a69a4273af7adb69819d2fce..b010d2a296382f2f2d5f8ffd889f4643a17098d0 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -10,7 +10,7 @@ description = "Common resources for integration testing with xcm-emulator" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } paste = "1.0.14" # Substrate diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index ddd6d2d049823f36ed193597ff438f39468a8a55..9abecbecc48a725448cfb17508351d5e76f848de 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } assert_matches = "1.5.0" # Substrate diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 0a2b0f6d45ee0480fe91392576068b1cafcb6cdf..e0f29cd801c346a064a4773efa5754b0e2f399f4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -11,13 +11,14 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } assert_matches = "1.5.0" # Substrate sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } sp-keyring = { path = "../../../../../../../substrate/primitives/keyring", default-features = false } sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } +frame-metadata-hash-extension = { path = "../../../../../../../substrate/frame/metadata-hash-extension" } frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../../../../substrate/frame/system", default-features = false } pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs index aeec9b44dab4ce7648e609e13136c381a5a50695..dc89ef1f7a44e6afc218de787bc47d452ae50fa8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs @@ -17,16 +17,15 @@ use crate::imports::*; -use sp_keyring::AccountKeyring::Alice; -use sp_runtime::{generic, MultiSignature}; +use frame_system::RawOrigin; use xcm_fee_payment_runtime_api::{ - dry_run::runtime_decl_for_xcm_dry_run_api::XcmDryRunApiV1, + dry_run::runtime_decl_for_dry_run_api::DryRunApiV1, fees::runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, }; /// We are able to dry-run and estimate the fees for a teleport between relay and system para. /// Scenario: Alice on Westend relay chain wants to teleport WND to Asset Hub. -/// We want to know the fees using the `XcmDryRunApi` and `XcmPaymentApi`. +/// We want to know the fees using the `DryRunApi` and `XcmPaymentApi`. #[test] fn teleport_relay_system_para_works() { let destination: Location = Parachain(1000).into(); // Asset Hub. @@ -42,6 +41,7 @@ fn teleport_relay_system_para_works() { ::new_ext().execute_with(|| { type Runtime = ::Runtime; type RuntimeCall = ::RuntimeCall; + type OriginCaller = ::OriginCaller; let call = RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { dest: Box::new(VersionedLocation::V4(destination.clone())), @@ -50,9 +50,8 @@ fn teleport_relay_system_para_works() { fee_asset_item: 0, weight_limit: Unlimited, }); - let sender = Alice; // Is the same as `WestendSender`. - let extrinsic = construct_extrinsic_westend(sender, call); - let result = Runtime::dry_run_extrinsic(extrinsic).unwrap(); + let origin = OriginCaller::system(RawOrigin::Signed(WestendSender::get())); + let result = Runtime::dry_run_call(origin, call).unwrap(); assert_eq!(result.forwarded_xcms.len(), 1); let (destination_to_query, messages_to_query) = &result.forwarded_xcms[0]; assert_eq!(messages_to_query.len(), 1); @@ -105,7 +104,7 @@ fn teleport_relay_system_para_works() { /// We are able to dry-run and estimate the fees for a multi-hop XCM journey. /// Scenario: Alice on PenpalA has some WND and wants to send them to PenpalB. -/// We want to know the fees using the `XcmDryRunApi` and `XcmPaymentApi`. +/// We want to know the fees using the `DryRunApi` and `XcmPaymentApi`. #[test] fn multi_hop_works() { let destination = PenpalA::sibling_location_of(PenpalB::para_id()); @@ -142,6 +141,7 @@ fn multi_hop_works() { ::execute_with(|| { type Runtime = ::Runtime; type RuntimeCall = ::RuntimeCall; + type OriginCaller = ::OriginCaller; let call = RuntimeCall::PolkadotXcm(pallet_xcm::Call::transfer_assets { dest: Box::new(VersionedLocation::V4(destination.clone())), @@ -150,9 +150,8 @@ fn multi_hop_works() { fee_asset_item: 0, weight_limit: Unlimited, }); - let sender = Alice; // Same as `PenpalASender`. 
- let extrinsic = construct_extrinsic_penpal(sender, call); - let result = Runtime::dry_run_extrinsic(extrinsic).unwrap(); + let origin = OriginCaller::system(RawOrigin::Signed(PenpalASender::get())); + let result = Runtime::dry_run_call(origin, call).unwrap(); assert_eq!(result.forwarded_xcms.len(), 1); let (destination_to_query, messages_to_query) = &result.forwarded_xcms[0]; assert_eq!(messages_to_query.len(), 1); @@ -304,67 +303,3 @@ fn transfer_assets_para_to_para(test: ParaToParaThroughRelayTest) -> DispatchRes test.args.weight_limit, ) } - -// Constructs the SignedExtra component of an extrinsic for the Westend runtime. -fn construct_extrinsic_westend( - sender: sp_keyring::AccountKeyring, - call: westend_runtime::RuntimeCall, -) -> westend_runtime::UncheckedExtrinsic { - type Runtime = ::Runtime; - let account_id = ::AccountId::from(sender.public()); - let tip = 0; - let extra: westend_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::<Runtime>::new(), - frame_system::CheckSpecVersion::<Runtime>::new(), - frame_system::CheckTxVersion::<Runtime>::new(), - frame_system::CheckGenesis::<Runtime>::new(), - frame_system::CheckMortality::<Runtime>::from(sp_runtime::generic::Era::immortal()), - frame_system::CheckNonce::<Runtime>::from( - frame_system::Pallet::<Runtime>::account(&account_id).nonce, - ), - frame_system::CheckWeight::<Runtime>::new(), - pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip), - ); - let raw_payload = westend_runtime::SignedPayload::new(call, extra).unwrap(); - let signature = raw_payload.using_encoded(|payload| sender.sign(payload)); - let (call, extra, _) = raw_payload.deconstruct(); - westend_runtime::UncheckedExtrinsic::new_signed( - call, - account_id.into(), - MultiSignature::Sr25519(signature), - extra, - ) -} - -// Constructs the SignedExtra component of an extrinsic for the Penpal runtime.
-fn construct_extrinsic_penpal( - sender: sp_keyring::AccountKeyring, - call: penpal_runtime::RuntimeCall, -) -> penpal_runtime::UncheckedExtrinsic { - type Runtime = ::Runtime; - let account_id = ::AccountId::from(sender.public()); - let tip = 0; - let extra: penpal_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::immortal()), - frame_system::CheckNonce::::from( - frame_system::Pallet::::account(&account_id).nonce, - ), - frame_system::CheckWeight::::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::::from(tip, None), - ); - type SignedPayload = - generic::SignedPayload; - let raw_payload = SignedPayload::new(call, extra).unwrap(); - let signature = raw_payload.using_encoded(|payload| sender.sign(payload)); - let (call, extra, _) = raw_payload.deconstruct(); - penpal_runtime::UncheckedExtrinsic::new_signed( - call, - account_id.into(), - MultiSignature::Sr25519(signature), - extra, - ) -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index bbe54c367baf41d183dd60f976a7951d20d8a6f2..bed5af92f6e55b37f7f12518dbe1ed1b290dd5aa 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } hex-literal = "0.4.1" diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index a1d871cdb618fdddfbbbc3e7812d0ec7f7ae7866..78788634e6ff45c10b5fbebc91da4843d8f595e3 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -61,10 +61,13 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable #[test] fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // Initially set only default version on all runtimes - AssetHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - BridgeHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - BridgeHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - AssetHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + + AssetHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubWestend::force_default_xcm_version(Some(older_xcm_version)); + AssetHubWestend::force_default_xcm_version(Some(older_xcm_version)); // prepare data let destination = asset_hub_westend_location(); @@ -87,42 +90,12 @@ fn 
send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ); // set destination version - AssetHubRococo::force_xcm_version(destination.clone(), xcm::v3::prelude::XCM_VERSION); - - // TODO: remove this block, when removing `xcm:v2` - { - // send XCM from AssetHubRococo - fails - AssetHubRococo is set to the default/safe `2` - // version, which does not have the `ExportMessage` instruction. If the default `2` is - // changed to `3`, then this assert can go away" - assert_err!( - send_asset_from_asset_hub_rococo(destination.clone(), (native_token.clone(), amount)), - DispatchError::Module(sp_runtime::ModuleError { - index: 31, - error: [1, 0, 0, 0], - message: Some("SendFailure") - }) - ); - - // set exact version for BridgeHubWestend to `2` without `ExportMessage` instruction - AssetHubRococo::force_xcm_version( - ParentThen(Parachain(BridgeHubRococo::para_id().into()).into()).into(), - xcm::v2::prelude::XCM_VERSION, - ); - // send XCM from AssetHubRococo - fails - `ExportMessage` is not in `2` - assert_err!( - send_asset_from_asset_hub_rococo(destination.clone(), (native_token.clone(), amount)), - DispatchError::Module(sp_runtime::ModuleError { - index: 31, - error: [1, 0, 0, 0], - message: Some("SendFailure") - }) - ); - } + AssetHubRococo::force_xcm_version(destination.clone(), newer_xcm_version); // set version with `ExportMessage` for BridgeHubRococo AssetHubRococo::force_xcm_version( ParentThen(Parachain(BridgeHubRococo::para_id().into()).into()).into(), - xcm::v3::prelude::XCM_VERSION, + newer_xcm_version, ); // send XCM from AssetHubRococo - ok assert_ok!(send_asset_from_asset_hub_rococo( @@ -134,14 +107,11 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { assert_bridge_hub_rococo_message_accepted(false); // set version for remote BridgeHub on BridgeHubRococo - BridgeHubRococo::force_xcm_version( - bridge_hub_westend_location(), - xcm::v3::prelude::XCM_VERSION, - ); + BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), newer_xcm_version); // set version for AssetHubWestend on BridgeHubWestend BridgeHubWestend::force_xcm_version( ParentThen(Parachain(AssetHubWestend::para_id().into()).into()).into(), - xcm::v3::prelude::XCM_VERSION, + newer_xcm_version, ); // send XCM from AssetHubRococo - ok @@ -164,20 +134,4 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ] ); }); - - // TODO: remove this block, when removing `xcm:v2` - { - // set `2` version for remote BridgeHub on BridgeHubRococo, which does not have - // `UniversalOrigin` and `DescendOrigin` - BridgeHubRococo::force_xcm_version( - bridge_hub_westend_location(), - xcm::v2::prelude::XCM_VERSION, - ); - - // send XCM from AssetHubRococo - ok - assert_ok!(send_asset_from_asset_hub_rococo(destination, (native_token, amount))); - // message is not accepted on the local BridgeHub (`DestinationUnsupported`) because we - // cannot add `UniversalOrigin` and `DescendOrigin` - assert_bridge_hub_rococo_message_accepted(false); - } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index b01be5e8dc84b4edf35651d0388baa1462b54c9b..8539df97be9331ea1126a56a70a47ac3a597ce5b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs 
@@ -61,10 +61,13 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable #[test] fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { // Initially set only default version on all runtimes - AssetHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - BridgeHubRococo::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - BridgeHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); - AssetHubWestend::force_default_xcm_version(Some(xcm::v2::prelude::XCM_VERSION)); + let newer_xcm_version = xcm::prelude::XCM_VERSION; + let older_xcm_version = newer_xcm_version - 1; + + AssetHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubRococo::force_default_xcm_version(Some(older_xcm_version)); + BridgeHubWestend::force_default_xcm_version(Some(older_xcm_version)); + AssetHubWestend::force_default_xcm_version(Some(older_xcm_version)); // prepare data let destination = asset_hub_rococo_location(); @@ -87,42 +90,12 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ); // set destination version - AssetHubWestend::force_xcm_version(destination.clone(), xcm::v3::prelude::XCM_VERSION); - - // TODO: remove this block, when removing `xcm:v2` - { - // send XCM from AssetHubRococo - fails - AssetHubRococo is set to the default/safe `2` - // version, which does not have the `ExportMessage` instruction. If the default `2` is - // changed to `3`, then this assert can go away" - assert_err!( - send_asset_from_asset_hub_westend(destination.clone(), (native_token.clone(), amount)), - DispatchError::Module(sp_runtime::ModuleError { - index: 31, - error: [1, 0, 0, 0], - message: Some("SendFailure") - }) - ); - - // set exact version for BridgeHubWestend to `2` without `ExportMessage` instruction - AssetHubWestend::force_xcm_version( - ParentThen(Parachain(BridgeHubWestend::para_id().into()).into()).into(), - xcm::v2::prelude::XCM_VERSION, - ); - // send XCM from AssetHubWestend - fails - `ExportMessage` is not in `2` - assert_err!( - send_asset_from_asset_hub_westend(destination.clone(), (native_token.clone(), amount)), - DispatchError::Module(sp_runtime::ModuleError { - index: 31, - error: [1, 0, 0, 0], - message: Some("SendFailure") - }) - ); - } + AssetHubWestend::force_xcm_version(destination.clone(), newer_xcm_version); // set version with `ExportMessage` for BridgeHubWestend AssetHubWestend::force_xcm_version( ParentThen(Parachain(BridgeHubWestend::para_id().into()).into()).into(), - xcm::v3::prelude::XCM_VERSION, + newer_xcm_version, ); // send XCM from AssetHubWestend - ok assert_ok!(send_asset_from_asset_hub_westend( @@ -134,14 +107,11 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { assert_bridge_hub_westend_message_accepted(false); // set version for remote BridgeHub on BridgeHubWestend - BridgeHubWestend::force_xcm_version( - bridge_hub_rococo_location(), - xcm::v3::prelude::XCM_VERSION, - ); + BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), newer_xcm_version); // set version for AssetHubRococo on BridgeHubRococo BridgeHubRococo::force_xcm_version( ParentThen(Parachain(AssetHubRococo::para_id().into()).into()).into(), - xcm::v3::prelude::XCM_VERSION, + newer_xcm_version, ); // send XCM from AssetHubWestend - ok @@ -164,20 +134,4 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() { ] ); }); - - // TODO: remove this block, when removing `xcm:v2` - { - // set `2` version for remote 
BridgeHub on BridgeHubRococo, which does not have - // `UniversalOrigin` and `DescendOrigin` - BridgeHubWestend::force_xcm_version( - bridge_hub_rococo_location(), - xcm::v2::prelude::XCM_VERSION, - ); - - // send XCM from AssetHubWestend - ok - assert_ok!(send_asset_from_asset_hub_westend(destination, (native_token, amount))); - // message is not accepted on the local BridgeHub (`DestinationUnsupported`) because we - // cannot add `UniversalOrigin` and `DescendOrigin` - assert_bridge_hub_westend_message_accepted(false); - } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml index d1dbef9fc4156c08b2c52f54a3759de763c959e0..297f68de6218317017a36c9535ad581aa86e2883 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } assert_matches = "1.5.0" # Substrate diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml index 1570aa7662fcbbfe1b018251d2700ce2d653d165..29a939951e597a939de5ed9d244193ac8455e4d5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/Cargo.toml @@ -8,7 +8,7 @@ description = "People Rococo runtime integration tests with xcm-emulator" publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } # Substrate sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index bc093dc0de6356d7cd98d20d12d63748ed248ff5..6eab6f52aa72172ecc19fa891109fc9df859ec3c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -8,7 +8,7 @@ description = "People Westend runtime integration tests with xcm-emulator" publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } # Substrate sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index 207259bee52ceb68ba45b1237100cfe5367201f7..92e0a54631394154634900829c708431b2931b67 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", 
default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/cumulus/parachains/pallets/collective-content/src/mock.rs b/cumulus/parachains/pallets/collective-content/src/mock.rs index 5cb0126425e59a67332d880cc8b925f50b1753c2..91f9c29933d91dd9be6f604c28da869317a5003d 100644 --- a/cumulus/parachains/pallets/collective-content/src/mock.rs +++ b/cumulus/parachains/pallets/collective-content/src/mock.rs @@ -18,9 +18,7 @@ pub use crate as pallet_collective_content; use crate::WeightInfo; use frame_support::{ - derive_impl, ord_parameter_types, parameter_types, - traits::{ConstU32, ConstU64}, - weights::Weight, + derive_impl, ord_parameter_types, parameter_types, traits::ConstU32, weights::Weight, }; use frame_system::EnsureSignedBy; use sp_runtime::{traits::IdentityLookup, BuildStorage}; @@ -70,7 +68,6 @@ impl frame_system::Config for Test { type AccountId = AccountId; type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 17981d238fd1880e81ce04ebbe5b2a045ba27e41..01ee12bf4e719a9fde63fa4adb43bc6ff4e5c3ea 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -10,7 +10,7 @@ description = "Pallet to store the parachain ID" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 15169b08b9108998d850f6333e458d73580d1b2a..f51946e9ebd5d2c5fd471683a217fe2dbe8f2f61 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -10,7 +10,7 @@ description = "Ping Pallet for Cumulus XCM/UMP testing." 
workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 888193c5c6ea7e02e879428b920cd1ed70c15e54..a880730ddacfdde9fcbda95fefb457af031b3da7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } @@ -18,6 +18,7 @@ scale-info = { version = "2.11.1", default-features = false, features = ["derive # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../../substrate/frame/system", default-features = false } frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } @@ -37,7 +38,6 @@ pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } @@ -73,7 +73,7 @@ xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-paym # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } 
cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } @@ -124,7 +124,6 @@ runtime-benchmarks = [ "pallet-nft-fractionalization/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", - "pallet-state-trie-migration/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -163,7 +162,6 @@ try-runtime = [ "pallet-nfts/try-runtime", "pallet-proxy/try-runtime", "pallet-session/try-runtime", - "pallet-state-trie-migration/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", "pallet-uniques/try-runtime", @@ -192,6 +190,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -213,7 +212,6 @@ std = [ "pallet-nfts/std", "pallet-proxy/std", "pallet-session/std", - "pallet-state-trie-migration/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", @@ -252,7 +250,10 @@ std = [ "xcm/std", ] +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller, like logging for example. -on-chain-release-build = ["sp-api/disable-logging"] +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs index 239ccac19ec7778039fb1ee56f4e772b3ddd3711..99e510e22695da8dff3867e84125c68b113f6973 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs @@ -13,10 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
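The build.rs hunk that continues just below (and its asset-hub-westend counterpart later in this diff) switches between two entry points depending on the new `metadata-hash` feature. A minimal sketch of the same pattern for a hypothetical runtime; the token symbol "UNIT" and its 12 decimals are placeholders, not values taken from this diff:

```rust
// Hypothetical build.rs following the feature-gated pattern used in this diff.

// Without `metadata-hash`, build the wasm exactly as before.
#[cfg(all(not(feature = "metadata-hash"), feature = "std"))]
fn main() {
    substrate_wasm_builder::WasmBuilder::build_using_defaults();
}

// With `metadata-hash`, bake a hash of the runtime metadata into the wasm;
// the chain's token symbol and decimals are inputs to that hash.
#[cfg(all(feature = "metadata-hash", feature = "std"))]
fn main() {
    substrate_wasm_builder::WasmBuilder::init_with_defaults()
        .enable_metadata_hash("UNIT", 12)
        .build();
}

// The build script is a no-op when the crate is compiled to wasm itself.
#[cfg(not(feature = "std"))]
fn main() {}
```

The runtimes pair this with the `frame_metadata_hash_extension::CheckMetadataHash` signed extension added to `SignedExtra` elsewhere in this diff, so signers can commit to the metadata they signed against.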
-#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { substrate_wasm_builder::WasmBuilder::build_using_defaults(); } +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("ROC", 12) + .build(); +} + #[cfg(not(feature = "std"))] fn main() {} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index f81a107fae0537095ece60c58592ca5e2b7068f7..1fc67ba0c305669c1aed2ef5ecb637c92fddc80b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -93,15 +93,15 @@ use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; #[cfg(feature = "runtime-benchmarks")] use xcm::latest::prelude::{ - Asset, Fungible, Here, InteriorLocation, Junction, Junction::*, Location, NetworkId, - NonFungible, Parent, ParentThen, Response, XCM_VERSION, + Asset, Assets as XcmAssets, Fungible, Here, InteriorLocation, Junction, Junction::*, Location, + NetworkId, NonFungible, Parent, ParentThen, Response, XCM_VERSION, }; use xcm::{ latest::prelude::{AssetId, BodyId}, - IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, + VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -118,10 +118,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 15, + transaction_version: 16, state_version: 1, }; @@ -670,7 +670,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -700,12 +700,21 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = xcm_config::XcmOriginToTransactDispatchOrigin; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! 
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } @@ -945,8 +954,6 @@ construct_runtime!( PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, - StateTrieMigration: pallet_state_trie_migration = 70, - // TODO: the pallet instance should be removed once all pools have migrated // to the new account IDs. AssetConversionMigration: pallet_asset_conversion_ops = 200, @@ -972,6 +979,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = @@ -982,6 +990,7 @@ pub type Migrations = ( InitStorageVersions, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, pallet_collator_selection::migration::v2::MigrationToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, @@ -1286,15 +1295,8 @@ impl_runtime_apis! { impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable = vec![ - // native token - VersionedAssetId::from(AssetId(xcm_config::TokenLocation::get())) - ]; - - Ok(acceptable - .into_iter() - .filter_map(|asset| asset.into_version(xcm_version).ok()) - .collect()) + let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { @@ -1323,67 +1325,13 @@ impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm_executor::RecordXcm; - use xcm::prelude::*; - - pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; - let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) } - fn dry_run_xcm(origin_location: VersionedLocation, program: VersionedXcm) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm::prelude::*; - - let origin_location: Location = origin_location.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Location version conversion failed with error: {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let program: Xcm = program.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Xcm version conversion failed with error {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let mut hash = program.using_encoded(sp_core::hashing::blake2_256); - let result = xcm_executor::XcmExecutor::::prepare_and_execute( - origin_location, - program, - &mut hash, - Weight::MAX, // Max limit available for execution. - Weight::zero(), - ); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(XcmDryRunEffects { - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) } } @@ -1526,7 +1474,7 @@ impl_runtime_apis! { } fn set_up_complex_asset_transfer( - ) -> Option<(xcm::v4::Assets, u32, Location, Box)> { + ) -> Option<(XcmAssets, u32, Location, Box)> { // Transfer to Relay some local AH asset (local-reserve-transfer) while paying // fees using teleported native token. // (We don't care that Relay doesn't accept incoming unknown AH local asset) @@ -1557,7 +1505,7 @@ impl_runtime_apis! { ); let transfer_asset: Asset = (asset_location, asset_amount).into(); - let assets: xcm::v4::Assets = vec![fee_asset.clone(), transfer_asset].into(); + let assets: XcmAssets = vec![fee_asset.clone(), transfer_asset].into(); let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; // verify transferred successfully @@ -1625,7 +1573,7 @@ impl_runtime_apis! { fn valid_destination() -> Result { Ok(TokenLocation::get()) } - fn worst_case_holding(depositable_count: u32) -> xcm::v4::Assets { + fn worst_case_holding(depositable_count: u32) -> XcmAssets { // A mix of fungible, non-fungible, and concrete assets. 
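The hunk above replaces the runtime's open-coded `dry_run_extrinsic` implementation with a thin delegation to `pallet_xcm`'s `dry_run_call`, matching the test changes earlier in this diff. A minimal sketch of how a test now drives the API, reusing names from the emulated tests above; the exact shape of the returned effects is illustrative:

```rust
// Sketch: dry-running a call via the new `DryRunApi` inside an emulated-test
// externalities block. No nonce, signature, or tip handling is needed any
// more, because the API takes a dispatch origin plus a bare call instead of
// a signed extrinsic.
use frame_system::RawOrigin;
use xcm_fee_payment_runtime_api::dry_run::runtime_decl_for_dry_run_api::DryRunApiV1;

fn dry_run_transfer(call: RuntimeCall) {
    let origin = OriginCaller::system(RawOrigin::Signed(WestendSender::get()));
    let effects = Runtime::dry_run_call(origin, call).expect("dry run should succeed");
    // The effects carry the XCMs that would be forwarded if the call were
    // dispatched for real, which is what the fee-estimation tests assert on.
    assert_eq!(effects.forwarded_xcms.len(), 1);
}
```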
let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; let holding_fungibles = holding_non_fungibles.saturating_sub(2); // -2 for two `iter::once` below @@ -1686,7 +1634,7 @@ impl_runtime_apis! { (0u64, Response::Version(Default::default())) } - fn worst_case_asset_exchange() -> Result<(xcm::v4::Assets, xcm::v4::Assets), BenchmarkError> { + fn worst_case_asset_exchange() -> Result<(XcmAssets, XcmAssets), BenchmarkError> { Err(BenchmarkError::Skip) } @@ -1705,9 +1653,9 @@ impl_runtime_apis! { Ok(TokenLocation::get()) } - fn claimable_asset() -> Result<(Location, Location, xcm::v4::Assets), BenchmarkError> { + fn claimable_asset() -> Result<(Location, Location, XcmAssets), BenchmarkError> { let origin = TokenLocation::get(); - let assets: xcm::v4::Assets = (TokenLocation::get(), 1_000 * UNITS).into(); + let assets: XcmAssets = (TokenLocation::get(), 1_000 * UNITS).into(); let ticket = Location { parents: 0, interior: Here }; Ok((origin, ticket, assets)) } @@ -1785,47 +1733,6 @@ cumulus_pallet_parachain_system::register_validate_block! { BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::<Runtime, Executive>, } -parameter_types! { - // The deposit configuration for the signed migration. Especially if you want to allow any signed account to do the migration (see `SignedFilter`), these deposits should be high. - pub const MigrationSignedDepositPerItem: Balance = CENTS; - pub const MigrationSignedDepositBase: Balance = 2_000 * CENTS; - pub const MigrationMaxKeyLen: u32 = 512; -} - -impl pallet_state_trie_migration::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type RuntimeHoldReason = RuntimeHoldReason; - type SignedDepositPerItem = MigrationSignedDepositPerItem; - type SignedDepositBase = MigrationSignedDepositBase; - // An origin that can control the whole pallet: should be Root, or a part of your council. - type ControlOrigin = frame_system::EnsureSignedBy; - // A specific account for the migration that can trigger the signed migrations. - type SignedFilter = frame_system::EnsureSignedBy; - - // Replace this with a weight based on your runtime. - type WeightInfo = pallet_state_trie_migration::weights::SubstrateWeight; - - type MaxKeyLen = MigrationMaxKeyLen; -} - -frame_support::ord_parameter_types!
{ - pub const MigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); - pub const RootMigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); -} - -#[test] -fn ensure_key_ss58() { - use frame_support::traits::SortedMembers; - use sp_core::crypto::Ss58Codec; - let acc = - AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); - assert_eq!(acc, MigController::sorted_members()[0]); - let acc = - AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); - assert_eq!(acc, RootMigController::sorted_members()[0]); -} - #[cfg(test)] mod tests { use super::*; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index bacc9c1b7c29eba47f0ddc1140771407e7d1ab3c..953f6a8b4009a5ab86cc7d1177dfbb993d291c8d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } @@ -18,6 +18,7 @@ scale-info = { version = "2.11.1", default-features = false, features = ["derive # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../../../../substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../../substrate/frame/system", default-features = false } frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } @@ -72,7 +73,7 @@ xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-paym # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } @@ -189,6 +190,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", 
@@ -247,7 +249,10 @@ std = [ "xcm/std", ] +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller, like logging for example. -on-chain-release-build = ["sp-api/disable-logging"] +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs index 239ccac19ec7778039fb1ee56f4e772b3ddd3711..cf9664aeb2f3e4ae8cfd74c24c833ba7021a3432 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs @@ -13,10 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { substrate_wasm_builder::WasmBuilder::build_using_defaults(); } +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("WND", 12) + .build(); +} + #[cfg(not(feature = "std"))] fn main() {} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index b5c3ed5053c4b77e1ecf0adeb8c99c5beecb81e0..d9249cdfc482777f4fd6f565dc14ab371a2923df 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -85,22 +85,19 @@ pub use sp_runtime::BuildStorage; use assets_common::{foreign_creators::ForeignCreators, matching::FromSiblingParachain}; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::{ - prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}, - IntoVersion, -}; +use xcm::prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; // We exclude `Assets` since it's the name of a pallet use xcm::latest::prelude::AssetId; #[cfg(feature = "runtime-benchmarks")] use xcm::latest::prelude::{ - Asset, Fungible, Here, InteriorLocation, Junction, Junction::*, Location, NetworkId, - NonFungible, Parent, ParentThen, Response, XCM_VERSION, + Asset, Assets as XcmAssets, Fungible, Here, InteriorLocation, Junction, Junction::*, Location, + NetworkId, NonFungible, Parent, ParentThen, Response, XCM_VERSION, }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -120,10 +117,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 15, + transaction_version: 16, state_version: 1, }; @@ -669,7 +666,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = 
sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -697,13 +694,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type VersionWrapper = PolkadotXcm; // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } @@ -965,6 +971,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = @@ -984,6 +991,7 @@ pub type Migrations = ( DeleteUndecodableStorage, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -1320,15 +1328,8 @@ impl_runtime_apis! { impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable = vec![ - // native token - VersionedAssetId::from(AssetId(xcm_config::WestendLocation::get())) - ]; - - Ok(acceptable - .into_iter() - .filter_map(|asset| asset.into_version(xcm_version).ok()) - .collect()) + let acceptable_assets = vec![AssetId(xcm_config::WestendLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { @@ -1357,67 +1358,13 @@ impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm_executor::RecordXcm; - use xcm::prelude::*; - - pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; - let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call) } - fn dry_run_xcm(origin_location: VersionedLocation, program: VersionedXcm) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm::prelude::*; - - let origin_location: Location = origin_location.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Location version conversion failed with error: {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let program: Xcm = program.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Xcm version conversion failed with error {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let mut hash = program.using_encoded(sp_core::hashing::blake2_256); - let result = xcm_executor::XcmExecutor::::prepare_and_execute( - origin_location, - program, - &mut hash, - Weight::MAX, // Max limit available for execution. - Weight::zero(), - ); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(XcmDryRunEffects { - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) } } @@ -1618,7 +1565,7 @@ impl_runtime_apis! { } fn set_up_complex_asset_transfer( - ) -> Option<(xcm::v4::Assets, u32, Location, Box)> { + ) -> Option<(XcmAssets, u32, Location, Box)> { // Transfer to Relay some local AH asset (local-reserve-transfer) while paying // fees using teleported native token. // (We don't care that Relay doesn't accept incoming unknown AH local asset) @@ -1649,7 +1596,7 @@ impl_runtime_apis! { ); let transfer_asset: Asset = (asset_location, asset_amount).into(); - let assets: xcm::v4::Assets = vec![fee_asset.clone(), transfer_asset].into(); + let assets: XcmAssets = vec![fee_asset.clone(), transfer_asset].into(); let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; // verify transferred successfully @@ -1722,7 +1669,7 @@ impl_runtime_apis! { fn valid_destination() -> Result { Ok(WestendLocation::get()) } - fn worst_case_holding(depositable_count: u32) -> xcm::v4::Assets { + fn worst_case_holding(depositable_count: u32) -> XcmAssets { // A mix of fungible, non-fungible, and concrete assets. 
let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; let holding_fungibles = holding_non_fungibles - 2; // -2 for two `iter::once` below @@ -1783,7 +1730,7 @@ impl_runtime_apis! { (0u64, Response::Version(Default::default())) } - fn worst_case_asset_exchange() -> Result<(xcm::v4::Assets, xcm::v4::Assets), BenchmarkError> { + fn worst_case_asset_exchange() -> Result<(XcmAssets, XcmAssets), BenchmarkError> { Err(BenchmarkError::Skip) } @@ -1802,9 +1749,9 @@ impl_runtime_apis! { Ok(WestendLocation::get()) } - fn claimable_asset() -> Result<(Location, Location, xcm::v4::Assets), BenchmarkError> { + fn claimable_asset() -> Result<(Location, Location, XcmAssets), BenchmarkError> { let origin = WestendLocation::get(); - let assets: xcm::v4::Assets = (AssetId(WestendLocation::get()), 1_000 * UNITS).into(); + let assets: XcmAssets = (AssetId(WestendLocation::get()), 1_000 * UNITS).into(); let ticket = Location { parents: 0, interior: Here }; Ok((origin, ticket, assets)) } diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index 12dfd9da1fffbc6d1be0edc40ab6de3ebcc78e20..4664e0cb9a7f817ed419936dfa9e5d0ac52f599e 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } impl-trait-for-tuples = "0.2.2" diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index 883c93c97b4de6774e86ee83b84d246dc1427f7f..af5b4a64680724abcbf0d659ba8a7b2b45cd0e80 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } # Substrate frame-support = { path = "../../../../../substrate/frame/support", default-features = false } @@ -24,7 +24,7 @@ sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-fea sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index
574406ab305f33d4266dca1a3229b92a80ab9e8c..253a21f5d0baba1218de3469766a7eb065cb8f3d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -13,7 +13,7 @@ workspace = true substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } hex-literal = { version = "0.4.1" } @@ -66,12 +66,11 @@ polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", def xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = [ - "parameterized-consensus-hook", -] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = [ @@ -222,6 +221,7 @@ std = [ "tuplex/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -264,6 +264,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 2a7f46feee6952d88ba67821087ecfa6b4f9bd51..e7868bcbc78d0af4b56f13a5fe844ccc2a368813 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -70,7 +70,7 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, Get, TransformOrigin}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -97,7 +97,11 @@ pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use rococo_runtime_constants::system_parachain::{ASSET_HUB_ID, BRIDGE_HUB_ID}; -use xcm::latest::prelude::*; +use xcm::prelude::*; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -148,8 +152,9 @@ pub type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2, 
pallet_multisig::migrations::v1::MigrateToV1<Runtime>, InitStorageVersions, - cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>, // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>, snowbridge_pallet_system::migration::v0::InitializeOnUpgrade< Runtime, ConstU32<BRIDGE_HUB_ID>, ConstU32<ASSET_HUB_ID>, >, @@ -209,7 +214,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -389,7 +394,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>; type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -417,13 +422,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type VersionWrapper = PolkadotXcm; // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot<AccountId>; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo<Runtime>; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } @@ -952,6 +966,48 @@ impl_runtime_apis!
{ } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { + match asset.try_as::<AssetId>() { + Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, XcmPaymentApiError> { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::<Runtime, xcm_config::XcmRouter, OriginCaller, RuntimeCall>(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm<RuntimeCall>) -> Result<XcmDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::<Runtime, xcm_config::XcmRouter, RuntimeCall, xcm_config::XcmConfig>(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime { fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs index 8c2435599f59780be56dcaa5060addee4c5c1d15..11e1439a1f6df2423421faf85ce6dd75c37e045b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-12-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-05-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,32 +48,52 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_grandpa`.
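// ----------------------------------------------------------------------------
// Editor's aside (not part of the patch): every runtime touched by this diff
// gains the same `XcmPaymentApi`/`DryRunApi` implementation pair, differing
// only in the native-asset location. A minimal sketch of how the two pallet
// helpers used in the hunks above could be combined to price an XCM program in
// the native token; `estimate_native_fee` itself is hypothetical and assumes
// the bridge-hub-rococo types from the hunk:
fn estimate_native_fee(message: VersionedXcm<()>) -> Result<u128, XcmPaymentApiError> {
    // Weigh the program, exactly as `query_xcm_weight` above does...
    let weight = PolkadotXcm::query_xcm_weight(message)?;
    // ...then convert the weight into a fee, as `query_weight_to_asset_fee`
    // does for the native `TokenLocation` asset.
    Ok(WeightToFee::weight_to_fee(&weight))
}
// ----------------------------------------------------------------------------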
pub struct WeightInfo<T>(PhantomData<T>); impl<T: frame_system::Config> pallet_bridge_grandpa::WeightInfo for WeightInfo<T> { + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendGrandpa::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeWestendGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:1 w:1) /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::FreeHeadersRemaining` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. + /// The range of component `p` is `[1, 168]`. /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { + fn submit_finality_proof(p: u32, _v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `335 + p * (60 ±0)` + // Measured: `438 + p * (60 ±0)` // Estimated: `51735` - // Minimum execution time: 310_124_000 picoseconds. - Weight::from_parts(18_294_977, 0) + // Minimum execution time: 300_829_000 picoseconds.
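// Editor's aside (not part of the patch): the regenerated numbers below form a
// linear model in the precommit count `p`, roughly
//   ref_time(p) ~ 321_573_000 + 48_613_160 * p  (picoseconds).
// A hypothetical helper reproducing that model; at the new maximum `p = 168`
// it gives about 8_488_583_880 ps, i.e. roughly 8.5 ms of ref_time per proof:
fn submit_finality_proof_ref_time_ps(p: u64) -> u64 {
    321_573_000 + 48_613_160 * p
}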
+ Weight::from_parts(321_573_000, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 5_665 - .saturating_add(Weight::from_parts(55_380_719, 0).saturating_mul(p.into())) - // Standard Error: 94_494 - .saturating_add(Weight::from_parts(2_765_959, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 25_917 + .saturating_add(Weight::from_parts(48_613_160, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:0 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + fn force_set_pallet_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `452` + // Estimated: `51735` + // Minimum execution time: 101_007_000 picoseconds. + Weight::from_parts(129_904_000, 0) + .saturating_add(Weight::from_parts(0, 51735)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(6)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index a7241cc6d10c45a292c3e0ffc0a8044e9b2fb706..0f16d629fc26051e53d3cabef19290fb4e8cff5e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -13,7 +13,7 @@ workspace = true substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } @@ -62,10 +62,11 @@ polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", def xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } 
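# Editor's aside (not part of the patch): each runtime that gains the
# `xcm-fee-payment-runtime-api` dependency in this diff also adds the matching
# "xcm-fee-payment-runtime-api/std" and "xcm-fee-payment-runtime-api/runtime-benchmarks"
# entries to its feature lists (see the hunks above and below); omitting either
# entry would break the runtime's no_std or benchmarking builds.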
-cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } @@ -185,6 +186,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -219,6 +221,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 4c467010c7c874bafe16697cd741a92bf13eb0c5..e26d490f9ac1115b465d707980afc31cb5dfd402 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -64,7 +64,7 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, Get, TransformOrigin}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -75,13 +75,18 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; pub use sp_runtime::{MultiAddress, Perbill, Permill}; use xcm_config::{XcmOriginToTransactDispatchOrigin, XcmRouter}; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; + use bp_runtime::HeaderId; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::latest::prelude::*; +use xcm::prelude::*; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -129,6 +134,7 @@ pub type Migrations = ( InitStorageVersions, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -183,7 +189,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -352,7 +358,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>; type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -379,13 +385,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type 
XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot<AccountId>; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo<Runtime>; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } @@ -701,6 +716,48 @@ impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::WestendLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { + match asset.try_as::<AssetId>() { + Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, XcmPaymentApiError> { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::<Runtime, xcm_config::XcmRouter, OriginCaller, RuntimeCall>(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm<RuntimeCall>) -> Result<XcmDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::<Runtime, xcm_config::XcmRouter, RuntimeCall, xcm_config::XcmConfig>(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime { fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs index e87ed668dfc7acb1a92a7535d92392a272370277..e98be6ba39be74c3532290ea4a7b483640466c10 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_bridge_grandpa.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_bridge_grandpa` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-12-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-05-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-itmxxexx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,32 +48,54 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_grandpa`. pub struct WeightInfo<T>(PhantomData<T>); impl<T: frame_system::Config> pallet_bridge_grandpa::WeightInfo for WeightInfo<T> { + /// Storage: `BridgeRococoGrandpa::CurrentAuthoritySet` (r:1 w:0) + /// Proof: `BridgeRococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::PalletOperatingMode` (r:1 w:0) /// Proof: `BridgeRococoGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::BestFinalized` (r:1 w:1) /// Proof: `BridgeRococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::FreeHeadersRemaining` (r:1 w:0) + /// Proof: `BridgeRococoGrandpa::FreeHeadersRemaining` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::ImportedHashesPointer` (r:1 w:1) /// Proof: `BridgeRococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::ImportedHashes` (r:1 w:1) /// Proof: `BridgeRococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:0 w:2) /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. + /// The range of component `p` is `[1, 168]`. /// The range of component `v` is `[50, 100]`. fn submit_finality_proof(p: u32, v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `231 + p * (60 ±0)` + // Measured: `268 + p * (60 ±0)` // Estimated: `51735` - // Minimum execution time: 303_549_000 picoseconds. - Weight::from_parts(306_232_000, 0) + // Minimum execution time: 291_721_000 picoseconds.
+ Weight::from_parts(37_495_589, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 4_641 - .saturating_add(Weight::from_parts(55_196_301, 0).saturating_mul(p.into())) - // Standard Error: 35_813 - .saturating_add(Weight::from_parts(70_584, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 22_170 + .saturating_add(Weight::from_parts(45_403_072, 0).saturating_mul(p.into())) + // Standard Error: 73_977 + .saturating_add(Weight::from_parts(2_130_216, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } + /// Storage: `BridgeRococoGrandpa::CurrentAuthoritySet` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::ImportedHashes` (r:1 w:1) + /// Proof: `BridgeRococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::BestFinalized` (r:0 w:1) + /// Proof: `BridgeRococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:0 w:2) + /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + fn force_set_pallet_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `51735` + // Minimum execution time: 77_426_000 picoseconds. 
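// Editor's aside (not part of the patch): the reads(3)/writes(6) charged below
// for the new `force_set_pallet_state` follow from the storage comments above:
// reads = CurrentAuthoritySet + ImportedHashesPointer + ImportedHashes = 3;
// writes = those three plus BestFinalized (w:1) and ImportedHeaders (w:2),
// i.e. 3 + 1 + 2 = 6.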
+ Weight::from_parts(90_926_000, 0) + .saturating_add(Weight::from_parts(0, 51735)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(6)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml index 2f5f783ce48fb19e1087e24e712fc4dc52b607da..aece34613e6a6c4dfe100e84b0317a96ad7ee97c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/common/Cargo.toml @@ -7,7 +7,7 @@ description = "Bridge hub common utilities" license = "Apache-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../../../substrate/frame/support", default-features = false } sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 5f2a6e050d83c3db662f8ff4896d32dc8a28fde3..80f0114cc4cadb6cd7871454eead80a0988d5e7e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } impl-trait-for-tuples = "0.2" log = { workspace = true } @@ -29,7 +29,7 @@ pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default- # Cumulus asset-test-utils = { path = "../../assets/test-utils" } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } parachains-common = { path = "../../../common", default-features = false } parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 8e7aa6d346420e0e65c169b539720d6264ab7b80..fe4de3114be0d0b92643946f0deafe030a8b88f8 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -10,7 +10,7 @@ description = "Westend Collectives Parachain Runtime" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } @@ -66,12 +66,13 @@ polkadot-runtime-common 
= { path = "../../../../../polkadot/runtime/common", def xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } @@ -130,6 +131,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", @@ -236,6 +238,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs index 0c9f428c1396bede97a67002d0554d98d62dbc39..ceef6de6b7435e453e9622a3d318669e1a3a9307 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs @@ -220,6 +220,7 @@ impl pallet_core_fellowship::Config for Runtime { type ApproveOrigin = PromoteOrigin; type PromoteOrigin = PromoteOrigin; type EvidenceSize = ConstU32<65536>; + type MaxRank = ConstU32<9>; } pub type AmbassadorSalaryInstance = pallet_salary::Instance2; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs index 94765287637b57d47c588d9a4359666d1b54f509..6a4a182079671f297d18bc40de0bc557911735cd 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -210,6 +210,7 @@ impl pallet_core_fellowship::Config for Runtime { EnsureCanPromoteTo, >; type EvidenceSize = ConstU32<65536>; + type MaxRank = ConstU32<9>; } pub type FellowshipSalaryInstance = pallet_salary::Instance1; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 35b505d9da6a786392af8628b09fbaf299d54609..5fce8e5095410b1e07c363c0731fc9b18e9ca076 100644 --- 
a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -44,8 +44,9 @@ pub mod xcm_config; pub mod fellowship; pub use ambassador::pallet_ambassador_origins; +use ambassador::AmbassadorCoreInstance; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use fellowship::{pallet_fellowship_origins, Fellows}; +use fellowship::{pallet_fellowship_origins, Fellows, FellowshipCoreInstance}; use impls::{AllianceProposalProvider, EqualOrGreatestRootCmp}; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; @@ -72,7 +73,7 @@ use frame_support::{ fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, EitherOfDiverse, InstanceFilter, LinearStoragePrice, TransformOrigin, }, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -102,7 +103,11 @@ use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; use polkadot_runtime_common::{ impls::VersionedLocatableAsset, BlockHashCount, SlowAdjustingFeeUpdate, }; -use xcm::latest::{prelude::*, BodyId}; +use xcm::prelude::*; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; @@ -117,7 +122,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, @@ -420,7 +425,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>; type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -448,13 +453,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type VersionWrapper = PolkadotXcm; // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EitherOfDiverse<EnsureRoot<AccountId>, Fellows>; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo<Runtime>; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types!
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } @@ -727,8 +741,13 @@ type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2<Runtime>, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, + // unreleased + pallet_core_fellowship::migration::MigrateV0ToV1<Runtime, FellowshipCoreInstance>, + // unreleased + pallet_core_fellowship::migration::MigrateV0ToV1<Runtime, AmbassadorCoreInstance>, ); /// Executive: handles dispatch to the various modules. @@ -921,6 +940,48 @@ impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::WndLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { + match asset.try_as::<AssetId>() { + Ok(asset_id) if asset_id.0 == xcm_config::WndLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, XcmPaymentApiError> { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::<Runtime, xcm_config::XcmRouter, OriginCaller, RuntimeCall>(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm<RuntimeCall>) -> Result<XcmDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::<Runtime, xcm_config::XcmRouter, RuntimeCall, xcm_config::XcmConfig>(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime { fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index 74c5b5f8115958d0894af621e6c7307e3f67b9b7..e43a69482c79f1d67e30d6c65ea67840a7893816 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } @@ -63,11 +63,12 @@ rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/cons xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", 
default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } @@ -140,6 +141,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -169,6 +171,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index df39cd811d1fd2751ea9ed9a4e277b2e536f0965..2d346e66c6c3b3f4a7e72072875646d9095a821a 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -51,7 +51,7 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU16, ConstU32, ConstU64, ConstU8}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::limits::{BlockLength, BlockWeights}; @@ -62,7 +62,12 @@ use parachains_common::{ }; pub use parachains_common::{AuraId, Balance}; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; +use xcm::prelude::*; use xcm_config::CollatorSelectionUpdateOrigin; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -106,6 +111,7 @@ pub type Migrations = ( pallet_contracts::Migration<Runtime>, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -136,7 +142,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 7, @@ -318,7 +324,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>;
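// ----------------------------------------------------------------------------
// Editor's aside (not part of the patch): the `103 * 1024` used for `HeapSize`
// and `MaxPageSize` throughout this diff is sized to clear the 102400-byte
// HRMP max message size cited in the in-code comments. A hypothetical
// compile-time guard for that invariant:
const HRMP_MAX_MESSAGE_SIZE: u32 = 102_400;
const MAX_PAGE_SIZE: u32 = 103 * 1024; // = 105_472
const _: () = assert!(MAX_PAGE_SIZE >= HRMP_MAX_MESSAGE_SIZE);
// ----------------------------------------------------------------------------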
type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -584,6 +590,48 @@ impl_runtime_apis! { } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { + match asset.try_as::<AssetId>() { + Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, XcmPaymentApiError> { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::<Runtime, xcm_config::XcmRouter, OriginCaller, RuntimeCall>(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm<RuntimeCall>) -> Result<XcmDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::<Runtime, xcm_config::XcmRouter, RuntimeCall, xcm_config::XcmConfig>(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime { fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 8c33710198605f05ec2f3f71c413e03190a3616c..ef5ded1731d0d9f3ff60e1e8cb71cd1fe18ca81b 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -284,7 +284,11 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { cumulus_primitives_core::ParaId, parachains_common::message_queue::ParaIdToSibling, >; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EitherOfDiverse< EnsureRoot<AccountId>, EnsureXcm<IsMajorityOfBody<RelayLocation, ExecutiveBody>>, @@ -294,6 +298,11 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + parameter_types!
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index ee9f5e87ec876d73d80eb088806f5b36f12079db..dc99fe331f78671b0b43842e0762db8bd96a840b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -13,7 +13,7 @@ workspace = true substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } hex-literal = "0.4.1" log = { workspace = true } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } @@ -62,10 +62,11 @@ rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/cons xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } @@ -138,6 +139,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -167,6 +169,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs index 742dd50f6fa1f421d6ce4abf221e05f6902cc2ae..ec3a4f31202fd5f5333a1057bb06ca5dde247619 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs @@ -232,5 +232,5 @@ impl pallet_broker::Config for Runtime { type WeightInfo = weights::pallet_broker::WeightInfo<Runtime>; type PalletId = BrokerPalletId; type AdminOrigin = EnsureRoot<AccountId>; - type PriceAdapter = pallet_broker::Linear; + type PriceAdapter = pallet_broker::CenterTargetPrice<Balance>; } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index ab925b04eb7c17cd63bdc1a193c3fd9fabe3d2b5..b3eaf3d127a2f6f6acb8b161f1fcbd8bdb479d36 100644 --- 
a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -41,7 +41,7 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -72,10 +72,14 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -use xcm::latest::prelude::*; +use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, RocRelayLocation, XcmOriginToTransactDispatchOrigin, }; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -110,7 +114,9 @@ pub type UncheckedExtrinsic = pub type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2<Runtime>, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4<Runtime>, + cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>, pallet_broker::migration::MigrateV0ToV1<Runtime>, + pallet_broker::migration::MigrateV1ToV2<Runtime>, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -136,7 +142,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-rococo"), impl_name: create_runtime_str!("coretime-rococo"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -301,7 +307,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>; type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -341,13 +347,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo<Runtime>; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; @@ -645,6 +660,48 @@ impl_runtime_apis!
{ } } + impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::RocRelayLocation::get())]; + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> { + match asset.try_as::<AssetId>() { + Ok(asset_id) if asset_id.0 == xcm_config::RocRelayLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, XcmPaymentApiError> { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::<Runtime, xcm_config::XcmRouter, OriginCaller, RuntimeCall>(origin, call) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm<RuntimeCall>) -> Result<XcmDryRunEffects<RuntimeEvent>, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::<Runtime, xcm_config::XcmRouter, RuntimeCall, xcm_config::XcmConfig>(origin_location, xcm) + } + } + impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime { fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo { ParachainSystem::collect_collation_info(header) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs index 89b1c4c86632ff0d19ee4fe1428978303ef92082..5c9175a18d98a0b6c0c791945dcd9f2cd1892cc4 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_broker.rs @@ -154,8 +154,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:2) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:2) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:0 w:1) @@ -337,8 +337,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> { } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:1) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// 
Storage: `Broker::PotentialRenewals` (r:1 w:1) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: // Measured: `957` diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 60cc7e2f765477052332b4c422facb8b19e1f5a4..78018537f5d3ce07d75ed8a465d5e17dba157028 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -13,7 +13,7 @@ workspace = true substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } hex-literal = "0.4.1" log = { workspace = true } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } @@ -61,10 +61,11 @@ westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/co xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } @@ -137,6 +138,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -165,6 +167,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs index 41cbc62fa2115ff3828e6910b750622a91ff0251..a5e219b9897e0710e008ffa6b800624710877bcc 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs @@ -245,5 +245,5 @@ impl pallet_broker::Config for Runtime { type WeightInfo = weights::pallet_broker::WeightInfo<Runtime>; type PalletId = BrokerPalletId; type AdminOrigin = EnsureRoot<AccountId>; - type PriceAdapter = pallet_broker::Linear; + type PriceAdapter = pallet_broker::CenterTargetPrice<Balance>; } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs 
b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 61c7b6e495872be4881792a2d416b0bbf91ff068..6c22702ce872fe28e5acc9783c263bedbc135dee 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -41,7 +41,7 @@ use frame_support::{ genesis_builder_helper::{build_state, get_preset}, parameter_types, traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -72,10 +72,14 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use testnet_parachains_constants::westend::{consensus::*, currency::*, fee::WeightToFee, time::*}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -use xcm::latest::prelude::*; +use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, TokenRelayLocation, XcmOriginToTransactDispatchOrigin, }; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -111,6 +115,7 @@ pub type Migrations = ( pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, pallet_broker::migration::MigrateV0ToV1, + pallet_broker::migration::MigrateV1ToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); @@ -136,7 +141,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("coretime-westend"), impl_name: create_runtime_str!("coretime-westend"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -302,7 +307,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -342,13 +347,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; @@ -637,6 +651,48 @@ impl_runtime_apis! 
{
 		}
 	}
 
+	impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime {
+		fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> {
+			let acceptable_assets = vec![AssetId(xcm_config::TokenRelayLocation::get())];
+			PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets)
+		}
+
+		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
+			match asset.try_as::<AssetId>() {
+				Ok(asset_id) if asset_id.0 == xcm_config::TokenRelayLocation::get() => {
+					// for native token
+					Ok(WeightToFee::weight_to_fee(&weight))
+				},
+				Ok(asset_id) => {
+					log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!");
+					Err(XcmPaymentApiError::AssetNotFound)
+				},
+				Err(_) => {
+					log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!");
+					Err(XcmPaymentApiError::VersionedConversionFailed)
+				}
+			}
+		}
+
+		fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> {
+			PolkadotXcm::query_xcm_weight(message)
+		}
+
+		fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, XcmPaymentApiError> {
+			PolkadotXcm::query_delivery_fees(destination, message)
+		}
+	}
+
+	impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime {
+		fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
+			PolkadotXcm::dry_run_call::<Runtime, xcm_config::XcmRouter, OriginCaller, RuntimeCall>(origin, call)
+		}
+
+		fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm<RuntimeCall>) -> Result<XcmDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
+			PolkadotXcm::dry_run_xcm::<Runtime, xcm_config::XcmRouter, RuntimeCall, xcm_config::XcmConfig>(origin_location, xcm)
+		}
+	}
+
 	impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
 		fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo {
 			ParachainSystem::collect_collation_info(header)
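Aside (not part of the patch): `query_weight_to_asset_fee` delegates to the runtime's `WeightToFee`, which is why `WeightToFee as _` is newly imported in these runtimes. A sketch of that delegation with a constant multiplier as the fee function — an assumption for illustration; the real runtimes use their own fee curve from `testnet_parachains_constants`:

use frame_support::weights::{ConstantMultiplier, Weight, WeightToFee};
use sp_core::ConstU128;

// Hypothetical fee function: 10 units of balance per unit of ref-time.
type Fee = ConstantMultiplier<u128, ConstU128<10>>;

fn main() {
	let weight = Weight::from_parts(1_000, 0);
	assert_eq!(Fee::weight_to_fee(&weight), 10_000);
}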
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs
index 13d5fcf3898bcc07fce13c7ee2deeb8c2b9fa76f..7e1c832a90924e39c7bc7d7b24d8163ce5d65589 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_broker.rs
@@ -152,8 +152,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	/// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::SaleInfo` (r:1 w:1)
 	/// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`)
-	/// Storage: `Broker::AllowedRenewals` (r:1 w:2)
-	/// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`)
+	/// Storage: `Broker::PotentialRenewals` (r:1 w:2)
+	/// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`)
 	/// Storage: `System::Account` (r:1 w:0)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// Storage: `Broker::Workplan` (r:0 w:1)
@@ -335,8 +335,8 @@ impl<T: frame_system::Config> pallet_broker::WeightInfo for WeightInfo<T> {
 	}
 	/// Storage: `Broker::Status` (r:1 w:0)
 	/// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`)
-	/// Storage: `Broker::AllowedRenewals` (r:1 w:1)
-	/// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`)
+	/// Storage: `Broker::PotentialRenewals` (r:1 w:1)
+	/// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`)
 	fn drop_renewal() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `556`
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
index 808bed38732758e546a6a513c4f19cf2b1d50fbc..92a5bbbd1376088909f315371dff6be13ffa69af 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml
@@ -10,7 +10,7 @@ description = "Glutton parachain runtime."
 workspace = true
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 # Substrate
@@ -47,7 +47,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad
 
 # Cumulus
 cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false }
-cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] }
+cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false }
 cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false }
 cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false }
 cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false }
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
index 424fa9cb7e726b697877363f1d7692d674940214..4092fb78594d205d8ad9ddf7d8268c4fa4db27db 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
@@ -100,7 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("glutton-westend"),
 	impl_name: create_runtime_str!("glutton-westend"),
 	authoring_version: 1,
-	spec_version: 1_011_000,
+	spec_version: 1_012_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 1,
@@ -208,8 +208,9 @@ impl pallet_message_queue::Config for Runtime {
 	>;
 	type Size = u32;
 	type QueueChangeHandler = ();
-	type QueuePausedQuery = (); // No XCMP queue pallet deployed.
-	type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
+	// No XCMP queue pallet deployed.
+ type QueuePausedQuery = (); + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index 7183be5fc82cc594c6ad2f3738dbfd5b67738a2c..d4e65da3cd6426f7a21a2c96d03d53d8161410fb 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } enumflags2 = { version = "0.7.7" } hex-literal = { version = "0.4.1" } log = { workspace = true } @@ -59,10 +59,11 @@ rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/cons xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-payment-runtime-api", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } @@ -135,6 +136,7 @@ std = [ "testnet-parachains-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -163,6 +165,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 544b2e78a46950a2abddc61582e727bbed6af93a..c80f6879fb3448b7b20dd81c387d3ce6c3a3ea90 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -32,7 +32,7 @@ use frame_support::{ traits::{ ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, }, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -65,11 +65,15 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; use weights::{BlockExecutionWeight, 
ExtrinsicBaseWeight, RocksDbWeight}; -use xcm::latest::prelude::BodyId; +use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, PriceForSiblingParachainDelivery, XcmConfig, XcmOriginToTransactDispatchOrigin, }; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -128,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-rococo"), impl_name: create_runtime_str!("people-rococo"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -281,7 +285,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -308,13 +312,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = RootOrFellows; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; } +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; @@ -612,6 +625,48 @@ impl_runtime_apis! 
{
 		}
 	}
 
+	impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime {
+		fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> {
+			let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())];
+			PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets)
+		}
+
+		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
+			match asset.try_as::<AssetId>() {
+				Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => {
+					// for native token
+					Ok(WeightToFee::weight_to_fee(&weight))
+				},
+				Ok(asset_id) => {
+					log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!");
+					Err(XcmPaymentApiError::AssetNotFound)
+				},
+				Err(_) => {
+					log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!");
+					Err(XcmPaymentApiError::VersionedConversionFailed)
+				}
+			}
+		}
+
+		fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> {
+			PolkadotXcm::query_xcm_weight(message)
+		}
+
+		fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, XcmPaymentApiError> {
+			PolkadotXcm::query_delivery_fees(destination, message)
+		}
+	}
+
+	impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime {
+		fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
+			PolkadotXcm::dry_run_call::<Runtime, xcm_config::XcmRouter, OriginCaller, RuntimeCall>(origin, call)
+		}
+
+		fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm<RuntimeCall>) -> Result<XcmDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
+			PolkadotXcm::dry_run_xcm::<Runtime, xcm_config::XcmRouter, RuntimeCall, xcm_config::XcmConfig>(origin_location, xcm)
+		}
+	}
+
 	impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
 		fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo {
 			ParachainSystem::collect_collation_info(header)
= "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } @@ -135,6 +136,7 @@ std = [ "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", + "xcm-fee-payment-runtime-api/std", "xcm/std", ] @@ -163,6 +165,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", + "xcm-fee-payment-runtime-api/runtime-benchmarks", ] try-runtime = [ diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 50c818a20226cd9b7cce17c220f23a3a3fb008f5..06c938b8a40c31ac31c2557f480017ba385e157b 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -32,7 +32,7 @@ use frame_support::{ traits::{ ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, }, - weights::{ConstantMultiplier, Weight}, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, PalletId, }; use frame_system::{ @@ -65,11 +65,15 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use testnet_parachains_constants::westend::{consensus::*, currency::*, fee::WeightToFee, time::*}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -use xcm::latest::prelude::BodyId; +use xcm::prelude::*; use xcm_config::{ FellowshipLocation, GovernanceLocation, PriceForSiblingParachainDelivery, XcmConfig, XcmOriginToTransactDispatchOrigin, }; +use xcm_fee_payment_runtime_api::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; /// The address format for describing accounts. pub type Address = MultiAddress; @@ -128,7 +132,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("people-westend"), impl_name: create_runtime_str!("people-westend"), authoring_version: 1, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -281,7 +285,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -308,13 +312,22 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type ChannelInfo = ParachainSystem; type VersionWrapper = PolkadotXcm; type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. 
+	type MaxPageSize = ConstU32<{ 103 * 1024 }>;
 	type ControllerOrigin = RootOrFellows;
 	type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin;
 	type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo<Runtime>;
 	type PriceForSiblingDelivery = PriceForSiblingParachainDelivery;
 }
 
+impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime {
+	// This must be the same as the `ChannelInfo` from the `Config`:
+	type ChannelList = ParachainSystem;
+}
+
 pub const PERIOD: u32 = 6 * HOURS;
 pub const OFFSET: u32 = 0;
@@ -612,6 +625,48 @@ impl_runtime_apis! {
 		}
 	}
 
+	impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime {
+		fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> {
+			let acceptable_assets = vec![AssetId(xcm_config::RelayLocation::get())];
+			PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets)
+		}
+
+		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
+			match asset.try_as::<AssetId>() {
+				Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => {
+					// for native token
+					Ok(WeightToFee::weight_to_fee(&weight))
+				},
+				Ok(asset_id) => {
+					log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!");
+					Err(XcmPaymentApiError::AssetNotFound)
+				},
+				Err(_) => {
+					log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!");
+					Err(XcmPaymentApiError::VersionedConversionFailed)
+				}
+			}
+		}
+
+		fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> {
+			PolkadotXcm::query_xcm_weight(message)
+		}
+
+		fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result<VersionedAssets, XcmPaymentApiError> {
+			PolkadotXcm::query_delivery_fees(destination, message)
+		}
+	}
+
+	impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime {
+		fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
+			PolkadotXcm::dry_run_call::<Runtime, xcm_config::XcmRouter, OriginCaller, RuntimeCall>(origin, call)
+		}
+
+		fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm<RuntimeCall>) -> Result<XcmDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
+			PolkadotXcm::dry_run_xcm::<Runtime, xcm_config::XcmRouter, RuntimeCall, xcm_config::XcmConfig>(origin_location, xcm)
+		}
+	}
+
 	impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
 		fn collect_collation_info(header: &<Block as BlockT>::Header) -> cumulus_primitives_core::CollationInfo {
 			ParachainSystem::collect_collation_info(header)
diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml
index eb702c9f2cdf30844fbf4ea17534566c038c4880..910944f54a5ff3433f11fab1d33aa0e88abc35b2 100644
--- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml
+++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 # Substrate
@@ -36,7 +36,7 @@ sp-version = { path = "../../../../../substrate/primitives/version", default-fea
 
 # Cumulus
 cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false }
-cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] }
+cumulus-pallet-parachain-system = { path =
"../../../../pallets/parachain-system", default-features = false } cumulus-pallet-solo-to-para = { path = "../../../../pallets/solo-to-para", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index f66d04fec1fdd72eb30f183bb713e88d4fee00e7..7a7fad537ac302a9e71889c949a04aacc79df3f3 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate @@ -41,7 +41,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index a3d1629bbe5eb928f3f78998ed721968f140536d..7422b580cc3e08c1df8af5e9dce23d79d0c4e1a8 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -229,7 +229,7 @@ impl pallet_message_queue::Config for Runtime { // These need to be configured to the XCMP pallet - if it is deployed. 
type QueueChangeHandler = (); type QueuePausedQuery = (); - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index eda88beb7dabb41bd4075ec5ab6bf8ec2f42d3c8..c081bac4babe87413c40869917715d03c0c71a86 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } # Substrate frame-support = { path = "../../../../substrate/frame/support", default-features = false } @@ -26,7 +26,7 @@ sp-tracing = { path = "../../../../substrate/primitives/tracing" } sp-core = { path = "../../../../substrate/primitives/core", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 4ebb95f26cf6ab15b5576a62fe56e2fe626dd6d9..3262233053e7e130c230adbc0aef857572756b1d 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } @@ -69,7 +69,7 @@ xcm-fee-payment-runtime-api = { path = "../../../../../polkadot/xcm/xcm-fee-paym # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = 
"../../../../pallets/xcmp-queue", default-features = false } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 582154fec6d274e048558fec43893aee7d4f2817..e77416e6cd5b681a83b3d8b0571401ef1543004b 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -64,7 +64,7 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, Dispatchable}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -83,10 +83,10 @@ use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use xcm::{ latest::prelude::{AssetId as AssetLocationId, BodyId}, - IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, + VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -541,7 +541,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = MessageQueueServiceWeight; @@ -569,7 +569,11 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { type VersionWrapper = PolkadotXcm; // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); @@ -845,24 +849,25 @@ impl_runtime_apis! 
{
 	impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi<Block> for Runtime {
 		fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> {
-			if !matches!(xcm_version, 3 | 4) {
-				return Err(XcmPaymentApiError::UnhandledXcmVersion);
-			}
-			Ok([VersionedAssetId::V4(xcm_config::RelayLocation::get().into())]
-				.into_iter()
-				.filter_map(|asset| asset.into_version(xcm_version).ok())
-				.collect())
+			let acceptable_assets = vec![AssetLocationId(xcm_config::RelayLocation::get())];
+			PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets)
 		}
 
 		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
-			let local_asset = VersionedAssetId::V4(xcm_config::RelayLocation::get().into());
-			let asset = asset
-				.into_version(4)
-				.map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?;
-
-			if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); }
-
-			Ok(WeightToFee::weight_to_fee(&weight))
+			match asset.try_as::<AssetLocationId>() {
+				Ok(asset_id) if asset_id.0 == xcm_config::RelayLocation::get() => {
+					// for native token
+					Ok(WeightToFee::weight_to_fee(&weight))
+				},
+				Ok(asset_id) => {
+					log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!");
+					Err(XcmPaymentApiError::AssetNotFound)
+				},
+				Err(_) => {
+					log::trace!(target: "xcm::xcm_fee_payment_runtime_api", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!");
+					Err(XcmPaymentApiError::VersionedConversionFailed)
+				}
+			}
 		}
 
 		fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> {
@@ -874,26 +879,20 @@ impl_runtime_apis! {
 		}
 	}
 
-	impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi<Block, RuntimeCall, RuntimeEvent> for Runtime {
-		fn dry_run_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> Result<ExtrinsicDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
+	impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller> for Runtime {
+		fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
 			use xcm_builder::InspectMessageQueues;
 			use xcm_executor::RecordXcm;
 			use xcm::prelude::*;
 
 			pallet_xcm::Pallet::<Runtime>::set_record_xcm(true);
-			let result = Executive::apply_extrinsic(extrinsic).map_err(|error| {
-				log::error!(
-					target: "xcm::XcmDryRunApi::dry_run_extrinsic",
-					"Applying extrinsic failed with error {:?}",
-					error,
-				);
-				XcmDryRunApiError::InvalidExtrinsic
-			})?;
+			frame_system::Pallet::<Runtime>::reset_events(); // To make sure we only record events from current call.
+			let result = call.dispatch(origin.into());
 			pallet_xcm::Pallet::<Runtime>::set_record_xcm(false);
 			let local_xcm = pallet_xcm::Pallet::<Runtime>::recorded_xcm();
 			let forwarded_xcms = xcm_config::XcmRouter::get_messages();
 			let events: Vec<RuntimeEvent> = System::read_events_no_consensus().map(|record| record.event.clone()).collect();
-			Ok(ExtrinsicDryRunEffects {
-				local_xcm: local_xcm.map(VersionedXcm::<()>::V4),
+			Ok(CallDryRunEffects {
+				local_xcm: local_xcm.map(VersionedXcm::<()>::from),
 				forwarded_xcms,
 				emitted_events: events,
 				execution_result: result,
@@ -906,7 +905,7 @@ impl_runtime_apis! {
 
 			let origin_location: Location = origin_location.try_into().map_err(|error| {
 				log::error!(
-					target: "xcm::XcmDryRunApi::dry_run_xcm",
+					target: "xcm::DryRunApi::dry_run_xcm",
 					"Location version conversion failed with error: {:?}",
 					error,
 				);
@@ -914,13 +913,14 @@ impl_runtime_apis! {
 			})?;
 			let program: Xcm<RuntimeCall> = program.try_into().map_err(|error| {
 				log::error!(
-					target: "xcm::XcmDryRunApi::dry_run_xcm",
+					target: "xcm::DryRunApi::dry_run_xcm",
 					"Xcm version conversion failed with error {:?}",
 					error,
 				);
 				XcmDryRunApiError::VersionedConversionFailed
 			})?;
 			let mut hash = program.using_encoded(sp_core::hashing::blake2_256);
+			frame_system::Pallet::<Runtime>::reset_events(); // To make sure we only record events from current call.
 			let result = xcm_executor::XcmExecutor::<xcm_config::XcmConfig>::prepare_and_execute(
 				origin_location,
 				program,
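Aside (not part of the patch): a hedged sketch of what a consumer of the new `DryRunApi` might do with the returned effects. The field layout (`forwarded_xcms` as `(VersionedLocation, Vec<VersionedXcm<()>>)` pairs) follows this PR's `xcm-fee-payment-runtime-api` types; `forwarded_xcm_count` is a hypothetical helper, not part of the API.

use xcm_fee_payment_runtime_api::dry_run::CallDryRunEffects;

/// Count the XCMs a dry run reports as forwarded, across all destinations.
/// Each (destination, messages) pair could then be priced with `query_delivery_fees`.
fn forwarded_xcm_count<Event>(effects: &CallDryRunEffects<Event>) -> usize {
	effects.forwarded_xcms.iter().map(|(_dest, msgs)| msgs.len()).sum()
}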
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
index df3aaa92c79e78e5dfb9043db69778db051b3053..cf734345a976f027ae31f2f38735b9b7aac59f7d 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
@@ -10,7 +10,7 @@ license = "Apache-2.0"
 workspace = true
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 # Substrate
@@ -50,7 +50,7 @@ polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", def
 # Cumulus
 cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false }
 pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false }
-cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] }
+cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false }
 cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false }
 cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false }
 cumulus-ping = { path = "../../../pallets/ping", default-features = false }
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
index f22e900ba9efd66f4abe5561442755e5ce0b5b78..fd4716ab972e8e8f6d3d1a3ca6aca74df7263fa7 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
@@ -107,7 +107,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: create_runtime_str!("test-parachain"),
 	impl_name: create_runtime_str!("test-parachain"),
 	authoring_version: 1,
-	spec_version: 1_011_000,
+	spec_version: 1_012_000,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 6,
@@ -317,7 +317,7 @@ impl pallet_message_queue::Config for Runtime {
 	// The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin:
 	type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>;
 	type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>;
-	type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>;
+	type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>;
 	type MaxStale = sp_core::ConstU32<8>;
 	type ServiceWeight = MessageQueueServiceWeight;
 	type IdleMaxServiceWeight = ();
@@ -542,7 +542,11 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime {
 	type VersionWrapper = ();
 	// Enqueue XCMP messages from siblings for later processing.
type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = cumulus_pallet_xcmp_queue::weights::SubstrateWeight; diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index f21a5baf973bd7bb9362691333f6fadeec8970bb..def7d95fd5663a1c85d06b4cc6b8de796d021a22 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -17,7 +17,7 @@ path = "src/main.rs" [dependencies] async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.0.0" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.28" hex-literal = "0.4.1" log = { workspace = true, default-features = true } @@ -118,7 +118,7 @@ substrate-build-script-utils = { path = "../../substrate/utils/build-script-util [dev-dependencies] assert_cmd = "2.0" -nix = { version = "0.27.1", features = ["signal"] } +nix = { version = "0.28.0", features = ["signal"] } tempfile = "3.8.0" tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] } wait-timeout = "0.2" @@ -172,4 +172,3 @@ try-runtime = [ "sp-runtime/try-runtime", ] fast-runtime = ["bridge-hub-rococo-runtime/fast-runtime"] -elastic-scaling-experimental = ["polkadot-service/elastic-scaling-experimental"] diff --git a/cumulus/polkadot-parachain/src/chain_spec/mod.rs b/cumulus/polkadot-parachain/src/chain_spec/mod.rs index bbda334e4c66e0d9f8fcb7434bc4f218d81dc6e5..19047b073b057a06f19e86faf935ecb4fc3c96b5 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/mod.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/mod.rs @@ -37,11 +37,12 @@ const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; /// Generic extensions for Parachain ChainSpecs. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] -#[serde(deny_unknown_fields)] pub struct Extensions { /// The relay chain of the Parachain. + #[serde(alias = "relayChain", alias = "RelayChain")] pub relay_chain: String, /// The id of the Parachain. 
+ #[serde(alias = "paraId", alias = "ParaId")] pub para_id: u32, } @@ -53,7 +54,7 @@ impl Extensions { } /// Generic chain spec for all polkadot-parachain runtimes -pub type GenericChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type GenericChainSpec = sc_service::GenericChainSpec; /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { @@ -78,3 +79,22 @@ where pub fn get_collator_keys_from_seed(seed: &str) -> ::Public { get_from_seed::(seed) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn can_decode_extension_camel_and_snake_case() { + let camel_case = r#"{"relayChain":"relay","paraId":1}"#; + let snake_case = r#"{"relay_chain":"relay","para_id":1}"#; + let pascal_case = r#"{"RelayChain":"relay","ParaId":1}"#; + + let camel_case_extension: Extensions = serde_json::from_str(camel_case).unwrap(); + let snake_case_extension: Extensions = serde_json::from_str(snake_case).unwrap(); + let pascal_case_extension: Extensions = serde_json::from_str(pascal_case).unwrap(); + + assert_eq!(camel_case_extension, snake_case_extension); + assert_eq!(snake_case_extension, pascal_case_extension); + } +} diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 041187de488f33c2c0fe65e4464d165a46c0333e..653ea3281f0f769df9a3c88629e139244b187863 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -1017,7 +1017,7 @@ mod tests { cfg_file_path } - pub type DummyChainSpec = sc_service::GenericChainSpec<(), E>; + pub type DummyChainSpec = sc_service::GenericChainSpec; pub fn create_default_with_extensions( id: &str, diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 21c06ef22d9a13bf8361156ea5e1af1216aa3e28..ef96f334d63753c73de669ddcd98b6868a88389b 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -10,7 +10,7 @@ description = "Core primitives for Aura in Cumulus" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } # Substrate sp-api = { path = "../../../substrate/primitives/api", default-features = false } diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 62c3f6751917ad4aaaaec6509c852a5cfd75d7f8..595aa5f72bf2453edea23e372865de95e9e46699 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -10,7 +10,7 @@ description = "Cumulus related core primitive types and traits" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 7f7353685657e7bf6bfb2c05faba32315bbbb706..29216d513465160eb94db76faedb6e7fd992b461 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -64,6 +64,8 @@ pub enum MessageSendError { TooBig, /// Some other error. Other, + /// There are too many channels open at once. 
+ TooManyChannels, } impl From for &'static str { @@ -74,6 +76,7 @@ impl From for &'static str { NoChannel => "NoChannel", TooBig => "TooBig", Other => "Other", + TooManyChannels => "TooManyChannels", } } } @@ -135,6 +138,11 @@ pub trait GetChannelInfo { fn get_channel_info(id: ParaId) -> Option; } +/// List all open outgoing channels. +pub trait ListChannelInfos { + fn outgoing_channels() -> Vec; +} + /// Something that should be called when sending an upward message. pub trait UpwardMessageSender { /// Send the given UMP message; return the expected number of blocks before the message will diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 4da561661b6b312d8d89a9f0f5ae5b7359e79a82..0156eb02e2b4aaa9ee02e4e237f305c20792569d 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 6dbf7904bf796e81415b967637d9770356142603..bdfb83ad72a96930c1dae2d2c054a2c19c5cfcb2 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index 59f327b2642a292db56708f5770ebb35b1b82d1d..7a6f4787ba3121cf0c9c7eec3b9f3794c870037d 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } futures = "0.3.28" # Substrate diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 1e2c300b9ba257d2c8fb998689ae45847099dd63..85e3ac2f7606c9e12aa4d38f0c44c3fe1818b107 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -10,7 +10,7 @@ description = "Helper datatypes for Cumulus" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } # Substrate diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 254361e85420de982a984f76d76b96a5c7d3b4a4..120983eb9390e9007c13b08c763f13446011f98e 100644 --- a/cumulus/test/client/Cargo.toml +++ 
b/cumulus/test/client/Cargo.toml @@ -9,7 +9,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } # Substrate sc-service = { path = "../../../substrate/client/service" } diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index ff5c4bd66b9742383bcd170f0012695d7f0c72d2..d775c61f7801e98b4c8e8436eb95c1ec86854d77 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -10,7 +10,7 @@ description = "Mocked relay state proof builder for testing Cumulus." workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } # Substrate sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 1969045640ed1e8c95b828fb5f4073875c3e63b0..014313aa89195d36ed4d9e296ab7b5bc07cbed8b 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -9,7 +9,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # Substrate @@ -41,9 +41,7 @@ sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", sp-version = { path = "../../../substrate/primitives/version", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = [ - "parameterized-consensus-hook", -] } +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false } cumulus-primitives-aura = { path = "../../primitives/aura", default-features = false } pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } cumulus-pallet-aura-ext = { path = "../../pallets/aura-ext", default-features = false } diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 18213b2f6326c4d755e07d66cb13ed4424226fc7..732d884528f8974b4d09b33c118f41b74d22f348 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -15,7 +15,7 @@ path = "src/main.rs" [dependencies] async-trait = "0.1.79" clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.0.0" } +codec = { package = "parity-scale-codec", version = "3.6.12" } criterion = { version = "0.5.1", features = ["async_tokio"] } jsonrpsee = { version = "0.22", features = ["server"] } rand = "0.8.5" @@ -85,7 +85,7 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-node" } cumulus-client-pov-recovery = { path = "../../client/pov-recovery" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, 
features = ["parameterized-consensus-hook"] } +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false } cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } pallet-timestamp = { path = "../../../substrate/frame/timestamp" } diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 4db2513e2b6311675c8d3b565a817d1b7ce95f8b..28faba7377e42e03c28dc72a7bb2e55db643fdc4 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -17,7 +17,7 @@ #![allow(missing_docs)] use cumulus_primitives_core::ParaId; -use cumulus_test_runtime::{AccountId, RuntimeGenesisConfig, Signature}; +use cumulus_test_runtime::{AccountId, Signature}; use parachains_common::AuraId; use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; use sc_service::ChainType; @@ -26,7 +26,7 @@ use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; /// Specialized `ChainSpec` for the normal parachain runtime. -pub type ChainSpec = sc_service::GenericChainSpec; +pub type ChainSpec = sc_service::GenericChainSpec; /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index f2a612803861ce143aa08d6ae7abbf8963c31ef3..6f8b9d19bb29ba7445b2b67fdd9b0ac4a3263553 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -152,7 +152,7 @@ impl RecoveryHandle for FailingRecoveryHandle { message: AvailabilityRecoveryMessage, origin: &'static str, ) { - let AvailabilityRecoveryMessage::RecoverAvailableData(ref receipt, _, _, _) = message; + let AvailabilityRecoveryMessage::RecoverAvailableData(ref receipt, _, _, _, _) = message; let candidate_hash = receipt.hash(); // For every 3rd block we immediately signal unavailability to trigger @@ -160,7 +160,8 @@ impl RecoveryHandle for FailingRecoveryHandle { if self.counter % 3 == 0 && self.failed_hashes.insert(candidate_hash) { tracing::info!(target: LOG_TARGET, ?candidate_hash, "Failing pov recovery."); - let AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, back_sender) = message; + let AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, back_sender) = + message; back_sender .send(Err(RecoveryError::Unavailable)) .expect("Return channel should work here."); diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index 6b45770a8e3df47cb083dba5a8a0eeed1759e338..0ed77bf5b7073bc9e3041388206203ce7c1829a2 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } +codec = { package = "parity-scale-codec", version = "3.6.12" } paste = "1.0.14" log = { workspace = true } lazy_static = "1.4.0" diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index babb318a99500932dd8a2e42a2b443944751d286..1a3f3930cb3478d9e7811c49becbfdd7874bbe48 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -34,7 +34,9 @@ pub use frame_support::{ }, weights::{Weight, WeightMeter}, }; -pub use frame_system::{Config as SystemConfig, Pallet as SystemPallet}; +pub use frame_system::{ + pallet_prelude::BlockNumberFor, Config as SystemConfig, Pallet as SystemPallet, +}; pub use 
pallet_balances::AccountData;
 pub use pallet_message_queue;
 pub use sp_arithmetic::traits::Bounded;
@@ -54,7 +56,7 @@ pub use cumulus_primitives_core::{
 pub use cumulus_primitives_parachain_inherent::ParachainInherentData;
 pub use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
 pub use pallet_message_queue::{Config as MessageQueueConfig, Pallet as MessageQueuePallet};
-pub use parachains_common::{AccountId, Balance, BlockNumber};
+pub use parachains_common::{AccountId, Balance};
 pub use polkadot_primitives;
 pub use polkadot_runtime_parachains::inclusion::{AggregateMessageOrigin, UmpQueueId};
@@ -213,6 +215,7 @@ pub trait Chain: TestExt {
 	type RuntimeOrigin;
 	type RuntimeEvent;
 	type System;
+	type OriginCaller;
 
 	fn account_id_of(seed: &str) -> AccountId {
 		helpers::get_account_id_from_seed::<sr25519::Public>(seed)
@@ -364,6 +367,7 @@ macro_rules! decl_test_relay_chains {
 			type RuntimeOrigin = $runtime::RuntimeOrigin;
 			type RuntimeEvent = $runtime::RuntimeEvent;
 			type System = $crate::SystemPallet::<Self::Runtime>;
+			type OriginCaller = $runtime::OriginCaller;
 
 			fn account_data_of(account: $crate::AccountIdOf) -> $crate::AccountData<$crate::Balance> {
 				::ext_wrapper(|| $crate::SystemPallet::<Self::Runtime>::account(account).data.into())
@@ -598,6 +602,7 @@ macro_rules! decl_test_parachains {
 			type RuntimeOrigin = $runtime::RuntimeOrigin;
 			type RuntimeEvent = $runtime::RuntimeEvent;
 			type System = $crate::SystemPallet::<Self::Runtime>;
+			type OriginCaller = $runtime::OriginCaller;
 			type Network = N;
 
 			fn account_data_of(account: $crate::AccountIdOf) -> $crate::AccountData<$crate::Balance> {
@@ -657,7 +662,7 @@ macro_rules! decl_test_parachains {
 						.clone()
 					);
 					::System::initialize(&block_number, &parent_head_data.hash(), &Default::default());
-					<::ParachainSystem as Hooks<$crate::BlockNumber>>::on_initialize(block_number);
+					<::ParachainSystem as Hooks<$crate::BlockNumberFor>>::on_initialize(block_number);
 
 					let _ = ::ParachainSystem::set_validation_data(
 						::RuntimeOrigin::none(),
diff --git a/docker/dockerfiles/binary_injected.Dockerfile b/docker/dockerfiles/binary_injected.Dockerfile
index c8930bd83f0274990aff281e84d75b7add9d1289..26c0ef7ae6414a472c04b17294d964572c14213a 100644
--- a/docker/dockerfiles/binary_injected.Dockerfile
+++ b/docker/dockerfiles/binary_injected.Dockerfile
@@ -32,7 +32,7 @@ LABEL io.parity.image.authors=${AUTHORS} \
 USER root
 WORKDIR /app
 
-# add polkadot binary to docker image
+# add binary to docker image
 # sample for polkadot: COPY ./polkadot ./polkadot-*-worker /usr/local/bin/
 COPY entrypoint.sh .
 COPY "bin/*" "/usr/local/bin/"
diff --git a/docker/scripts/chain-spec-builder/build-injected.sh b/docker/scripts/chain-spec-builder/build-injected.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ede6cee3851382829d9f1793749a1358cf780edd
--- /dev/null
+++ b/docker/scripts/chain-spec-builder/build-injected.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+# Sample call:
+# $0 /path/to/folder_with_binary
+# This script replaces the former dedicated Dockerfile
+# and shows how to use the generic binary_injected.Dockerfile
+
+PROJECT_ROOT=`git rev-parse --show-toplevel`
+
+export BINARY=chain-spec-builder
+export ARTIFACTS_FOLDER=$1
+# export TAGS=...
+ +$PROJECT_ROOT/docker/scripts/build-injected.sh diff --git a/docker/scripts/chain-spec-builder/test-build.sh b/docker/scripts/chain-spec-builder/test-build.sh new file mode 100755 index 0000000000000000000000000000000000000000..a42cab97703481c03698c417b9e995618db8c0c1 --- /dev/null +++ b/docker/scripts/chain-spec-builder/test-build.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +TMP=$(mktemp -d) +ENGINE=${ENGINE:-podman} + +export TAGS=latest,beta,7777,1.0.2-rc23 + +# Fetch some binaries +$ENGINE run --user root --rm -i \ + --pull always \ + -v "$TMP:/export" \ + --entrypoint /bin/bash \ + parity/chain-spec-builder -c \ + 'cp "$(which chain-spec-builder)" /export' + +echo "Checking binaries we got:" +ls -al $TMP + +./build-injected.sh $TMP diff --git a/docs/RELEASE.md b/docs/RELEASE.md index e73be2779a99426203e209da846f938c0f73cceb..653e6a2a3e929e052cdd839884ed30c5b5327201 100644 --- a/docs/RELEASE.md +++ b/docs/RELEASE.md @@ -45,7 +45,7 @@ variable. ## Westend & Rococo -For the these networks, in addition to incrementing the `Cargo.toml` version we also increment the `spec_version` and +For these networks, in addition to incrementing the `Cargo.toml` version we also increment the `spec_version` and sometimes the `transaction_version`. The spec version is also following the node version. Its schema is: `M_mmm_ppp` and for example `1_002_000` is the node release `1.2.0`. This versioning has no further meaning, and is only done to map from an on chain `spec_version` easily to the release in this repository. diff --git a/docs/images/Polkadot_Logo_Horizontal_Pink_Black.png b/docs/images/Polkadot_Logo_Horizontal_Pink_Black.png new file mode 100644 index 0000000000000000000000000000000000000000..8909dc96a62a93f2bd8b9204e3f1a6885dffae52 Binary files /dev/null and b/docs/images/Polkadot_Logo_Horizontal_Pink_Black.png differ diff --git a/docs/images/Polkadot_Logo_Horizontal_Pink_White.png b/docs/images/Polkadot_Logo_Horizontal_Pink_White.png new file mode 100644 index 0000000000000000000000000000000000000000..ea814ea28d5c813613c087f0a2fe0e3e1f8f4b52 Binary files /dev/null and b/docs/images/Polkadot_Logo_Horizontal_Pink_White.png differ diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index 4eb50bcf96a8932de3fa90748fcfeb3ca7f02a5f..fe9a96bcafc0053c18332ca5901a93142811977f 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -1,5 +1,6 @@ flowchart parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] + polkadot[polkadot.network] --> devhub[polkadot_sdk_docs] devhub --> polkadot_sdk devhub --> reference_docs @@ -10,5 +11,3 @@ flowchart polkadot_sdk --> cumulus polkadot_sdk --> polkadot polkadot_sdk --> xcm - - diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index fe53845d8490ba0386c21100703c594987b67898..a8c873be556c2fd06ff6a3d3c096b3a8301f16d8 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -15,7 +15,7 @@ workspace = true [dependencies] # Needed for all FRAME-based code -parity-scale-codec = { version = "3.0.0", default-features = false } +parity-scale-codec = { version = "3.6.12", default-features = false } scale-info = { version = "2.6.0", default-features = false } frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame", features = [ "experimental", @@ -30,6 +30,7 @@ simple-mermaid = "0.1.1" docify = "0.2.8" # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. 
+polkadot-sdk = { path = "../../umbrella", features = ["runtime"] }
 node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" }
 kitchensink-runtime = { path = "../../substrate/bin/node/runtime" }
 chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" }
@@ -38,6 +39,7 @@ frame-system = { path = "../../substrate/frame/system", default-features = false
 frame-support = { path = "../../substrate/frame/support", default-features = false }
 frame-executive = { path = "../../substrate/frame/executive", default-features = false }
 pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" }
+frame-metadata-hash-extension = { path = "../../substrate/frame/metadata-hash-extension" }
 
 # Substrate Client
 sc-network = { path = "../../substrate/client/network" }
@@ -58,9 +60,7 @@ substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" }
 
 # Cumulus
 cumulus-pallet-aura-ext = { path = "../../cumulus/pallets/aura-ext" }
-cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-system", features = [
-	"parameterized-consensus-hook",
-] }
+cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-system" }
 parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" }
 cumulus-primitives-proof-size-hostfunction = { path = "../../cumulus/primitives/proof-size-hostfunction" }
 cumulus-client-service = { path = "../../cumulus/client/service" }
@@ -101,3 +101,4 @@ sp-version = { path = "../../substrate/primitives/version" }
 
 # XCM
 xcm = { package = "staging-xcm", path = "../../polkadot/xcm" }
+xcm-docs = { path = "../../polkadot/xcm/docs" }
diff --git a/docs/sdk/src/guides/enable_metadata_hash.rs b/docs/sdk/src/guides/enable_metadata_hash.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b9cbae8533538d685f39d1cde979758dcc59955b
--- /dev/null
+++ b/docs/sdk/src/guides/enable_metadata_hash.rs
@@ -0,0 +1,88 @@
+//! # Enable metadata hash verification
+//!
+//! This guide will teach you how to enable metadata hash verification in your runtime.
+//!
+//! ## What is metadata hash verification?
+//!
+//! Each FRAME based runtime exposes metadata about itself. This metadata is used by consumers of
+//! the runtime to interpret the state, to construct transactions, etc. Part of this metadata is
+//! the type information, which can be used e.g. to decode storage entries or transactions. So,
+//! the metadata is quite useful for wallets to interact with a FRAME based chain. Online wallets
+//! can fetch the metadata directly from any node of the chain they are connected to, but offline
+//! wallets cannot do this. So, for an offline wallet to have access to the metadata, it needs to
+//! be transferred to and stored on the device. The problem is that the metadata has a size of
+//! several hundred kilobytes, which takes quite a while to transfer to these offline wallets, and
+//! the internal storage of these devices is also not big enough to store the metadata for one or
+//! more networks. The next problem is that the offline wallet/user cannot trust the metadata to
+//! be correct. Correct metadata is essential, as otherwise an attacker could alter it in a way
+//! that makes the offline wallet decode a transaction differently from how it will be decoded on
+//! chain. So, the user may sign an incorrect transaction, leading to unexpected behavior.
+//!
+//! The metadata hash verification circumvents both the issue of the huge metadata and the need to
+//! trust some metadata blob to be correct. To generate a hash for the metadata, the metadata is
+//! chunked, the chunks are put into a merkle tree, and the root of this merkle tree is the
+//! "metadata hash". For a more technical explanation of how it works, see
+//! [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). At compile
+//! time the metadata hash is generated and "baked" into the runtime. This makes it extremely cheap
+//! for the runtime to verify on chain that the metadata hash is correct. By having the runtime
+//! verify the hash on chain, the user also doesn't need to trust the offchain metadata. If the
+//! metadata hash doesn't match the on chain metadata hash, the transaction will be rejected. The
+//! metadata hash itself is added to the data of the transaction that is signed; this means the
+//! actual hash does not appear in the transaction. On chain, the same procedure is repeated with
+//! the metadata hash that is known by the runtime, and if the metadata hash doesn't match, the
+//! signature verification will fail. As the metadata hash is actually the root of a merkle tree,
+//! the offline wallet can get proofs of individual types to decode a transaction. This means that
+//! the offline wallet does not require the entire metadata to be present on the device.
+//!
+//! ## Integrating metadata hash verification into your runtime
+//!
+//! The integration of the metadata hash verification is split into two parts: first, the actual
+//! integration into the runtime, and second, enabling the metadata hash generation at compile
+//! time.
+//!
+//! ### Runtime integration
+//!
+//! From the runtime side, only the
+//! [`CheckMetadataHash`](frame_metadata_hash_extension::CheckMetadataHash) needs to be added to the
+//! list of signed extensions:
+#![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", template_signed_extra)]
+//!
+//! > **Note:**
+//! >
+//! > Adding the signed extension changes the encoding of the transaction and adds one extra byte
+//! > per transaction!
+//!
+//! This signed extension will make sure to decode the requested `mode` and will add the metadata
+//! hash to the signed data depending on the requested `mode`. The `mode` gives the user/wallet
+//! control over deciding if the metadata hash should be verified or not. The metadata hash itself
+//! is drawn from the `RUNTIME_METADATA_HASH` environment variable. If the environment variable is
+//! not set, any transaction that requires the metadata hash is rejected with the error
+//! `CannotLookup`. This is a security measure to prevent including invalid transactions.
+//!
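+//! For orientation, the signed extension tuple that the embedded snippet above refers to looks
+//! roughly like the following sketch. The exact set of extensions depends on your runtime;
+//! `Runtime` stands in for your concrete runtime type:
+//!
+//! ```ignore
+//! /// The extensions to the basic transaction logic, with `CheckMetadataHash` appended last.
+//! pub type SignedExtra = (
+//! 	frame_system::CheckNonZeroSender<Runtime>,
+//! 	frame_system::CheckSpecVersion<Runtime>,
+//! 	frame_system::CheckTxVersion<Runtime>,
+//! 	frame_system::CheckGenesis<Runtime>,
+//! 	frame_system::CheckEra<Runtime>,
+//! 	frame_system::CheckNonce<Runtime>,
+//! 	frame_system::CheckWeight<Runtime>,
+//! 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+//! 	// Decodes the `mode` byte and, if requested, adds the metadata hash to the signed data.
+//! 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+//! );
+//! ```
+//!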

+//!
+//! <div class="warning">
+//!
+//! The extension does not work with the native runtime, because the
+//! `RUNTIME_METADATA_HASH` environment variable is not set when building the
+//! `frame-metadata-hash-extension` crate.
+//!
+//! </div>
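+//!
+//! As a preview of the build-side setup described in the next section, a minimal `build.rs`
+//! could look like the following sketch (modelled on the parachain template; the token symbol
+//! `"UNIT"` and the `12` decimals are placeholder values for your chain):
+//!
+//! ```ignore
+//! #[cfg(all(feature = "std", feature = "metadata-hash"))]
+//! fn main() {
+//! 	// Compute the metadata hash at compile time and expose it to the runtime
+//! 	// via the `RUNTIME_METADATA_HASH` environment variable.
+//! 	substrate_wasm_builder::WasmBuilder::init_with_defaults()
+//! 		.enable_metadata_hash("UNIT", 12)
+//! 		.build()
+//! }
+//!
+//! #[cfg(all(feature = "std", not(feature = "metadata-hash")))]
+//! fn main() {
+//! 	// Without the feature, build the wasm binary as usual.
+//! 	substrate_wasm_builder::WasmBuilder::build_using_defaults()
+//! }
+//!
+//! #[cfg(not(feature = "std"))]
+//! fn main() {}
+//! ```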
+//!
+//! ### Enable metadata hash generation
+//!
+//! The metadata hash generation needs to be enabled when building the wasm binary. The
+//! `substrate-wasm-builder` supports this out of the box:
+#![doc = docify::embed!("../../templates/parachain/runtime/build.rs", template_enable_metadata_hash)]
+//!
+//! > **Note:**
+//! >
+//! > The `metadata-hash` feature needs to be enabled for the `substrate-wasm-builder` to pull in
+//! > the code for generating the metadata hash. It is also recommended to put the metadata hash
+//! > generation behind a feature in the runtime, as shown above. The reason is that it adds a lot
+//! > of code, and the generation itself also increases the compile time. Thus, it is recommended
+//! > to enable the feature only when the metadata hash is required (e.g. for an on-chain build).
+//!
+//! The two parameters to `enable_metadata_hash` are the token symbol and the number of decimals
+//! of the primary token of the chain. This information is included so that wallets can show token
+//! related operations in a more user friendly way.
diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs
index 2dc807af8eaed45d78109bd1b22528d94e24bf06..f5f6d2b5e0c0768f1cff2f759769598c4703c601 100644
--- a/docs/sdk/src/guides/mod.rs
+++ b/docs/sdk/src/guides/mod.rs
@@ -26,3 +26,6 @@ pub mod xcm_enabled_parachain;
 
 /// How to enable storage weight reclaiming in a parachain node and runtime.
 pub mod enable_pov_reclaim;
+
+/// How to enable metadata hash verification in the runtime.
+pub mod enable_metadata_hash;
diff --git a/docs/sdk/src/polkadot_sdk/polkadot.rs b/docs/sdk/src/polkadot_sdk/polkadot.rs
index 61a6877696cb9298ed831562923d96d4a701bcc4..e2dcca4dc7dfcd2726d24b9f7725a67bad50246b 100644
--- a/docs/sdk/src/polkadot_sdk/polkadot.rs
+++ b/docs/sdk/src/polkadot_sdk/polkadot.rs
@@ -6,14 +6,16 @@
 //!
 //! - [Polkadot Forum](https://forum.polkadot.network/)
 //! - [Polkadot Parachains](https://parachains.info/)
-//! - [Polkadot (multi-chain) Explorer](https://subscan.io/)
+//! - [Polkadot (multi-chain) Explorer: Subscan](https://subscan.io/)
 //! - Polkadot Fellowship
 //!   - [Manifesto](https://github.com/polkadot-fellows/manifesto)
 //!   - [Runtimes](https://github.com/polkadot-fellows/runtimes)
 //!   - [RFCs](https://github.com/polkadot-fellows/rfcs)
+//!   - [Dashboard](https://polkadot-fellows.github.io/dashboard/)
 //! - [Polkadot Specs](spec.polkadot.network)
 //! - [The Polkadot Parachain Host Implementers' Guide](https://paritytech.github.io/polkadot-sdk/book/)
 //! - [Whitepaper](https://www.polkadot.network/whitepaper/)
+//! - [JAM Graypaper](https://graypaper.com)
 //!
 //! ## Alternative Node Implementations 🌈
 //!
diff --git a/docs/sdk/src/polkadot_sdk/xcm.rs b/docs/sdk/src/polkadot_sdk/xcm.rs
index 5dcdc9e1de076c4507cb64517360df73e6733dc7..58f54068642444e2010d3623d6a49d00f050ebf8 100644
--- a/docs/sdk/src/polkadot_sdk/xcm.rs
+++ b/docs/sdk/src/polkadot_sdk/xcm.rs
@@ -50,7 +50,7 @@
 //!
 //! ## Get started
 //!
-//! To learn how it works and to get started, go to the [XCM docs](https://paritytech.github.io/xcm-docs/).
+//! To learn how it works and to get started, go to the [XCM docs](xcm_docs).
 
 #[cfg(test)]
 mod tests {
diff --git a/docs/sdk/src/reference_docs/frame_runtime_types.rs b/docs/sdk/src/reference_docs/frame_runtime_types.rs
index 32cda5bc5345d7eee43efa9e2b99ac831c8fb3a8..1eed9857a1d5951245b4c4f6bef08f35d0c3f03a 100644
--- a/docs/sdk/src/reference_docs/frame_runtime_types.rs
+++ b/docs/sdk/src/reference_docs/frame_runtime_types.rs
@@ -102,6 +102,10 @@
 //! bounds, such as being [`frame::traits::IsSubType`]:
 #![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call_usages)]
 //!
+//! > Once Rust's "_Associated Type Bounds RFC_" is usable, this syntax can be used to
+//! > simplify the above scenario. See [this](https://github.com/paritytech/polkadot-sdk/issues/3743)
+//! > issue for more information.
+//!
 //! ### Asserting Equality of Multiple Runtime Composite Enums
 //!
 //! Recall that in the above example, `::RuntimeCall` and ` Note: You can see a live example in the `staging-node-cli` and `kitchensink-runtime` crates.
+//!
+//! The umbrella crate can be added to your runtime crate like this:
+//!
+//! `polkadot-sdk = { path = "../../../../umbrella", features = ["runtime"], default-features =
+//! false }`
+//!
+//! or for a node:
+//!
+//! `polkadot-sdk = { path = "../../../../umbrella", features = ["node"], default-features = false
+//! }`
+//!
+//! In the code, it is then possible to bring all dependencies into scope via:
+//!
+//! `use polkadot_sdk::*;`
+//!
+//! ### Known Issues
+//!
+//! The only known issue so far is the fact that the `use` statement brings the dependencies only
+//! into the outer module scope - not the global crate scope. For example, the following code would
+//! need to be adjusted:
+//!
+//! ```rust
+//! use polkadot_sdk::*;
+//!
+//! mod foo {
+//!    // Sadly, this does not compile:
+//!    frame_support::parameter_types! { }
+//!
+//!    // Instead, we need to do this (or add an equivalent `use` statement):
+//!    polkadot_sdk::frame_support::parameter_types! { }
+//! }
+//! ```
+//!
+//! Apart from this, no issues are known. There could be some bugs with how macros locate their own
+//! re-exports. Please report any issues that arise from using this crate.
+//!
+//! ## Dependencies
+//!
+//! The umbrella crate re-exports all published crates, with a few exceptions:
+//! - Runtime crates like `rococo-runtime` etc. are not exported. This otherwise leads to very weird
+//!   compile errors and should not be needed anyway.
+//! - Example and fuzzing crates are not exported. This is currently detected by checking the name
+//!   of the crate for these magic words. In the future, it will utilize custom metadata, as it is
+//!   done in the `rococo-runtime` crate.
+//! - The umbrella crate itself.
Should be obvious :)
diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml
index 7b5679e1084efbfa6a0f419debcaa217f8c65be2..3aeec8d5961e35133233c35c38f57c2145c7f62c 100644
--- a/polkadot/Cargo.toml
+++ b/polkadot/Cargo.toml
@@ -43,7 +43,7 @@ tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_suppo
 
 [dev-dependencies]
 assert_cmd = "2.0.4"
-nix = { version = "0.27.1", features = ["signal"] }
+nix = { version = "0.28.0", features = ["signal"] }
 tempfile = "3.2.0"
 tokio = "1.37"
 substrate-rpc-client = { path = "../substrate/utils/frame/rpc/client" }
diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml
index 8dfa0b87328b1af0bc4cbc1bcd677919acaaf892..9794f8286ac324b3f0fde27cb13db9d8d1cb8941 100644
--- a/polkadot/core-primitives/Cargo.toml
+++ b/polkadot/core-primitives/Cargo.toml
@@ -14,7 +14,7 @@ sp-core = { path = "../../substrate/primitives/core", default-features = false }
 sp-std = { path = "../../substrate/primitives/std", default-features = false }
 sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }
 
 [features]
 default = ["std"]
diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml
index db5967e20f5e4fdbcd54c42a92f0bcf1d1ef3bab..bf152e03be711b6ec3e1afc3d049cf5e1f727401 100644
--- a/polkadot/erasure-coding/Cargo.toml
+++ b/polkadot/erasure-coding/Cargo.toml
@@ -13,12 +13,13 @@ workspace = true
 polkadot-primitives = { path = "../primitives" }
 polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" }
 novelpoly = { package = "reed-solomon-novelpoly", version = "2.0.0" }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "std"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "std"] }
 sp-core = { path = "../../substrate/primitives/core" }
 sp-trie = { path = "../../substrate/primitives/trie" }
 thiserror = { workspace = true }
 
 [dev-dependencies]
+quickcheck = { version = "1.0.3", default-features = false }
 criterion = { version = "0.5.1", default-features = false, features = ["cargo_bench_support"] }
 
 [[bench]]
diff --git a/polkadot/erasure-coding/benches/README.md b/polkadot/erasure-coding/benches/README.md
index 94fca5400c610636831c08b5282d6d47f4199878..20f79827d280b23782368c4f56f2a85f5d4eed41 100644
--- a/polkadot/erasure-coding/benches/README.md
+++ b/polkadot/erasure-coding/benches/README.md
@@ -7,7 +7,8 @@ cargo bench
 ## `scaling_with_validators`
 
 This benchmark evaluates the performance of constructing the chunks and the erasure root from PoV and
-reconstructing the PoV from chunks. You can see the results of running this bench on 5950x below.
+reconstructing the PoV from chunks (either from systematic chunks or regular chunks).
+You can see the results of running this bench on a 5950X below (only including recovery from regular chunks).
 
 Interestingly, with `10_000` chunks (validators) it's slower than with `50_000` for both construction
 and reconstruction.
 ```
@@ -37,3 +38,6 @@ reconstruct/10000        time:   [496.35 ms 505.17 ms 515.42 ms]
 reconstruct/50000        time:   [276.56 ms 277.53 ms 278.58 ms]
                          thrpt:  [17.948 MiB/s 18.016 MiB/s 18.079 MiB/s]
 ```
+
+When running on an Apple M2 Pro, systematic recovery is generally 40 times faster than
+regular recovery, achieving 1 GiB/s.
diff --git a/polkadot/erasure-coding/benches/scaling_with_validators.rs b/polkadot/erasure-coding/benches/scaling_with_validators.rs
index 759385bbdef4ed85e5db087d2d93c7cb51250db8..3d743faa4169b48f651e99e2e005237d727841d7 100644
--- a/polkadot/erasure-coding/benches/scaling_with_validators.rs
+++ b/polkadot/erasure-coding/benches/scaling_with_validators.rs
@@ -53,12 +53,16 @@ fn construct_and_reconstruct_5mb_pov(c: &mut Criterion) {
 	}
 	group.finish();
 
-	let mut group = c.benchmark_group("reconstruct");
+	let mut group = c.benchmark_group("reconstruct_regular");
 	for n_validators in N_VALIDATORS {
 		let all_chunks = chunks(n_validators, &pov);
 
-		let mut c: Vec<_> = all_chunks.iter().enumerate().map(|(i, c)| (&c[..], i)).collect();
-		let last_chunks = c.split_off((c.len() - 1) * 2 / 3);
+		let chunks: Vec<_> = all_chunks
+			.iter()
+			.enumerate()
+			.take(polkadot_erasure_coding::recovery_threshold(n_validators).unwrap())
+			.map(|(i, c)| (&c[..], i))
+			.collect();
 
 		group.throughput(Throughput::Bytes(pov.len() as u64));
 		group.bench_with_input(
@@ -67,7 +71,31 @@ fn construct_and_reconstruct_5mb_pov(c: &mut Criterion) {
 			|b, &n| {
 				b.iter(|| {
 					let _pov: Vec<u8> =
-						polkadot_erasure_coding::reconstruct(n, last_chunks.clone()).unwrap();
+						polkadot_erasure_coding::reconstruct(n, chunks.clone()).unwrap();
 				});
 			},
 		);
 	}
 	group.finish();
 
+	let mut group = c.benchmark_group("reconstruct_systematic");
+	for n_validators in N_VALIDATORS {
+		let all_chunks = chunks(n_validators, &pov);
+
+		let chunks = all_chunks
+			.into_iter()
+			.take(polkadot_erasure_coding::systematic_recovery_threshold(n_validators).unwrap())
+			.collect::<Vec<_>>();
+
+		group.throughput(Throughput::Bytes(pov.len() as u64));
+		group.bench_with_input(
+			BenchmarkId::from_parameter(n_validators),
+			&n_validators,
+			|b, &n| {
+				b.iter(|| {
+					let _pov: Vec<u8> =
+						polkadot_erasure_coding::reconstruct_from_systematic(n, chunks.clone())
+							.unwrap();
 				});
 			},
 		);
diff --git a/polkadot/erasure-coding/src/lib.rs b/polkadot/erasure-coding/src/lib.rs
index e5155df4beba95aa9e7944e3e9a67d4e55a1db9c..b354c3dac64ceaff4668f1edf66465214d58b54a 100644
--- a/polkadot/erasure-coding/src/lib.rs
+++ b/polkadot/erasure-coding/src/lib.rs
@@ -69,6 +69,9 @@ pub enum Error {
 	/// Bad payload in reconstructed bytes.
 	#[error("Reconstructed payload invalid")]
 	BadPayload,
+	/// Unable to decode reconstructed bytes.
+	#[error("Unable to decode reconstructed payload: {0}")]
+	Decode(#[source] parity_scale_codec::Error),
 	/// Invalid branch proof.
 	#[error("Invalid branch proof")]
 	InvalidBranchProof,
@@ -110,6 +113,14 @@ pub const fn recovery_threshold(n_validators: usize) -> Result<usize, Error> {
 	Ok(needed + 1)
 }
 
+/// Obtain the threshold of systematic chunks that should be enough to recover the data.
+///
+/// If the regular `recovery_threshold` is a power of two, then it returns the same value.
+/// Otherwise, it returns the next lower power of two.
+pub fn systematic_recovery_threshold(n_validators: usize) -> Result<usize, Error> {
+	code_params(n_validators).map(|params| params.k())
+}
+
 fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
 	// we need to be able to reconstruct from 1/3 - eps
@@ -127,6 +138,41 @@ fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
 	})
 }
 
+/// Reconstruct the v1 available data from the set of systematic chunks.
+///
+/// Provide a vector containing chunk data. If too few chunks are provided, recovery is not
+/// possible.
+pub fn reconstruct_from_systematic_v1(
+	n_validators: usize,
+	chunks: Vec<Vec<u8>>,
+) -> Result<AvailableData, Error> {
+	reconstruct_from_systematic(n_validators, chunks)
+}
+
+/// Reconstruct the available data from the set of systematic chunks.
+///
+/// Provide a vector containing the first k chunks in order. If too few chunks are provided,
+/// recovery is not possible.
+pub fn reconstruct_from_systematic<T: Decode>(
+	n_validators: usize,
+	chunks: Vec<Vec<u8>>,
+) -> Result<T, Error> {
+	let code_params = code_params(n_validators)?;
+	let k = code_params.k();
+
+	for chunk_data in chunks.iter().take(k) {
+		if chunk_data.len() % 2 != 0 {
+			return Err(Error::UnevenLength)
+		}
+	}
+
+	let bytes = code_params.make_encoder().reconstruct_from_systematic(
+		chunks.into_iter().take(k).map(|data| WrappedShard::new(data)).collect(),
+	)?;
+
+	Decode::decode(&mut &bytes[..]).map_err(|err| Error::Decode(err))
+}
+
 /// Obtain erasure-coded chunks for v1 `AvailableData`, one for each validator.
 ///
 /// Works only up to 65536 validators, and `n_validators` must be non-zero.
@@ -285,13 +331,41 @@ pub fn branch_hash(root: &H256, branch_nodes: &Proof, index: usize) -> Result<H256, Error> {
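Taken together, a minimal round-trip through the new systematic-recovery API could look like the
sketch below. It mirrors the `round_trip_systematic_works` test further down; the helper name
`systematic_round_trip` is illustrative, and `AvailableData` comes from `polkadot_node_primitives`:

```rust
use polkadot_erasure_coding::{
	obtain_chunks_v1, reconstruct_from_systematic_v1, systematic_recovery_threshold,
};
use polkadot_node_primitives::AvailableData;

fn systematic_round_trip(n_validators: usize, available_data: &AvailableData) {
	// Chunk the data; the first `k` chunks are the systematic ones and contain
	// the original data verbatim.
	let chunks = obtain_chunks_v1(n_validators, available_data).unwrap();
	let k = systematic_recovery_threshold(n_validators).unwrap();

	// Recovering from the first `k` chunks takes the cheap path that skips the
	// Reed-Solomon decoding step.
	let recovered =
		reconstruct_from_systematic_v1(n_validators, chunks.into_iter().take(k).collect())
			.unwrap();
	assert_eq!(&recovered, available_data);
}
```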

Self {
+			// Limit the PoV len to 1 MiB, otherwise the test will take forever
+			let pov_len = (u32::arbitrary(g) % (1024 * 1024)).max(2);
+
+			let pov = (0..pov_len).map(|_| u8::arbitrary(g)).collect();
+
+			let pvd = PersistedValidationData {
+				parent_head: HeadData((0..u16::arbitrary(g)).map(|_| u8::arbitrary(g)).collect()),
+				relay_parent_number: u32::arbitrary(g),
+				relay_parent_storage_root: [u8::arbitrary(g); 32].into(),
+				max_pov_size: u32::arbitrary(g),
+			};
+
+			ArbitraryAvailableData(AvailableData {
+				pov: Arc::new(PoV { block_data: BlockData(pov) }),
+				validation_data: pvd,
+			})
+		}
+	}
+
 	#[test]
 	fn field_order_is_right_size() {
 		assert_eq!(MAX_VALIDATORS, 65536);
@@ -318,6 +392,25 @@ mod tests {
 		assert_eq!(reconstructed, available_data);
 	}
 
+	#[test]
+	fn round_trip_systematic_works() {
+		fn property(available_data: ArbitraryAvailableData, n_validators: u16) {
+			let n_validators = n_validators.max(2);
+			let kpow2 = systematic_recovery_threshold(n_validators as usize).unwrap();
+			let chunks = obtain_chunks(n_validators as usize, &available_data.0).unwrap();
+			assert_eq!(
+				reconstruct_from_systematic_v1(
+					n_validators as usize,
+					chunks.into_iter().take(kpow2).collect()
+				)
+				.unwrap(),
+				available_data.0
+			);
+		}
+
+		QuickCheck::new().quickcheck(property as fn(ArbitraryAvailableData, u16))
+	}
+
 	#[test]
 	fn reconstruct_does_not_panic_on_low_validator_count() {
 		let reconstructed = reconstruct_v1(1, [].iter().cloned());
diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml
index ebc53a9e01bbe5abc6ba18e500ad3bafcf35ead3..0a28c3a830d101a08a49a3c9eda3dc0f60c9665d 100644
--- a/polkadot/node/collation-generation/Cargo.toml
+++ b/polkadot/node/collation-generation/Cargo.toml
@@ -20,7 +20,7 @@ polkadot-primitives = { path = "../../primitives" }
 sp-core = { path = "../../../substrate/primitives/core" }
 sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" }
 thiserror = { workspace = true }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] }
 
 [dev-dependencies]
 polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml
index 5139d6c6a3f5caa789a5eaed714408d06a3b2b31..5bf80d59ede93b9fb3cab3ffdf409f895d24dbe4 100644
--- a/polkadot/node/core/approval-voting/Cargo.toml
+++ b/polkadot/node/core/approval-voting/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
 [dependencies]
 futures = "0.3.30"
 futures-timer = "3.0.2"
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] }
 gum = { package = "tracing-gum", path = "../../gum" }
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
 schnellru = "0.2.1"
diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs
index 9a5f0d29dbd3180676a01eba0b382dbb26ef11ea..687063dd0eb302c4b61b224ee74bb9c4a56fbe5b 100644
--- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs
+++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs
@@ -61,7 +61,7 @@ fn main() -> Result<(), String> {
 			print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n));
 			std::io::stdout().flush().unwrap();
 			let (mut env, state) = prepare_test(config.clone(), options.clone(), false);
-			env.runtime().block_on(bench_approvals("approvals_throughput", &mut env, state))
+			env.runtime().block_on(bench_approvals(&mut env, state))
 		})
 		.collect();
 	println!("\rDone!{}", " ".repeat(BENCH_COUNT));
@@ -81,8 +81,8 @@ fn main() -> Result<(), String> {
 		("Sent to peers", 63547.0330, 0.001),
 	]));
 	messages.extend(average_usage.check_cpu_usage(&[
-		("approval-distribution", 7.0317, 0.1),
-		("approval-voting", 9.5751, 0.1),
+		("approval-distribution", 7.4075, 0.1),
+		("approval-voting", 9.9873, 0.1),
 	]));
 
 	if messages.is_empty() {
diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs
index f4be42a4845080ce90fb87ddc838e2c1ab792ed1..13b0b1bae1bc3281b89b6405ce166ab0cecbd512 100644
--- a/polkadot/node/core/approval-voting/src/import.rs
+++ b/polkadot/node/core/approval-voting/src/import.rs
@@ -607,7 +607,7 @@ pub(crate) mod tests {
 	use super::*;
 	use crate::{
 		approval_db::common::{load_block_entry, DbBackend},
-		RuntimeInfo, RuntimeInfoConfig,
+		RuntimeInfo, RuntimeInfoConfig, MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
 	};
 	use ::test_helpers::{dummy_candidate_receipt, dummy_hash};
 	use assert_matches::assert_matches;
@@ -622,6 +622,7 @@ pub(crate) mod tests {
 		node_features::FeatureIndex, ExecutorParams, Id as ParaId, IndexedVec, NodeFeatures,
 		SessionInfo, ValidatorId, ValidatorIndex,
 	};
+	use schnellru::{ByLength, LruMap};
 	pub(crate) use sp_consensus_babe::{
 		digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest},
 		AllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch,
@@ -658,6 +659,9 @@ pub(crate) mod tests {
 			clock: Box::new(MockClock::default()),
 			assignment_criteria: Box::new(MockAssignmentCriteria::default()),
 			spans: HashMap::new(),
+			per_block_assignments_gathering_times: LruMap::new(ByLength::new(
+				MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
+			)),
 		}
 	}
 
diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index b5ed92fa39c873c0a1e5f40c52705a5803971b60..eece6b15805c233c8a7e674bed79fe49a682de96 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -63,6 +63,12 @@ use sc_keystore::LocalKeystore;
 use sp_application_crypto::Pair;
 use sp_consensus::SyncOracle;
 use sp_consensus_slots::Slot;
+use std::time::Instant;
+
+// The max number of blocks for which we keep track of assignment gathering times. Normally,
+// this would never be reached because we prune the data on finalization, but we need
+// to also ensure the data is not growing unnecessarily large.
+const MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS: u32 = 100;
 
 use futures::{
 	channel::oneshot,
@@ -182,6 +188,14 @@ struct MetricsInner {
 	time_recover_and_approve: prometheus::Histogram,
 	candidate_signatures_requests_total: prometheus::Counter,
 	unapproved_candidates_in_unfinalized_chain: prometheus::Gauge,
+	// The time it takes in each stage to gather enough assignments.
+	// We define a `stage` as the entire process of gathering enough assignments to
+	// be able to approve a candidate:
+	// E.g:
+	// - Stage 0: We wait for the needed_approvals assignments to be gathered.
+	// - Stage 1: We wait for enough tranches to cover all no-shows in stage 0.
+	// - Stage 2: We wait for enough tranches to cover all no-shows of stage 1.
+	assignments_gathering_time_by_stage: prometheus::HistogramVec,
 }
 
 /// Approval Voting metrics.
@@ -302,6 +316,20 @@ impl Metrics {
 			metrics.unapproved_candidates_in_unfinalized_chain.set(count as u64);
 		}
 	}
+
+	pub fn observe_assignment_gathering_time(&self, stage: usize, elapsed_as_millis: usize) {
+		if let Some(metrics) = &self.0 {
+			let stage_string = stage.to_string();
+			// We don't want too many metrics entries with this label, to avoid putting
+			// unnecessary pressure on the metrics infrastructure, so we cap the stage at 10. That
+			// is equivalent to already having a finalization lag of 10 * no_show_slots, so it
+			// should be more than enough.
+			metrics
+				.assignments_gathering_time_by_stage
+				.with_label_values(&[if stage < 10 { stage_string.as_str() } else { "inf" }])
+				.observe(elapsed_as_millis as f64);
+		}
+	}
 }
 
 impl metrics::Metrics for Metrics {
@@ -431,6 +459,17 @@ impl metrics::Metrics for Metrics {
 				)?,
 				registry,
 			)?,
+			assignments_gathering_time_by_stage: prometheus::register(
+				prometheus::HistogramVec::new(
+					prometheus::HistogramOpts::new(
+						"polkadot_parachain_assignments_gather_time_by_stage_ms",
+						"The time in ms it takes for each stage to gather enough assignments needed for approval",
+					)
+					.buckets(vec![0.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, 16000.0, 32000.0]),
+					&["stage"],
+				)?,
+				registry,
+			)?,
 		};
 
 		Ok(Metrics(Some(metrics)))
@@ -788,6 +827,28 @@ struct State {
 	clock: Box,
 	assignment_criteria: Box,
 	spans: HashMap,
+	// Per-block, per-candidate records of how long we take to gather enough
+	// assignments; this is relevant because it gives us a good idea of how many
+	// tranches we trigger and why.
+	per_block_assignments_gathering_times:
+		LruMap<BlockNumber, HashMap<(Hash, CandidateHash), AssignmentGatheringRecord>>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+struct AssignmentGatheringRecord {
+	// The stage we are in.
+	// Candidate assignment gathering goes in stages: first we wait for `needed_approvals`
+	// (stage 0), then if we have no-shows, we move into stage 1 and wait for enough tranches
+	// to cover all no-shows.
+	stage: usize,
+	// The time we started the stage.
+	stage_start: Option<Instant>,
+}
+
+impl Default for AssignmentGatheringRecord {
+	fn default() -> Self {
+		AssignmentGatheringRecord { stage: 0, stage_start: Some(Instant::now()) }
+	}
+}
 
 #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
@@ -893,6 +954,96 @@ impl State {
 			},
 		}
 	}
+
+	fn mark_begining_of_gathering_assignments(
+		&mut self,
+		block_number: BlockNumber,
+		block_hash: Hash,
+		candidate: CandidateHash,
+	) {
+		if let Some(record) = self
+			.per_block_assignments_gathering_times
+			.get_or_insert(block_number, HashMap::new)
+			.and_then(|records| Some(records.entry((block_hash, candidate)).or_default()))
+		{
+			if record.stage_start.is_none() {
+				record.stage += 1;
+				gum::debug!(
+					target: LOG_TARGET,
+					stage = ?record.stage,
+					?block_hash,
+					?candidate,
+					"Started a new assignment gathering stage",
+				);
+				record.stage_start = Some(Instant::now());
+			}
+		}
+	}
+
+	fn mark_gathered_enough_assignments(
+		&mut self,
+		block_number: BlockNumber,
+		block_hash: Hash,
+		candidate: CandidateHash,
+	) -> AssignmentGatheringRecord {
+		let record = self
+			.per_block_assignments_gathering_times
+			.get(&block_number)
+			.and_then(|entry| entry.get_mut(&(block_hash, candidate)));
+		let stage = record.as_ref().map(|record| record.stage).unwrap_or_default();
+		AssignmentGatheringRecord {
+			stage,
+			stage_start: record.and_then(|record| record.stage_start.take()),
+		}
+	}
+
+	fn cleanup_assignments_gathering_timestamp(&mut self, remove_lower_than: BlockNumber) {
+		while let Some((block_number, _)) = self.per_block_assignments_gathering_times.peek_oldest()
+		{
+			if *block_number < remove_lower_than {
+				self.per_block_assignments_gathering_times.pop_oldest();
+			} else {
+				break
+			}
+		}
+	}
+
+	fn observe_assignment_gathering_status(
+		&mut self,
+		metrics: &Metrics,
+		required_tranches: &RequiredTranches,
+		block_hash: Hash,
+		block_number: BlockNumber,
+		candidate_hash: CandidateHash,
+	) {
+		match required_tranches {
+			RequiredTranches::All | RequiredTranches::Pending { .. } => {
+				self.mark_begining_of_gathering_assignments(
+					block_number,
+					block_hash,
+					candidate_hash,
+				);
+			},
+			RequiredTranches::Exact { .. } => {
+				let time_to_gather =
+					self.mark_gathered_enough_assignments(block_number, block_hash, candidate_hash);
+				if let Some(gathering_started) = time_to_gather.stage_start {
+					if gathering_started.elapsed().as_millis() > 6000 {
+						gum::trace!(
+							target: LOG_TARGET,
+							?block_hash,
+							?candidate_hash,
+							"Long assignment gathering time",
+						);
+					}
+					metrics.observe_assignment_gathering_time(
+						time_to_gather.stage,
+						gathering_started.elapsed().as_millis() as usize,
+					)
+				}
+			},
+		}
+	}
 }
 
 #[derive(Debug, Clone)]
@@ -914,6 +1065,7 @@ enum Action {
 		candidate: CandidateReceipt,
 		backing_group: GroupIndex,
 		distribute_assignment: bool,
+		core_index: Option<CoreIndex>,
 	},
 	NoteApprovedInChainSelection(Hash),
 	IssueApproval(CandidateHash, ApprovalVoteRequest),
@@ -941,6 +1093,9 @@ where
 		clock: subsystem.clock,
 		assignment_criteria,
 		spans: HashMap::new(),
+		per_block_assignments_gathering_times: LruMap::new(ByLength::new(
+			MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
+		)),
 	};
 
 	// `None` on start-up. Gets initialized/updated on leaf update
@@ -972,7 +1127,7 @@ where
 				subsystem.metrics.on_wakeup();
 				process_wakeup(
 					&mut ctx,
-					&state,
+					&mut state,
 					&mut overlayed_db,
 					&mut session_info_provider,
 					woken_block,
@@ -1174,6 +1329,7 @@ async fn handle_actions(
 				candidate,
 				backing_group,
 				distribute_assignment,
+				core_index,
 			} => {
 				// Don't launch approval work if the node is syncing.
if let Mode::Syncing(_) = *mode { @@ -1230,6 +1386,7 @@ async fn handle_actions( block_hash, backing_group, executor_params, + core_index, &launch_approval_span, ) .await @@ -1467,6 +1624,7 @@ async fn distribution_messages_for_activation( candidate: candidate_entry.candidate_receipt().clone(), backing_group: approval_entry.backing_group(), distribute_assignment: false, + core_index: Some(*core_index), }); } }, @@ -1628,6 +1786,7 @@ async fn handle_from_overseer( // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans // accordingly. wakeups.prune_finalized_wakeups(block_number, &mut state.spans); + state.cleanup_assignments_gathering_timestamp(block_number); // // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans // accordingly. let hash_set = @@ -2474,7 +2633,7 @@ where async fn check_and_import_approval( sender: &mut Sender, - state: &State, + state: &mut State, db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, metrics: &Metrics, @@ -2706,7 +2865,7 @@ impl ApprovalStateTransition { // as necessary and schedules any further wakeups. async fn advance_approval_state( sender: &mut Sender, - state: &State, + state: &mut State, db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, metrics: &Metrics, @@ -2757,6 +2916,13 @@ where approval_entry, status.required_tranches.clone(), ); + state.observe_assignment_gathering_status( + &metrics, + &status.required_tranches, + block_hash, + block_entry.block_number(), + candidate_hash, + ); // Check whether this is approved, while allowing a maximum // assignment tick of `now - APPROVAL_DELAY` - that is, that @@ -2937,7 +3103,7 @@ fn should_trigger_assignment( #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] async fn process_wakeup( ctx: &mut Context, - state: &State, + state: &mut State, db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, relay_block: Hash, @@ -3050,6 +3216,11 @@ async fn process_wakeup( "Launching approval work.", ); + let candidate_core_index = block_entry + .candidates() + .iter() + .find_map(|(core_index, h)| (h == &candidate_hash).then_some(*core_index)); + if let Some(claimed_core_indices) = get_assignment_core_indices(&indirect_cert.cert.kind, &candidate_hash, &block_entry) { @@ -3062,7 +3233,6 @@ async fn process_wakeup( true }; db.write_block_entry(block_entry.clone()); - actions.push(Action::LaunchApproval { claimed_candidate_indices, candidate_hash, @@ -3074,10 +3244,12 @@ async fn process_wakeup( candidate: candidate_receipt, backing_group, distribute_assignment, + core_index: candidate_core_index, }); }, Err(err) => { - // Never happens, it should only happen if no cores are claimed, which is a bug. + // Never happens, it should only happen if no cores are claimed, which is a + // bug. 
gum::warn!(
 				target: LOG_TARGET,
 				block_hash = ?relay_block,
@@ -3133,6 +3305,7 @@ async fn launch_approval(
 	block_hash: Hash,
 	backing_group: GroupIndex,
 	executor_params: ExecutorParams,
+	core_index: Option<CoreIndex>,
 	span: &jaeger::Span,
 ) -> SubsystemResult<RemoteHandle<ApprovalState>> {
 	let (a_tx, a_rx) = oneshot::channel();
@@ -3179,6 +3352,7 @@ async fn launch_approval(
 			candidate.clone(),
 			session_index,
 			Some(backing_group),
+			core_index,
 			a_tx,
 		))
 		.await;
diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs
index 6eeb99cb99ffa017492dba93e72556eab9104c8e..59a4618100515a179a3cdc8b72aee41024ff0bbb 100644
--- a/polkadot/node/core/approval-voting/src/persisted_entries.rs
+++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs
@@ -559,7 +559,7 @@ impl BlockEntry {
 		self.distributed_assignments.resize(new_len, false);
 		self.distributed_assignments |= bitfield;
 
-		// If the an operation did not change our current bitfied, we return true.
+		// If the operation did not change our current bitfield, we return true.
 		let distributed = total_one_bits == self.distributed_assignments.count_ones();
 
 		distributed
diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs
index 312d805bbefb7b2b2605093fa2cadde0a1f10511..43af8d476a6ba7b786be89d8d4e425d9ff44b106 100644
--- a/polkadot/node/core/approval-voting/src/tests.rs
+++ b/polkadot/node/core/approval-voting/src/tests.rs
@@ -17,6 +17,10 @@
 use self::test_helpers::mock::new_leaf;
 use super::*;
 use crate::backend::V1ReadBackend;
+use overseer::prometheus::{
+	prometheus::{IntCounter, IntCounterVec},
+	Histogram, HistogramOpts, HistogramVec, Opts,
+};
 use polkadot_node_primitives::{
 	approval::{
 		v1::{
@@ -40,7 +44,7 @@ use polkadot_primitives::{
 	ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, GroupIndex, Header,
 	Id as ParaId, IndexedVec, NodeFeatures, ValidationCode, ValidatorSignature,
 };
-use std::time::Duration;
+use std::{cmp::max, time::Duration};
 
 use assert_matches::assert_matches;
 use async_trait::async_trait;
@@ -3330,7 +3334,7 @@ async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) {
 	assert_matches!(
 		virtual_overseer.recv().await,
 		AllMessages::AvailabilityRecovery(
-			AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
+			AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx)
 		) => {
 			tx.send(Ok(available_data)).unwrap();
 		},
@@ -5049,3 +5053,233 @@ fn subsystem_sends_pending_approvals_on_approval_restart() {
 		virtual_overseer
 	});
 }
+
+// Test that we correctly update the timer when we mark the beginning of gathering assignments.
+#[test]
+fn test_gathering_assignments_statements() {
+	let mut state = State {
+		keystore: Arc::new(LocalKeystore::in_memory()),
+		slot_duration_millis: 6_000,
+		clock: Box::new(MockClock::default()),
+		assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))),
+		spans: HashMap::new(),
+		per_block_assignments_gathering_times: LruMap::new(ByLength::new(
+			MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
+		)),
+	};
+
+	for i in 0..200i32 {
+		state.mark_begining_of_gathering_assignments(
+			i as u32,
+			Hash::repeat_byte(i as u8),
+			CandidateHash(Hash::repeat_byte(i as u8)),
+		);
+		assert!(
+			state.per_block_assignments_gathering_times.len() <=
+				MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS as usize
+		);
+
+		assert_eq!(
+			state
+				.per_block_assignments_gathering_times
+				.iter()
+				.map(|(block_number, _)| block_number)
+				.min(),
+			Some(max(0, i - MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS as i32 + 1) as u32).as_ref()
+		)
+	}
+	assert_eq!(
+		state.per_block_assignments_gathering_times.len(),
+		MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS as usize
+	);
+
+	let nothing_changes = state
+		.per_block_assignments_gathering_times
+		.iter()
+		.map(|(block_number, _)| *block_number)
+		.sorted()
+		.collect::<Vec<_>>();
+
+	for i in 150..200i32 {
+		state.mark_begining_of_gathering_assignments(
+			i as u32,
+			Hash::repeat_byte(i as u8),
+			CandidateHash(Hash::repeat_byte(i as u8)),
+		);
+		assert_eq!(
+			nothing_changes,
+			state
+				.per_block_assignments_gathering_times
+				.iter()
+				.map(|(block_number, _)| *block_number)
+				.sorted()
+				.collect::<Vec<_>>()
+		);
+	}
+
+	for i in 110..120 {
+		let block_hash = Hash::repeat_byte(i as u8);
+		let candidate_hash = CandidateHash(Hash::repeat_byte(i as u8));
+
+		state.mark_gathered_enough_assignments(i as u32, block_hash, candidate_hash);
+
+		assert!(state
+			.per_block_assignments_gathering_times
+			.get(&i)
+			.unwrap()
+			.get(&(block_hash, candidate_hash))
+			.unwrap()
+			.stage_start
+			.is_none());
+		state.mark_begining_of_gathering_assignments(i as u32, block_hash, candidate_hash);
+		let record = state
+			.per_block_assignments_gathering_times
+			.get(&i)
+			.unwrap()
+			.get(&(block_hash, candidate_hash))
+			.unwrap();
+
+		assert!(record.stage_start.is_some());
+		assert_eq!(record.stage, 1);
+	}
+
+	state.cleanup_assignments_gathering_timestamp(200);
+	assert_eq!(state.per_block_assignments_gathering_times.len(), 0);
+}
+
+// Test that we note the time we took to transition RequiredTranches from Pending to Exact and
+// that we increase the stage when we transition from Exact to Pending.
+#[test] +fn test_observe_assignment_gathering_status() { + let mut state = State { + keystore: Arc::new(LocalKeystore::in_memory()), + slot_duration_millis: 6_000, + clock: Box::new(MockClock::default()), + assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|_| Ok(0))), + spans: HashMap::new(), + per_block_assignments_gathering_times: LruMap::new(ByLength::new( + MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS, + )), + }; + + let metrics_inner = MetricsInner { + imported_candidates_total: IntCounter::new("dummy", "dummy").unwrap(), + assignments_produced: Histogram::with_opts(HistogramOpts::new("dummy", "dummy")).unwrap(), + approvals_produced_total: IntCounterVec::new(Opts::new("dummy", "dummy"), &["dummy"]) + .unwrap(), + no_shows_total: IntCounter::new("dummy", "dummy").unwrap(), + observed_no_shows: IntCounter::new("dummy", "dummy").unwrap(), + approved_by_one_third: IntCounter::new("dummy", "dummy").unwrap(), + wakeups_triggered_total: IntCounter::new("dummy", "dummy").unwrap(), + coalesced_approvals_buckets: Histogram::with_opts(HistogramOpts::new("dummy", "dummy")) + .unwrap(), + coalesced_approvals_delay: Histogram::with_opts(HistogramOpts::new("dummy", "dummy")) + .unwrap(), + candidate_approval_time_ticks: Histogram::with_opts(HistogramOpts::new("dummy", "dummy")) + .unwrap(), + block_approval_time_ticks: Histogram::with_opts(HistogramOpts::new("dummy", "dummy")) + .unwrap(), + time_db_transaction: Histogram::with_opts(HistogramOpts::new("dummy", "dummy")).unwrap(), + time_recover_and_approve: Histogram::with_opts(HistogramOpts::new("dummy", "dummy")) + .unwrap(), + candidate_signatures_requests_total: IntCounter::new("dummy", "dummy").unwrap(), + unapproved_candidates_in_unfinalized_chain: prometheus::Gauge::::new( + "dummy", "dummy", + ) + .unwrap(), + assignments_gathering_time_by_stage: HistogramVec::new( + HistogramOpts::new("test", "test"), + &["stage"], + ) + .unwrap(), + }; + + let metrics = Metrics(Some(metrics_inner)); + let block_hash = Hash::repeat_byte(1); + let candidate_hash = CandidateHash(Hash::repeat_byte(1)); + let block_number = 1; + + // Transition from Pending to Exact and check stage 0 time is recorded. + state.observe_assignment_gathering_status( + &metrics, + &RequiredTranches::Pending { + considered: 0, + next_no_show: None, + maximum_broadcast: 0, + clock_drift: 0, + }, + block_hash, + block_number, + candidate_hash, + ); + + state.observe_assignment_gathering_status( + &metrics, + &RequiredTranches::Exact { + needed: 2, + tolerated_missing: 2, + next_no_show: None, + last_assignment_tick: None, + }, + block_hash, + block_number, + candidate_hash, + ); + + let value = metrics + .0 + .as_ref() + .unwrap() + .assignments_gathering_time_by_stage + .get_metric_with_label_values(&["0"]) + .unwrap(); + + assert_eq!(value.get_sample_count(), 1); + + // Transition from Exact to Pending to Exact and check stage 1 time is recorded. 
+	state.observe_assignment_gathering_status(
+		&metrics,
+		&RequiredTranches::Pending {
+			considered: 0,
+			next_no_show: None,
+			maximum_broadcast: 0,
+			clock_drift: 0,
+		},
+		block_hash,
+		block_number,
+		candidate_hash,
+	);
+
+	state.observe_assignment_gathering_status(
+		&metrics,
+		&RequiredTranches::Exact {
+			needed: 2,
+			tolerated_missing: 2,
+			next_no_show: None,
+			last_assignment_tick: None,
+		},
+		block_hash,
+		block_number,
+		candidate_hash,
+	);
+
+	let value = metrics
+		.0
+		.as_ref()
+		.unwrap()
+		.assignments_gathering_time_by_stage
+		.get_metric_with_label_values(&["0"])
+		.unwrap();
+
+	assert_eq!(value.get_sample_count(), 1);
+
+	let value = metrics
+		.0
+		.as_ref()
+		.unwrap()
+		.assignments_gathering_time_by_stage
+		.get_metric_with_label_values(&["1"])
+		.unwrap();
+
+	assert_eq!(value.get_sample_count(), 1);
+}
diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml
index bc9b979228a1db8c4adfc01bd2d60470721bfcac..c5b3c382011b12ac80e130500d7c64b481989331 100644
--- a/polkadot/node/core/av-store/Cargo.toml
+++ b/polkadot/node/core/av-store/Cargo.toml
@@ -17,7 +17,7 @@ thiserror = { workspace = true }
 gum = { package = "tracing-gum", path = "../../gum" }
 bitvec = "1.0.0"
 
-parity-scale-codec = { version = "3.6.1", features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", features = ["derive"] }
 erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
 polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs
index 68db4686a9740bb052f7d05031d148cd5b05e0a2..59a35a6a45a91446793099b72ff36d9a2a573f52 100644
--- a/polkadot/node/core/av-store/src/lib.rs
+++ b/polkadot/node/core/av-store/src/lib.rs
@@ -48,8 +48,10 @@ use polkadot_node_subsystem::{
 };
 use polkadot_node_subsystem_util as util;
 use polkadot_primitives::{
-	BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, Hash, Header, ValidatorIndex,
+	BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, ChunkIndex, CoreIndex, Hash,
+	Header, NodeFeatures, ValidatorIndex,
 };
+use util::availability_chunks::availability_chunk_indices;
 
 mod metrics;
 pub use self::metrics::*;
@@ -208,9 +210,9 @@ fn load_chunk(
 	db: &Arc<dyn Database>,
 	config: &Config,
 	candidate_hash: &CandidateHash,
-	chunk_index: ValidatorIndex,
+	validator_index: ValidatorIndex,
 ) -> Result<Option<ErasureChunk>, Error> {
-	let key = (CHUNK_PREFIX, candidate_hash, chunk_index).encode();
+	let key = (CHUNK_PREFIX, candidate_hash, validator_index).encode();
 
 	query_inner(db, config.col_data, &key)
 }
@@ -219,10 +221,10 @@ fn write_chunk(
 	tx: &mut DBTransaction,
 	config: &Config,
 	candidate_hash: &CandidateHash,
-	chunk_index: ValidatorIndex,
+	validator_index: ValidatorIndex,
 	erasure_chunk: &ErasureChunk,
 ) {
-	let key = (CHUNK_PREFIX, candidate_hash, chunk_index).encode();
+	let key = (CHUNK_PREFIX, candidate_hash, validator_index).encode();
 
 	tx.put_vec(config.col_data, &key, erasure_chunk.encode());
 }
@@ -231,9 +233,9 @@ fn delete_chunk(
 	tx: &mut DBTransaction,
 	config: &Config,
 	candidate_hash: &CandidateHash,
-	chunk_index: ValidatorIndex,
+	validator_index: ValidatorIndex,
 ) {
-	let key = (CHUNK_PREFIX, candidate_hash, chunk_index).encode();
+	let key = (CHUNK_PREFIX, candidate_hash, validator_index).encode();
 
 	tx.delete(config.col_data, &key[..]);
 }
@@ -1139,20 +1141,23 @@ fn process_message(
 				Some(meta) => {
 					let mut chunks = Vec::new();
 
-					for (index, _) in meta.chunks_stored.iter().enumerate().filter(|(_, b)| **b) {
+					for (validator_index, _) in
+						meta.chunks_stored.iter().enumerate().filter(|(_, b)| **b)
+					{
+						let validator_index = ValidatorIndex(validator_index as _);
 						let _timer = subsystem.metrics.time_get_chunk();
 						match load_chunk(
 							&subsystem.db,
 							&subsystem.config,
 							&candidate,
-							ValidatorIndex(index as _),
+							validator_index,
 						)? {
-							Some(c) => chunks.push(c),
+							Some(c) => chunks.push((validator_index, c)),
 							None => {
 								gum::warn!(
 									target: LOG_TARGET,
 									?candidate,
-									index,
+									?validator_index,
 									"No chunk found for set bit in meta"
 								);
 							},
@@ -1169,11 +1174,17 @@ fn process_message(
 			});
 			let _ = tx.send(a);
 		},
-		AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk, tx } => {
+		AvailabilityStoreMessage::StoreChunk { candidate_hash, validator_index, chunk, tx } => {
 			subsystem.metrics.on_chunks_received(1);
 			let _timer = subsystem.metrics.time_store_chunk();
 
-			match store_chunk(&subsystem.db, &subsystem.config, candidate_hash, chunk) {
+			match store_chunk(
+				&subsystem.db,
+				&subsystem.config,
+				candidate_hash,
+				validator_index,
+				chunk,
+			) {
 				Ok(true) => {
 					let _ = tx.send(Ok(()));
 				},
@@ -1191,6 +1202,8 @@ fn process_message(
 			n_validators,
 			available_data,
 			expected_erasure_root,
+			core_index,
+			node_features,
 			tx,
 		} => {
 			subsystem.metrics.on_chunks_received(n_validators as _);
@@ -1203,6 +1216,8 @@ fn process_message(
 				n_validators as _,
 				available_data,
 				expected_erasure_root,
+				core_index,
+				node_features,
 			);
 
 			match res {
@@ -1233,6 +1248,7 @@ fn store_chunk(
 	db: &Arc<dyn Database>,
 	config: &Config,
 	candidate_hash: CandidateHash,
+	validator_index: ValidatorIndex,
 	chunk: ErasureChunk,
 ) -> Result<bool, Error> {
 	let mut tx = DBTransaction::new();
@@ -1242,12 +1258,12 @@ fn store_chunk(
 		None => return Ok(false), // we weren't informed of this candidate by import events.
 	};
 
-	match meta.chunks_stored.get(chunk.index.0 as usize).map(|b| *b) {
+	match meta.chunks_stored.get(validator_index.0 as usize).map(|b| *b) {
 		Some(true) => return Ok(true), // already stored.
 		Some(false) => {
-			meta.chunks_stored.set(chunk.index.0 as usize, true);
+			meta.chunks_stored.set(validator_index.0 as usize, true);
 
-			write_chunk(&mut tx, config, &candidate_hash, chunk.index, &chunk);
+			write_chunk(&mut tx, config, &candidate_hash, validator_index, &chunk);
 			write_meta(&mut tx, config, &candidate_hash, &meta);
 		},
 		None => return Ok(false), // out of bounds.
@@ -1257,6 +1273,7 @@ fn store_chunk(
 		target: LOG_TARGET,
 		?candidate_hash,
 		chunk_index = %chunk.index.0,
+		validator_index = %validator_index.0,
 		"Stored chunk index for candidate.",
 	);
 
 	Ok(true)
 }
 
-// Ok(true) on success, Ok(false) on failure, and Err on internal error.
fn store_available_data( subsystem: &AvailabilityStoreSubsystem, candidate_hash: CandidateHash, n_validators: usize, available_data: AvailableData, expected_erasure_root: Hash, + core_index: CoreIndex, + node_features: NodeFeatures, ) -> Result<(), Error> { let mut tx = DBTransaction::new(); @@ -1312,16 +1330,26 @@ fn store_available_data( drop(erasure_span); - let erasure_chunks = chunks.iter().zip(branches.map(|(proof, _)| proof)).enumerate().map( - |(index, (chunk, proof))| ErasureChunk { + let erasure_chunks: Vec<_> = chunks + .iter() + .zip(branches.map(|(proof, _)| proof)) + .enumerate() + .map(|(index, (chunk, proof))| ErasureChunk { chunk: chunk.clone(), proof, - index: ValidatorIndex(index as u32), - }, - ); + index: ChunkIndex(index as u32), + }) + .collect(); - for chunk in erasure_chunks { - write_chunk(&mut tx, &subsystem.config, &candidate_hash, chunk.index, &chunk); + let chunk_indices = availability_chunk_indices(Some(&node_features), n_validators, core_index)?; + for (validator_index, chunk_index) in chunk_indices.into_iter().enumerate() { + write_chunk( + &mut tx, + &subsystem.config, + &candidate_hash, + ValidatorIndex(validator_index as u32), + &erasure_chunks[chunk_index.0 as usize], + ); } meta.data_available = true; diff --git a/polkadot/node/core/av-store/src/tests.rs b/polkadot/node/core/av-store/src/tests.rs index 652bf2a3fda4822fdae0020bcd48993ed8e771ed..e87f7cc3b8d6cb48c0a12e5f48d810db7cae5024 100644 --- a/polkadot/node/core/av-store/src/tests.rs +++ b/polkadot/node/core/av-store/src/tests.rs @@ -18,6 +18,7 @@ use super::*; use assert_matches::assert_matches; use futures::{channel::oneshot, executor, future, Future}; +use util::availability_chunks::availability_chunk_index; use self::test_helpers::mock::new_leaf; use ::test_helpers::TestCandidateBuilder; @@ -31,7 +32,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{database::Database, TimeoutExt}; use polkadot_primitives::{ - CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, HeadData, Header, + node_features, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, HeadData, Header, PersistedValidationData, ValidatorId, }; use sp_keyring::Sr25519Keyring; @@ -272,8 +273,7 @@ fn runtime_api_error_does_not_stop_the_subsystem() { // but that's fine, we're still alive let (tx, rx) = oneshot::channel(); let candidate_hash = CandidateHash(Hash::repeat_byte(33)); - let validator_index = ValidatorIndex(5); - let query_chunk = AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx); + let query_chunk = AvailabilityStoreMessage::QueryChunk(candidate_hash, 5.into(), tx); overseer_send(&mut virtual_overseer, query_chunk.into()).await; @@ -288,12 +288,13 @@ fn store_chunk_works() { test_harness(TestState::default(), store.clone(), |mut virtual_overseer| async move { let candidate_hash = CandidateHash(Hash::repeat_byte(33)); - let validator_index = ValidatorIndex(5); + let chunk_index = ChunkIndex(5); + let validator_index = ValidatorIndex(2); let n_validators = 10; let chunk = ErasureChunk { chunk: vec![1, 2, 3], - index: validator_index, + index: chunk_index, proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(), }; @@ -314,8 +315,12 @@ fn store_chunk_works() { let (tx, rx) = oneshot::channel(); - let chunk_msg = - AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk: chunk.clone(), tx }; + let chunk_msg = AvailabilityStoreMessage::StoreChunk { + candidate_hash, + validator_index, + chunk: chunk.clone(), + tx, + }; 
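`store_available_data` above computes the validator-to-chunk permutation once via `availability_chunk_indices` and then writes chunk `chunk_indices[v]` under validator `v`'s key. A sketch of the baseline property the first test below leans on (assuming the helper from `polkadot_node_subsystem_util::availability_chunks` behaves as these tests describe): with no node features enabled, the permutation is the identity on every core.

use polkadot_node_subsystem_util::availability_chunks::availability_chunk_indices;
use polkadot_primitives::{ChunkIndex, CoreIndex, NodeFeatures};

fn identity_mapping_when_feature_disabled() {
    let n_validators = 10;
    for core in 0..4u32 {
        let chunk_indices =
            availability_chunk_indices(Some(&NodeFeatures::EMPTY), n_validators, CoreIndex(core))
                .unwrap();
        for (validator_index, chunk_index) in chunk_indices.into_iter().enumerate() {
            // Validator v stores chunk v, regardless of the core index.
            assert_eq!(chunk_index, ChunkIndex(validator_index as u32));
        }
    }
}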
overseer_send(&mut virtual_overseer, chunk_msg).await; assert_eq!(rx.await.unwrap(), Ok(())); @@ -336,18 +341,23 @@ fn store_chunk_does_nothing_if_no_entry_already() { test_harness(TestState::default(), store.clone(), |mut virtual_overseer| async move { let candidate_hash = CandidateHash(Hash::repeat_byte(33)); - let validator_index = ValidatorIndex(5); + let chunk_index = ChunkIndex(5); + let validator_index = ValidatorIndex(2); let chunk = ErasureChunk { chunk: vec![1, 2, 3], - index: validator_index, + index: chunk_index, proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(), }; let (tx, rx) = oneshot::channel(); - let chunk_msg = - AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk: chunk.clone(), tx }; + let chunk_msg = AvailabilityStoreMessage::StoreChunk { + candidate_hash, + validator_index, + chunk: chunk.clone(), + tx, + }; overseer_send(&mut virtual_overseer, chunk_msg).await; assert_eq!(rx.await.unwrap(), Err(())); @@ -418,6 +428,8 @@ fn store_available_data_erasure_mismatch() { let candidate_hash = CandidateHash(Hash::repeat_byte(1)); let validator_index = ValidatorIndex(5); let n_validators = 10; + let core_index = CoreIndex(8); + let node_features = NodeFeatures::EMPTY; let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; @@ -431,6 +443,8 @@ fn store_available_data_erasure_mismatch() { candidate_hash, n_validators, available_data: available_data.clone(), + core_index, + node_features, tx, // A dummy erasure root should lead to failure. expected_erasure_root: Hash::default(), @@ -450,97 +464,183 @@ fn store_available_data_erasure_mismatch() { } #[test] -fn store_block_works() { - let store = test_store(); - let test_state = TestState::default(); - test_harness(test_state.clone(), store.clone(), |mut virtual_overseer| async move { - let candidate_hash = CandidateHash(Hash::repeat_byte(1)); - let validator_index = ValidatorIndex(5); - let n_validators = 10; - - let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; - - let available_data = AvailableData { - pov: Arc::new(pov), - validation_data: test_state.persisted_validation_data.clone(), - }; - let (tx, rx) = oneshot::channel(); - - let chunks = erasure::obtain_chunks_v1(10, &available_data).unwrap(); - let mut branches = erasure::branches(chunks.as_ref()); - - let block_msg = AvailabilityStoreMessage::StoreAvailableData { - candidate_hash, - n_validators, - available_data: available_data.clone(), - tx, - expected_erasure_root: branches.root(), - }; - - virtual_overseer.send(FromOrchestra::Communication { msg: block_msg }).await; - assert_eq!(rx.await.unwrap(), Ok(())); - - let pov = query_available_data(&mut virtual_overseer, candidate_hash).await.unwrap(); - assert_eq!(pov, available_data); - - let chunk = query_chunk(&mut virtual_overseer, candidate_hash, validator_index) - .await - .unwrap(); - - let branch = branches.nth(5).unwrap(); - let expected_chunk = ErasureChunk { - chunk: branch.1.to_vec(), - index: ValidatorIndex(5), - proof: Proof::try_from(branch.0).unwrap(), - }; - - assert_eq!(chunk, expected_chunk); - virtual_overseer - }); -} - -#[test] -fn store_pov_and_query_chunk_works() { - let store = test_store(); - let test_state = TestState::default(); - - test_harness(test_state.clone(), store.clone(), |mut virtual_overseer| async move { - let candidate_hash = CandidateHash(Hash::repeat_byte(1)); - let n_validators = 10; - - let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; - - let available_data = AvailableData { - pov: Arc::new(pov), - validation_data: 
test_state.persisted_validation_data.clone(), - }; - - let chunks_expected = - erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); - let branches = erasure::branches(chunks_expected.as_ref()); - - let (tx, rx) = oneshot::channel(); - let block_msg = AvailabilityStoreMessage::StoreAvailableData { - candidate_hash, - n_validators, - available_data, - tx, - expected_erasure_root: branches.root(), - }; - - virtual_overseer.send(FromOrchestra::Communication { msg: block_msg }).await; +fn store_pov_and_queries_work() { + // If the AvailabilityChunkMapping feature is not enabled, + // ValidatorIndex->ChunkIndex mapping should be 1:1 for all core indices. + { + let n_cores = 4; + for core_index in 0..n_cores { + let store = test_store(); + let test_state = TestState::default(); + let core_index = CoreIndex(core_index); + + test_harness(test_state.clone(), store.clone(), |mut virtual_overseer| async move { + let node_features = NodeFeatures::EMPTY; + let candidate_hash = CandidateHash(Hash::repeat_byte(1)); + let n_validators = 10; + + let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; + let available_data = AvailableData { + pov: Arc::new(pov), + validation_data: test_state.persisted_validation_data.clone(), + }; + + let chunks = erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); + + let branches = erasure::branches(chunks.as_ref()); + + let (tx, rx) = oneshot::channel(); + let block_msg = AvailabilityStoreMessage::StoreAvailableData { + candidate_hash, + n_validators, + available_data: available_data.clone(), + tx, + core_index, + expected_erasure_root: branches.root(), + node_features: node_features.clone(), + }; + + virtual_overseer.send(FromOrchestra::Communication { msg: block_msg }).await; + assert_eq!(rx.await.unwrap(), Ok(())); + + let pov: AvailableData = + query_available_data(&mut virtual_overseer, candidate_hash).await.unwrap(); + assert_eq!(pov, available_data); + + let query_all_chunks_res = query_all_chunks( + &mut virtual_overseer, + availability_chunk_indices( + Some(&node_features), + n_validators as usize, + core_index, + ) + .unwrap(), + candidate_hash, + ) + .await; + assert_eq!(query_all_chunks_res.len(), chunks.len()); + + let branches: Vec<_> = branches.collect(); + + for validator_index in 0..n_validators { + let chunk = query_chunk( + &mut virtual_overseer, + candidate_hash, + ValidatorIndex(validator_index as _), + ) + .await + .unwrap(); + let branch = &branches[validator_index as usize]; + let expected_chunk = ErasureChunk { + chunk: branch.1.to_vec(), + index: validator_index.into(), + proof: Proof::try_from(branch.0.clone()).unwrap(), + }; + assert_eq!(chunk, expected_chunk); + assert_eq!(chunk, query_all_chunks_res[validator_index as usize]); + } - assert_eq!(rx.await.unwrap(), Ok(())); + virtual_overseer + }); + } + } - for i in 0..n_validators { - let chunk = query_chunk(&mut virtual_overseer, candidate_hash, ValidatorIndex(i as _)) - .await - .unwrap(); + // If the AvailabilityChunkMapping feature is enabled, let's also test the + // ValidatorIndex -> ChunkIndex mapping. 
+ { + let n_cores = 4; + for core_index in 0..n_cores { + let store = test_store(); + let test_state = TestState::default(); + + test_harness(test_state.clone(), store.clone(), |mut virtual_overseer| async move { + let mut node_features = NodeFeatures::EMPTY; + let feature_bit = node_features::FeatureIndex::AvailabilityChunkMapping; + node_features.resize((feature_bit as u8 + 1) as usize, false); + node_features.set(feature_bit as u8 as usize, true); + + let candidate_hash = CandidateHash(Hash::repeat_byte(1)); + let n_validators = 10; + + let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; + let available_data = AvailableData { + pov: Arc::new(pov), + validation_data: test_state.persisted_validation_data.clone(), + }; + + let chunks = erasure::obtain_chunks_v1(n_validators as _, &available_data).unwrap(); + + let branches = erasure::branches(chunks.as_ref()); + let core_index = CoreIndex(core_index); + + let (tx, rx) = oneshot::channel(); + let block_msg = AvailabilityStoreMessage::StoreAvailableData { + candidate_hash, + n_validators, + available_data: available_data.clone(), + tx, + core_index, + expected_erasure_root: branches.root(), + node_features: node_features.clone(), + }; + + virtual_overseer.send(FromOrchestra::Communication { msg: block_msg }).await; + assert_eq!(rx.await.unwrap(), Ok(())); + + let pov: AvailableData = + query_available_data(&mut virtual_overseer, candidate_hash).await.unwrap(); + assert_eq!(pov, available_data); + + let query_all_chunks_res = query_all_chunks( + &mut virtual_overseer, + availability_chunk_indices( + Some(&node_features), + n_validators as usize, + core_index, + ) + .unwrap(), + candidate_hash, + ) + .await; + assert_eq!(query_all_chunks_res.len(), chunks.len()); + + let branches: Vec<_> = branches.collect(); + + for validator_index in 0..n_validators { + let chunk = query_chunk( + &mut virtual_overseer, + candidate_hash, + ValidatorIndex(validator_index as _), + ) + .await + .unwrap(); + let expected_chunk_index = availability_chunk_index( + Some(&node_features), + n_validators as usize, + core_index, + ValidatorIndex(validator_index), + ) + .unwrap(); + let branch = &branches[expected_chunk_index.0 as usize]; + let expected_chunk = ErasureChunk { + chunk: branch.1.to_vec(), + index: expected_chunk_index, + proof: Proof::try_from(branch.0.clone()).unwrap(), + }; + assert_eq!(chunk, expected_chunk); + assert_eq!( + &chunk, + query_all_chunks_res + .iter() + .find(|c| c.index == expected_chunk_index) + .unwrap() + ); + } - assert_eq!(chunk.chunk, chunks_expected[i as usize]); + virtual_overseer + }); } - virtual_overseer - }); + } } #[test] @@ -575,6 +675,8 @@ fn query_all_chunks_works() { n_validators, available_data, tx, + core_index: CoreIndex(1), + node_features: NodeFeatures::EMPTY, expected_erasure_root: branches.root(), }; @@ -598,7 +700,7 @@ fn query_all_chunks_works() { let chunk = ErasureChunk { chunk: vec![1, 2, 3], - index: ValidatorIndex(1), + index: ChunkIndex(1), proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(), }; @@ -606,6 +708,7 @@ fn query_all_chunks_works() { let store_chunk_msg = AvailabilityStoreMessage::StoreChunk { candidate_hash: candidate_hash_2, chunk, + validator_index: ValidatorIndex(1), tx, }; @@ -615,29 +718,29 @@ fn query_all_chunks_works() { assert_eq!(rx.await.unwrap(), Ok(())); } - { - let (tx, rx) = oneshot::channel(); + let chunk_indices = + availability_chunk_indices(None, n_validators as usize, CoreIndex(0)).unwrap(); - let msg = AvailabilityStoreMessage::QueryAllChunks(candidate_hash_1, tx); - 
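The feature-enabled branch above exercises both mapping helpers at once, which implies a consistency property between them: the per-validator `availability_chunk_index` must agree with the batch `availability_chunk_indices`. A sketch of that check, enabling the feature bit exactly as the test does (helper paths are the `polkadot_node_subsystem_util::availability_chunks` imports used by these tests):

use polkadot_node_subsystem_util::availability_chunks::{
    availability_chunk_index, availability_chunk_indices,
};
use polkadot_primitives::{node_features, CoreIndex, NodeFeatures, ValidatorIndex};

fn per_validator_helper_agrees_with_batch() {
    let mut features = NodeFeatures::EMPTY;
    let bit = node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize;
    features.resize(bit + 1, false);
    features.set(bit, true);

    let n_validators = 10;
    let core_index = CoreIndex(2);
    let all = availability_chunk_indices(Some(&features), n_validators, core_index).unwrap();
    for v in 0..n_validators as u32 {
        let one =
            availability_chunk_index(Some(&features), n_validators, core_index, ValidatorIndex(v))
                .unwrap();
        // The element-wise helper and the batch computation must match.
        assert_eq!(one, all[v as usize]);
    }
}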
virtual_overseer.send(FromOrchestra::Communication { msg }).await; - assert_eq!(rx.await.unwrap().len(), n_validators as usize); - } - - { - let (tx, rx) = oneshot::channel(); - - let msg = AvailabilityStoreMessage::QueryAllChunks(candidate_hash_2, tx); - virtual_overseer.send(FromOrchestra::Communication { msg }).await; - assert_eq!(rx.await.unwrap().len(), 1); - } + assert_eq!( + query_all_chunks(&mut virtual_overseer, chunk_indices.clone(), candidate_hash_1) + .await + .len(), + n_validators as usize + ); - { - let (tx, rx) = oneshot::channel(); + assert_eq!( + query_all_chunks(&mut virtual_overseer, chunk_indices.clone(), candidate_hash_2) + .await + .len(), + 1 + ); + assert_eq!( + query_all_chunks(&mut virtual_overseer, chunk_indices.clone(), candidate_hash_3) + .await + .len(), + 0 + ); - let msg = AvailabilityStoreMessage::QueryAllChunks(candidate_hash_3, tx); - virtual_overseer.send(FromOrchestra::Communication { msg }).await; - assert_eq!(rx.await.unwrap().len(), 0); - } virtual_overseer }); } @@ -667,6 +770,8 @@ fn stored_but_not_included_data_is_pruned() { n_validators, available_data: available_data.clone(), tx, + node_features: NodeFeatures::EMPTY, + core_index: CoreIndex(1), expected_erasure_root: branches.root(), }; @@ -723,6 +828,8 @@ fn stored_data_kept_until_finalized() { n_validators, available_data: available_data.clone(), tx, + node_features: NodeFeatures::EMPTY, + core_index: CoreIndex(1), expected_erasure_root: branches.root(), }; @@ -998,6 +1105,8 @@ fn forkfullness_works() { n_validators, available_data: available_data_1.clone(), tx, + node_features: NodeFeatures::EMPTY, + core_index: CoreIndex(1), expected_erasure_root: branches.root(), }; @@ -1014,6 +1123,8 @@ fn forkfullness_works() { n_validators, available_data: available_data_2.clone(), tx, + node_features: NodeFeatures::EMPTY, + core_index: CoreIndex(1), expected_erasure_root: branches.root(), }; @@ -1126,6 +1237,25 @@ async fn query_chunk( rx.await.unwrap() } +async fn query_all_chunks( + virtual_overseer: &mut VirtualOverseer, + chunk_mapping: Vec<ChunkIndex>, + candidate_hash: CandidateHash, +) -> Vec<ErasureChunk> { + let (tx, rx) = oneshot::channel(); + + let msg = AvailabilityStoreMessage::QueryAllChunks(candidate_hash, tx); + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + + let resp = rx.await.unwrap(); + resp.into_iter() + .map(|(val_idx, chunk)| { + assert_eq!(chunk.index, chunk_mapping[val_idx.0 as usize]); + chunk + }) + .collect() +} + async fn has_all_chunks( virtual_overseer: &mut VirtualOverseer, candidate_hash: CandidateHash, @@ -1206,12 +1336,12 @@ fn query_chunk_size_works() { test_harness(TestState::default(), store.clone(), |mut virtual_overseer| async move { let candidate_hash = CandidateHash(Hash::repeat_byte(33)); - let validator_index = ValidatorIndex(5); + let chunk_index = ChunkIndex(5); let n_validators = 10; let chunk = ErasureChunk { chunk: vec![1, 2, 3], - index: validator_index, + index: chunk_index, proof: Proof::try_from(vec![vec![3, 4, 5]]).unwrap(), }; @@ -1232,8 +1362,12 @@ let (tx, rx) = oneshot::channel(); - let chunk_msg = - AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk: chunk.clone(), tx }; + let chunk_msg = AvailabilityStoreMessage::StoreChunk { + candidate_hash, + chunk: chunk.clone(), + tx, + validator_index: chunk_index.into(), + }; overseer_send(&mut virtual_overseer, chunk_msg).await; assert_eq!(rx.await.unwrap(), Ok(())); diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index
26fa54470fbda6c1e5a22999a34022173b003fe6..f426f73284e8c36d7bc11eef051c8dddbbf40518 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -21,7 +21,7 @@ statement-table = { package = "polkadot-statement-table", path = "../../../state bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } -fatality = "0.0.6" +fatality = "0.1.1" schnellru = "0.2.1" [dev-dependencies] diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 23acb0450944e8eedea437da545830174ed84449..2fa8ad29efe5fbddafeb4f212161ed8f7ee2ac9c 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -30,7 +30,7 @@ //! assigned group of validators may be backed on-chain and proceed to the availability //! stage. //! -//! Depth is a concept relating to asynchronous backing, by which validators +//! Depth is a concept relating to asynchronous backing, by which //! short sub-chains of candidates are backed and extended off-chain, and then placed //! asynchronously into blocks of the relay chain as those are authored and as the //! relay-chain state becomes ready for them. Asynchronous backing allows parachains to @@ -66,7 +66,7 @@ #![deny(unused_crate_dependencies)] use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{HashMap, HashSet}, sync::Arc, }; @@ -88,7 +88,7 @@ use polkadot_node_subsystem::{ messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CanSecondRequest, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, - HypotheticalCandidate, HypotheticalFrontierRequest, IntroduceCandidateRequest, + HypotheticalCandidate, HypotheticalMembershipRequest, IntroduceSecondedCandidateRequest, ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError, }, @@ -210,6 +210,8 @@ struct PerRelayParentState { prospective_parachains_mode: ProspectiveParachainsMode, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, + /// Session index. + session_index: SessionIndex, /// The `ParaId` assigned to the local validator at this relay parent. assigned_para: Option<ParaId>, /// The `CoreIndex` assigned to the local validator at this relay parent. @@ -242,20 +244,44 @@ struct PerCandidateState { persisted_validation_data: PersistedValidationData, seconded_locally: bool, - para_id: ParaId, relay_parent: Hash, } -struct ActiveLeafState { - prospective_parachains_mode: ProspectiveParachainsMode, - /// The candidates seconded at various depths under this active - /// leaf with respect to parachain id. A candidate can only be - /// seconded when its hypothetical frontier under every active leaf - /// has an empty entry in this map. - /// - /// When prospective parachains are disabled, the only depth - /// which is allowed is 0. - seconded_at_depth: HashMap<ParaId, BTreeMap<usize, CandidateHash>>, +enum ActiveLeafState { + // If prospective-parachains is disabled, one validator may only back one candidate per + // paraid.
+ ProspectiveParachainsDisabled { seconded: HashSet<ParaId> }, + ProspectiveParachainsEnabled { max_candidate_depth: usize, allowed_ancestry_len: usize }, +} + +impl ActiveLeafState { + fn new(mode: ProspectiveParachainsMode) -> Self { + match mode { + ProspectiveParachainsMode::Disabled => + Self::ProspectiveParachainsDisabled { seconded: HashSet::new() }, + ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len } => + Self::ProspectiveParachainsEnabled { max_candidate_depth, allowed_ancestry_len }, + } + } + + fn add_seconded_candidate(&mut self, para_id: ParaId) { + if let Self::ProspectiveParachainsDisabled { seconded } = self { + seconded.insert(para_id); + } + } +} + +impl From<&ActiveLeafState> for ProspectiveParachainsMode { + fn from(state: &ActiveLeafState) -> Self { + match *state { + ActiveLeafState::ProspectiveParachainsDisabled { .. } => + ProspectiveParachainsMode::Disabled, + ActiveLeafState::ProspectiveParachainsEnabled { + max_candidate_depth, + allowed_ancestry_len, + } => ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len }, + } + } } /// The state of the subsystem. @@ -277,11 +303,11 @@ struct State { /// parachains. /// /// Relay-chain blocks which don't support prospective parachains are - /// never included in the fragment trees of active leaves which do. + /// never included in the fragment chains of active leaves which do. /// /// While it would be technically possible to support such leaves in - /// fragment trees, it only benefits the transition period when asynchronous - /// backing is being enabled and complicates code complexity. + /// fragment chains, it only benefits the transition period when asynchronous + /// backing is being enabled and complicates code. per_relay_parent: HashMap<Hash, PerRelayParentState>, /// State tracked for all candidates relevant to the implicit view.
/// @@ -510,6 +536,8 @@ async fn store_available_data( candidate_hash: CandidateHash, available_data: AvailableData, expected_erasure_root: Hash, + core_index: CoreIndex, + node_features: NodeFeatures, ) -> Result<(), Error> { let (tx, rx) = oneshot::channel(); // Important: the `av-store` subsystem will check if the erasure root of the `available_data` @@ -522,6 +550,8 @@ async fn store_available_data( n_validators, available_data, expected_erasure_root, + core_index, + node_features, tx, }) .await; @@ -545,6 +575,8 @@ async fn make_pov_available( candidate_hash: CandidateHash, validation_data: PersistedValidationData, expected_erasure_root: Hash, + core_index: CoreIndex, + node_features: NodeFeatures, ) -> Result<(), Error> { store_available_data( sender, @@ -552,6 +584,8 @@ async fn make_pov_available( candidate_hash, AvailableData { pov, validation_data }, expected_erasure_root, + core_index, + node_features, ) .await } @@ -622,6 +656,7 @@ struct BackgroundValidationParams { tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, candidate: CandidateReceipt, relay_parent: Hash, + session_index: SessionIndex, persisted_validation_data: PersistedValidationData, pov: PoVData, n_validators: usize, @@ -633,12 +668,14 @@ async fn validate_and_make_available( impl overseer::CandidateBackingSenderTrait, impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Sync, >, + core_index: CoreIndex, ) -> Result<(), Error> { let BackgroundValidationParams { mut sender, mut tx_command, candidate, relay_parent, + session_index, persisted_validation_data, pov, n_validators, @@ -668,6 +705,10 @@ async fn validate_and_make_available( Err(e) => return Err(Error::UtilError(e)), }; + let node_features = request_node_features(relay_parent, session_index, &mut sender) + .await? + .unwrap_or(NodeFeatures::EMPTY); + let pov = match pov { PoVData::Ready(pov) => pov, PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => @@ -723,6 +764,8 @@ async fn validate_and_make_available( candidate.hash(), validation_data.clone(), candidate.descriptor.erasure_root, + core_index, + node_features, ) .await; @@ -864,17 +907,9 @@ async fn handle_active_leaves_update( return Ok(()) } - state.per_leaf.insert( - leaf.hash, - ActiveLeafState { - prospective_parachains_mode: ProspectiveParachainsMode::Disabled, - // This is empty because the only allowed relay-parent and depth - // when prospective parachains are disabled is the leaf hash and 0, - // respectively. We've just learned about the leaf hash, so we cannot - // have any candidates seconded with it as a relay-parent yet. - seconded_at_depth: HashMap::new(), - }, - ); + state + .per_leaf + .insert(leaf.hash, ActiveLeafState::new(ProspectiveParachainsMode::Disabled)); (vec![leaf.hash], ProspectiveParachainsMode::Disabled) }, @@ -882,63 +917,9 @@ async fn handle_active_leaves_update( let fresh_relay_parents = state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None); - // At this point, all candidates outside of the implicit view - // have been cleaned up. For all which remain, which we've seconded, - // we ask the prospective parachains subsystem where they land in the fragment - // tree for the given active leaf. This comprises our `seconded_at_depth`. 
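All of the removed bookkeeping below collapses into the `ActiveLeafState` introduced earlier: a per-leaf `seconded` set when prospective parachains are disabled, and just the async-backing parameters when they are enabled. One invariant worth noting (a sketch; it assumes `ProspectiveParachainsMode` is `Copy` and compares with `PartialEq`, as its uses in this subsystem suggest): the mode round-trips through the leaf state, which is what lets `Some(l) => l.into()` replace the old field read.

use polkadot_node_subsystem_util::runtime::ProspectiveParachainsMode;

fn mode_round_trips_through_leaf_state() {
    let mode =
        ProspectiveParachainsMode::Enabled { max_candidate_depth: 4, allowed_ancestry_len: 2 };
    let leaf_state = ActiveLeafState::new(mode);
    assert_eq!(ProspectiveParachainsMode::from(&leaf_state), mode);
}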
- - let remaining_seconded = state - .per_candidate - .iter() - .filter(|(_, cd)| cd.seconded_locally) - .map(|(c_hash, cd)| (*c_hash, cd.para_id)); - - // one-to-one correspondence to remaining_seconded - let mut membership_answers = FuturesOrdered::new(); - - for (candidate_hash, para_id) in remaining_seconded { - let (tx, rx) = oneshot::channel(); - membership_answers - .push_back(rx.map_ok(move |membership| (para_id, candidate_hash, membership))); - - ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership( - para_id, - candidate_hash, - tx, - )) - .await; - } - - let mut seconded_at_depth = HashMap::new(); - while let Some(response) = membership_answers.next().await { - match response { - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - "Prospective parachains subsystem unreachable for membership request", - ); - }, - Ok((para_id, candidate_hash, membership)) => { - // This request gives membership in all fragment trees. We have some - // wasted data here, and it can be optimized if it proves - // relevant to performance. - if let Some((_, depths)) = - membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash) - { - let para_entry: &mut BTreeMap<usize, CandidateHash> = - seconded_at_depth.entry(para_id).or_default(); - for depth in depths { - para_entry.insert(depth, candidate_hash); - } - } - }, - } - } + let active_leaf_state = ActiveLeafState::new(prospective_parachains_mode); - state.per_leaf.insert( - leaf.hash, - ActiveLeafState { prospective_parachains_mode, seconded_at_depth }, - ); + state.per_leaf.insert(leaf.hash, active_leaf_state); let fresh_relay_parent = match fresh_relay_parents { Some(f) => f.to_vec(), @@ -981,7 +962,7 @@ async fn handle_active_leaves_update( // block itself did. leaf_mode }, - Some(l) => l.prospective_parachains_mode, + Some(l) => l.into(), }; // construct a `PerRelayParent` from the runtime API @@ -1229,6 +1210,7 @@ async fn construct_per_relay_parent_state( Ok(Some(PerRelayParentState { prospective_parachains_mode: mode, parent, + session_index, assigned_core, assigned_para, backed: HashSet::new(), @@ -1247,20 +1229,20 @@ enum SecondingAllowed { No, - Yes(Vec<(Hash, Vec<usize>)>), + // On which leaves is seconding allowed. + Yes(Vec<Hash>), } -/// Checks whether a candidate can be seconded based on its hypothetical frontiers in the fragment -/// tree and what we've already seconded in all active leaves. +/// Checks whether a candidate can be seconded based on its hypothetical membership in the fragment +/// chain. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn seconding_sanity_check( ctx: &mut Context, active_leaves: &HashMap<Hash, ActiveLeafState>, implicit_view: &ImplicitView, hypothetical_candidate: HypotheticalCandidate, - backed_in_path_only: bool, ) -> SecondingAllowed { - let mut membership = Vec::new(); + let mut leaves_for_seconding = Vec::new(); let mut responses = FuturesOrdered::<BoxFuture<'_, Result<_, oneshot::Canceled>>>::new(); let candidate_para = hypothetical_candidate.candidate_para(); @@ -1268,7 +1250,7 @@ let candidate_hash = hypothetical_candidate.candidate_hash(); for (head, leaf_state) in active_leaves { - if leaf_state.prospective_parachains_mode.is_enabled() { + if ProspectiveParachainsMode::from(leaf_state).is_enabled() { // Check that the candidate relay parent is allowed for para, skip the // leaf otherwise.
let allowed_parents_for_para = @@ -1278,40 +1260,36 @@ } let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier( - HypotheticalFrontierRequest { + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership( + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(*head), - backed_in_path_only, + fragment_chain_relay_parent: Some(*head), }, tx, )) .await; - let response = rx.map_ok(move |frontiers| { - let depths: Vec<usize> = frontiers + let response = rx.map_ok(move |candidate_memberships| { + let is_member_or_potential = candidate_memberships .into_iter() - .flat_map(|(candidate, memberships)| { - debug_assert_eq!(candidate.candidate_hash(), candidate_hash); - memberships.into_iter().flat_map(|(relay_parent, depths)| { - debug_assert_eq!(relay_parent, *head); - depths - }) + .find_map(|(candidate, leaves)| { + (candidate.candidate_hash() == candidate_hash).then_some(leaves) }) - .collect(); - (depths, head, leaf_state) + .and_then(|leaves| leaves.into_iter().find(|leaf| leaf == head)) + .is_some(); + + (is_member_or_potential, head) }); responses.push_back(response.boxed()); } else { if *head == candidate_relay_parent { - if leaf_state - .seconded_at_depth - .get(&candidate_para) - .map_or(false, |occupied| occupied.contains_key(&0)) - { - // The leaf is already occupied. - return SecondingAllowed::No + if let ActiveLeafState::ProspectiveParachainsDisabled { seconded } = leaf_state { + if seconded.contains(&candidate_para) { + // The leaf is already occupied. For non-prospective parachains, we only + // second one candidate. + return SecondingAllowed::No + } } - responses.push_back(futures::future::ok((vec![0], head, leaf_state)).boxed()); + responses.push_back(futures::future::ok((true, head)).boxed()); } } } @@ -1325,38 +1303,32 @@ Err(oneshot::Canceled) => { gum::warn!( target: LOG_TARGET, - "Failed to reach prospective parachains subsystem for hypothetical frontiers", + "Failed to reach prospective parachains subsystem for hypothetical membership", ); return SecondingAllowed::No }, - Ok((depths, head, leaf_state)) => { - for depth in &depths { - if leaf_state - .seconded_at_depth - .get(&candidate_para) - .map_or(false, |occupied| occupied.contains_key(&depth)) - { - gum::debug!( - target: LOG_TARGET, - ?candidate_hash, - depth, - leaf_hash = ?head, - "Refusing to second candidate at depth - already occupied." - ); - - return SecondingAllowed::No - } - } - - membership.push((*head, depths)); + Ok((is_member_or_potential, head)) => match is_member_or_potential { + false => { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + leaf_hash = ?head, + "Refusing to second candidate at leaf. Is not a potential member.", + ); + }, + true => { + leaves_for_seconding.push(*head); + }, }, } } - // At this point we've checked the depths of the candidate against all active - // leaves. - SecondingAllowed::Yes(membership) + if leaves_for_seconding.is_empty() { + SecondingAllowed::No + } else { + SecondingAllowed::Yes(leaves_for_seconding) + } } /// Performs seconding sanity check for an advertisement. @@ -1385,16 +1357,12 @@ async fn handle_can_second_request( &state.per_leaf, &state.implicit_view, hypothetical_candidate, - true, ) .await; match result { SecondingAllowed::No => false, - SecondingAllowed::Yes(membership) => { - // Candidate should be recognized by at least some fragment tree.
- membership.iter().any(|(_, m)| !m.is_empty()) - }, + SecondingAllowed::Yes(leaves) => !leaves.is_empty(), } } else { // Relay parent is unknown or async backing is disabled. @@ -1435,20 +1403,6 @@ async fn handle_validated_candidate_command( commitments, }; - let parent_head_data_hash = persisted_validation_data.parent_head.hash(); - // Note that `GetHypotheticalFrontier` doesn't account for recursion, - // i.e. candidates can appear at multiple depths in the tree and in fact - // at all depths, and we don't know what depths a candidate will ultimately - // occupy because that's dependent on other candidates we haven't yet - // received. - // - // The only way to effectively rule this out is to have candidate receipts - // directly commit to the parachain block number or some other incrementing - // counter. That requires a major primitives format upgrade, so for now - // we just rule out trivial cycles. - if parent_head_data_hash == receipt.commitments.head_data.hash() { - return Ok(()) - } let hypothetical_candidate = HypotheticalCandidate::Complete { candidate_hash, receipt: Arc::new(receipt.clone()), @@ -1457,12 +1411,11 @@ async fn handle_validated_candidate_command( // sanity check that we're allowed to second the candidate // and that it doesn't conflict with other candidates we've // seconded. - let fragment_tree_membership = match seconding_sanity_check( + let hypothetical_membership = match seconding_sanity_check( ctx, &state.per_leaf, &state.implicit_view, hypothetical_candidate, - false, ) .await { @@ -1517,8 +1470,8 @@ async fn handle_validated_candidate_command( Some(p) => p.seconded_locally = true, } - // update seconded depths in active leaves. - for (leaf, depths) in fragment_tree_membership { + // record seconded candidates for non-prospective-parachains mode. + for leaf in hypothetical_membership { let leaf_data = match state.per_leaf.get_mut(&leaf) { None => { gum::warn!( @@ -1532,14 +1485,7 @@ async fn handle_validated_candidate_command( Some(d) => d, }; - let seconded_at_depth = leaf_data - .seconded_at_depth - .entry(candidate.descriptor().para_id) - .or_default(); - - for depth in depths { - seconded_at_depth.insert(depth, candidate_hash); - } + leaf_data.add_seconded_candidate(candidate.descriptor().para_id); } rp_state.issued_statements.insert(candidate_hash); @@ -1650,7 +1596,7 @@ fn sign_statement( /// and any of the following are true: /// 1. There is no `PersistedValidationData` attached. /// 2. Prospective parachains are enabled for the relay parent and the prospective parachains -/// subsystem returned an empty `FragmentTreeMembership` i.e. did not recognize the candidate as +/// subsystem returned an empty `HypotheticalMembership` i.e. did not recognize the candidate as /// being applicable to any of the active leaves. 
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn import_statement( @@ -1686,8 +1632,8 @@ async fn import_statement( if !per_candidate.contains_key(&candidate_hash) { if rp_state.prospective_parachains_mode.is_enabled() { let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::IntroduceCandidate( - IntroduceCandidateRequest { + ctx.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate( + IntroduceSecondedCandidateRequest { candidate_para: candidate.descriptor().para_id, candidate_receipt: candidate.clone(), persisted_validation_data: pvd.clone(), @@ -1705,17 +1651,9 @@ async fn import_statement( return Err(Error::RejectedByProspectiveParachains) }, - Ok(membership) => - if membership.is_empty() { - return Err(Error::RejectedByProspectiveParachains) - }, + Ok(false) => return Err(Error::RejectedByProspectiveParachains), + Ok(true) => {}, } - - ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( - candidate.descriptor().para_id, - candidate_hash, - )) - .await; } // Only save the candidate if it was approved by prospective parachains. @@ -1725,7 +1663,6 @@ async fn import_statement( persisted_validation_data: pvd.clone(), // This is set after importing when seconding locally. seconded_locally: false, - para_id: candidate.descriptor().para_id, relay_parent: candidate.descriptor().relay_parent, }, ); @@ -1786,13 +1723,6 @@ async fn post_import_statement_actions( candidate_hash, )) .await; - // Backed candidate potentially unblocks new advertisements, - // notify collator protocol. - ctx.send_message(CollatorProtocolMessage::Backed { - para_id, - para_head: backed.candidate().descriptor.para_head, - }) - .await; // Notify statement distribution of backed candidate. ctx.send_message(StatementDistributionMessage::Backed(candidate_hash)).await; } else { @@ -1878,10 +1808,11 @@ async fn background_validate_and_make_available( >, ) -> Result<(), Error> { let candidate_hash = params.candidate.hash(); + let Some(core_index) = rp_state.assigned_core else { return Ok(()) }; if rp_state.awaiting_validation.insert(candidate_hash) { // spawn background task. let bg = async move { - if let Err(error) = validate_and_make_available(params).await { + if let Err(error) = validate_and_make_available(params, core_index).await { if let Error::BackgroundValidationMpsc(error) = error { gum::debug!( target: LOG_TARGET, @@ -1956,6 +1887,7 @@ async fn kick_off_validation_work( tx_command: background_validation_tx.clone(), candidate: attesting.candidate, relay_parent: rp_state.parent, + session_index: rp_state.session_index, persisted_validation_data, pov, n_validators: rp_state.table_context.validators.len(), @@ -2016,7 +1948,7 @@ async fn maybe_validate_and_import( if let Some(summary) = summary { // import_statement already takes care of communicating with the // prospective parachains subsystem. At this point, the candidate - // has already been accepted into the fragment trees. + // has already been accepted by the subsystem. 
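Condensed, the new introduction handshake in `import_statement` looks as follows (a sketch with the surrounding function's names `ctx`, `candidate` and `pvd` assumed in scope; error logging elided). Prospective parachains now answers with a plain `bool`, and the separate `CandidateSeconded` follow-up message is gone:

let (tx, rx) = oneshot::channel();
ctx.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate(
    IntroduceSecondedCandidateRequest {
        candidate_para: candidate.descriptor().para_id,
        candidate_receipt: candidate.clone(),
        persisted_validation_data: pvd.clone(),
    },
    tx,
))
.await;
match rx.await {
    // Accepted into a fragment chain: go on to persist `PerCandidateState`.
    Ok(true) => {},
    // Not accepted anywhere (or the subsystem is gone): refuse the import.
    Ok(false) | Err(_) => return Err(Error::RejectedByProspectiveParachains),
}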
let candidate_hash = summary.candidate; @@ -2109,6 +2041,7 @@ async fn validate_and_second( tx_command: background_validation_tx.clone(), candidate: candidate.clone(), relay_parent: rp_state.parent, + session_index: rp_state.session_index, persisted_validation_data, pov: PoVData::Ready(pov), n_validators: rp_state.table_context.validators.len(), @@ -2174,8 +2107,7 @@ async fn handle_second_message( collation = ?candidate.descriptor().para_id, "Subsystem asked to second for para outside of our assignment", ); - - return Ok(()) + return Ok(()); } gum::debug!( diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index d1969e656db673b70b070c9b708bab867bec9fcb..00f9e4cd8ff688af3e3b7144cb9801a6eef42044 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -367,6 +367,15 @@ async fn assert_validation_requests( tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(sess_idx, tx)) + ) if sess_idx == 1 => { + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + } + ); } async fn assert_validate_from_exhaustive( @@ -2084,7 +2093,7 @@ fn retry_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; // Not deterministic which message comes first: - for _ in 0u32..5 { + for _ in 0u32..6 { match virtual_overseer.recv().await { AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( _, @@ -2115,6 +2124,12 @@ fn retry_works() { )) => { tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(1, tx), + )) => { + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, msg => { assert!(false, "Unexpected message: {:?}", msg); }, @@ -2662,32 +2677,7 @@ fn validator_ignores_statements_from_disabled_validators() { virtual_overseer.send(FromOrchestra::Communication { msg: statement_3 }).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) - ) if hash == validation_code.hash() => { - tx.send(Ok(Some(validation_code.clone()))).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx)) - ) => { - tx.send(Ok(1u32.into())).unwrap(); - } - ); - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionExecutorParams(sess_idx, tx)) - ) if sess_idx == 1 => { - tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); - } - ); + assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is the PoV. diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 94310d2aa164650db84b78ddf361a9f465ac207d..5ef3a3b15285cc1e30577cd1abf5048b9da0b131 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -17,7 +17,7 @@ //! Tests for the backing subsystem with enabled prospective parachains. 
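In the test updates that follow, the old per-leaf depth vectors give way to `HypotheticalMembership`, which is simply the list of leaf hashes under which a candidate is a (potential) member. A sketch of the two answer shapes the tests feed back, mirroring `make_hypothetical_membership_response` below (hypothetical helpers; type paths as imported by these tests):

use polkadot_node_subsystem::messages::{HypotheticalCandidate, HypotheticalMembership};
use polkadot_primitives::Hash;

// "Member (or potential member) under `leaf`": seconding may proceed there.
fn member_under(
    candidate: HypotheticalCandidate,
    leaf: Hash,
) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> {
    vec![(candidate, vec![leaf])]
}

// "Member nowhere": `seconding_sanity_check` turns this into `SecondingAllowed::No`.
fn member_nowhere(
    candidate: HypotheticalCandidate,
) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> {
    vec![(candidate, vec![])]
}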
use polkadot_node_subsystem::{ - messages::{ChainApiMessage, FragmentTreeMembership}, + messages::{ChainApiMessage, HypotheticalMembership}, ActivatedLeaf, TimeoutExt, }; use polkadot_primitives::{AsyncBackingParams, BlockNumber, Header, OccupiedCore}; @@ -40,7 +40,6 @@ async fn activate_leaf( virtual_overseer: &mut VirtualOverseer, leaf: TestLeaf, test_state: &TestState, - seconded_in_view: usize, ) { let TestLeaf { activated, min_relay_parents } = leaf; let leaf_hash = activated.hash; @@ -68,15 +67,6 @@ async fn activate_leaf( .min() .unwrap_or(&leaf_number); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) - ) if parent == leaf_hash => { - tx.send(min_relay_parents).unwrap(); - } - ); - let ancestry_len = leaf_number + 1 - min_min; let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) @@ -118,23 +108,20 @@ async fn activate_leaf( tx.send(Ok(Some(header))).unwrap(); } ); - requested_len += 1; - } - } - for _ in 0..seconded_in_view { - let msg = match next_overseer_message.take() { - Some(msg) => msg, - None => virtual_overseer.recv().await, - }; - assert_matches!( - msg, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetTreeMembership(.., tx), - ) => { - tx.send(Vec::new()).unwrap(); + if requested_len == 0 { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) + ) if parent == leaf_hash => { + tx.send(min_relay_parents.clone()).unwrap(); + } + ); } - ); + + requested_len += 1; + } } for (hash, number) in ancestry_iter.take(requested_len) { @@ -297,11 +284,11 @@ async fn assert_validate_seconded_candidate( ); } -async fn assert_hypothetical_frontier_requests( +async fn assert_hypothetical_membership_requests( virtual_overseer: &mut VirtualOverseer, mut expected_requests: Vec<( - HypotheticalFrontierRequest, - Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + HypotheticalMembershipRequest, + Vec<(HypotheticalCandidate, HypotheticalMembership)>, )>, ) { // Requests come with no particular order. @@ -311,13 +298,13 @@ async fn assert_hypothetical_frontier_requests( assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx), + ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx), ) => { let idx = match expected_requests.iter().position(|r| r.0 == request) { Some(idx) => idx, None => panic!( - "unexpected hypothetical frontier request, no match found for {:?}", + "unexpected hypothetical membership request, no match found for {:?}", request ), }; @@ -330,18 +317,17 @@ async fn assert_hypothetical_frontier_requests( } } -fn make_hypothetical_frontier_response( - depths: Vec, +fn make_hypothetical_membership_response( hypothetical_candidate: HypotheticalCandidate, relay_parent_hash: Hash, -) -> Vec<(HypotheticalCandidate, FragmentTreeMembership)> { - vec![(hypothetical_candidate, vec![(relay_parent_hash, depths)])] +) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> { + vec![(hypothetical_candidate, vec![relay_parent_hash])] } // Test that `seconding_sanity_check` works when a candidate is allowed // for all leaves. 
#[test] -fn seconding_sanity_check_allowed() { +fn seconding_sanity_check_allowed_on_all() { let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { // Candidate is seconded in a parent of the activated `leaf_a`. @@ -364,8 +350,8 @@ fn seconding_sanity_check_allowed() { let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; let test_leaf_b = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; - activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -412,24 +398,19 @@ fn seconding_sanity_check_allowed() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }; - let expected_request_a = HypotheticalFrontierRequest { + let expected_request_a = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }; - let expected_response_a = make_hypothetical_frontier_response( - vec![0, 1, 2, 3], - hypothetical_candidate.clone(), - leaf_a_hash, - ); - let expected_request_b = HypotheticalFrontierRequest { + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); + let expected_request_b = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_b_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_b_hash), }; let expected_response_b = - make_hypothetical_frontier_response(vec![3], hypothetical_candidate, leaf_b_hash); - assert_hypothetical_frontier_requests( + make_hypothetical_membership_response(hypothetical_candidate, leaf_b_hash); + assert_hypothetical_membership_requests( &mut virtual_overseer, vec![ (expected_request_a, expected_response_a), @@ -441,7 +422,7 @@ fn seconding_sanity_check_allowed() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -449,19 +430,10 @@ fn seconding_sanity_check_allowed() { req.candidate_receipt == candidate && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. - tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -484,8 +456,8 @@ fn seconding_sanity_check_allowed() { }); } -// Test that `seconding_sanity_check` works when a candidate is disallowed -// for at least one leaf. +// Test that `seconding_sanity_check` disallows seconding when a candidate is disallowed +// for all leaves. 
#[test] fn seconding_sanity_check_disallowed() { let test_state = TestState::default(); @@ -510,7 +482,7 @@ fn seconding_sanity_check_disallowed() { let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; let test_leaf_b = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -557,17 +529,13 @@ fn seconding_sanity_check_disallowed() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }; - let expected_request_a = HypotheticalFrontierRequest { + let expected_request_a = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }; - let expected_response_a = make_hypothetical_frontier_response( - vec![0, 1, 2, 3], - hypothetical_candidate, - leaf_a_hash, - ); - assert_hypothetical_frontier_requests( + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash); + assert_hypothetical_membership_requests( &mut virtual_overseer, vec![(expected_request_a, expected_response_a)], ) @@ -576,7 +544,7 @@ fn seconding_sanity_check_disallowed() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -584,19 +552,10 @@ fn seconding_sanity_check_disallowed() { req.candidate_receipt == candidate && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. - tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -615,10 +574,7 @@ fn seconding_sanity_check_disallowed() { } ); - // A seconded candidate occupies a depth, try to second another one. - // It is allowed in a new leaf but not allowed in the old one. - // Expect it to be rejected. 
- activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 1).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; let leaf_a_grandparent = get_parent_hash(leaf_a_parent); let candidate = TestCandidateBuilder { para_id, @@ -659,28 +615,20 @@ fn seconding_sanity_check_disallowed() { receipt: Arc::new(candidate), persisted_validation_data: pvd, }; - let expected_request_a = HypotheticalFrontierRequest { + let expected_request_a = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }; - let expected_response_a = make_hypothetical_frontier_response( - vec![3], - hypothetical_candidate.clone(), - leaf_a_hash, - ); - let expected_request_b = HypotheticalFrontierRequest { + let expected_empty_response = vec![(hypothetical_candidate.clone(), vec![])]; + let expected_request_b = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_b_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_b_hash), }; - let expected_response_b = - make_hypothetical_frontier_response(vec![1], hypothetical_candidate, leaf_b_hash); - assert_hypothetical_frontier_requests( + assert_hypothetical_membership_requests( &mut virtual_overseer, vec![ - (expected_request_a, expected_response_a), // All depths are occupied. - (expected_request_b, expected_response_b), + (expected_request_a, expected_empty_response.clone()), + (expected_request_b, expected_empty_response), ], ) .await; @@ -695,6 +643,137 @@ fn seconding_sanity_check_disallowed() { }); } +// Test that `seconding_sanity_check` allows seconding a candidate when it's allowed on at least one +// leaf. +#[test] +fn seconding_sanity_check_allowed_on_at_least_one_leaf() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + // `a` is grandparent of `b`. 
+ let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; + + let leaf_b_hash = Hash::from_low_u64_be(128); + let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); + let expected_request_b = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_b_hash), + }; + let expected_response_b = vec![(hypothetical_candidate.clone(), vec![])]; + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, expected_response_a), + (expected_request_b, expected_response_b), + ], + ) + .await; + // Prospective parachains are notified. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} + // Test that a seconded candidate which is not approved by prospective parachains // subsystem doesn't change the view. #[test] @@ -712,7 +791,7 @@ fn prospective_parachains_reject_candidate() { let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -760,25 +839,20 @@ fn prospective_parachains_reject_candidate() { persisted_validation_data: pvd.clone(), }; let expected_request_a = vec![( - HypotheticalFrontierRequest { + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }, - make_hypothetical_frontier_response( - vec![0, 1, 2, 3], - hypothetical_candidate, - leaf_a_hash, - ), + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), )]; - assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request_a.clone()) + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a.clone()) .await; // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -787,7 +861,7 @@ fn prospective_parachains_reject_candidate() { && req.candidate_para == para_id && pvd == req.persisted_validation_data => { // Reject it. - tx.send(Vec::new()).unwrap(); + tx.send(false).unwrap(); } ); @@ -825,12 +899,12 @@ fn prospective_parachains_reject_candidate() { .await; // `seconding_sanity_check` - assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request_a).await; + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a).await; // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -838,19 +912,10 @@ fn prospective_parachains_reject_candidate() { req.candidate_receipt == candidate && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. 
- tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -890,7 +955,7 @@ fn second_multiple_candidates_per_relay_parent() { let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -911,12 +976,10 @@ fn second_multiple_candidates_per_relay_parent() { let mut candidate_b = candidate_a.clone(); candidate_b.relay_parent = leaf_grandparent; - // With depths. - let candidate_a = (candidate_a.build(), 1); - let candidate_b = (candidate_b.build(), 2); + let candidate_a = candidate_a.build(); + let candidate_b = candidate_b.build(); for candidate in &[candidate_a, candidate_b] { - let (candidate, depth) = candidate; let second = CandidateBackingMessage::Second( leaf_hash, candidate.to_plain(), @@ -945,46 +1008,33 @@ fn second_multiple_candidates_per_relay_parent() { persisted_validation_data: pvd.clone(), }; let expected_request_a = vec![( - HypotheticalFrontierRequest { + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_hash), }, - make_hypothetical_frontier_response( - vec![*depth], - hypothetical_candidate, - leaf_hash, - ), + make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), )]; - assert_hypothetical_frontier_requests( + assert_hypothetical_membership_requests( &mut virtual_overseer, expected_request_a.clone(), ) .await; // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - // Any non-empty response will do. 
- tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) - ) + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } ); assert_matches!( @@ -1026,7 +1076,7 @@ fn backing_works() { let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -1048,7 +1098,6 @@ fn backing_works() { .build(); let candidate_a_hash = candidate_a.hash(); - let candidate_a_para_head = candidate_a.descriptor().para_head; let public1 = Keystore::sr25519_generate_new( &*test_state.keystore, @@ -1096,7 +1145,7 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -1104,19 +1153,10 @@ fn backing_works() { req.candidate_receipt == candidate_a && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. - tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_validate_seconded_candidate( &mut virtual_overseer, candidate_a.descriptor().relay_parent, @@ -1147,13 +1187,6 @@ fn backing_works() { ), ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Backed { - para_id: _para_id, - para_head, - }) if para_id == _para_id && candidate_a_para_head == para_head - ); assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution(StatementDistributionMessage::Backed ( @@ -1187,7 +1220,7 @@ fn concurrent_dependent_candidates() { let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let head_data = &[ HeadData(vec![10, 20, 30]), // Before `a`. @@ -1299,13 +1332,10 @@ fn concurrent_dependent_candidates() { // Order is not guaranteed since we have 2 statements being handled concurrently. 
match msg { AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate(_, tx), + ProspectiveParachainsMessage::IntroduceSecondedCandidate(_, tx), ) => { - tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + tx.send(true).unwrap(); }, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _), - ) => {}, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ValidationCodeByHash(_, tx), @@ -1362,7 +1392,6 @@ fn concurrent_dependent_candidates() { AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::CandidateBacked(..), ) => {}, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Backed { .. }) => {}, AllMessages::StatementDistribution(StatementDistributionMessage::Share( _, statement, @@ -1406,7 +1435,13 @@ fn concurrent_dependent_candidates() { )) => { tx.send(Ok(test_state.validator_groups.clone())).unwrap(); }, - + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::NodeFeatures(sess_idx, tx), + )) => { + assert_eq!(sess_idx, 1); + tx.send(Ok(NodeFeatures::EMPTY)).unwrap(); + }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _parent, RuntimeApiRequest::AvailabilityCores(tx), @@ -1447,7 +1482,7 @@ fn seconding_sanity_check_occupy_same_depth() { let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -1506,44 +1541,35 @@ fn seconding_sanity_check_occupy_same_depth() { persisted_validation_data: pvd.clone(), }; let expected_request_a = vec![( - HypotheticalFrontierRequest { + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_hash), }, // Send the same membership for both candidates. - make_hypothetical_frontier_response(vec![0, 1], hypothetical_candidate, leaf_hash), + make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), )]; - assert_hypothetical_frontier_requests( + assert_hypothetical_membership_requests( &mut virtual_overseer, expected_request_a.clone(), ) .await; // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && &req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - // Any non-empty response will do. 
- tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) - ) + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && &req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } ); assert_matches!( @@ -1600,7 +1626,7 @@ fn occupied_core_assignment() { let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -1648,23 +1674,18 @@ fn occupied_core_assignment() { persisted_validation_data: pvd.clone(), }; let expected_request = vec![( - HypotheticalFrontierRequest { + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }, - make_hypothetical_frontier_response( - vec![0, 1, 2, 3], - hypothetical_candidate, - leaf_a_hash, - ), + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), )]; - assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request).await; + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request).await; // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -1673,19 +1694,10 @@ fn occupied_core_assignment() { && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. 
- tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs index 89851c4a033b58428fa8eac366aafcd5f7144140..e3effb7949eaea0381c556c47eae0a1b6f09dc33 100644 --- a/polkadot/node/core/bitfield-signing/src/lib.rs +++ b/polkadot/node/core/bitfield-signing/src/lib.rs @@ -27,15 +27,14 @@ use futures::{ FutureExt, }; use polkadot_node_subsystem::{ - errors::RuntimeApiError, jaeger, - messages::{ - AvailabilityStoreMessage, BitfieldDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, - }, + messages::{AvailabilityStoreMessage, BitfieldDistributionMessage}, overseer, ActivatedLeaf, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, - SubsystemError, SubsystemResult, SubsystemSender, + SubsystemError, SubsystemResult, +}; +use polkadot_node_subsystem_util::{ + self as util, request_availability_cores, runtime::recv_runtime, Validator, }; -use polkadot_node_subsystem_util::{self as util, Validator}; use polkadot_primitives::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex}; use sp_keystore::{Error as KeystoreError, KeystorePtr}; use std::{collections::HashMap, time::Duration}; @@ -69,7 +68,7 @@ pub enum Error { MpscSend(#[from] mpsc::SendError), #[error(transparent)] - Runtime(#[from] RuntimeApiError), + Runtime(#[from] util::runtime::Error), #[error("Keystore failed: {0:?}")] Keystore(KeystoreError), @@ -79,8 +78,8 @@ pub enum Error { /// for whether we have the availability chunk for our validator index. 
 async fn get_core_availability(
 	core: &CoreState,
-	validator_idx: ValidatorIndex,
-	sender: &Mutex<&mut impl SubsystemSender>,
+	validator_index: ValidatorIndex,
+	sender: &Mutex<&mut impl overseer::BitfieldSigningSenderTrait>,
 	span: &jaeger::Span,
 ) -> Result<bool, Error> {
 	if let CoreState::Occupied(core) = core {
@@ -90,14 +89,11 @@ async fn get_core_availability(
 		sender
 			.lock()
 			.await
-			.send_message(
-				AvailabilityStoreMessage::QueryChunkAvailability(
-					core.candidate_hash,
-					validator_idx,
-					tx,
-				)
-				.into(),
-			)
+			.send_message(AvailabilityStoreMessage::QueryChunkAvailability(
+				core.candidate_hash,
+				validator_index,
+				tx,
+			))
 			.await;
 
 		let res = rx.await.map_err(Into::into);
@@ -116,25 +112,6 @@
 	}
 }
 
-/// delegates to the v1 runtime API
-async fn get_availability_cores(
-	relay_parent: Hash,
-	sender: &mut impl SubsystemSender,
-) -> Result<Vec<CoreState>, Error> {
-	let (tx, rx) = oneshot::channel();
-	sender
-		.send_message(
-			RuntimeApiMessage::Request(relay_parent, RuntimeApiRequest::AvailabilityCores(tx))
-				.into(),
-		)
-		.await;
-	match rx.await {
-		Ok(Ok(out)) => Ok(out),
-		Ok(Err(runtime_err)) => Err(runtime_err.into()),
-		Err(err) => Err(err.into()),
-	}
-}
-
 /// - get the list of core states from the runtime
 /// - for each core, concurrently determine chunk availability (see `get_core_availability`)
 /// - return the bitfield if there were no errors at any point in this process (otherwise, it's
@@ -143,12 +120,12 @@ async fn construct_availability_bitfield(
 	relay_parent: Hash,
 	span: &jaeger::Span,
 	validator_idx: ValidatorIndex,
-	sender: &mut impl SubsystemSender,
+	sender: &mut impl overseer::BitfieldSigningSenderTrait,
 ) -> Result<AvailabilityBitfield, Error> {
 	// get the set of availability cores from the runtime
 	let availability_cores = {
 		let _span = span.child("get-availability-cores");
-		get_availability_cores(relay_parent, sender).await?
+		recv_runtime(request_availability_cores(relay_parent, sender).await).await?
 	};
 
 	// Wrap the sender in a Mutex to share it between the futures.
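
// [Editorial aside] A minimal sketch of the bitfield construction this file performs,
// using plain stand-ins for the subsystem types (the real `CoreState` is richer, the
// queries run concurrently, and the resulting bitfield is then signed and gossiped).
// `have_chunk` stands in for `AvailabilityStoreMessage::QueryChunkAvailability`.

#[derive(Clone)]
enum CoreState {
	Free,
	Occupied { candidate_hash: u64 },
}

/// One bit per availability core: `true` iff the core is occupied and we hold
/// our chunk for the candidate occupying it.
fn construct_bitfield(cores: &[CoreState], have_chunk: impl Fn(u64) -> bool) -> Vec<bool> {
	cores
		.iter()
		.map(|core| match core {
			// Free cores contribute a `false` bit; only occupied cores are queried.
			CoreState::Free => false,
			CoreState::Occupied { candidate_hash } => have_chunk(*candidate_hash),
		})
		.collect()
}

fn main() {
	let cores = [CoreState::Occupied { candidate_hash: 1 }, CoreState::Free];
	assert_eq!(construct_bitfield(&cores, |hash| hash == 1), vec![true, false]);
}
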
diff --git a/polkadot/node/core/bitfield-signing/src/tests.rs b/polkadot/node/core/bitfield-signing/src/tests.rs index 106ecc06b1569862d7adeacada748e03b586b4ec..0e61e6086d28503eb9b092326b8cadd5442e9dbc 100644 --- a/polkadot/node/core/bitfield-signing/src/tests.rs +++ b/polkadot/node/core/bitfield-signing/src/tests.rs @@ -16,7 +16,7 @@ use super::*; use futures::{executor::block_on, pin_mut, StreamExt}; -use polkadot_node_subsystem::messages::AllMessages; +use polkadot_node_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}; use polkadot_primitives::{CandidateHash, OccupiedCore}; use test_helpers::dummy_candidate_descriptor; @@ -64,7 +64,7 @@ fn construct_availability_bitfield_works() { AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryChunkAvailability(c_hash, vidx, tx), ) => { - assert_eq!(validator_index, vidx); + assert_eq!(validator_index, vidx.into()); tx.send(c_hash == hash_a).unwrap(); }, diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index 0cf4707aad29b3931cab7d65005799cf58c8c38e..e79b3a734b8f6ed1f0d18c92a0697a8a5ba2e255 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -16,7 +16,7 @@ futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../../gum" } sp-maybe-compressed-blob = { package = "sp-maybe-compressed-blob", path = "../../../../substrate/primitives/maybe-compressed-blob" } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } polkadot-primitives = { path = "../../../primitives" } polkadot-parachain-primitives = { path = "../../../parachain" } diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index f4d02d3f47b23ac4e71fa44c790bf4b0d1538902..bd8531c207847142a3b2ecd4407ef11253251034 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -21,7 +21,7 @@ sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } [dev-dependencies] futures = { version = "0.3.30", features = ["thread-pool"] } maplit = "1.0.2" -parity-scale-codec = "3.6.1" +parity-scale-codec = "3.6.12" polkadot-node-primitives = { path = "../../primitives" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml index 318f27a43086e4c6bee0bf0a0b18a78c9cf525be..b58053b5417eceb0bddfc2f34df5f1d84126c1c0 100644 --- a/polkadot/node/core/chain-selection/Cargo.toml +++ b/polkadot/node/core/chain-selection/Cargo.toml @@ -19,7 +19,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } kvdb = "0.13.0" thiserror = { workspace = true } -parity-scale-codec = "3.6.1" +parity-scale-codec = "3.6.12" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index cd3238449bea9d19ec65f418237282523ff3448a..8bd510697c913f10020605d335fb1748a2bb43c3 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -12,11 +12,11 @@ 
workspace = true [dependencies] futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } -parity-scale-codec = "3.6.1" +parity-scale-codec = "3.6.12" kvdb = "0.13.0" thiserror = { workspace = true } schnellru = "0.2.1" -fatality = "0.0.6" +fatality = "0.1.1" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs index 05ea7323af1419d770cb7b68e15ee7887ceeaab8..b58ce570f8fff767a975487364cc6fe826c329e3 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -305,6 +305,7 @@ async fn participate( req.candidate_receipt().clone(), req.session(), None, + None, recover_available_data_tx, )) .await; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs index 367454115f0be8e9aaccaf73b13e721a585c9dd7..1316508e84cf82dbecb49f7cc8509eabeeb00ea3 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -132,7 +132,7 @@ pub async fn participation_missing_availability(ctx_handle: &mut VirtualOverseer assert_matches!( ctx_handle.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Err(RecoveryError::Unavailable)).unwrap(); }, @@ -151,7 +151,7 @@ async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) { assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Ok(available_data)).unwrap(); }, @@ -195,7 +195,7 @@ fn same_req_wont_get_queued_if_participation_is_already_running() { assert_matches!( ctx_handle.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Err(RecoveryError::Unavailable)).unwrap(); }, @@ -260,7 +260,7 @@ fn reqs_get_queued_when_out_of_capacity() { { match ctx_handle.recv().await { AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx), + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx), ) => { tx.send(Err(RecoveryError::Unavailable)).unwrap(); recover_available_data_msg_count += 1; @@ -346,7 +346,7 @@ fn cannot_participate_if_cannot_recover_available_data() { assert_matches!( ctx_handle.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Err(RecoveryError::Unavailable)).unwrap(); }, @@ -412,7 +412,7 @@ fn cast_invalid_vote_if_available_data_is_invalid() { assert_matches!( ctx_handle.recv().await, AllMessages::AvailabilityRecovery( - AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx) + AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx) ) => { tx.send(Err(RecoveryError::Invalid)).unwrap(); }, diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml 
b/polkadot/node/core/prospective-parachains/Cargo.toml
index ab3cef99e54ff6da279ec30728c308ed8bbf3da6..5b4f12a5fbdaffd345cc8f01d92691232deb5046 100644
--- a/polkadot/node/core/prospective-parachains/Cargo.toml
+++ b/polkadot/node/core/prospective-parachains/Cargo.toml
@@ -12,9 +12,9 @@ workspace = true
 
 [dependencies]
 futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
-parity-scale-codec = "3.6.4"
+parity-scale-codec = "3.6.12"
 thiserror = { workspace = true }
-fatality = "0.0.6"
+fatality = "0.1.1"
 bitvec = "1"
 
 polkadot-primitives = { path = "../../../primitives" }
@@ -23,7 +23,6 @@ polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 
 [dev-dependencies]
-rstest = "0.18.2"
 assert_matches = "1"
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
 polkadot-node-subsystem-types = { path = "../../subsystem-types" }
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
index 86814b976d13424281203aa5bca59b0a918b7251..f87d4820ff9af242bf28ca7170527ec72d1963b8 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
@@ -14,35 +14,49 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
 
-//! A tree utility for managing parachain fragments not referenced by the relay-chain.
+//! Utility for managing parachain fragments not referenced by the relay-chain.
 //!
 //! # Overview
 //!
-//! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`] which are meant to
-//! be used in close conjunction. Each fragment tree is associated with a particular relay-parent
-//! and each node in the tree represents a candidate. Each parachain has a single candidate storage,
-//! but can have multiple trees for each relay chain block in the view.
+//! This module exposes two main types: [`FragmentChain`] and [`CandidateStorage`] which are meant
+//! to be used in close conjunction. Each fragment chain is associated with a particular
+//! relay-parent and each node in the chain represents a candidate. Each parachain has a single
+//! candidate storage, but can have one chain for each relay chain block in the view.
+//! Therefore, the same candidate can be present in multiple fragment chains of a parachain. One of
+//! the purposes of the candidate storage is to deduplicate the large candidate data that is being
+//! referenced from multiple fragment chains.
 //!
-//! A tree has an associated [`Scope`] which defines limits on candidates within the tree.
+//! A chain has an associated [`Scope`] which defines limits on candidates within the chain.
 //! Candidates themselves have their own [`Constraints`] which are either the constraints from the
-//! scope, or, if there are previous nodes in the tree, a modified version of the previous
+//! scope, or, if there are previous nodes in the chain, a modified version of the previous
 //! candidate's constraints.
 //!
+//! Another use of the `CandidateStorage` is to keep a record of candidates which may not yet be
+//! included in any chain, but which may become part of a chain in the future. This is needed for
+//! elastic scaling, so that we may parallelise the backing process across different groups. As long
+//! as some basic constraints are not violated by an unconnected candidate (like the relay parent
+//! being in scope), we proceed with the backing process, hoping that its predecessors will be
+//! backed soon enough. This is commonly called a potential candidate. Note that not all potential
+//! candidates will be maintained in the CandidateStorage. The total number of connected + potential
+//! candidates will be at most max_candidate_depth + 1.
+//!
 //! This module also makes use of types provided by the Inclusion Emulator module, such as
 //! [`Fragment`] and [`Constraints`]. These perform the actual job of checking for validity of
 //! prospective fragments.
 //!
-//! # Usage
+//! # Parachain forks
 //!
-//! It's expected that higher-level code will have a tree for each relay-chain block which might
-//! reasonably have blocks built upon it.
+//! Parachains are expected to not create forks, hence the use of fragment chains as opposed to
+//! fragment trees. If parachains do create forks, their performance with regard to async backing
+//! and elastic scaling will suffer, because different validators will have different views of the
+//! future.
 //!
-//! Because a para only has a single candidate storage, trees only store indices into the storage.
-//! The storage is meant to be pruned when trees are dropped by higher-level code.
+//! This is a compromise we can make - collators which want to use async backing and elastic scaling
+//! need to cooperate for the highest throughput.
 //!
-//! # Cycles
+//! # Parachain cycles
 //!
-//! Nodes do not uniquely refer to a parachain block for two reasons.
+//! Parachains can create cycles, because:
 //!   1. There's no requirement that head-data is unique for a parachain. Furthermore, a parachain
 //!      is under no obligation to be acyclic, and this is mostly just because it's totally
 //!      inefficient to enforce it. Practical use-cases are acyclic, but there is still more than
@@ -50,34 +64,17 @@
 //!   2. and candidates only refer to their parent by its head-data. This whole issue could be
 //!      resolved by having candidates reference their parent by candidate hash.
 //!
-//! The implication is that when we receive a candidate receipt, there are actually multiple
-//! possibilities for any candidates between the para-head recorded in the relay parent's state
-//! and the candidate in question.
-//!
-//! This means that our candidates need to handle multiple parents and that depth is an
-//! attribute of a node in a tree, not a candidate. Put another way, the same candidate might
-//! have different depths in different parts of the tree.
+//! However, dealing with cycles increases complexity during the backing/inclusion process for no
+//! practical reason. Therefore, fragment chains will not accept such candidates.
 //!
-//! As an extreme example, a candidate which produces head-data which is the same as its parent
-//! can correspond to multiple nodes within the same [`FragmentTree`]. Such cycles are bounded
-//! by the maximum depth allowed by the tree. An example with `max_depth: 4`:
+//! On the other hand, enforcing that a parachain will NEVER create cycles would be very complicated
+//! (looping through the entire parachain's history on every new candidate or changing the candidate
+//! receipt to reference the parent's candidate hash).
 //!
-//! ```text
-//! committed head
-//!        |
-//! depth 0: head_a
-//!        |
-//! depth 1: head_b
-//!        |
-//! depth 2: head_a
-//!        |
-//! depth 3: head_b
-//!        |
-//! depth 4: head_a
-//! ```
+//! # Spam protection
 //!
 //! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied,
-//! [`FragmentTree`] complexity is bounded. This means that higher-level code needs to be selective
+//! [`FragmentChain`] complexity is bounded. This means that higher-level code needs to be selective
 //! about limiting the amount of candidates that are considered.
 //!
 //! The code in this module is not designed for speed or efficiency, but conceptual simplicity.
@@ -90,16 +87,15 @@
 mod tests;
 
 use std::{
-	borrow::Cow,
 	collections::{
 		hash_map::{Entry, HashMap},
 		BTreeMap, HashSet,
 	},
+	sync::Arc,
 };
 
 use super::LOG_TARGET;
-use bitvec::prelude::*;
-use polkadot_node_subsystem::messages::Ancestors;
+use polkadot_node_subsystem::messages::{Ancestors, HypotheticalCandidate};
 use polkadot_node_subsystem_util::inclusion_emulator::{
 	ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo,
 };
@@ -120,11 +116,19 @@ pub enum CandidateStorageInsertionError {
 
 /// Stores candidates and information about them such as their relay-parents and their backing
 /// states.
+#[derive(Clone, Default)]
 pub(crate) struct CandidateStorage {
-	// Index from head data hash to candidate hashes with that head data as a parent.
+	// Index from head data hash to candidate hashes with that head data as a parent. Purely for
+	// efficiency when responding to `ProspectiveValidationDataRequest`s or when trying to find a
+	// new candidate to push to a chain.
+	// Even though having multiple candidates with the same parent would be invalid for a parachain,
+	// it could happen across different relay chain forks, hence the HashSet.
 	by_parent_head: HashMap<Hash, HashSet<CandidateHash>>,
 
-	// Index from head data hash to candidate hashes outputting that head data.
+	// Index from head data hash to candidate hashes outputting that head data. Purely for
+	// efficiency when responding to `ProspectiveValidationDataRequest`s.
+	// Even though having multiple candidates with the same output would be invalid for a parachain,
+	// it could happen across different relay chain forks.
 	by_output_head: HashMap<Hash, HashSet<CandidateHash>>,
 
 	// Index from candidate hash to fragment node.
@@ -132,23 +136,14 @@ pub(crate) struct CandidateStorage {
 }
 
 impl CandidateStorage {
-	/// Create a new `CandidateStorage`.
-	pub fn new() -> Self {
-		CandidateStorage {
-			by_parent_head: HashMap::new(),
-			by_output_head: HashMap::new(),
-			by_candidate_hash: HashMap::new(),
-		}
-	}
-
 	/// Introduce a new candidate.
 	pub fn add_candidate(
 		&mut self,
 		candidate: CommittedCandidateReceipt,
 		persisted_validation_data: PersistedValidationData,
+		state: CandidateState,
 	) -> Result<CandidateHash, CandidateStorageInsertionError> {
 		let candidate_hash = candidate.hash();
-
 		if self.by_candidate_hash.contains_key(&candidate_hash) {
 			return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash))
 		}
@@ -157,24 +152,30 @@ impl CandidateStorage {
 			return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch)
 		}
 
-		let parent_head_hash = persisted_validation_data.parent_head.hash();
-		let output_head_hash = candidate.commitments.head_data.hash();
 		let entry = CandidateEntry {
 			candidate_hash,
+			parent_head_data_hash: persisted_validation_data.parent_head.hash(),
+			output_head_data_hash: candidate.commitments.head_data.hash(),
 			relay_parent: candidate.descriptor.relay_parent,
-			state: CandidateState::Introduced,
-			candidate: ProspectiveCandidate {
-				commitments: Cow::Owned(candidate.commitments),
+			state,
+			candidate: Arc::new(ProspectiveCandidate {
+				commitments: candidate.commitments,
 				collator: candidate.descriptor.collator,
 				collator_signature: candidate.descriptor.signature,
 				persisted_validation_data,
 				pov_hash: candidate.descriptor.pov_hash,
 				validation_code_hash: candidate.descriptor.validation_code_hash,
-			},
+			}),
 		};
 
-		self.by_parent_head.entry(parent_head_hash).or_default().insert(candidate_hash);
-		self.by_output_head.entry(output_head_hash).or_default().insert(candidate_hash);
+		self.by_parent_head
+			.entry(entry.parent_head_data_hash())
+			.or_default()
+			.insert(candidate_hash);
+		self.by_output_head
+			.entry(entry.output_head_data_hash())
+			.or_default()
+			.insert(candidate_hash);
 		// sanity-checked already.
 		self.by_candidate_hash.insert(candidate_hash, entry);
 
@@ -184,21 +185,20 @@ impl CandidateStorage {
 	/// Remove a candidate from the store.
 	pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) {
 		if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) {
-			let parent_head_hash = entry.candidate.persisted_validation_data.parent_head.hash();
-			if let Entry::Occupied(mut e) = self.by_parent_head.entry(parent_head_hash) {
+			if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash())
+			{
 				e.get_mut().remove(&candidate_hash);
 				if e.get().is_empty() {
 					e.remove();
 				}
 			}
-		}
-	}
 
-	/// Note that an existing candidate has been seconded.
-	pub fn mark_seconded(&mut self, candidate_hash: &CandidateHash) {
-		if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) {
-			if entry.state != CandidateState::Backed {
-				entry.state = CandidateState::Seconded;
+			if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash())
+			{
+				e.get_mut().remove(&candidate_hash);
+				if e.get().is_empty() {
+					e.remove();
+				}
 			}
 		}
 	}
@@ -225,6 +225,11 @@ impl CandidateStorage {
 		self.by_candidate_hash.contains_key(candidate_hash)
 	}
 
+	/// Return an iterator over the stored candidates.
+	pub fn candidates(&self) -> impl Iterator<Item = &CandidateEntry> {
+		self.by_candidate_hash.values()
+	}
+
 	/// Retain only candidates which pass the predicate.
	pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) {
 		self.by_candidate_hash.retain(|h, _v| pred(h));
@@ -260,16 +265,17 @@ impl CandidateStorage {
 	}
 
 	/// Returns candidate's relay parent, if present.
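
// [Editorial aside] A toy model (hypothetical, simplified types) of the dual index
// kept by `CandidateStorage` above: every candidate is findable both by the hash of
// its parent head data and by the hash of its output head data, and removal must
// clean up both indexes and prune empty buckets, as the hunk above now does.

use std::collections::{HashMap, HashSet};

#[derive(Default)]
struct Storage {
	by_parent_head: HashMap<u64, HashSet<u64>>,
	by_output_head: HashMap<u64, HashSet<u64>>,
}

impl Storage {
	fn add(&mut self, candidate: u64, parent_head: u64, output_head: u64) {
		self.by_parent_head.entry(parent_head).or_default().insert(candidate);
		self.by_output_head.entry(output_head).or_default().insert(candidate);
	}

	fn remove(&mut self, candidate: u64, parent_head: u64, output_head: u64) {
		// Drop the candidate from both indexes, mirroring the paired
		// `Entry::Occupied` handling above.
		for (index, key) in
			[(&mut self.by_parent_head, parent_head), (&mut self.by_output_head, output_head)]
		{
			if let Some(set) = index.get_mut(&key) {
				set.remove(&candidate);
				if set.is_empty() {
					index.remove(&key);
				}
			}
		}
	}
}

fn main() {
	let mut storage = Storage::default();
	storage.add(7, 1, 2);
	storage.remove(7, 1, 2);
	assert!(storage.by_parent_head.is_empty() && storage.by_output_head.is_empty());
}
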
-	pub(crate) fn relay_parent_by_candidate_hash(
-		&self,
-		candidate_hash: &CandidateHash,
-	) -> Option<Hash> {
+	pub(crate) fn relay_parent_of_candidate(&self, candidate_hash: &CandidateHash) -> Option<Hash> {
 		self.by_candidate_hash.get(candidate_hash).map(|entry| entry.relay_parent)
 	}
 
-	fn iter_para_children<'a>(
+	/// Returns the candidates which have the given head data hash as parent.
+	/// We don't allow forks in a parachain, but we may have multiple candidates with the same
+	/// parent across different relay chain forks. That's why it returns an iterator (but only one
+	/// will be valid and used in the end).
+	fn possible_para_children<'a>(
 		&'a self,
-		parent_head_hash: &Hash,
+		parent_head_hash: &'a Hash,
 	) -> impl Iterator<Item = &'a CandidateEntry> + 'a {
 		let by_candidate_hash = &self.by_candidate_hash;
 		self.by_parent_head
 			.get(parent_head_hash)
@@ -279,10 +285,6 @@ impl CandidateStorage {
 			.filter_map(move |h| by_candidate_hash.get(h))
 	}
 
-	fn get(&'_ self, candidate_hash: &CandidateHash) -> Option<&'_ CandidateEntry> {
-		self.by_candidate_hash.get(candidate_hash)
-	}
-
 	#[cfg(test)]
 	pub fn len(&self) -> (usize, usize) {
 		(self.by_parent_head.len(), self.by_candidate_hash.len())
@@ -292,25 +294,38 @@ impl CandidateStorage {
 }
 
 /// The state of a candidate.
 ///
 /// Candidates aren't even considered until they've at least been seconded.
-#[derive(Debug, PartialEq)]
-enum CandidateState {
-	/// The candidate has been introduced in a spam-protected way but
-	/// is not necessarily backed.
-	Introduced,
+#[derive(Debug, PartialEq, Clone)]
+pub(crate) enum CandidateState {
 	/// The candidate has been seconded.
 	Seconded,
 	/// The candidate has been completely backed by the group.
 	Backed,
 }
 
-#[derive(Debug)]
-struct CandidateEntry {
+#[derive(Debug, Clone)]
+pub(crate) struct CandidateEntry {
 	candidate_hash: CandidateHash,
+	parent_head_data_hash: Hash,
+	output_head_data_hash: Hash,
 	relay_parent: Hash,
-	candidate: ProspectiveCandidate<'static>,
+	candidate: Arc<ProspectiveCandidate>,
 	state: CandidateState,
 }
 
+impl CandidateEntry {
+	pub fn hash(&self) -> CandidateHash {
+		self.candidate_hash
+	}
+
+	pub fn parent_head_data_hash(&self) -> Hash {
+		self.parent_head_data_hash
+	}
+
+	pub fn output_head_data_hash(&self) -> Hash {
+		self.output_head_data_hash
+	}
+}
+
 /// A candidate existing on-chain but pending availability, for special treatment
 /// in the [`Scope`].
 #[derive(Debug, Clone)]
@@ -321,15 +336,22 @@ pub(crate) struct PendingAvailability {
 	pub relay_parent: RelayChainBlockInfo,
 }
 
-/// The scope of a [`FragmentTree`].
-#[derive(Debug)]
+/// The scope of a [`FragmentChain`].
+#[derive(Debug, Clone)]
 pub(crate) struct Scope {
+	/// The assigned para id of this `FragmentChain`.
 	para: ParaId,
+	/// The relay parent we're currently building on top of.
 	relay_parent: RelayChainBlockInfo,
+	/// The other relay parents candidates are allowed to build upon, mapped by the block number.
 	ancestors: BTreeMap<BlockNumber, RelayChainBlockInfo>,
+	/// The other relay parents candidates are allowed to build upon, mapped by the block hash.
 	ancestors_by_hash: HashMap<Hash, RelayChainBlockInfo>,
+	/// The candidates pending availability at this block.
 	pending_availability: Vec<PendingAvailability>,
+	/// The base constraints derived from the latest included candidate.
 	base_constraints: Constraints,
+	/// Equal to `max_candidate_depth`.
 	max_depth: usize,
 }
 
@@ -398,7 +420,7 @@ impl Scope {
 		})
 	}
 
-	/// Get the earliest relay-parent allowed in the scope of the fragment tree.
+	/// Get the earliest relay-parent allowed in the scope of the fragment chain.
 	pub fn earliest_relay_parent(&self) -> RelayChainBlockInfo {
 		self.ancestors
 			.iter()
@@ -407,8 +429,8 @@ impl Scope {
 			.unwrap_or_else(|| self.relay_parent.clone())
 	}
 
-	/// Get the ancestor of the fragment tree by hash.
-	pub fn ancestor_by_hash(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
+	/// Get the relay ancestor of the fragment chain by hash.
+	pub fn ancestor(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
 		if hash == &self.relay_parent.hash {
 			return Some(self.relay_parent.clone())
 		}
@@ -430,67 +452,48 @@ impl Scope {
 	}
 }
 
-/// We use indices into a flat vector to refer to nodes in the tree.
-/// Every tree also has an implicit root.
-#[derive(Debug, Clone, Copy, PartialEq)]
-enum NodePointer {
-	Root,
-	Storage(usize),
-}
-
-/// A hypothetical candidate, which may or may not exist in
-/// the fragment tree already.
-pub(crate) enum HypotheticalCandidate<'a> {
-	Complete {
-		receipt: Cow<'a, CommittedCandidateReceipt>,
-		persisted_validation_data: Cow<'a, PersistedValidationData>,
-	},
-	Incomplete {
-		relay_parent: Hash,
-		parent_head_data_hash: Hash,
-	},
+pub struct FragmentNode {
+	fragment: Fragment,
+	candidate_hash: CandidateHash,
+	cumulative_modifications: ConstraintModifications,
 }
 
-impl<'a> HypotheticalCandidate<'a> {
-	fn parent_head_data_hash(&self) -> Hash {
-		match *self {
-			HypotheticalCandidate::Complete { ref persisted_validation_data, .. } =>
-				persisted_validation_data.as_ref().parent_head.hash(),
-			HypotheticalCandidate::Incomplete { ref parent_head_data_hash, .. } =>
-				*parent_head_data_hash,
-		}
-	}
-
+impl FragmentNode {
 	fn relay_parent(&self) -> Hash {
-		match *self {
-			HypotheticalCandidate::Complete { ref receipt, .. } =>
-				receipt.descriptor().relay_parent,
-			HypotheticalCandidate::Incomplete { ref relay_parent, .. } => *relay_parent,
-		}
+		self.fragment.relay_parent().hash
 	}
 }
 
-/// This is a tree of candidates based on some underlying storage of candidates and a scope.
+/// Response given by `can_add_candidate_as_potential`
+#[derive(PartialEq, Debug)]
+pub enum PotentialAddition {
+	/// Can be added as either connected or unconnected candidate.
+	Anyhow,
+	/// Can only be added as a connected candidate to the chain.
+	IfConnected,
+	/// Cannot be added.
+	None,
+}
+
+/// This is a chain of candidates based on some underlying storage of candidates and a scope.
 ///
-/// All nodes in the tree must be either pending availability or within the scope. Within the scope
+/// All nodes in the chain must be either pending availability or within the scope. Within the scope
 /// means it's built off of the relay-parent or an ancestor.
-pub(crate) struct FragmentTree {
+pub(crate) struct FragmentChain {
 	scope: Scope,
 
-	// Invariant: a contiguous prefix of the 'nodes' storage will contain
-	// the top-level children.
-	nodes: Vec<FragmentNode>,
+	chain: Vec<FragmentNode>,
+
+	candidates: HashSet<CandidateHash>,
 
-	// The candidates stored in this tree, mapped to a bitvec indicating the depths
-	// where the candidate is stored.
-	candidates: HashMap<CandidateHash, BitVec<u16, Msb0>>,
+	// Index from head data hash to candidate hashes with that head data as a parent.
+	by_parent_head: HashMap<Hash, CandidateHash>,
+	// Index from head data hash to candidate hashes outputting that head data.
+	by_output_head: HashMap<Hash, CandidateHash>,
 }
 
-impl FragmentTree {
-	/// Create a new [`FragmentTree`] with given scope and populated from the storage.
-	///
-	/// Can be populated recursively (i.e. `populate` will pick up candidates that build on other
-	/// candidates).
+impl FragmentChain {
+	/// Create a new [`FragmentChain`] with given scope and populated from the storage.
 	pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self {
 		gum::trace!(
 			target: LOG_TARGET,
@@ -498,285 +501,152 @@ impl FragmentTree {
 			relay_parent_num = scope.relay_parent.number,
 			para_id = ?scope.para,
 			ancestors = scope.ancestors.len(),
-			"Instantiating Fragment Tree",
+			"Instantiating Fragment Chain",
 		);
 
-		let mut tree = FragmentTree { scope, nodes: Vec::new(), candidates: HashMap::new() };
+		let mut fragment_chain = Self {
+			scope,
+			chain: Vec::new(),
+			candidates: HashSet::new(),
+			by_parent_head: HashMap::new(),
+			by_output_head: HashMap::new(),
+		};
 
-		tree.populate_from_bases(storage, vec![NodePointer::Root]);
+		fragment_chain.populate_chain(storage);
 
-		tree
+		fragment_chain
 	}
 
-	/// Get the scope of the Fragment Tree.
+	/// Get the scope of the Fragment Chain.
 	pub fn scope(&self) -> &Scope {
 		&self.scope
 	}
 
-	// Inserts a node and updates child references in a non-root parent.
-	fn insert_node(&mut self, node: FragmentNode) {
-		let pointer = NodePointer::Storage(self.nodes.len());
-		let parent_pointer = node.parent;
-		let candidate_hash = node.candidate_hash;
-
-		let max_depth = self.scope.max_depth;
-
-		self.candidates
-			.entry(candidate_hash)
-			.or_insert_with(|| bitvec![u16, Msb0; 0; max_depth + 1])
-			.set(node.depth, true);
-
-		match parent_pointer {
-			NodePointer::Storage(ptr) => {
-				self.nodes.push(node);
-				self.nodes[ptr].children.push((pointer, candidate_hash))
-			},
-			NodePointer::Root => {
-				// Maintain the invariant of node storage beginning with depth-0.
-				if self.nodes.last().map_or(true, |last| last.parent == NodePointer::Root) {
-					self.nodes.push(node);
-				} else {
-					let pos =
-						self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).count();
-					self.nodes.insert(pos, node);
-				}
-			},
-		}
-	}
-
-	fn node_has_candidate_child(
-		&self,
-		pointer: NodePointer,
-		candidate_hash: &CandidateHash,
-	) -> bool {
-		self.node_candidate_child(pointer, candidate_hash).is_some()
-	}
-
-	fn node_candidate_child(
-		&self,
-		pointer: NodePointer,
-		candidate_hash: &CandidateHash,
-	) -> Option<NodePointer> {
-		match pointer {
-			NodePointer::Root => self
-				.nodes
-				.iter()
-				.take_while(|n| n.parent == NodePointer::Root)
-				.enumerate()
-				.find(|(_, n)| &n.candidate_hash == candidate_hash)
-				.map(|(i, _)| NodePointer::Storage(i)),
-			NodePointer::Storage(ptr) =>
-				self.nodes.get(ptr).and_then(|n| n.candidate_child(candidate_hash)),
-		}
+	/// Returns the number of candidates in the chain
+	pub(crate) fn len(&self) -> usize {
+		self.candidates.len()
 	}
 
-	/// Returns an O(n) iterator over the hashes of candidates contained in the
-	/// tree.
-	pub(crate) fn candidates(&self) -> impl Iterator<Item = CandidateHash> + '_ {
-		self.candidates.keys().cloned()
+	/// Whether the candidate exists.
+	pub(crate) fn contains_candidate(&self, candidate: &CandidateHash) -> bool {
+		self.candidates.contains(candidate)
 	}
 
-	/// Whether the candidate exists and at what depths.
-	pub(crate) fn candidate(&self, candidate: &CandidateHash) -> Option<Vec<usize>> {
-		self.candidates.get(candidate).map(|d| d.iter_ones().collect())
+	/// Return a vector of the chain's candidate hashes, in-order.
+	pub(crate) fn to_vec(&self) -> Vec<CandidateHash> {
+		self.chain.iter().map(|candidate| candidate.candidate_hash).collect()
 	}
 
-	/// Add a candidate and recursively populate from storage.
+	/// Try accumulating more candidates onto the chain.
 	///
-	/// Candidates can be added either as children of the root or children of other candidates.
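
// [Editorial aside] A simplified sketch of what `populate` / `populate_chain` do
// conceptually (names and types are stand-ins, not the real implementation): starting
// from the required parent head given by the scope's base constraints, repeatedly pick
// the stored candidate whose parent head matches the current chain head, up to the
// maximum depth. A chain of up to `max_depth + 1` candidates can result.

use std::collections::HashMap;

/// `storage` maps parent-head hash -> (candidate hash, output-head hash).
fn populate_chain(
	mut required_parent: u64,
	storage: &HashMap<u64, (u64, u64)>,
	max_depth: usize,
) -> Vec<u64> {
	let mut chain = Vec::new();
	while chain.len() <= max_depth {
		match storage.get(&required_parent) {
			Some(&(candidate_hash, output_head)) => {
				chain.push(candidate_hash);
				// The candidate's output becomes the parent the next one must build on.
				required_parent = output_head;
			},
			None => break,
		}
	}
	chain
}

fn main() {
	// Head 0 -> candidate 10 (outputs head 1) -> candidate 11 (outputs head 2).
	let storage = HashMap::from([(0u64, (10u64, 1u64)), (1, (11, 2))]);
	assert_eq!(populate_chain(0, &storage, 3), vec![10, 11]);
}
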
-	pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) {
-		let candidate_entry = match storage.get(&hash) {
-			None => return,
-			Some(e) => e,
-		};
-
-		let candidate_parent = &candidate_entry.candidate.persisted_validation_data.parent_head;
-
-		// Select an initial set of bases, whose required relay-parent matches that of the
-		// candidate.
-		let root_base = if &self.scope.base_constraints.required_parent == candidate_parent {
-			Some(NodePointer::Root)
-		} else {
-			None
-		};
-
-		let non_root_bases = self
-			.nodes
-			.iter()
-			.enumerate()
-			.filter(|(_, n)| {
-				n.cumulative_modifications.required_parent.as_ref() == Some(candidate_parent)
-			})
-			.map(|(i, _)| NodePointer::Storage(i));
-
-		let bases = root_base.into_iter().chain(non_root_bases).collect();
-
-		// Pass this into the population function, which will sanity-check stuff like depth,
-		// fragments, etc. and then recursively populate.
-		self.populate_from_bases(storage, bases);
+	/// Candidates can only be added if they build on the already existing chain.
+	pub(crate) fn extend_from_storage(&mut self, storage: &CandidateStorage) {
+		self.populate_chain(storage);
 	}
 
-	/// Returns `true` if the path from the root to the node's parent (inclusive)
-	/// only contains backed candidates, `false` otherwise.
-	fn path_contains_backed_only_candidates(
+	/// Returns the hypothetical state of a candidate with the given hash and parent head data
+	/// with regard to the existing chain.
+	///
+	/// Returns true if either:
+	/// - the candidate is already present
+	/// - the candidate can be added to the chain
+	/// - the candidate could potentially be added to the chain in the future (its ancestors are
+	///   still unknown but it doesn't violate other rules).
+	///
+	/// If this returns false, the candidate could never be added to the current chain (not now,
+	/// not ever).
+	pub(crate) fn hypothetical_membership(
 		&self,
-		mut parent_pointer: NodePointer,
+		candidate: HypotheticalCandidate,
 		candidate_storage: &CandidateStorage,
 	) -> bool {
-		while let NodePointer::Storage(ptr) = parent_pointer {
-			let node = &self.nodes[ptr];
-			let candidate_hash = &node.candidate_hash;
-
-			if candidate_storage.get(candidate_hash).map_or(true, |candidate_entry| {
-				!matches!(candidate_entry.state, CandidateState::Backed)
-			}) {
-				return false
-			}
-			parent_pointer = node.parent;
+		let candidate_hash = candidate.candidate_hash();
+
+		// If we've already used this candidate in the chain.
+		if self.candidates.contains(&candidate_hash) {
+			return true
 		}
 
-		true
-	}
+		let can_add_as_potential = self.can_add_candidate_as_potential(
+			candidate_storage,
+			&candidate.candidate_hash(),
+			&candidate.relay_parent(),
+			candidate.parent_head_data_hash(),
+			candidate.output_head_data_hash(),
+		);
 
-	/// Returns the hypothetical depths where a candidate with the given hash and parent head data
-	/// would be added to the tree, without applying other candidates recursively on top of it.
-	///
-	/// If the candidate is already known, this returns the actual depths where this
-	/// candidate is part of the tree.
-	///
-	/// Setting `backed_in_path_only` to `true` ensures this function only returns such membership
-	/// that every candidate in the path from the root is backed.
-	pub(crate) fn hypothetical_depths(
-		&self,
-		hash: CandidateHash,
-		candidate: HypotheticalCandidate,
-		candidate_storage: &CandidateStorage,
-		backed_in_path_only: bool,
-	) -> Vec<usize> {
-		// if `true`, we always have to traverse the tree.
- if !backed_in_path_only { - // if known. - if let Some(depths) = self.candidates.get(&hash) { - return depths.iter_ones().collect() - } + if can_add_as_potential == PotentialAddition::None { + return false } - // if out of scope. - let candidate_relay_parent = candidate.relay_parent(); - let candidate_relay_parent = if self.scope.relay_parent.hash == candidate_relay_parent { - self.scope.relay_parent.clone() - } else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { - info.clone() + let Some(candidate_relay_parent) = self.scope.ancestor(&candidate.relay_parent()) else { + // can_add_candidate_as_potential already checked for this, but just to be safe. + return false + }; + + let identity_modifications = ConstraintModifications::identity(); + let cumulative_modifications = if let Some(last_candidate) = self.chain.last() { + &last_candidate.cumulative_modifications } else { - return Vec::new() + &identity_modifications }; - let max_depth = self.scope.max_depth; - let mut depths = bitvec![u16, Msb0; 0; max_depth + 1]; - - // iterate over all nodes where parent head-data matches, - // relay-parent number is <= candidate, and depth < max_depth. - let node_pointers = (0..self.nodes.len()).map(NodePointer::Storage); - for parent_pointer in std::iter::once(NodePointer::Root).chain(node_pointers) { - let (modifications, child_depth, earliest_rp) = match parent_pointer { - NodePointer::Root => - (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()), - NodePointer::Storage(ptr) => { - let node = &self.nodes[ptr]; - let parent_rp = self - .scope - .ancestor_by_hash(&node.relay_parent()) - .or_else(|| { - self.scope - .get_pending_availability(&node.candidate_hash) - .map(|_| self.scope.earliest_relay_parent()) - }) - .expect("All nodes in tree are either pending availability or within scope; qed"); - - (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) + let child_constraints = + match self.scope.base_constraints.apply_modifications(&cumulative_modifications) { + Err(e) => { + gum::debug!( + target: LOG_TARGET, + new_parent_head = ?cumulative_modifications.required_parent, + ?candidate_hash, + err = ?e, + "Failed to apply modifications", + ); + + return false }, + Ok(c) => c, }; - if child_depth > max_depth { - continue - } - - if earliest_rp.number > candidate_relay_parent.number { - continue - } - - let child_constraints = - match self.scope.base_constraints.apply_modifications(&modifications) { - Err(e) => { - gum::debug!( - target: LOG_TARGET, - new_parent_head = ?modifications.required_parent, - err = ?e, - "Failed to apply modifications", - ); - - continue - }, - Ok(c) => c, - }; - - let parent_head_hash = candidate.parent_head_data_hash(); - if parent_head_hash != child_constraints.required_parent.hash() { - continue - } - + let parent_head_hash = candidate.parent_head_data_hash(); + if parent_head_hash == child_constraints.required_parent.hash() { // We do additional checks for complete candidates. - if let HypotheticalCandidate::Complete { ref receipt, ref persisted_validation_data } = - candidate + if let HypotheticalCandidate::Complete { + ref receipt, + ref persisted_validation_data, + .. 
+ } = candidate { - let prospective_candidate = ProspectiveCandidate { - commitments: Cow::Borrowed(&receipt.commitments), - collator: receipt.descriptor().collator.clone(), - collator_signature: receipt.descriptor().signature.clone(), - persisted_validation_data: persisted_validation_data.as_ref().clone(), - pov_hash: receipt.descriptor().pov_hash, - validation_code_hash: receipt.descriptor().validation_code_hash, - }; - - if Fragment::new( - candidate_relay_parent.clone(), - child_constraints, - prospective_candidate, + if Fragment::check_against_constraints( + &candidate_relay_parent, + &child_constraints, + &receipt.commitments, + &receipt.descriptor().validation_code_hash, + persisted_validation_data, ) .is_err() { - continue + gum::debug!( + target: LOG_TARGET, + "Fragment::check_against_constraints() returned error", + ); + return false } } - // Check that the path only contains backed candidates, if necessary. - if !backed_in_path_only || - self.path_contains_backed_only_candidates(parent_pointer, candidate_storage) - { - depths.set(child_depth, true); - } + // If we got this far, it can be added to the chain right now. + true + } else if can_add_as_potential == PotentialAddition::Anyhow { + // Otherwise it is or can be an unconnected candidate, but only if PotentialAddition + // does not force us to only add a connected candidate. + true + } else { + false } - - depths.iter_ones().collect() } /// Select `count` candidates after the given `ancestors` which pass /// the predicate and have not already been backed on chain. /// - /// Does an exhaustive search into the tree after traversing the ancestors path. - /// If the ancestors draw out a path that can be traversed in multiple ways, no - /// candidates will be returned. - /// If the ancestors do not draw out a full path (the path contains holes), candidates will be - /// suggested that may fill these holes. - /// If the ancestors don't draw out a valid path, no candidates will be returned. If there are - /// multiple possibilities of the same size, this will select the first one. If there is no - /// chain of size `count` that matches the criteria, this will return the largest chain it could - /// find with the criteria. If there are no candidates meeting those criteria, returns an empty - /// `Vec`. - /// Cycles are accepted, but this code expects that the runtime will deduplicate - /// identical candidates when occupying the cores (when proposing to back A->B->A, only A will - /// be backed on chain). - /// /// The intention of the `ancestors` is to allow queries on the basis of /// one or more candidates which were previously pending availability becoming /// available or candidates timing out. @@ -789,362 +659,334 @@ impl FragmentTree { if count == 0 { return vec![] } - // First, we need to order the ancestors. - // The node returned is the one from which we can start finding new backable candidates. - let Some(base_node) = self.find_ancestor_path(ancestors) else { return vec![] }; - - self.find_backable_chain_inner( - base_node, - count, - count, - &pred, - &mut Vec::with_capacity(count as usize), - ) - } + let base_pos = self.find_ancestor_path(ancestors); - // Try finding a candidate chain starting from `base_node` of length `expected_count`. - // If not possible, return the longest one we could find. - // Does a depth-first search, since we're optimistic that there won't be more than one such - // chains (parachains shouldn't usually have forks). So in the usual case, this will conclude - // in `O(expected_count)`. 
-	// Cycles are accepted, but this doesn't allow for infinite execution time, because the maximum
-	// depth we'll reach is `expected_count`.
-	//
-	// Worst case performance is `O(num_forks ^ expected_count)`, the same as populating the tree.
-	// Although an exponential function, this is actually a constant that can only be altered via
-	// sudo/governance, because:
-	// 1. `num_forks` at a given level is at most `max_candidate_depth * max_validators_per_core`
-	//    (because each validator in the assigned group can second `max_candidate_depth`
-	//    candidates). The prospective-parachains subsystem assumes that the number of para forks is
-	//    limited by collator-protocol and backing subsystems. In practice, this is a constant which
-	//    can only be altered by sudo or governance.
-	// 2. `expected_count` is equal to the number of cores a para is scheduled on (in an elastic
-	//    scaling scenario). For non-elastic-scaling, this is just 1. In practice, this should be a
-	//    small number (1-3), capped by the total number of available cores (a constant alterable
-	//    only via governance/sudo).
-	fn find_backable_chain_inner(
-		&self,
-		base_node: NodePointer,
-		expected_count: u32,
-		remaining_count: u32,
-		pred: &dyn Fn(&CandidateHash) -> bool,
-		accumulator: &mut Vec<CandidateHash>,
-	) -> Vec<CandidateHash> {
-		if remaining_count == 0 {
-			// The best option is the chain we've accumulated so far.
-			return accumulator.to_vec();
+		let actual_end_index = std::cmp::min(base_pos + (count as usize), self.chain.len());
+		let mut res = Vec::with_capacity(actual_end_index - base_pos);
+
+		for elem in &self.chain[base_pos..actual_end_index] {
+			if self.scope.get_pending_availability(&elem.candidate_hash).is_none() &&
+				pred(&elem.candidate_hash)
+			{
+				res.push(elem.candidate_hash);
+			} else {
+				break
+			}
 		}
 
-		let children: Vec<_> = match base_node {
-			NodePointer::Root => self
-				.nodes
-				.iter()
-				.enumerate()
-				.take_while(|(_, n)| n.parent == NodePointer::Root)
-				.filter(|(_, n)| self.scope.get_pending_availability(&n.candidate_hash).is_none())
-				.filter(|(_, n)| pred(&n.candidate_hash))
-				.map(|(ptr, n)| (NodePointer::Storage(ptr), n.candidate_hash))
-				.collect(),
-			NodePointer::Storage(base_node_ptr) => {
-				let base_node = &self.nodes[base_node_ptr];
-
-				base_node
-					.children
-					.iter()
-					.filter(|(_, hash)| self.scope.get_pending_availability(&hash).is_none())
-					.filter(|(_, hash)| pred(&hash))
-					.map(|(ptr, hash)| (*ptr, *hash))
-					.collect()
-			},
-		};
+
+		res
+	}
+
+	// Tries to order the ancestors into a viable path from root to the last one.
+	// Stops when the ancestors are all used or when a node in the chain is not present in the
+	// ancestor set. Returns the index in the chain where the search stopped.
+	fn find_ancestor_path(&self, mut ancestors: Ancestors) -> usize {
+		if self.chain.is_empty() {
+			return 0;
+		}
 
-		let mut best_result = accumulator.clone();
-		for (child_ptr, child_hash) in children {
-			accumulator.push(child_hash);
-
-			let result = self.find_backable_chain_inner(
-				child_ptr,
-				expected_count,
-				remaining_count - 1,
-				&pred,
-				accumulator,
-			);
-
-			accumulator.pop();
-
-			// Short-circuit the search if we've found the right length. Otherwise, we'll
-			// search for a max.
-			// Taking the first best selection doesn't introduce bias or become gameable,
-			// because `find_ancestor_path` uses a `HashSet` to track the ancestors, which
-			// makes the order in which ancestors are visited non-deterministic.
-			if result.len() == expected_count as usize {
-				return result
-			} else if best_result.len() < result.len() {
-				best_result = result;
+		for (index, candidate) in self.chain.iter().enumerate() {
+			if !ancestors.remove(&candidate.candidate_hash) {
+				return index
			}
		}

-		best_result
+		// This means that we found the entire chain in the ancestor set. There won't be anything
+		// left to back.
+		self.chain.len()
+	}
+
+	// Return the earliest relay parent a new candidate can have in order to be added to the chain.
+	// This is the relay parent of the last candidate in the chain.
+	// The value returned may not be valid if we want to add a candidate pending availability, which
+	// may have a relay parent which is out of scope. Special handling is needed in that case.
+	// `None` is returned if the candidate's relay parent info cannot be found.
+	fn earliest_relay_parent(&self) -> Option<RelayChainBlockInfo> {
+		if let Some(last_candidate) = self.chain.last() {
+			self.scope.ancestor(&last_candidate.relay_parent()).or_else(|| {
+				// if the relay-parent is out of scope _and_ it is in the chain,
+				// it must be a candidate pending availability.
+				self.scope
+					.get_pending_availability(&last_candidate.candidate_hash)
+					.map(|c| c.relay_parent.clone())
+			})
+		} else {
+			Some(self.scope.earliest_relay_parent())
+		}
+	}
+
+	// Checks if this candidate could be added in the future to this chain.
+	// This assumes that the chain does not already contain this candidate. It may or may not be
+	// present in the `CandidateStorage`.
+	// Even if the candidate is a potential candidate, this function will indicate that it can be
+	// kept only if there's enough room for it.
+	pub(crate) fn can_add_candidate_as_potential(
+		&self,
+		storage: &CandidateStorage,
+		candidate_hash: &CandidateHash,
+		relay_parent: &Hash,
+		parent_head_hash: Hash,
+		output_head_hash: Option<Hash>,
+	) -> PotentialAddition {
+		// If we've got enough candidates for the configured depth, no point in adding more.
+		if self.chain.len() > self.scope.max_depth {
+			return PotentialAddition::None
+		}
+
+		if !self.check_potential(relay_parent, parent_head_hash, output_head_hash) {
+			return PotentialAddition::None
+		}
+
+		let present_in_storage = storage.contains(candidate_hash);
+
+		let unconnected = self
+			.find_unconnected_potential_candidates(
+				storage,
+				present_in_storage.then_some(candidate_hash),
+			)
+			.len();
+
+		if (self.chain.len() + unconnected) < self.scope.max_depth {
+			PotentialAddition::Anyhow
+		} else if (self.chain.len() + unconnected) == self.scope.max_depth {
+			// If we've only one slot left to fill, it must be filled with a connected candidate.
+			PotentialAddition::IfConnected
+		} else {
+			PotentialAddition::None
+		}
	}

-	// Orders the ancestors into a viable path from root to the last one.
-	// Returns a pointer to the last node in the path.
-	// We assume that the ancestors form a chain (that the
-	// av-cores do not back parachain forks), None is returned otherwise.
-	// If we cannot use all ancestors, stop at the first found hole in the chain. This usually
-	// translates to a timed out candidate.
-	fn find_ancestor_path(&self, mut ancestors: Ancestors) -> Option<NodePointer> {
-		// The number of elements in the path we've processed so far.
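// A toy model of the new `find_ancestor_path` above, with `u8` in place of
// `CandidateHash` (hypothetical, for illustration): ancestors are consumed strictly
// from the start of the chain, so the first hole ends the search.
use std::collections::HashSet;

fn ancestor_path(chain: &[u8], mut ancestors: HashSet<u8>) -> usize {
	for (index, candidate) in chain.iter().enumerate() {
		if !ancestors.remove(candidate) {
			return index
		}
	}
	chain.len()
}
// ancestor_path(&[1, 2, 3], HashSet::from([1])) == 1, while
// ancestor_path(&[1, 2, 3], HashSet::from([2])) == 0: candidate 2 is in the chain but
// does not continue a path from the root, so it is ignored.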
-		let mut depth = 0;
-		let mut last_node = NodePointer::Root;
-		let mut next_node: Option<NodePointer> = Some(NodePointer::Root);
-
-		while let Some(node) = next_node {
-			if depth > self.scope.max_depth {
-				return None;
+	// Returns the candidates which are present in `CandidateStorage` but are not part of this
+	// chain, and could still become part of it in the future. Capped at the max depth minus the
+	// existing chain length.
+	// If `ignore_candidate` is supplied and found in storage, it won't be counted.
+	pub(crate) fn find_unconnected_potential_candidates(
+		&self,
+		storage: &CandidateStorage,
+		ignore_candidate: Option<&CandidateHash>,
+	) -> Vec<CandidateHash> {
+		let mut candidates = vec![];
+		for candidate in storage.candidates() {
+			if let Some(ignore_candidate) = ignore_candidate {
+				if ignore_candidate == &candidate.candidate_hash {
+					continue
+				}
+			}
+			// We stop at max_depth + 1 with the search. There's no point in looping further.
+			if (self.chain.len() + candidates.len()) > self.scope.max_depth {
+				break
+			}
+			if !self.candidates.contains(&candidate.candidate_hash) &&
+				self.check_potential(
+					&candidate.relay_parent,
+					candidate.candidate.persisted_validation_data.parent_head.hash(),
+					Some(candidate.candidate.commitments.head_data.hash()),
+				) {
+				candidates.push(candidate.candidate_hash);
			}
+		}
+
+		candidates
+	}

-			last_node = node;
+	// Check if adding a candidate which transitions `parent_head_hash` to `output_head_hash` would
+	// introduce a fork or a cycle in the parachain.
+	// `output_head_hash` is optional because we sometimes make this check before retrieving the
+	// collation.
+	fn is_fork_or_cycle(&self, parent_head_hash: Hash, output_head_hash: Option<Hash>) -> bool {
+		if self.by_parent_head.contains_key(&parent_head_hash) {
+			// fork. our parent has another child already
+			return true
+		}

-			next_node = match node {
-				NodePointer::Root => {
-					let children = self
-						.nodes
-						.iter()
-						.enumerate()
-						.take_while(|n| n.1.parent == NodePointer::Root)
-						.map(|(index, node)| (NodePointer::Storage(index), node.candidate_hash))
-						.collect::<Vec<_>>();
+		if let Some(output_head_hash) = output_head_hash {
+			if self.by_output_head.contains_key(&output_head_hash) {
+				// this is not a chain, there are multiple paths to the same state.
+				return true
+			}

-					self.find_valid_child(&mut ancestors, children.iter()).ok()?
-				},
-				NodePointer::Storage(ptr) => {
-					let children = self.nodes.get(ptr).and_then(|n| Some(n.children.iter()));
-					if let Some(children) = children {
-						self.find_valid_child(&mut ancestors, children).ok()?
-					} else {
-						None
-					}
-				},
-			};

+			// trivial 0-length cycle.
+			if parent_head_hash == output_head_hash {
+				return true
+			}

-			depth += 1;

+			// this should catch any other cycles. our output state cannot already be the parent
+			// state of another candidate, unless this is a cycle, since the already added
+			// candidates form a chain.
+			if self.by_parent_head.contains_key(&output_head_hash) {
+				return true
+			}
		}

-		Some(last_node)
+		false
	}

-	// Find a node from the given iterator which is present in the ancestors
-	// collection. If there are multiple such nodes, return an error and log a warning. We don't
-	// accept forks in a parachain to be backed. The supplied ancestors should all form a chain.
-	// If there is no such node, return None.
-	fn find_valid_child<'a>(
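// A runnable toy of `is_fork_or_cycle` above, with `u8` head-data hashes and the two
// lookup maps reduced to sets (hypothetical, not part of the diff):
use std::collections::HashSet;

fn fork_or_cycle(parents: &HashSet<u8>, outputs: &HashSet<u8>, parent: u8, output: u8) -> bool {
	parents.contains(&parent) ||	// fork: the parent state already has a child
		outputs.contains(&output) ||	// multiple paths to the same state
		parent == output ||	// trivial 0-length cycle
		parents.contains(&output)	// longer cycle: the output is already a parent state
}
// For the chain 1 -> 2 -> 3: parents = {1, 2}, outputs = {2, 3}. Then 2 -> 4 is a
// fork, 4 -> 4 a trivial cycle, and 3 -> 1 a longer cycle.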
+	// Verifies that the relay parent is in scope and not moving backwards and that we're not
+	// introducing forks or cycles with other candidates in the chain.
+	// `output_head_hash` is optional because we sometimes make this check before retrieving the
+	// collation.
+	fn check_potential(
		&self,
-		ancestors: &'a mut Ancestors,
-		nodes: impl Iterator<Item = &'a (NodePointer, CandidateHash)> + 'a,
-	) -> Result<Option<NodePointer>, ()> {
-		let mut possible_children =
-			nodes.filter_map(|(node_ptr, hash)| match ancestors.remove(&hash) {
-				true => Some(node_ptr),
-				false => None,
-			});
-
-		// We don't accept forks in a parachain to be backed. The supplied ancestors
-		// should all form a chain.
-		let next = possible_children.next();
-		if let Some(second_child) = possible_children.next() {
-			if let (Some(NodePointer::Storage(first_child)), NodePointer::Storage(second_child)) =
-				(next, second_child)
-			{
-				gum::error!(
-					target: LOG_TARGET,
-					para_id = ?self.scope.para,
-					relay_parent = ?self.scope.relay_parent,
-					"Trying to find new backable candidates for a parachain for which we've backed a fork.\
-					This is a bug and the runtime should not have allowed it.\n\
-					Backed candidates with the same parent: {}, {}",
-					self.nodes[*first_child].candidate_hash,
-					self.nodes[*second_child].candidate_hash,
-				);
-			}
+		relay_parent: &Hash,
+		parent_head_hash: Hash,
+		output_head_hash: Option<Hash>,
+	) -> bool {
+		if self.is_fork_or_cycle(parent_head_hash, output_head_hash) {
+			return false
+		}

-			Err(())
-		} else {
-			Ok(next.copied())
+		let Some(earliest_rp) = self.earliest_relay_parent() else { return false };
+
+		let Some(relay_parent) = self.scope.ancestor(relay_parent) else { return false };
+
+		if relay_parent.number < earliest_rp.number {
+			return false // relay parent moved backwards.
		}
+
+		true
	}

-	fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec<NodePointer>) {
-		// Populate the tree breadth-first.
-		let mut last_sweep_start = None;
+	// Populate the fragment chain with candidates from CandidateStorage.
+	// Can be called by the constructor or when introducing a new candidate.
+	// If we're introducing a new candidate onto an existing chain, we may introduce more than one,
+	// since we may connect already existing candidates to the chain.
+	fn populate_chain(&mut self, storage: &CandidateStorage) {
+		let mut cumulative_modifications = if let Some(last_candidate) = self.chain.last() {
+			last_candidate.cumulative_modifications.clone()
+		} else {
+			ConstraintModifications::identity()
+		};
+		let Some(mut earliest_rp) = self.earliest_relay_parent() else { return };

		loop {
-			let sweep_start = self.nodes.len();
-
-			if Some(sweep_start) == last_sweep_start {
-				break
+			if self.chain.len() > self.scope.max_depth {
+				break;
			}

-			let parents: Vec<NodePointer> = if let Some(last_start) = last_sweep_start {
-				(last_start..self.nodes.len()).map(NodePointer::Storage).collect()
-			} else {
-				initial_bases.clone()
-			};
+			let child_constraints =
+				match self.scope.base_constraints.apply_modifications(&cumulative_modifications) {
+					Err(e) => {
+						gum::debug!(
+							target: LOG_TARGET,
+							new_parent_head = ?cumulative_modifications.required_parent,
+							err = ?e,
+							"Failed to apply modifications",
+						);

-			// 1. get parent head and find constraints
-			// 2. iterate all candidates building on the right head and viable relay parent
-			// 3.
add new node - for parent_pointer in parents { - let (modifications, child_depth, earliest_rp) = match parent_pointer { - NodePointer::Root => - (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()), - NodePointer::Storage(ptr) => { - let node = &self.nodes[ptr]; - let parent_rp = self - .scope - .ancestor_by_hash(&node.relay_parent()) - .or_else(|| { - // if the relay-parent is out of scope _and_ it is in the tree, - // it must be a candidate pending availability. - self.scope - .get_pending_availability(&node.candidate_hash) - .map(|c| c.relay_parent.clone()) - }) - .expect("All nodes in tree are either pending availability or within scope; qed"); - - (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) + break }, + Ok(c) => c, }; - if child_depth > self.scope.max_depth { + let required_head_hash = child_constraints.required_parent.hash(); + // Even though we don't allow parachain forks under the same active leaf, they may still + // appear under different relay chain forks, hence the iterator below. + let possible_children = storage.possible_para_children(&required_head_hash); + let mut added_child = false; + for candidate in possible_children { + // Add one node to chain if + // 1. it does not introduce a fork or a cycle. + // 2. parent hash is correct. + // 3. relay-parent does not move backwards. + // 4. all non-pending-availability candidates have relay-parent in scope. + // 5. candidate outputs fulfill constraints + + if self.is_fork_or_cycle( + candidate.parent_head_data_hash(), + Some(candidate.output_head_data_hash()), + ) { continue } - let child_constraints = - match self.scope.base_constraints.apply_modifications(&modifications) { + let pending = self.scope.get_pending_availability(&candidate.candidate_hash); + let Some(relay_parent) = pending + .map(|p| p.relay_parent.clone()) + .or_else(|| self.scope.ancestor(&candidate.relay_parent)) + else { + continue + }; + + // require: candidates don't move backwards + // and only pending availability candidates can be out-of-scope. + // + // earliest_rp can be before the earliest relay parent in the scope + // when the parent is a pending availability candidate as well, but + // only other pending candidates can have a relay parent out of scope. + let min_relay_parent_number = pending + .map(|p| match self.chain.len() { + 0 => p.relay_parent.number, + _ => earliest_rp.number, + }) + .unwrap_or_else(|| earliest_rp.number); + + if relay_parent.number < min_relay_parent_number { + continue // relay parent moved backwards. + } + + // don't add candidates if they're already present in the chain. + // this can never happen, as candidates can only be duplicated if there's a cycle + // and we shouldn't have allowed for a cycle to be chained. + if self.contains_candidate(&candidate.candidate_hash) { + continue + } + + let fragment = { + let mut constraints = child_constraints.clone(); + if let Some(ref p) = pending { + // overwrite for candidates pending availability as a special-case. 
+ constraints.min_relay_parent_number = p.relay_parent.number; + } + + let f = Fragment::new( + relay_parent.clone(), + constraints, + // It's cheap to clone because it's wrapped in an Arc + candidate.candidate.clone(), + ); + + match f { + Ok(f) => f, Err(e) => { gum::debug!( target: LOG_TARGET, - new_parent_head = ?modifications.required_parent, err = ?e, - "Failed to apply modifications", + ?relay_parent, + candidate_hash = ?candidate.candidate_hash, + "Failed to instantiate fragment", ); - continue + break }, - Ok(c) => c, - }; - - // Add nodes to tree wherever - // 1. parent hash is correct - // 2. relay-parent does not move backwards. - // 3. all non-pending-availability candidates have relay-parent in scope. - // 4. candidate outputs fulfill constraints - let required_head_hash = child_constraints.required_parent.hash(); - for candidate in storage.iter_para_children(&required_head_hash) { - let pending = self.scope.get_pending_availability(&candidate.candidate_hash); - let relay_parent = pending - .map(|p| p.relay_parent.clone()) - .or_else(|| self.scope.ancestor_by_hash(&candidate.relay_parent)); - - let relay_parent = match relay_parent { - Some(r) => r, - None => continue, - }; - - // require: pending availability candidates don't move backwards - // and only those can be out-of-scope. - // - // earliest_rp can be before the earliest relay parent in the scope - // when the parent is a pending availability candidate as well, but - // only other pending candidates can have a relay parent out of scope. - let min_relay_parent_number = pending - .map(|p| match parent_pointer { - NodePointer::Root => p.relay_parent.number, - NodePointer::Storage(_) => earliest_rp.number, - }) - .unwrap_or_else(|| { - std::cmp::max( - earliest_rp.number, - self.scope.earliest_relay_parent().number, - ) - }); - - if relay_parent.number < min_relay_parent_number { - continue // relay parent moved backwards. } + }; - // don't add candidates where the parent already has it as a child. - if self.node_has_candidate_child(parent_pointer, &candidate.candidate_hash) { - continue - } + // Update the cumulative constraint modifications. + cumulative_modifications.stack(fragment.constraint_modifications()); + // Update the earliest rp + earliest_rp = relay_parent; - let fragment = { - let mut constraints = child_constraints.clone(); - if let Some(ref p) = pending { - // overwrite for candidates pending availability as a special-case. - constraints.min_relay_parent_number = p.relay_parent.number; - } - - let f = Fragment::new( - relay_parent.clone(), - constraints, - candidate.candidate.partial_clone(), - ); + let node = FragmentNode { + fragment, + candidate_hash: candidate.candidate_hash, + cumulative_modifications: cumulative_modifications.clone(), + }; - match f { - Ok(f) => f.into_owned(), - Err(e) => { - gum::debug!( - target: LOG_TARGET, - err = ?e, - ?relay_parent, - candidate_hash = ?candidate.candidate_hash, - "Failed to instantiate fragment", - ); - - continue - }, - } - }; - - let mut cumulative_modifications = modifications.clone(); - cumulative_modifications.stack(fragment.constraint_modifications()); - - let node = FragmentNode { - parent: parent_pointer, - fragment, - candidate_hash: candidate.candidate_hash, - depth: child_depth, - cumulative_modifications, - children: Vec::new(), - }; - - self.insert_node(node); - } + self.chain.push(node); + self.candidates.insert(candidate.candidate_hash); + // We've already checked for forks and cycles. 
+ self.by_parent_head + .insert(candidate.parent_head_data_hash(), candidate.candidate_hash); + self.by_output_head + .insert(candidate.output_head_data_hash(), candidate.candidate_hash); + added_child = true; + // We can only add one child for a candidate. (it's a chain, not a tree) + break; } - last_sweep_start = Some(sweep_start); + if !added_child { + break + } } } } - -struct FragmentNode { - // A pointer to the parent node. - parent: NodePointer, - fragment: Fragment<'static>, - candidate_hash: CandidateHash, - depth: usize, - cumulative_modifications: ConstraintModifications, - children: Vec<(NodePointer, CandidateHash)>, -} - -impl FragmentNode { - fn relay_parent(&self) -> Hash { - self.fragment.relay_parent().hash - } - - fn candidate_child(&self, candidate_hash: &CandidateHash) -> Option { - self.children.iter().find(|(_, c)| c == candidate_hash).map(|(p, _)| *p) - } -} diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index fd41be55f7f960367ee0a57ccd4a4c2251b90d13..26ee94d59d8ebd50bda29218c85b38476f077111 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -19,17 +19,6 @@ use assert_matches::assert_matches; use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; use polkadot_primitives_test_helpers as test_helpers; -use rstest::rstest; -use std::iter; - -impl NodePointer { - fn unwrap_idx(self) -> usize { - match self { - NodePointer::Root => panic!("Unexpected root"), - NodePointer::Storage(index) => index, - } - } -} fn make_constraints( min_relay_parent_number: BlockNumber, @@ -204,8 +193,52 @@ fn scope_only_takes_ancestors_up_to_min() { } #[test] -fn storage_add_candidate() { - let mut storage = CandidateStorage::new(); +fn scope_rejects_unordered_ancestors() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 5, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![ + RelayChainBlockInfo { + number: 4, + hash: Hash::repeat_byte(4), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 2, + hash: Hash::repeat_byte(2), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(3), + storage_root: Hash::repeat_byte(69), + }, + ]; + + let max_depth = 2; + let base_constraints = make_constraints(0, vec![2], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); + + assert_matches!( + Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors, + ), + Err(UnexpectedAncestor { number: 2, prev: 4 }) + ); +} + +#[test] +fn candidate_storage_methods() { + let mut storage = CandidateStorage::default(); let relay_parent = Hash::repeat_byte(69); let (pvd, candidate) = make_committed_candidate( @@ -220,50 +253,105 @@ fn storage_add_candidate() { let candidate_hash = candidate.hash(); let parent_head_hash = pvd.parent_head.hash(); - storage.add_candidate(candidate, pvd).unwrap(); + // Invalid pvd hash + let mut wrong_pvd = pvd.clone(); + wrong_pvd.max_pov_size = 0; + assert_matches!( + storage.add_candidate(candidate.clone(), wrong_pvd, CandidateState::Seconded), + Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) + ); + 
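// The `PersistedValidationDataMismatch` above is expected because changing
// `max_pov_size` changes the pvd's hash, which then no longer matches the pvd hash
// committed to in the candidate descriptor. A runnable toy of that guard, with `u64`
// standing in for the hash type (hypothetical, not part of the diff):
fn guard_pvd(committed_pvd_hash: u64, supplied_pvd_hash: u64) -> Result<(), &'static str> {
	if committed_pvd_hash != supplied_pvd_hash {
		return Err("PersistedValidationDataMismatch")
	}
	Ok(())
}
// guard_pvd(1, 2) == Err("PersistedValidationDataMismatch")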
assert!(!storage.contains(&candidate_hash)); + assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); + assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); + assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); + assert_eq!(storage.is_backed(&candidate_hash), false); + + // Add a valid candidate + storage + .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) + .unwrap(); assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); - - assert_eq!(storage.relay_parent_by_candidate_hash(&candidate_hash), Some(relay_parent)); -} + assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 1); + assert_eq!(storage.possible_para_children(&candidate.descriptor.para_head).count(), 0); + assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), Some(relay_parent)); + assert_eq!( + storage.head_data_by_hash(&candidate.descriptor.para_head).unwrap(), + &candidate.commitments.head_data + ); + assert_eq!(storage.head_data_by_hash(&parent_head_hash).unwrap(), &pvd.parent_head); + assert_eq!(storage.is_backed(&candidate_hash), false); -#[test] -fn storage_retain() { - let mut storage = CandidateStorage::new(); + storage.mark_backed(&candidate_hash); + assert_eq!(storage.is_backed(&candidate_hash), true); - let (pvd, candidate) = make_committed_candidate( - ParaId::from(5u32), - Hash::repeat_byte(69), - 8, - vec![4, 5, 6].into(), - vec![1, 2, 3].into(), - 7, + // Re-adding a candidate fails. + assert_matches!( + storage.add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded), + Err(CandidateStorageInsertionError::CandidateAlreadyKnown(hash)) if candidate_hash == hash ); - let candidate_hash = candidate.hash(); - let output_head_hash = candidate.commitments.head_data.hash(); - let parent_head_hash = pvd.parent_head.hash(); - - storage.add_candidate(candidate, pvd).unwrap(); + // Remove candidate and re-add it later in backed state. 
+ storage.remove_candidate(&candidate_hash); + assert!(!storage.contains(&candidate_hash)); + assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); + assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); + assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); + assert_eq!(storage.is_backed(&candidate_hash), false); + + storage + .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Backed) + .unwrap(); + assert_eq!(storage.is_backed(&candidate_hash), true); + + // Test retain storage.retain(|_| true); assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); - assert!(storage.head_data_by_hash(&output_head_hash).is_some()); - storage.retain(|_| false); assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); - assert!(storage.head_data_by_hash(&output_head_hash).is_none()); + assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); + assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); + assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); + assert_eq!(storage.is_backed(&candidate_hash), false); } -// [`FragmentTree::populate`] should pick up candidates that build on other candidates. #[test] -fn populate_works_recursively() { - let mut storage = CandidateStorage::new(); +fn populate_and_extend_from_storage_empty() { + // Empty chain and empty storage. + let storage = CandidateStorage::default(); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let scope = Scope::with_ancestors( + ParaId::from(2), + RelayChainBlockInfo { + number: 1, + hash: Hash::repeat_byte(1), + storage_root: Hash::repeat_byte(2), + }, + base_constraints, + pending_availability, + 4, + vec![], + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert!(chain.to_vec().is_empty()); + + chain.extend_from_storage(&storage); + assert!(chain.to_vec().is_empty()); +} + +#[test] +fn populate_and_extend_from_storage_with_existing_empty_to_vec() { + let mut storage = CandidateStorage::default(); let para_id = ParaId::from(5u32); let relay_parent_a = Hash::repeat_byte(1); let relay_parent_b = Hash::repeat_byte(2); + let relay_parent_c = Hash::repeat_byte(3); let (pvd_a, candidate_a) = make_committed_candidate( para_id, @@ -285,56 +373,623 @@ fn populate_works_recursively() { ); let candidate_b_hash = candidate_b.hash(); - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); + let (pvd_c, candidate_c) = make_committed_candidate( + para_id, + relay_parent_c, + 2, + vec![0x0c].into(), + vec![0x0d].into(), + 2, + ); + let candidate_c_hash = candidate_c.hash(); - let ancestors = vec![RelayChainBlockInfo { + let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, hash: relay_parent_a, storage_root: pvd_a.relay_parent_storage_root, - }]; - + }; let relay_parent_b_info = RelayChainBlockInfo { number: pvd_b.relay_parent_number, hash: relay_parent_b, storage_root: pvd_b.relay_parent_storage_root, }; + let relay_parent_c_info = RelayChainBlockInfo { + number: pvd_c.relay_parent_number, + hash: relay_parent_c, + storage_root: pvd_c.relay_parent_storage_root, + }; - 
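// Orientation for the fixtures above (values taken from this test): by head data the
// candidates chain up as A: 0x0a -> 0x0b, B: 0x0b -> 0x0c, C: 0x0c -> 0x0d, starting
// from the required parent 0x0a set in the base constraints below. A runnable toy of
// the walk `populate_chain` performs over such fixtures (hypothetical helper):
use std::collections::HashMap;

fn populate(mut required_parent: u8, by_parent: &HashMap<u8, u8>, max_len: usize) -> Vec<u8> {
	let mut chain = Vec::new();
	while chain.len() < max_len {
		match by_parent.get(&required_parent) {
			Some(&output) => {
				chain.push(output);
				// The child's output head becomes the next required parent.
				required_parent = output;
			},
			None => break,
		}
	}
	chain
}
// populate(0x0a, &HashMap::from([(0x0a, 0x0b), (0x0b, 0x0c), (0x0c, 0x0d)]), 4)
//     == vec![0x0b, 0x0c, 0x0d]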
storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_b_info, - base_constraints, - pending_availability, - 4, - ancestors, - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let ancestors = vec![ + // These need to be ordered in reverse. + relay_parent_b_info.clone(), + relay_parent_a_info.clone(), + ]; + + storage + .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Backed) + .unwrap(); + storage + .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Backed) + .unwrap(); + + // Candidate A doesn't adhere to the base constraints. + { + for wrong_constraints in [ + // Different required parent + make_constraints(0, vec![0], vec![0x0e].into()), + // Min relay parent number is wrong + make_constraints(1, vec![0], vec![0x0a].into()), + ] { + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + wrong_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + + assert!(chain.to_vec().is_empty()); + + chain.extend_from_storage(&storage); + assert!(chain.to_vec().is_empty()); + + // If the min relay parent number is wrong, candidate A can never become valid. + // Otherwise, if only the required parent doesn't match, candidate A is still a + // potential candidate. + if wrong_constraints.min_relay_parent_number == 1 { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate_a.hash(), + &candidate_a.descriptor.relay_parent, + pvd_a.parent_head.hash(), + Some(candidate_a.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } else { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate_a.hash(), + &candidate_a.descriptor.relay_parent, + pvd_a.parent_head.hash(), + Some(candidate_a.commitments.head_data.hash()), + ), + PotentialAddition::Anyhow + ); + } + + // All other candidates can always be potential candidates. + for (candidate, pvd) in + [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] + { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::Anyhow + ); + } + } + } + + // Various max depths. + { + // depth is 0, will only allow 1 candidate + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 0, + ancestors.clone(), + ) + .unwrap(); + // Before populating the chain, all candidates are potential candidates. 
However, they can
+		// only be added as connected candidates, because only one candidate is allowed by max
+		// depth
+		let chain = FragmentChain::populate(scope.clone(), &CandidateStorage::default());
+		for (candidate, pvd) in [
+			(candidate_a.clone(), pvd_a.clone()),
+			(candidate_b.clone(), pvd_b.clone()),
+			(candidate_c.clone(), pvd_c.clone()),
+		] {
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&CandidateStorage::default(),
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::IfConnected
+			);
+		}
+		let mut chain = FragmentChain::populate(scope, &storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash]);
+		chain.extend_from_storage(&storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash]);
+		// since depth is maxed out, we can't add more potential candidates
+		// candidate A is no longer a potential candidate because it's already present.
+		for (candidate, pvd) in [
+			(candidate_a.clone(), pvd_a.clone()),
+			(candidate_b.clone(), pvd_b.clone()),
+			(candidate_c.clone(), pvd_c.clone()),
+		] {
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&storage,
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::None
+			);
+		}
+
+		// depth is 1, allows two candidates
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_c_info.clone(),
+			base_constraints.clone(),
+			pending_availability.clone(),
+			1,
+			ancestors.clone(),
+		)
+		.unwrap();
+		// Before populating the chain, all candidates can be added as potential.
+		let mut modified_storage = CandidateStorage::default();
+		let chain = FragmentChain::populate(scope.clone(), &modified_storage);
+		for (candidate, pvd) in [
+			(candidate_a.clone(), pvd_a.clone()),
+			(candidate_b.clone(), pvd_b.clone()),
+			(candidate_c.clone(), pvd_c.clone()),
+		] {
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&modified_storage,
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::Anyhow
+			);
+		}
+		// Add an unconnected candidate. We now should only allow a Connected candidate, because max
+		// depth only allows one more candidate.
+		modified_storage
+			.add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded)
+			.unwrap();
+		let chain = FragmentChain::populate(scope.clone(), &modified_storage);
+		for (candidate, pvd) in
+			[(candidate_a.clone(), pvd_a.clone()), (candidate_c.clone(), pvd_c.clone())]
+		{
+			assert_eq!(
+				chain.can_add_candidate_as_potential(
+					&modified_storage,
+					&candidate.hash(),
+					&candidate.descriptor.relay_parent,
+					pvd.parent_head.hash(),
+					Some(candidate.commitments.head_data.hash()),
+				),
+				PotentialAddition::IfConnected
+			);
+		}
+
+		// Now try populating from all candidates.
+		let mut chain = FragmentChain::populate(scope, &storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+		chain.extend_from_storage(&storage);
+		assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);
+		// since depth is maxed out, we can't add more potential candidates
+		// candidates A and B are no longer potential candidates because they're already present.
+ for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } + + // depths larger than 2, allows all candidates + for depth in 2..6 { + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + depth, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + // Candidates are no longer potential candidates because they're already part of the + // chain. + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } + } + } + + // Wrong relay parents + { + // Candidates A has relay parent out of scope. + let ancestors_without_a = vec![relay_parent_b_info.clone()]; + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors_without_a, + ) + .unwrap(); + + let mut chain = FragmentChain::populate(scope, &storage); + assert!(chain.to_vec().is_empty()); + + chain.extend_from_storage(&storage); + assert!(chain.to_vec().is_empty()); + + // Candidate A is not a potential candidate, but candidates B and C still are. + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate_a.hash(), + &candidate_a.descriptor.relay_parent, + pvd_a.parent_head.hash(), + Some(candidate_a.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + for (candidate, pvd) in + [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] + { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::Anyhow + ); + } + + // Candidate C has the same relay parent as candidate A's parent. 
Relay parent not allowed + // to move backwards + let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_a, + 1, + vec![0x0c].into(), + vec![0x0d].into(), + 2, + ); + modified_storage + .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + + // Candidate C is not even a potential candidate. + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &wrong_candidate_c.hash(), + &wrong_candidate_c.descriptor.relay_parent, + wrong_pvd_c.parent_head.hash(), + Some(wrong_candidate_c.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } + + // Parachain fork and cycles are not allowed. + { + // Candidate C has the same parent as candidate B. + let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_c, + 2, + vec![0x0b].into(), + vec![0x0d].into(), + 2, + ); + modified_storage + .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &modified_storage); + // We'll either have A->B or A->C. It's not deterministic because CandidateStorage uses + // HashSets and HashMaps. + if chain.to_vec() == vec![candidate_a_hash, candidate_b_hash] { + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + // Candidate C is not even a potential candidate. + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &wrong_candidate_c.hash(), + &wrong_candidate_c.descriptor.relay_parent, + wrong_pvd_c.parent_head.hash(), + Some(wrong_candidate_c.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } else if chain.to_vec() == vec![candidate_a_hash, wrong_candidate_c.hash()] { + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, wrong_candidate_c.hash()]); + // Candidate B is not even a potential candidate. + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &candidate_b.hash(), + &candidate_b.descriptor.relay_parent, + pvd_b.parent_head.hash(), + Some(candidate_b.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } else { + panic!("Unexpected chain: {:?}", chain.to_vec()); + } + + // Candidate C is a 0-length cycle. + // Candidate C has the same parent as candidate B. 
+ let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_c, + 2, + vec![0x0c].into(), + vec![0x0c].into(), + 2, + ); + modified_storage + .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + // Candidate C is not even a potential candidate. + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &wrong_candidate_c.hash(), + &wrong_candidate_c.descriptor.relay_parent, + wrong_pvd_c.parent_head.hash(), + Some(wrong_candidate_c.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + + // Candidate C points back to the pre-state of candidate C. + let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_c, + 2, + vec![0x0c].into(), + vec![0x0b].into(), + 2, + ); + modified_storage + .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + // Candidate C is not even a potential candidate. 
+ assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &wrong_candidate_c.hash(), + &wrong_candidate_c.descriptor.relay_parent, + wrong_pvd_c.parent_head.hash(), + Some(wrong_candidate_c.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert!(candidates.contains(&candidate_a_hash)); - assert!(candidates.contains(&candidate_b_hash)); + // Test with candidates pending availability + { + // Valid options + for pending in [ + vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info.clone(), + }], + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_b_info.clone(), + }, + ], + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_b_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_c_hash, + relay_parent: relay_parent_c_info.clone(), + }, + ], + ] { + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending, + 3, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + } - assert_eq!(tree.nodes.len(), 2); - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[0].depth, 0); + // Relay parents of pending availability candidates can be out of scope + // Relay parent of candidate A is out of scope. + let ancestors_without_a = vec![relay_parent_b_info.clone()]; + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info.clone(), + }], + 4, + ancestors_without_a, + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + + // Even relay parents of pending availability candidates which are out of scope cannot move + // backwards. 
+ let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: RelayChainBlockInfo { + hash: relay_parent_a_info.hash, + number: 1, + storage_root: relay_parent_a_info.storage_root, + }, + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: RelayChainBlockInfo { + hash: relay_parent_b_info.hash, + number: 0, + storage_root: relay_parent_b_info.storage_root, + }, + }, + ], + 4, + vec![], + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert!(chain.to_vec().is_empty()); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[1].depth, 1); + chain.extend_from_storage(&storage); + assert!(chain.to_vec().is_empty()); + } } #[test] -fn children_of_root_are_contiguous() { - let mut storage = CandidateStorage::new(); - +fn extend_from_storage_with_existing_to_vec() { let para_id = ParaId::from(5u32); let relay_parent_a = Hash::repeat_byte(1); let relay_parent_b = Hash::repeat_byte(2); + let relay_parent_d = Hash::repeat_byte(3); let (pvd_a, candidate_a) = make_committed_candidate( para_id, @@ -344,6 +999,7 @@ fn children_of_root_are_contiguous() { vec![0x0b].into(), 0, ); + let candidate_a_hash = candidate_a.hash(); let (pvd_b, candidate_b) = make_committed_candidate( para_id, @@ -353,182 +1009,136 @@ fn children_of_root_are_contiguous() { vec![0x0c].into(), 1, ); + let candidate_b_hash = candidate_b.hash(); - let (pvd_a2, candidate_a2) = make_committed_candidate( + let (pvd_c, candidate_c) = make_committed_candidate( para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b, 1].into(), - 0, + // Use the same relay parent number as B to test that it doesn't need to change between + // candidates. + relay_parent_b, + 1, + vec![0x0c].into(), + vec![0x0d].into(), + 1, ); - let candidate_a2_hash = candidate_a2.hash(); + let candidate_c_hash = candidate_c.hash(); - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); + // Candidate D will never be added to the chain. + let (pvd_d, candidate_d) = make_committed_candidate( + para_id, + relay_parent_d, + 2, + vec![0x0e].into(), + vec![0x0f].into(), + 1, + ); - let ancestors = vec![RelayChainBlockInfo { + let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, hash: relay_parent_a, storage_root: pvd_a.relay_parent_storage_root, - }]; - + }; let relay_parent_b_info = RelayChainBlockInfo { number: pvd_b.relay_parent_number, hash: relay_parent_b, storage_root: pvd_b.relay_parent_storage_root, }; + let relay_parent_d_info = RelayChainBlockInfo { + number: pvd_d.relay_parent_number, + hash: relay_parent_d, + storage_root: pvd_d.relay_parent_storage_root, + }; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_b_info, - base_constraints, - pending_availability, - 4, - ancestors, - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let ancestors = vec![ + // These need to be ordered in reverse. 
+ relay_parent_b_info.clone(), + relay_parent_a_info.clone(), + ]; - storage.add_candidate(candidate_a2, pvd_a2).unwrap(); - tree.add_and_populate(candidate_a2_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 3); + // Already had A and C in the storage. Introduce B, which should add both B and C to the chain + // now. + { + let mut storage = CandidateStorage::default(); + storage + .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) + .unwrap(); + + let scope = Scope::with_ancestors( + para_id, + relay_parent_d_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash]); + + storage + .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) + .unwrap(); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + } - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Root); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); + // Already had A and B in the chain. Introduce C. + { + let mut storage = CandidateStorage::default(); + storage + .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) + .unwrap(); + + let scope = Scope::with_ancestors( + para_id, + relay_parent_d_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + + storage + .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + } } #[test] -fn add_candidate_child_of_root() { - let mut storage = CandidateStorage::new(); - +fn test_find_ancestor_path_and_find_backable_chain_empty_to_vec() { let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 10; - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - 4, - 
vec![], - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_b, pvd_b).unwrap(); - tree.add_and_populate(candidate_b_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Root); -} - -#[test] -fn add_candidate_child_of_non_root() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - 4, - vec![], - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_b, pvd_b).unwrap(); - tree.add_and_populate(candidate_b_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); -} - -#[test] -fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { - let para_id = ParaId::from(5u32); - let relay_parent = Hash::repeat_byte(1); - let required_parent: HeadData = vec![0xff].into(); - let max_depth = 10; - - // Empty tree - let storage = CandidateStorage::new(); - let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - - let relay_parent_info = - RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; - - let scope = Scope::with_ancestors( + // Empty chain + let storage = CandidateStorage::default(); + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + + let relay_parent_info = + RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; + + let scope = Scope::with_ancestors( para_id, relay_parent_info, base_constraints, @@ -537,64 +1147,23 @@ fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { vec![], ) .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - assert_eq!(tree.candidates().collect::>().len(), 0); - assert_eq!(tree.nodes.len(), 0); + let chain = FragmentChain::populate(scope, &storage); + assert!(chain.to_vec().is_empty()); - assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); - assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); + assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); // Invalid candidate. 
let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root)); - assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); + assert_eq!(chain.find_backable_chain(ancestors, 2, |_| true), vec![]); } -#[rstest] -#[case(true, 13)] -#[case(false, 8)] -// The tree with no cycles looks like: -// Make a tree that looks like this (note that there's no cycle): -// +-(root)-+ -// | | -// +----0---+ 7 -// | | -// 1----+ 5 -// | | -// | | -// 2 6 -// | -// 3 -// | -// 4 -// -// The tree with cycles is the same as the first but has a cycle from 4 back to the state -// produced by 0 (It's bounded by the max_depth + 1). -// +-(root)-+ -// | | -// +----0---+ 7 -// | | -// 1----+ 5 -// | | -// | | -// 2 6 -// | -// 3 -// | -// 4---+ -// | | -// 1 5 -// | -// 2 -// | -// 3 -fn test_find_ancestor_path_and_find_backable_chain( - #[case] has_cycle: bool, - #[case] expected_node_count: usize, -) { +#[test] +fn test_find_ancestor_path_and_find_backable_to_vec() { let para_id = ParaId::from(5u32); let relay_parent = Hash::repeat_byte(1); let required_parent: HeadData = vec![0xff].into(); - let max_depth = 7; + let max_depth = 5; let relay_parent_number = 0; let relay_parent_storage_root = Hash::repeat_byte(69); @@ -650,42 +1219,13 @@ fn test_find_ancestor_path_and_find_backable_chain( para_id, relay_parent, 0, - vec![0].into(), + vec![4].into(), vec![5].into(), 0, )); - // Candidate 6 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![1].into(), - vec![6].into(), - 0, - )); - // Candidate 7 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - required_parent.clone(), - vec![7].into(), - 0, - )); - - if has_cycle { - candidates[4] = make_committed_candidate( - para_id, - relay_parent, - 0, - vec![3].into(), - vec![0].into(), // put the cycle here back to the output state of 0. - 0, - ); - } let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - let mut storage = CandidateStorage::new(); + let mut storage = CandidateStorage::default(); let relay_parent_info = RelayChainBlockInfo { number: relay_parent_number, @@ -694,265 +1234,175 @@ fn test_find_ancestor_path_and_find_backable_chain( }; for (pvd, candidate) in candidates.iter() { - storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); + storage + .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) + .unwrap(); } let candidates = candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); let scope = Scope::with_ancestors( para_id, - relay_parent_info, - base_constraints, + relay_parent_info.clone(), + base_constraints.clone(), vec![], max_depth, vec![], ) .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - assert_eq!(tree.candidates().collect::>().len(), candidates.len()); - assert_eq!(tree.nodes.len(), expected_node_count); - - // Do some common tests on both trees. - { - // No ancestors supplied. - assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - // Ancestor which is not part of the tree. Will be ignored. 
- let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - // A chain fork. - let ancestors: Ancestors = - [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + let chain = FragmentChain::populate(scope, &storage); - // Ancestors which are part of the tree but don't form a path. Will be ignored. - let ancestors: Ancestors = - [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Valid ancestors. - let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[7].hash()); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); - - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[2].hash()); - assert_eq!( - tree.find_backable_chain(ancestors.clone(), 2, |_| true), - [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); + assert_eq!(candidates.len(), 6); + assert_eq!(chain.to_vec().len(), 6); - // Valid ancestors with candidates which have been omitted due to timeouts - let ancestors: Ancestors = - [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[0].hash()); - assert_eq!( - tree.find_backable_chain(ancestors, 3, |_| true), - [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[1].hash()); - if has_cycle { - assert_eq!( - tree.find_backable_chain(ancestors, 2, |_| true), - [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } else { - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } + // No ancestors supplied. 
+ assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0);
+ assert_eq!(chain.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]);
+ assert_eq!(
+ chain.find_backable_chain(Ancestors::new(), 1, |_| true),
+ [0].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );
+ assert_eq!(
+ chain.find_backable_chain(Ancestors::new(), 2, |_| true),
+ [0, 1].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );
+ assert_eq!(
+ chain.find_backable_chain(Ancestors::new(), 5, |_| true),
+ [0, 1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );

- let ancestors: Ancestors =
- [candidates[1].hash(), candidates[2].hash()].into_iter().collect();
- let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
- assert_eq!(res, NodePointer::Root);
+ for count in 6..10 {
 assert_eq!(
- tree.find_backable_chain(ancestors, 4, |_| true),
- [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ chain.find_backable_chain(Ancestors::new(), count, |_| true),
+ [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 );
+ }

- // Requested count is 0.
- assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]);
+ assert_eq!(
+ chain.find_backable_chain(Ancestors::new(), 7, |_| true),
+ [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );
+ assert_eq!(
+ chain.find_backable_chain(Ancestors::new(), 10, |_| true),
+ [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );

- let ancestors: Ancestors =
- [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()]
- .into_iter()
- .collect();
- assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]);
+ // Ancestor which is not part of the chain. Will be ignored.
+ let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect();
+ assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0);
+ assert_eq!(
+ chain.find_backable_chain(ancestors, 4, |_| true),
+ [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );
+ let ancestors: Ancestors =
+ [candidates[1].hash(), CandidateHash::default()].into_iter().collect();
+ assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0);
+ assert_eq!(
+ chain.find_backable_chain(ancestors, 4, |_| true),
+ [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );
+ let ancestors: Ancestors =
+ [candidates[0].hash(), CandidateHash::default()].into_iter().collect();
+ assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1);
+ assert_eq!(
+ chain.find_backable_chain(ancestors, 4, |_| true),
+ [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );

- let ancestors: Ancestors =
- [candidates[2].hash(), candidates[0].hash()].into_iter().collect();
- assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]);
- }
+ // Ancestors which are part of the chain but don't form a path from root. Will be ignored.
+ let ancestors: Ancestors = [candidates[1].hash(), candidates[2].hash()].into_iter().collect();
+ assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0);
+ assert_eq!(
+ chain.find_backable_chain(ancestors, 4, |_| true),
+ [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );

- // Now do some tests only on the tree with cycles
- if has_cycle {
- // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at
- // 0-1-2-3-4-1-2-3.
- let ancestors: Ancestors = [
- candidates[0].hash(),
- candidates[1].hash(),
- candidates[2].hash(),
- candidates[3].hash(),
- candidates[4].hash(),
- ]
+ // Valid ancestors.
+ let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()]
 .into_iter()
 .collect();
- let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
- let candidate = &tree.nodes[res.unwrap_idx()];
- assert_eq!(candidate.candidate_hash, candidates[4].hash());
- assert_eq!(
- tree.find_backable_chain(ancestors, 4, |_| true),
- [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
- );
-
- // 0-1-2.
- let ancestors: Ancestors =
- [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()]
- .into_iter()
- .collect();
- let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
- let candidate = &tree.nodes[res.unwrap_idx()];
- assert_eq!(candidate.candidate_hash, candidates[2].hash());
- assert_eq!(
- tree.find_backable_chain(ancestors.clone(), 1, |_| true),
- [3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
- );
+ assert_eq!(chain.find_ancestor_path(ancestors.clone()), 3);
+ assert_eq!(
+ chain.find_backable_chain(ancestors.clone(), 2, |_| true),
+ [3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );
+ for count in 3..10 {
 assert_eq!(
- tree.find_backable_chain(ancestors, 5, |_| true),
- [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ chain.find_backable_chain(ancestors.clone(), count, |_| true),
+ [3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 );
+ }

- // 0-1
- let ancestors: Ancestors =
- [candidates[0].hash(), candidates[1].hash()].into_iter().collect();
- let res = tree.find_ancestor_path(ancestors.clone()).unwrap();
- let candidate = &tree.nodes[res.unwrap_idx()];
- assert_eq!(candidate.candidate_hash, candidates[1].hash());
+ // Valid ancestors with candidates which have been omitted due to timeouts
+ let ancestors: Ancestors = [candidates[0].hash(), candidates[2].hash()].into_iter().collect();
+ assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1);
+ assert_eq!(
+ chain.find_backable_chain(ancestors.clone(), 3, |_| true),
+ [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );
+ assert_eq!(
+ chain.find_backable_chain(ancestors.clone(), 4, |_| true),
+ [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
+ );
+ for count in 5..10 {
 assert_eq!(
- tree.find_backable_chain(ancestors, 6, |_| true),
- [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>(),
+ chain.find_backable_chain(ancestors.clone(), count, |_| true),
+ [1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 );
+ }

- // For 0-1-2-3-4-5, there's more than 1 way of finding this path in
- // the tree. `None` should be returned. The runtime should not have accepted this.
- let ancestors: Ancestors = [
- candidates[0].hash(),
- candidates[1].hash(),
- candidates[2].hash(),
- candidates[3].hash(),
- candidates[4].hash(),
- candidates[5].hash(),
- ]
+ let ancestors: Ancestors = [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()]
 .into_iter()
 .collect();
- let res = tree.find_ancestor_path(ancestors.clone());
- assert_eq!(res, None);
- assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]);
- }
-}
-
-#[test]
-fn graceful_cycle_of_0() {
- let mut storage = CandidateStorage::new();
-
- let para_id = ParaId::from(5u32);
- let relay_parent_a = Hash::repeat_byte(1);
-
- let (pvd_a, candidate_a) = make_committed_candidate(
- para_id,
- relay_parent_a,
- 0,
- vec![0x0a].into(),
- vec![0x0a].into(), // input same as output
- 0,
+ assert_eq!(chain.find_ancestor_path(ancestors.clone()), 2);
+ assert_eq!(
+ chain.find_backable_chain(ancestors.clone(), 4, |_| true),
+ [2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 );
- let candidate_a_hash = candidate_a.hash();
- let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
- let pending_availability = Vec::new();

- let relay_parent_a_info = RelayChainBlockInfo {
- number: pvd_a.relay_parent_number,
- hash: relay_parent_a,
- storage_root: pvd_a.relay_parent_storage_root,
- };
-
- let max_depth = 4;
- storage.add_candidate(candidate_a, pvd_a).unwrap();
- let scope = Scope::with_ancestors(
- para_id,
- relay_parent_a_info,
- base_constraints,
- pending_availability,
- max_depth,
- vec![],
- )
- .unwrap();
- let tree = FragmentTree::populate(scope, &storage);
-
- let candidates: Vec<_> = tree.candidates().collect();
- assert_eq!(candidates.len(), 1);
- assert_eq!(tree.nodes.len(), max_depth + 1);
-
- assert_eq!(tree.nodes[0].parent, NodePointer::Root);
- assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
- assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1));
- assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2));
- assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3));
-
- assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash);
- assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash);
- assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash);
- assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash);
- assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash);
+ // Requested count is 0.
+ assert_eq!(chain.find_backable_chain(ancestors, 0, |_| true), vec![]);

+ // Stop when we've found a candidate for which pred returns false.
+ let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()]
+ .into_iter()
+ .collect();
 for count in 1..10 {
 assert_eq!(
- tree.find_backable_chain(Ancestors::new(), count, |_| true),
- iter::repeat(candidate_a_hash)
- .take(std::cmp::min(count as usize, max_depth + 1))
- .collect::<Vec<_>>()
+ // Stop at 4, for which the predicate returns false.
+ chain.find_backable_chain(ancestors.clone(), count, |hash| hash !=
+ &candidates[4].hash()),
+ [3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 );
+ }
+
+ // Stop when we've found a candidate which is pending availability
+ {
+ let scope = Scope::with_ancestors(
+ para_id,
+ relay_parent_info.clone(),
+ base_constraints,
+ // Mark candidate 3 as pending availability
+ vec![PendingAvailability {
+ candidate_hash: candidates[3].hash(),
+ relay_parent: relay_parent_info,
+ }],
+ max_depth,
+ vec![],
+ )
+ .unwrap();
+ let chain = FragmentChain::populate(scope, &storage);
+ let ancestors: Ancestors =
+ [candidates[0].hash(), candidates[1].hash()].into_iter().collect();
 assert_eq!(
- tree.find_backable_chain([candidate_a_hash].into_iter().collect(), count - 1, |_| true),
- iter::repeat(candidate_a_hash)
- .take(std::cmp::min(count as usize - 1, max_depth))
- .collect::<Vec<_>>()
+ // Stop at 3, which is pending availability.
+ chain.find_backable_chain(ancestors.clone(), 3, |_| true),
+ [2].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>()
 );
 }
 }

 #[test]
-fn graceful_cycle_of_1() {
- let mut storage = CandidateStorage::new();
+fn hypothetical_membership() {
+ let mut storage = CandidateStorage::default();

 let para_id = ParaId::from(5u32);
 let relay_parent_a = Hash::repeat_byte(1);
@@ -962,7 +1412,7 @@ fn graceful_cycle_of_1() {
 relay_parent_a,
 0,
 vec![0x0a].into(),
- vec![0x0b].into(), // input same as output
+ vec![0x0b].into(),
 0,
 );
 let candidate_a_hash = candidate_a.hash();
@@ -972,13 +1422,12 @@ fn graceful_cycle_of_1() {
 relay_parent_a,
 0,
 vec![0x0b].into(),
- vec![0x0a].into(), // input same as output
+ vec![0x0c].into(),
 0,
 );
 let candidate_b_hash = candidate_b.hash();

 let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
- let pending_availability = Vec::new();

 let relay_parent_a_info = RelayChainBlockInfo {
 number: pvd_a.relay_parent_number,
@@ -987,182 +1436,153 @@ fn graceful_cycle_of_1() {
 };

 let max_depth = 4;
- storage.add_candidate(candidate_a, pvd_a).unwrap();
- storage.add_candidate(candidate_b, pvd_b).unwrap();
+ storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap();
+ storage.add_candidate(candidate_b, pvd_b, CandidateState::Seconded).unwrap();
 let scope = Scope::with_ancestors(
 para_id,
- relay_parent_a_info,
- base_constraints,
- pending_availability,
+ relay_parent_a_info.clone(),
+ base_constraints.clone(),
+ vec![],
 max_depth,
 vec![],
 )
 .unwrap();
- let tree = FragmentTree::populate(scope, &storage);
+ let chain = FragmentChain::populate(scope, &storage);

- let candidates: Vec<_> = tree.candidates().collect();
- assert_eq!(candidates.len(), 2);
- assert_eq!(tree.nodes.len(), max_depth + 1);
+ assert_eq!(chain.to_vec().len(), 2);

- assert_eq!(tree.nodes[0].parent, NodePointer::Root);
- assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
- assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1));
- assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2));
- assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3));
+ // Check candidates which are already present
+ assert!(chain.hypothetical_membership(
+ HypotheticalCandidate::Incomplete {
+ parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
+ candidate_relay_parent: relay_parent_a,
+ candidate_para: para_id,
+ candidate_hash: candidate_a_hash,
+ },
+ &storage,
+ ));
+ assert!(chain.hypothetical_membership(
+ HypotheticalCandidate::Incomplete {
+ parent_head_data_hash: HeadData::from(vec![0x0b]).hash(),
+ candidate_relay_parent: relay_parent_a,
+ candidate_para: para_id,
candidate_hash: candidate_b_hash, + }, + &storage, + )); - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); + // Forks not allowed. + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(21)), + }, + &storage, + )); + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(22)), + }, + &storage, + )); - assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 2, |_| true), - vec![candidate_a_hash, candidate_b_hash], - ); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 3, |_| true), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], - ); - assert_eq!( - tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), - vec![candidate_b_hash, candidate_a_hash], - ); + // Unknown candidate which builds on top of the current chain. + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(23)), + }, + &storage, + )); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 6, |_| true), - vec![ - candidate_a_hash, - candidate_b_hash, - candidate_a_hash, - candidate_b_hash, - candidate_a_hash - ], - ); + // Unknown unconnected candidate which may be valid. + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(23)), + }, + &storage, + )); - for count in 3..7 { - assert_eq!( - tree.find_backable_chain( - [candidate_a_hash, candidate_b_hash].into_iter().collect(), - count, - |_| true - ), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + // The number of unconnected candidates is limited (chain.len() + unconnected) <= max_depth + { + // C will be an unconnected candidate. 
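+ // A quick sanity check on the limit stated above: the chain built below already holds A
+ // and B (len 2) and the scope uses max_depth = 2, so even a single unconnected candidate
+ // would make chain.len() + unconnected exceed max_depth; C must therefore be rejected.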
+ let (pvd_c, candidate_c) = make_committed_candidate(
+ para_id,
+ relay_parent_a,
+ 0,
+ vec![0x0e].into(),
+ vec![0x0f].into(),
+ 0,
 );
- }
-}
+ let candidate_c_hash = candidate_c.hash();

-#[test]
-fn hypothetical_depths_known_and_unknown() {
- let mut storage = CandidateStorage::new();
-
- let para_id = ParaId::from(5u32);
- let relay_parent_a = Hash::repeat_byte(1);
-
- let (pvd_a, candidate_a) = make_committed_candidate(
- para_id,
- relay_parent_a,
- 0,
- vec![0x0a].into(),
- vec![0x0b].into(), // input same as output
- 0,
- );
- let candidate_a_hash = candidate_a.hash();
-
- let (pvd_b, candidate_b) = make_committed_candidate(
- para_id,
- relay_parent_a,
- 0,
- vec![0x0b].into(),
- vec![0x0a].into(), // input same as output
- 0,
- );
- let candidate_b_hash = candidate_b.hash();
-
- let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
- let pending_availability = Vec::new();
-
- let relay_parent_a_info = RelayChainBlockInfo {
- number: pvd_a.relay_parent_number,
- hash: relay_parent_a,
- storage_root: pvd_a.relay_parent_storage_root,
- };
-
- let max_depth = 4;
- storage.add_candidate(candidate_a, pvd_a).unwrap();
- storage.add_candidate(candidate_b, pvd_b).unwrap();
- let scope = Scope::with_ancestors(
- para_id,
- relay_parent_a_info,
- base_constraints,
- pending_availability,
- max_depth,
- vec![],
- )
- .unwrap();
- let tree = FragmentTree::populate(scope, &storage);
+ // Add an invalid candidate in the storage. This would introduce a fork. Just to test that
+ // it's ignored.
+ let (invalid_pvd, invalid_candidate) = make_committed_candidate(
+ para_id,
+ relay_parent_a,
+ 1,
+ vec![0x0a].into(),
+ vec![0x0b].into(),
+ 0,
+ );

- let candidates: Vec<_> = tree.candidates().collect();
- assert_eq!(candidates.len(), 2);
- assert_eq!(tree.nodes.len(), max_depth + 1);
+ let scope = Scope::with_ancestors(
+ para_id,
+ relay_parent_a_info,
+ base_constraints,
+ vec![],
+ 2,
+ vec![],
+ )
+ .unwrap();
+ let mut storage = storage.clone();
+ storage.add_candidate(candidate_c, pvd_c, CandidateState::Seconded).unwrap();

- assert_eq!(
- tree.hypothetical_depths(
- candidate_a_hash,
- HypotheticalCandidate::Incomplete {
- parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
- relay_parent: relay_parent_a,
- },
- &storage,
- false,
- ),
- vec![0, 2, 4],
- );
+ let chain = FragmentChain::populate(scope, &storage);
+ assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]);

- assert_eq!(
- tree.hypothetical_depths(
- candidate_b_hash,
- HypotheticalCandidate::Incomplete {
- parent_head_data_hash: HeadData::from(vec![0x0b]).hash(),
- relay_parent: relay_parent_a,
- },
- &storage,
- false,
- ),
- vec![1, 3],
- );
+ storage
+ .add_candidate(invalid_candidate, invalid_pvd, CandidateState::Seconded)
+ .unwrap();

- assert_eq!(
- tree.hypothetical_depths(
- CandidateHash(Hash::repeat_byte(21)),
+ // Check that C is not accepted as a potential unconnected candidate: the chain already
+ // holds two candidates and max_depth is 2, so the unconnected limit is already reached.
+ assert!(!chain.hypothetical_membership(
 HypotheticalCandidate::Incomplete {
- parent_head_data_hash: HeadData::from(vec![0x0a]).hash(),
- relay_parent: relay_parent_a,
+ parent_head_data_hash: HeadData::from(vec![0x0e]).hash(),
+ candidate_relay_parent: relay_parent_a,
+ candidate_hash: candidate_c_hash,
+ candidate_para: para_id
 },
 &storage,
- false,
- ),
- vec![0, 2, 4],
- );
+ ));

- assert_eq!(
- tree.hypothetical_depths(
- CandidateHash(Hash::repeat_byte(22)),
+ // A candidate building on top of C is rejected as well, since C is already an
+ // unconnected candidate in the storage.
+ assert!(!chain.hypothetical_membership( HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_a, + parent_head_data_hash: HeadData::from(vec![0x0f]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(23)), }, &storage, - false, - ), - vec![1, 3] - ); + )); + } } #[test] -fn hypothetical_depths_stricter_on_complete() { - let storage = CandidateStorage::new(); +fn hypothetical_membership_stricter_on_complete_candidates() { + let storage = CandidateStorage::default(); let para_id = ParaId::from(5u32); let relay_parent_a = Hash::repeat_byte(1); @@ -1197,161 +1617,31 @@ fn hypothetical_depths_stricter_on_complete() { vec![], ) .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - assert_eq!( - tree.hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![0], - ); - - assert!(tree - .hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Complete { - receipt: Cow::Owned(candidate_a), - persisted_validation_data: Cow::Owned(pvd_a), - }, - &storage, - false, - ) - .is_empty()); -} - -#[test] -fn hypothetical_depths_backed_in_path() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let (pvd_c, candidate_c) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0d].into(), - 0, - ); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - storage.add_candidate(candidate_c, pvd_c).unwrap(); - - // `A` and `B` are backed, `C` is not. 
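 // NOTE on the asymmetry tested below: an `Incomplete` hypothetical candidate only names
 // its parent head-data hash and relay parent, so the chain can merely check that it would
 // link somewhere. A `Complete` one carries the full receipt and persisted validation
 // data, allowing the full constraint checks to run, which this test's candidate is
 // constructed to fail.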
- storage.mark_backed(&candidate_a_hash); - storage.mark_backed(&candidate_b_hash); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 3); - assert_eq!(tree.nodes.len(), 3); - - let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - vec![0], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - vec![2], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - Vec::::new(), - ); + let chain = FragmentChain::populate(scope, &storage); + + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: candidate_a_hash, + }, + &storage, + )); - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![2], // non-empty if `false`. 
- ); + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Complete { + receipt: Arc::new(candidate_a), + persisted_validation_data: pvd_a, + candidate_hash: candidate_a_hash, + }, + &storage, + )); } #[test] -fn pending_availability_in_scope() { - let mut storage = CandidateStorage::new(); +fn hypothetical_membership_with_pending_availability_in_scope() { + let mut storage = CandidateStorage::default(); let para_id = ParaId::from(5u32); let relay_parent_a = Hash::repeat_byte(1); @@ -1402,8 +1692,8 @@ fn pending_availability_in_scope() { }; let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); + storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); + storage.add_candidate(candidate_b, pvd_b, CandidateState::Backed).unwrap(); storage.mark_backed(&candidate_a_hash); let scope = Scope::with_ancestors( @@ -1415,37 +1705,49 @@ fn pending_availability_in_scope() { vec![relay_parent_b_info], ) .unwrap(); - let tree = FragmentTree::populate(scope, &storage); + let chain = FragmentChain::populate(scope, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), 2); + assert_eq!(chain.to_vec().len(), 2); let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_c, - }, - &storage, - false, - ), - vec![1], - ); + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_hash: candidate_a_hash, + candidate_para: para_id + }, + &storage, + )); - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - relay_parent: relay_parent_b, - }, - &storage, - false, - ), - vec![2], - ); + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + candidate_relay_parent: relay_parent_c, + candidate_para: para_id, + candidate_hash: candidate_d_hash, + }, + &storage, + )); + + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + candidate_relay_parent: relay_parent_c, + candidate_para: para_id, + candidate_hash: candidate_d_hash, + }, + &storage, + )); + + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), + candidate_relay_parent: relay_parent_b, + candidate_para: para_id, + candidate_hash: candidate_d_hash, + }, + &storage, + )); } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 0b1a2e034a2893e6397b986405df28a91776d819..d5bb5ff76ba8e8e603f2ab6b915b6c8fdb2d5f3c 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -21,22 +21,20 @@ //! This is the main coordinator of work within the node for the collation and //! backing phases of parachain consensus. //! -//! This is primarily an implementation of "Fragment Trees", as described in +//! This is primarily an implementation of "Fragment Chains", as described in //! 
[`polkadot_node_subsystem_util::inclusion_emulator`]. //! //! This subsystem also handles concerns such as the relay-chain being forkful and session changes. -use std::{ - borrow::Cow, - collections::{HashMap, HashSet}, -}; +use std::collections::{HashMap, HashSet}; +use fragment_chain::{FragmentChain, PotentialAddition}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - Ancestors, ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, - HypotheticalFrontierRequest, IntroduceCandidateRequest, ParentHeadData, + Ancestors, ChainApiMessage, HypotheticalCandidate, HypotheticalMembership, + HypotheticalMembershipRequest, IntroduceSecondedCandidateRequest, ParentHeadData, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, @@ -56,7 +54,8 @@ use polkadot_primitives::{ use crate::{ error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, fragment_chain::{ - CandidateStorage, CandidateStorageInsertionError, FragmentTree, Scope as TreeScope, + CandidateState, CandidateStorage, CandidateStorageInsertionError, + Scope as FragmentChainScope, }, }; @@ -72,7 +71,7 @@ const LOG_TARGET: &str = "parachain::prospective-parachains"; struct RelayBlockViewData { // Scheduling info for paras and upcoming paras. - fragment_trees: HashMap, + fragment_chains: HashMap, pending_availability: HashSet, } @@ -141,12 +140,10 @@ async fn run_iteration( }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Communication { msg } => match msg { - ProspectiveParachainsMessage::IntroduceCandidate(request, tx) => - handle_candidate_introduced(&mut *ctx, view, request, tx).await?, - ProspectiveParachainsMessage::CandidateSeconded(para, candidate_hash) => - handle_candidate_seconded(view, para, candidate_hash), + ProspectiveParachainsMessage::IntroduceSecondedCandidate(request, tx) => + handle_introduce_seconded_candidate(&mut *ctx, view, request, tx, metrics).await, ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => - handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?, + handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await, ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para, @@ -154,10 +151,8 @@ async fn run_iteration( ancestors, tx, ) => answer_get_backable_candidates(&view, relay_parent, para, count, ancestors, tx), - ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => - answer_hypothetical_frontier_request(&view, request, tx), - ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => - answer_tree_membership_request(&view, para, candidate, tx), + ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx) => + answer_hypothetical_membership_request(&view, request, tx, metrics), ProspectiveParachainsMessage::GetMinimumRelayParents(relay_parent, tx) => answer_minimum_relay_parents_request(&view, relay_parent, tx), ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx) => @@ -175,8 +170,8 @@ async fn handle_active_leaves_update( metrics: &Metrics, ) -> JfyiErrorResult<()> { // 1. clean up inactive leaves - // 2. determine all scheduled para at new block - // 3. construct new fragment tree for each para for each new leaf + // 2. determine all scheduled paras at the new block + // 3. construct new fragment chain for each para for each new leaf // 4. prune candidate storage. 
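 // A condensed sketch of steps 2 and 3 for a single activated leaf (hypothetical shape,
 // using the helper names from this file; the real loop below additionally fetches the
 // per-para backing state and seeds the storage with candidates pending availability):
 //
 //   for para in fetch_upcoming_paras(&mut *ctx, hash).await? {
 //       let scope = FragmentChainScope::with_ancestors(
 //           para, block_info.clone(), constraints, pending, max_depth, ancestry.clone(),
 //       )?;
 //       fragment_chains.insert(para, FragmentChain::populate(scope, &*candidate_storage));
 //   }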
for deactivated in &update.deactivated { @@ -203,9 +198,7 @@ async fn handle_active_leaves_update( return Ok(()) }; - let mut pending_availability = HashSet::new(); - let scheduled_paras = - fetch_upcoming_paras(&mut *ctx, hash, &mut pending_availability).await?; + let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, &mut temp_header_cache, hash).await? { @@ -227,30 +220,30 @@ async fn handle_active_leaves_update( let ancestry = fetch_ancestry(&mut *ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; + let mut all_pending_availability = HashSet::new(); + // Find constraints. - let mut fragment_trees = HashMap::new(); + let mut fragment_chains = HashMap::new(); for para in scheduled_paras { let candidate_storage = - view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new); + view.candidate_storage.entry(para).or_insert_with(CandidateStorage::default); let backing_state = fetch_backing_state(&mut *ctx, hash, para).await?; - let (constraints, pending_availability) = match backing_state { - Some(c) => c, - None => { - // This indicates a runtime conflict of some kind. - - gum::debug!( - target: LOG_TARGET, - para_id = ?para, - relay_parent = ?hash, - "Failed to get inclusion backing state." - ); + let Some((constraints, pending_availability)) = backing_state else { + // This indicates a runtime conflict of some kind. + gum::debug!( + target: LOG_TARGET, + para_id = ?para, + relay_parent = ?hash, + "Failed to get inclusion backing state." + ); - continue - }, + continue }; + all_pending_availability.extend(pending_availability.iter().map(|c| c.candidate_hash)); + let pending_availability = preprocess_candidates_pending_availability( ctx, &mut temp_header_cache, @@ -261,15 +254,15 @@ async fn handle_active_leaves_update( let mut compact_pending = Vec::with_capacity(pending_availability.len()); for c in pending_availability { - let res = candidate_storage.add_candidate(c.candidate, c.persisted_validation_data); + let res = candidate_storage.add_candidate( + c.candidate, + c.persisted_validation_data, + CandidateState::Backed, + ); let candidate_hash = c.compact.candidate_hash; - compact_pending.push(c.compact); match res { - Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { - // Anything on-chain is guaranteed to be backed. 
- candidate_storage.mark_backed(&candidate_hash); - }, + Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {}, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -278,11 +271,15 @@ async fn handle_active_leaves_update( ?err, "Scraped invalid candidate pending availability", ); + + break }, } + + compact_pending.push(c.compact); } - let scope = TreeScope::with_ancestors( + let scope = FragmentChainScope::with_ancestors( para, block_info.clone(), constraints, @@ -297,16 +294,26 @@ async fn handle_active_leaves_update( relay_parent = ?hash, min_relay_parent = scope.earliest_relay_parent().number, para_id = ?para, - "Creating fragment tree" + "Creating fragment chain" ); - let tree = FragmentTree::populate(scope, &*candidate_storage); + let chain = FragmentChain::populate(scope, &*candidate_storage); + + gum::trace!( + target: LOG_TARGET, + relay_parent = ?hash, + para_id = ?para, + "Populated fragment chain with {} candidates", + chain.len() + ); - fragment_trees.insert(para, tree); + fragment_chains.insert(para, chain); } - view.active_leaves - .insert(hash, RelayBlockViewData { fragment_trees, pending_availability }); + view.active_leaves.insert( + hash, + RelayBlockViewData { fragment_chains, pending_availability: all_pending_availability }, + ); } if !update.deactivated.is_empty() { @@ -318,18 +325,39 @@ async fn handle_active_leaves_update( } fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { - metrics.time_prune_view_candidate_storage(); + let _timer = metrics.time_prune_view_candidate_storage(); let active_leaves = &view.active_leaves; let mut live_candidates = HashSet::new(); let mut live_paras = HashSet::new(); for sub_view in active_leaves.values() { - for (para_id, fragment_tree) in &sub_view.fragment_trees { - live_candidates.extend(fragment_tree.candidates()); + live_candidates.extend(sub_view.pending_availability.iter().cloned()); + + for (para_id, fragment_chain) in &sub_view.fragment_chains { + live_candidates.extend(fragment_chain.to_vec()); live_paras.insert(*para_id); } + } - live_candidates.extend(sub_view.pending_availability.iter().cloned()); + let connected_candidates_count = live_candidates.len(); + for (leaf, sub_view) in active_leaves.iter() { + for (para_id, fragment_chain) in &sub_view.fragment_chains { + if let Some(storage) = view.candidate_storage.get(para_id) { + let unconnected_potential = + fragment_chain.find_unconnected_potential_candidates(storage, None); + if !unconnected_potential.is_empty() { + gum::trace!( + target: LOG_TARGET, + ?leaf, + "Keeping {} unconnected candidates for paraid {} in storage: {:?}", + unconnected_potential.len(), + para_id, + unconnected_potential + ); + } + live_candidates.extend(unconnected_potential); + } + } } view.candidate_storage.retain(|para_id, storage| { @@ -343,7 +371,21 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { // This maintains a convenient invariant that para-id storage exists // as long as there's an active head which schedules the para. 
true - }) + }); + + for (para_id, storage) in view.candidate_storage.iter() { + gum::trace!( + target: LOG_TARGET, + "Keeping a total of {} connected candidates for paraid {} in storage", + storage.candidates().count(), + para_id, + ); + } + + metrics.record_candidate_storage_size( + connected_candidates_count as u64, + live_candidates.len().saturating_sub(connected_candidates_count) as u64, + ); } struct ImportablePendingAvailability { @@ -365,22 +407,20 @@ async fn preprocess_candidates_pending_availability( let expected_count = pending_availability.len(); for (i, pending) in pending_availability.into_iter().enumerate() { - let relay_parent = - match fetch_block_info(ctx, cache, pending.descriptor.relay_parent).await? { - None => { - gum::debug!( - target: LOG_TARGET, - ?pending.candidate_hash, - ?pending.descriptor.para_id, - index = ?i, - ?expected_count, - "Had to stop processing pending candidates early due to missing info.", - ); + let Some(relay_parent) = + fetch_block_info(ctx, cache, pending.descriptor.relay_parent).await? + else { + gum::debug!( + target: LOG_TARGET, + ?pending.candidate_hash, + ?pending.descriptor.para_id, + index = ?i, + ?expected_count, + "Had to stop processing pending candidates early due to missing info.", + ); - break - }, - Some(b) => b, - }; + break + }; let next_required_parent = pending.commitments.head_data.clone(); importable.push(ImportablePendingAvailability { @@ -407,104 +447,139 @@ async fn preprocess_candidates_pending_availability( } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn handle_candidate_introduced( +async fn handle_introduce_seconded_candidate( _ctx: &mut Context, view: &mut View, - request: IntroduceCandidateRequest, - tx: oneshot::Sender, -) -> JfyiErrorResult<()> { - let IntroduceCandidateRequest { + request: IntroduceSecondedCandidateRequest, + tx: oneshot::Sender, + metrics: &Metrics, +) { + let _timer = metrics.time_introduce_seconded_candidate(); + + let IntroduceSecondedCandidateRequest { candidate_para: para, candidate_receipt: candidate, persisted_validation_data: pvd, } = request; - // Add the candidate to storage. - // Then attempt to add it to all trees. - let storage = match view.candidate_storage.get_mut(¶) { - None => { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - candidate_hash = ?candidate.hash(), - "Received seconded candidate for inactive para", - ); + let Some(storage) = view.candidate_storage.get_mut(¶) else { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + candidate_hash = ?candidate.hash(), + "Received seconded candidate for inactive para", + ); - let _ = tx.send(Vec::new()); - return Ok(()) - }, - Some(storage) => storage, + let _ = tx.send(false); + return }; - let candidate_hash = match storage.add_candidate(candidate, pvd) { - Ok(c) => c, - Err(CandidateStorageInsertionError::CandidateAlreadyKnown(c)) => { - // Candidate known - return existing fragment tree membership. - let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, c)); - return Ok(()) - }, - Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => { - // We can't log the candidate hash without either doing more ~expensive - // hashing but this branch indicates something is seriously wrong elsewhere - // so it's doubtful that it would affect debugging. + let parent_head_hash = pvd.parent_head.hash(); + let output_head_hash = Some(candidate.commitments.head_data.hash()); + + // We first introduce the candidate in the storage and then try to extend the chain. 
+ // If the candidate gets included in the chain, we can keep it in storage. + // If it doesn't, check that it's still a potential candidate in at least one fragment chain. + // If it's not, we can remove it. + + let candidate_hash = + match storage.add_candidate(candidate.clone(), pvd, CandidateState::Seconded) { + Ok(c) => c, + Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { + gum::debug!( + target: LOG_TARGET, + para = ?para, + "Attempting to introduce an already known candidate: {:?}", + candidate.hash() + ); + // Candidate already known. + let _ = tx.send(true); + return + }, + Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => { + // We can't log the candidate hash without either doing more ~expensive + // hashing but this branch indicates something is seriously wrong elsewhere + // so it's doubtful that it would affect debugging. - gum::warn!( + gum::warn!( + target: LOG_TARGET, + para = ?para, + "Received seconded candidate had mismatching validation data", + ); + + let _ = tx.send(false); + return + }, + }; + + let mut keep_in_storage = false; + for (relay_parent, leaf_data) in view.active_leaves.iter_mut() { + if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { + gum::trace!( target: LOG_TARGET, para = ?para, - "Received seconded candidate had mismatching validation data", + ?relay_parent, + "Candidates in chain before trying to introduce a new one: {:?}", + chain.to_vec() ); + chain.extend_from_storage(&*storage); + if chain.contains_candidate(&candidate_hash) { + keep_in_storage = true; - let _ = tx.send(Vec::new()); - return Ok(()) - }, - }; + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + para = ?para, + ?candidate_hash, + "Added candidate to chain.", + ); + } else { + match chain.can_add_candidate_as_potential( + &storage, + &candidate_hash, + &candidate.descriptor.relay_parent, + parent_head_hash, + output_head_hash, + ) { + PotentialAddition::Anyhow => { + gum::trace!( + target: LOG_TARGET, + para = ?para, + ?relay_parent, + ?candidate_hash, + "Kept candidate as unconnected potential.", + ); - let mut membership = Vec::new(); - for (relay_parent, leaf_data) in &mut view.active_leaves { - if let Some(tree) = leaf_data.fragment_trees.get_mut(¶) { - tree.add_and_populate(candidate_hash, &*storage); - if let Some(depths) = tree.candidate(&candidate_hash) { - membership.push((*relay_parent, depths)); + keep_in_storage = true; + }, + _ => { + gum::trace!( + target: LOG_TARGET, + para = ?para, + ?relay_parent, + "Not introducing a new candidate: {:?}", + candidate_hash + ); + }, + } } } } - if membership.is_empty() { + // If there is at least one leaf where this candidate can be added or potentially added in the + // future, keep it in storage. 
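+ // Condensed, the loop above computes (a sketch, using the names in scope): keep the
+ // candidate iff any active leaf's chain for `para` contains it after
+ // `extend_from_storage`, or reports `PotentialAddition::Anyhow` from
+ // `can_add_candidate_as_potential`.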
+ if !keep_in_storage { storage.remove_candidate(&candidate_hash); - } - - let _ = tx.send(membership); - - Ok(()) -} - -fn handle_candidate_seconded(view: &mut View, para: ParaId, candidate_hash: CandidateHash) { - let storage = match view.candidate_storage.get_mut(¶) { - None => { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received instruction to second unknown candidate", - ); - return - }, - Some(storage) => storage, - }; - - if !storage.contains(&candidate_hash) { - gum::warn!( + gum::debug!( target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received instruction to second unknown candidate", + para = ?para, + candidate = ?candidate_hash, + "Newly-seconded candidate cannot be kept under any active leaf", ); - - return } - storage.mark_seconded(&candidate_hash); + let _ = tx.send(keep_in_storage); } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] @@ -513,19 +588,16 @@ async fn handle_candidate_backed( view: &mut View, para: ParaId, candidate_hash: CandidateHash, -) -> JfyiErrorResult<()> { - let storage = match view.candidate_storage.get_mut(¶) { - None => { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received instruction to back unknown candidate", - ); +) { + let Some(storage) = view.candidate_storage.get_mut(¶) else { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received instruction to back a candidate for unscheduled para", + ); - return Ok(()) - }, - Some(storage) => storage, + return }; if !storage.contains(&candidate_hash) { @@ -536,7 +608,7 @@ async fn handle_candidate_backed( "Received instruction to back unknown candidate", ); - return Ok(()) + return } if storage.is_backed(&candidate_hash) { @@ -547,11 +619,10 @@ async fn handle_candidate_backed( "Received redundant instruction to mark candidate as backed", ); - return Ok(()) + return } storage.mark_backed(&candidate_hash); - Ok(()) } fn answer_get_backable_candidates( @@ -562,62 +633,71 @@ fn answer_get_backable_candidates( ancestors: Ancestors, tx: oneshot::Sender>, ) { - let data = match view.active_leaves.get(&relay_parent) { - None => { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - para_id = ?para, - "Requested backable candidate for inactive relay-parent." - ); + let Some(data) = view.active_leaves.get(&relay_parent) else { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Requested backable candidate for inactive relay-parent." + ); - let _ = tx.send(vec![]); - return - }, - Some(d) => d, + let _ = tx.send(vec![]); + return }; - let tree = match data.fragment_trees.get(¶) { - None => { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - para_id = ?para, - "Requested backable candidate for inactive para." - ); + let Some(chain) = data.fragment_chains.get(¶) else { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Requested backable candidate for inactive para." 
+ ); - let _ = tx.send(vec![]); - return - }, - Some(tree) => tree, + let _ = tx.send(vec![]); + return }; - let storage = match view.candidate_storage.get(¶) { - None => { - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - para_id = ?para, - "No candidate storage for active para", - ); + let Some(storage) = view.candidate_storage.get(¶) else { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "No candidate storage for active para", + ); - let _ = tx.send(vec![]); - return - }, - Some(s) => s, + let _ = tx.send(vec![]); + return }; - let backable_candidates: Vec<_> = tree + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Candidate storage for para: {:?}", + storage.candidates().map(|candidate| candidate.hash()).collect::>() + ); + + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Candidate chain for para: {:?}", + chain.to_vec() + ); + + let backable_candidates: Vec<_> = chain .find_backable_chain(ancestors.clone(), count, |candidate| storage.is_backed(candidate)) .into_iter() .filter_map(|child_hash| { - storage.relay_parent_by_candidate_hash(&child_hash).map_or_else( + storage.relay_parent_of_candidate(&child_hash).map_or_else( || { + // Here, we'd actually need to trim all of the candidates that follow. Or + // not, the runtime will do this. Impossible scenario anyway. gum::error!( target: LOG_TARGET, ?child_hash, para_id = ?para, - "Candidate is present in fragment tree but not in candidate's storage!", + "Candidate is present in fragment chain but not in candidate's storage!", ); None }, @@ -639,6 +719,7 @@ fn answer_get_backable_candidates( target: LOG_TARGET, ?relay_parent, ?backable_candidates, + ?ancestors, "Found backable candidates", ); } @@ -646,58 +727,32 @@ fn answer_get_backable_candidates( let _ = tx.send(backable_candidates); } -fn answer_hypothetical_frontier_request( +fn answer_hypothetical_membership_request( view: &View, - request: HypotheticalFrontierRequest, - tx: oneshot::Sender>, + request: HypotheticalMembershipRequest, + tx: oneshot::Sender>, + metrics: &Metrics, ) { + let _timer = metrics.time_hypothetical_membership_request(); + let mut response = Vec::with_capacity(request.candidates.len()); for candidate in request.candidates { - response.push((candidate, Vec::new())); + response.push((candidate, vec![])); } - let required_active_leaf = request.fragment_tree_relay_parent; + let required_active_leaf = request.fragment_chain_relay_parent; for (active_leaf, leaf_view) in view .active_leaves .iter() .filter(|(h, _)| required_active_leaf.as_ref().map_or(true, |x| h == &x)) { - for &mut (ref c, ref mut membership) in &mut response { - let fragment_tree = match leaf_view.fragment_trees.get(&c.candidate_para()) { - None => continue, - Some(f) => f, - }; - let candidate_storage = match view.candidate_storage.get(&c.candidate_para()) { - None => continue, - Some(storage) => storage, - }; - - let candidate_hash = c.candidate_hash(); - let hypothetical = match c { - HypotheticalCandidate::Complete { receipt, persisted_validation_data, .. } => - fragment_chain::HypotheticalCandidate::Complete { - receipt: Cow::Borrowed(receipt), - persisted_validation_data: Cow::Borrowed(persisted_validation_data), - }, - HypotheticalCandidate::Incomplete { - parent_head_data_hash, - candidate_relay_parent, - .. 
- } => fragment_chain::HypotheticalCandidate::Incomplete { - relay_parent: *candidate_relay_parent, - parent_head_data_hash: *parent_head_data_hash, - }, - }; - - let depths = fragment_tree.hypothetical_depths( - candidate_hash, - hypothetical, - candidate_storage, - request.backed_in_path_only, - ); + for &mut (ref candidate, ref mut membership) in &mut response { + let para_id = &candidate.candidate_para(); + let Some(fragment_chain) = leaf_view.fragment_chains.get(para_id) else { continue }; + let Some(candidate_storage) = view.candidate_storage.get(para_id) else { continue }; - if !depths.is_empty() { - membership.push((*active_leaf, depths)); + if fragment_chain.hypothetical_membership(candidate.clone(), candidate_storage) { + membership.push(*active_leaf); } } } @@ -705,31 +760,6 @@ fn answer_hypothetical_frontier_request( let _ = tx.send(response); } -fn fragment_tree_membership( - active_leaves: &HashMap, - para: ParaId, - candidate: CandidateHash, -) -> FragmentTreeMembership { - let mut membership = Vec::new(); - for (relay_parent, view_data) in active_leaves { - if let Some(tree) = view_data.fragment_trees.get(¶) { - if let Some(depths) = tree.candidate(&candidate) { - membership.push((*relay_parent, depths)); - } - } - } - membership -} - -fn answer_tree_membership_request( - view: &View, - para: ParaId, - candidate: CandidateHash, - tx: oneshot::Sender, -) { - let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, candidate)); -} - fn answer_minimum_relay_parents_request( view: &View, relay_parent: Hash, @@ -737,8 +767,8 @@ fn answer_minimum_relay_parents_request( ) { let mut v = Vec::new(); if let Some(leaf_data) = view.active_leaves.get(&relay_parent) { - for (para_id, fragment_tree) in &leaf_data.fragment_trees { - v.push((*para_id, fragment_tree.scope().earliest_relay_parent().number)); + for (para_id, fragment_chain) in &leaf_data.fragment_chains { + v.push((*para_id, fragment_chain.scope().earliest_relay_parent().number)); } } @@ -752,9 +782,9 @@ fn answer_prospective_validation_data_request( ) { // 1. Try to get the head-data from the candidate store if known. // 2. Otherwise, it might exist as the base in some relay-parent and we can find it by iterating - // fragment trees. + // fragment chains. // 3. Otherwise, it is unknown. - // 4. Also try to find the relay parent block info by scanning fragment trees. + // 4. Also try to find the relay parent block info by scanning fragment chains. // 5. If head data and relay parent block info are found - success. Otherwise, failure. 
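 // A condensed sketch of steps 2-4 (hypothetical shape, using the names from below): each
 // per-leaf fragment chain is scanned until all three pieces of information are found:
 //
 //   for chain in view.active_leaves.values().filter_map(|x| x.fragment_chains.get(&para)) {
 //       relay_parent_info =
 //           relay_parent_info.or_else(|| chain.scope().ancestor(&candidate_relay_parent));
 //       // ...likewise for head_data (from the base constraints) and max_pov_size.
 //   }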
let storage = match view.candidate_storage.get(&request.para_id) { @@ -776,35 +806,32 @@ fn answer_prospective_validation_data_request( let mut relay_parent_info = None; let mut max_pov_size = None; - for fragment_tree in view + for fragment_chain in view .active_leaves .values() - .filter_map(|x| x.fragment_trees.get(&request.para_id)) + .filter_map(|x| x.fragment_chains.get(&request.para_id)) { if head_data.is_some() && relay_parent_info.is_some() && max_pov_size.is_some() { break } if relay_parent_info.is_none() { - relay_parent_info = - fragment_tree.scope().ancestor_by_hash(&request.candidate_relay_parent); + relay_parent_info = fragment_chain.scope().ancestor(&request.candidate_relay_parent); } if head_data.is_none() { - let required_parent = &fragment_tree.scope().base_constraints().required_parent; + let required_parent = &fragment_chain.scope().base_constraints().required_parent; if required_parent.hash() == parent_head_data_hash { head_data = Some(required_parent.clone()); } } if max_pov_size.is_none() { - let contains_ancestor = fragment_tree - .scope() - .ancestor_by_hash(&request.candidate_relay_parent) - .is_some(); + let contains_ancestor = + fragment_chain.scope().ancestor(&request.candidate_relay_parent).is_some(); if contains_ancestor { // We are leaning hard on two assumptions here. - // 1. That the fragment tree never contains allowed relay-parents whose session for + // 1. That the fragment chain never contains allowed relay-parents whose session for // children is different from that of the base block's. // 2. That the max_pov_size is only configurable per session. - max_pov_size = Some(fragment_tree.scope().base_constraints().max_pov_size); + max_pov_size = Some(fragment_chain.scope().base_constraints().max_pov_size); } } } @@ -843,7 +870,6 @@ async fn fetch_backing_state( async fn fetch_upcoming_paras( ctx: &mut Context, relay_parent: Hash, - pending_availability: &mut HashSet, ) -> JfyiErrorResult> { let (tx, rx) = oneshot::channel(); @@ -860,8 +886,6 @@ async fn fetch_upcoming_paras( for core in cores { match core { CoreState::Occupied(occupied) => { - pending_availability.insert(occupied.candidate_hash); - if let Some(next_up_on_available) = occupied.next_up_on_available { upcoming.insert(next_up_on_available.para_id); } diff --git a/polkadot/node/core/prospective-parachains/src/metrics.rs b/polkadot/node/core/prospective-parachains/src/metrics.rs index 57061497a1c0d2923dedeea341a4cf4cf2ed8807..5abd9f56f306cdad515b93c794a51de516417b88 100644 --- a/polkadot/node/core/prospective-parachains/src/metrics.rs +++ b/polkadot/node/core/prospective-parachains/src/metrics.rs @@ -14,11 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use polkadot_node_subsystem_util::metrics::{self, prometheus}; +use polkadot_node_subsystem::prometheus::Opts; +use polkadot_node_subsystem_util::metrics::{ + self, + prometheus::{self, GaugeVec, U64}, +}; #[derive(Clone)] pub(crate) struct MetricsInner { - pub(crate) prune_view_candidate_storage: prometheus::Histogram, + prune_view_candidate_storage: prometheus::Histogram, + introduce_seconded_candidate: prometheus::Histogram, + hypothetical_membership: prometheus::Histogram, + candidate_storage_count: prometheus::GaugeVec, } /// Candidate backing metrics. @@ -34,6 +41,40 @@ impl Metrics { .as_ref() .map(|metrics| metrics.prune_view_candidate_storage.start_timer()) } + + /// Provide a timer for handling `IntroduceSecondedCandidate` which observes on drop. 
+ pub fn time_introduce_seconded_candidate( + &self, + ) -> Option { + self.0 + .as_ref() + .map(|metrics| metrics.introduce_seconded_candidate.start_timer()) + } + + /// Provide a timer for handling `GetHypotheticalMembership` which observes on drop. + pub fn time_hypothetical_membership_request( + &self, + ) -> Option { + self.0.as_ref().map(|metrics| metrics.hypothetical_membership.start_timer()) + } + + /// Record the size of the candidate storage. First param is the connected candidates count, + /// second param is the unconnected candidates count. + pub fn record_candidate_storage_size(&self, connected_count: u64, unconnected_count: u64) { + self.0.as_ref().map(|metrics| { + metrics + .candidate_storage_count + .with_label_values(&["connected"]) + .set(connected_count) + }); + + self.0.as_ref().map(|metrics| { + metrics + .candidate_storage_count + .with_label_values(&["unconnected"]) + .set(unconnected_count) + }); + } } impl metrics::Metrics for Metrics { @@ -46,6 +87,30 @@ impl metrics::Metrics for Metrics { ))?, registry, )?, + introduce_seconded_candidate: prometheus::register( + prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + "polkadot_parachain_prospective_parachains_introduce_seconded_candidate", + "Time spent within `prospective_parachains::handle_introduce_seconded_candidate`", + ))?, + registry, + )?, + hypothetical_membership: prometheus::register( + prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + "polkadot_parachain_prospective_parachains_hypothetical_membership", + "Time spent responding to `GetHypotheticalMembership`", + ))?, + registry, + )?, + candidate_storage_count: prometheus::register( + GaugeVec::new( + Opts::new( + "polkadot_parachain_prospective_parachains_candidate_storage_count", + "Number of candidates present in the candidate storage, split by connected and unconnected" + ), + &["type"], + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 8989911a33239d3b6f775bcf6271c6f40d9a0bac..4bc47367278864e6f5e7136fe07a4fe1d1be88a5 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -19,7 +19,7 @@ use assert_matches::assert_matches; use polkadot_node_subsystem::{ errors::RuntimeApiError, messages::{ - AllMessages, HypotheticalFrontierRequest, ParentHeadData, ProspectiveParachainsMessage, + AllMessages, HypotheticalMembershipRequest, ParentHeadData, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, }, }; @@ -340,36 +340,42 @@ async fn deactivate_leaf(virtual_overseer: &mut VirtualOverseer, hash: Hash) { .await; } -async fn introduce_candidate( +async fn introduce_seconded_candidate( virtual_overseer: &mut VirtualOverseer, candidate: CommittedCandidateReceipt, pvd: PersistedValidationData, ) { - let req = IntroduceCandidateRequest { + let req = IntroduceSecondedCandidateRequest { candidate_para: candidate.descriptor().para_id, candidate_receipt: candidate, persisted_validation_data: pvd, }; - let (tx, _) = oneshot::channel(); + let (tx, rx) = oneshot::channel(); virtual_overseer .send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::IntroduceCandidate(req, tx), + msg: ProspectiveParachainsMessage::IntroduceSecondedCandidate(req, tx), }) .await; + assert!(rx.await.unwrap()); } -async fn second_candidate( +async fn introduce_seconded_candidate_failed( virtual_overseer: &mut 
VirtualOverseer, candidate: CommittedCandidateReceipt, + pvd: PersistedValidationData, ) { + let req = IntroduceSecondedCandidateRequest { + candidate_para: candidate.descriptor().para_id, + candidate_receipt: candidate, + persisted_validation_data: pvd, + }; + let (tx, rx) = oneshot::channel(); virtual_overseer .send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::CandidateSeconded( - candidate.descriptor.para_id, - candidate.hash(), - ), + msg: ProspectiveParachainsMessage::IntroduceSecondedCandidate(req, tx), }) .await; + assert!(!rx.await.unwrap()); } async fn back_candidate( @@ -387,22 +393,6 @@ async fn back_candidate( .await; } -async fn get_membership( - virtual_overseer: &mut VirtualOverseer, - para_id: ParaId, - candidate_hash: CandidateHash, - expected_membership_response: Vec<(Hash, Vec<usize>)>, -) { - let (tx, rx) = oneshot::channel(); - virtual_overseer - .send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::GetTreeMembership(para_id, candidate_hash, tx), - }) - .await; - let resp = rx.await.unwrap(); - assert_eq!(resp, expected_membership_response); -} - async fn get_backable_candidates( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, @@ -420,42 +410,39 @@ }) .await; let resp = rx.await.unwrap(); - assert_eq!(resp.len(), expected_result.len()); assert_eq!(resp, expected_result); } -async fn get_hypothetical_frontier( +async fn get_hypothetical_membership( virtual_overseer: &mut VirtualOverseer, candidate_hash: CandidateHash, receipt: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - fragment_tree_relay_parent: Hash, - backed_in_path_only: bool, - expected_depths: Vec<usize>, + expected_membership: Vec<Hash>, ) { let hypothetical_candidate = HypotheticalCandidate::Complete { candidate_hash, receipt: Arc::new(receipt), persisted_validation_data, }; - let request = HypotheticalFrontierRequest { + let request = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(fragment_tree_relay_parent), - backed_in_path_only, + fragment_chain_relay_parent: None, }; let (tx, rx) = oneshot::channel(); virtual_overseer .send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx), + msg: ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx), }) .await; - let resp = rx.await.unwrap(); - let expected_frontier = if expected_depths.is_empty() { - vec![(hypothetical_candidate, vec![])] - } else { - vec![(hypothetical_candidate, vec![(fragment_tree_relay_parent, expected_depths)])] - }; - assert_eq!(resp, expected_frontier); + let mut resp = rx.await.unwrap(); + assert_eq!(resp.len(), 1); + let (candidate, membership) = resp.remove(0); + assert_eq!(candidate, hypothetical_candidate); + assert_eq!( + membership.into_iter().collect::<HashSet<_>>(), + expected_membership.into_iter().collect::<HashSet<_>>() + ); } async fn get_pvd( @@ -513,11 +500,11 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { } // Send some candidates and make sure all are found: -// - Two for the same leaf A +// - Two for the same leaf A (one for parachain 1 and one for parachain 2) // - One for leaf B on parachain 1 // - One for leaf C on parachain 2 #[test] -fn send_candidates_and_check_if_found() { +fn introduce_candidates_basic() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -563,7 +550,7 @@ fn
send_candidates_and_check_if_found() { test_state.validation_code_hash, ); let candidate_hash_a1 = candidate_a1.hash(); - let response_a1 = vec![(leaf_a.hash, vec![0])]; + let response_a1 = vec![(candidate_hash_a1, leaf_a.hash)]; // Candidate A2 let (candidate_a2, pvd_a2) = make_candidate( @@ -575,7 +562,7 @@ fn send_candidates_and_check_if_found() { test_state.validation_code_hash, ); let candidate_hash_a2 = candidate_a2.hash(); - let response_a2 = vec![(leaf_a.hash, vec![0])]; + let response_a2 = vec![(candidate_hash_a2, leaf_a.hash)]; // Candidate B let (candidate_b, pvd_b) = make_candidate( @@ -587,7 +574,7 @@ fn send_candidates_and_check_if_found() { test_state.validation_code_hash, ); let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_b.hash, vec![0])]; + let response_b = vec![(candidate_hash_b, leaf_b.hash)]; // Candidate C let (candidate_c, pvd_c) = make_candidate( @@ -599,25 +586,78 @@ fn send_candidates_and_check_if_found() { test_state.validation_code_hash, ); let candidate_hash_c = candidate_c.hash(); - let response_c = vec![(leaf_c.hash, vec![0])]; + let response_c = vec![(candidate_hash_c, leaf_c.hash)]; // Introduce candidates. - introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await; - introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await; - introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await; - introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a1.clone(), pvd_a1).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a2.clone(), pvd_a2).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a1, candidate_hash_a1).await; + back_candidate(&mut virtual_overseer, &candidate_a2, candidate_hash_a2).await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + back_candidate(&mut virtual_overseer, &candidate_c, candidate_hash_c).await; // Check candidate tree membership. - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, response_a1).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, response_a2).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + response_a1, + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::default(), + 5, + response_a2, + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + response_b, + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 2.into(), + Ancestors::default(), + 5, + response_c, + ) + .await; + + // Check membership on other leaves. + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; - // The candidates should not be found on other parachains. 
- get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a1, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a2, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_b, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_c, vec![]).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; virtual_overseer }); @@ -629,10 +669,8 @@ fn send_candidates_and_check_if_found() { assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (2, 2)); } -// Send some candidates, check if the candidate won't be found once its relay parent leaves the -// view. #[test] -fn check_candidate_parent_leaving_view() { +fn introduce_candidate_multiple_times() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -644,32 +682,11 @@ fn check_candidate_parent_leaving_view() { (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), ], }; - // Leaf B - let leaf_b = TestLeaf { - number: 101, - hash: Hash::from_low_u64_be(131), - para_data: vec![ - (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), - (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), - ], - }; - // Leaf C - let leaf_c = TestLeaf { - number: 102, - hash: Hash::from_low_u64_be(132), - para_data: vec![ - (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), - (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), - ], - }; - // Activate leaves. activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; - activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; - // Candidate A1 - let (candidate_a1, pvd_a1) = make_candidate( + // Candidate A. + let (candidate_a, pvd_a) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -677,86 +694,45 @@ fn check_candidate_parent_leaving_view() { HeadData(vec![1]), test_state.validation_code_hash, ); - let candidate_hash_a1 = candidate_a1.hash(); - - // Candidate A2 - let (candidate_a2, pvd_a2) = make_candidate( - leaf_a.hash, - leaf_a.number, - 2.into(), - HeadData(vec![2, 3, 4]), - HeadData(vec![2]), - test_state.validation_code_hash, - ); - let candidate_hash_a2 = candidate_a2.hash(); - - // Candidate B - let (candidate_b, pvd_b) = make_candidate( - leaf_b.hash, - leaf_b.number, - 1.into(), - HeadData(vec![3, 4, 5]), - HeadData(vec![3]), - test_state.validation_code_hash, - ); - let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_b.hash, vec![0])]; - - // Candidate C - let (candidate_c, pvd_c) = make_candidate( - leaf_c.hash, - leaf_c.number, - 2.into(), - HeadData(vec![6, 7, 8]), - HeadData(vec![4]), - test_state.validation_code_hash, - ); - let candidate_hash_c = candidate_c.hash(); - let response_c = vec![(leaf_c.hash, vec![0])]; + let candidate_hash_a = candidate_a.hash(); + let response_a = vec![(candidate_hash_a, leaf_a.hash)]; // Introduce candidates. - introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await; - introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await; - introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await; - introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await; - - // Deactivate leaf A. - deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; - - // Candidates A1 and A2 should be gone. Candidates B and C should remain. 
- get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c.clone()).await; - - // Deactivate leaf B. - deactivate_leaf(&mut virtual_overseer, leaf_b.hash).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; - // Candidate B should be gone, C should remain. - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c).await; + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - // Deactivate leaf C. - deactivate_leaf(&mut virtual_overseer, leaf_c.hash).await; + // Check candidate tree membership. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + response_a, + ) + .await; - // Candidate C should be gone. - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, vec![]).await; + // Introduce the same candidate multiple times. It'll return true but it won't be added. + // We'll check below that the candidate count remains 1. + for _ in 0..5 { + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; + } virtual_overseer }); - assert_eq!(view.active_leaves.len(), 0); - assert_eq!(view.candidate_storage.len(), 0); + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } -// Introduce a candidate to multiple forks, see how the membership is returned. #[test] -fn check_candidate_on_multiple_forks() { +fn fragment_chain_length_is_bounded() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -768,31 +744,16 @@ fn check_candidate_on_multiple_forks() { (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), ], }; - // Leaf B - let leaf_b = TestLeaf { - number: 101, - hash: Hash::from_low_u64_be(131), - para_data: vec![ - (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), - (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), - ], - }; - // Leaf C - let leaf_c = TestLeaf { - number: 102, - hash: Hash::from_low_u64_be(132), - para_data: vec![ - (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), - (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), - ], - }; - // Activate leaves. 
- activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; - activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_a, + &test_state, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3 }, + ) + .await; - // Candidate on leaf A. + // Candidates A, B and C form a chain. let (candidate_a, pvd_a) = make_candidate( leaf_a.hash, leaf_a.number, @@ -801,56 +762,59 @@ fn check_candidate_on_multiple_forks() { HeadData(vec![1]), test_state.validation_code_hash, ); - let candidate_hash_a = candidate_a.hash(); - let response_a = vec![(leaf_a.hash, vec![0])]; - - // Candidate on leaf B. let (candidate_b, pvd_b) = make_candidate( - leaf_b.hash, - leaf_b.number, + leaf_a.hash, + leaf_a.number, 1.into(), - HeadData(vec![3, 4, 5]), HeadData(vec![1]), + HeadData(vec![2]), test_state.validation_code_hash, ); - let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_b.hash, vec![0])]; - - // Candidate on leaf C. let (candidate_c, pvd_c) = make_candidate( - leaf_c.hash, - leaf_c.number, + leaf_a.hash, + leaf_a.number, 1.into(), - HeadData(vec![5, 6, 7]), - HeadData(vec![1]), + HeadData(vec![2]), + HeadData(vec![3]), test_state.validation_code_hash, ); - let candidate_hash_c = candidate_c.hash(); - let response_c = vec![(leaf_c.hash, vec![0])]; - // Introduce candidates on all three leaves. - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; + // Introduce candidates A and B. Since max depth is 1, only these two will be allowed. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) + .await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; // Check candidate tree membership. - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a, response_a).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_c, response_c).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![(candidate_a.hash(), leaf_a.hash), (candidate_b.hash(), leaf_a.hash)], + ) + .await; + + // Introducing C will fail. + introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone()) + .await; virtual_overseer }); - assert_eq!(view.active_leaves.len(), 3); + assert_eq!(view.active_leaves.len(), 1); assert_eq!(view.candidate_storage.len(), 2); - // Three parents and three candidates on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (3, 3)); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } -// Backs some candidates and tests `GetBackableCandidates` when requesting a single candidate. 
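For intuition on the bound exercised by `fragment_chain_length_is_bounded` above: `max_candidate_depth` is a zero-based depth index, so a value of 1 admits candidates at depths 0 and 1 (a chain of at most two), which is why introducing C fails. A toy restatement of that rule, assuming this reading of the parameter; the helper below is illustrative only and not part of the subsystem:

```rust
/// A new candidate would sit at depth `chain_len`, so it fits only while
/// `chain_len <= max_candidate_depth` (hypothetical helper, assumed semantics).
fn fits_depth_bound(chain_len: usize, max_candidate_depth: usize) -> bool {
    chain_len <= max_candidate_depth
}

fn main() {
    assert!(fits_depth_bound(0, 1)); // candidate A at depth 0
    assert!(fits_depth_bound(1, 1)); // candidate B at depth 1
    assert!(!fits_depth_bound(2, 1)); // candidate C is rejected
}
```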
#[test] -fn check_backable_query_single_candidate() { +fn unconnected_candidate_count_is_bounded() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -862,54 +826,534 @@ fn check_backable_query_single_candidate() { (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), ], }; - // Activate leaves. - activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_a, + &test_state, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3 }, + ) + .await; - // Candidate A + // Candidates A, B and C are all potential candidates but don't form a chain. let (candidate_a, pvd_a) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), - HeadData(vec![1, 2, 3]), HeadData(vec![1]), + HeadData(vec![2]), test_state.validation_code_hash, ); - let candidate_hash_a = candidate_a.hash(); - - // Candidate B - let (mut candidate_b, pvd_b) = make_candidate( + let (candidate_b, pvd_b) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), - HeadData(vec![1]), - HeadData(vec![2]), + HeadData(vec![3]), + HeadData(vec![4]), + test_state.validation_code_hash, + ); + let (candidate_c, pvd_c) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![4]), + HeadData(vec![5]), test_state.validation_code_hash, ); - // Set a field to make this candidate unique. - candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000); - let candidate_hash_b = candidate_b.hash(); - - // Introduce candidates. - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - // Should not get any backable candidates. - get_backable_candidates( + // Introduce candidates A and B. Although max depth is 1 (which should allow for two + // candidates), only 1 is allowed, because the last candidate must be a connected candidate. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; + introduce_seconded_candidate_failed( &mut virtual_overseer, - &leaf_a, + candidate_b.clone(), + pvd_b.clone(), + ) + .await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + + // Check candidate tree membership. Should be empty. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + + // Introducing C will also fail. + introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone()) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); +} + +// Send some candidates, check if the candidate won't be found once its relay parent leaves the +// view. 
+#[test] +fn introduce_candidate_parent_leaving_view() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), + (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), + ], + }; + // Leaf C + let leaf_c = TestLeaf { + number: 102, + hash: Hash::from_low_u64_be(132), + para_data: vec![ + (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), + (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; + + // Candidate A1 + let (candidate_a1, pvd_a1) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a1 = candidate_a1.hash(); + + // Candidate A2 + let (candidate_a2, pvd_a2) = make_candidate( + leaf_a.hash, + leaf_a.number, + 2.into(), + HeadData(vec![2, 3, 4]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let candidate_hash_a2 = candidate_a2.hash(); + + // Candidate B + let (candidate_b, pvd_b) = make_candidate( + leaf_b.hash, + leaf_b.number, + 1.into(), + HeadData(vec![3, 4, 5]), + HeadData(vec![3]), + test_state.validation_code_hash, + ); + let candidate_hash_b = candidate_b.hash(); + let response_b = vec![(candidate_hash_b, leaf_b.hash)]; + + // Candidate C + let (candidate_c, pvd_c) = make_candidate( + leaf_c.hash, + leaf_c.number, + 2.into(), + HeadData(vec![6, 7, 8]), + HeadData(vec![4]), + test_state.validation_code_hash, + ); + let candidate_hash_c = candidate_c.hash(); + let response_c = vec![(candidate_hash_c, leaf_c.hash)]; + + // Introduce candidates. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a1.clone(), pvd_a1).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a2.clone(), pvd_a2).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a1, candidate_hash_a1).await; + back_candidate(&mut virtual_overseer, &candidate_a2, candidate_hash_a2).await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + back_candidate(&mut virtual_overseer, &candidate_c, candidate_hash_c).await; + + // Deactivate leaf A. + deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; + + // Candidates A1 and A2 should be gone. Candidates B and C should remain. 
+ get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + response_b, + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 2.into(), + Ancestors::default(), + 5, + response_c.clone(), + ) + .await; + + // Deactivate leaf B. + deactivate_leaf(&mut virtual_overseer, leaf_b.hash).await; + + // Candidate B should be gone, C should remain. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 2.into(), + Ancestors::default(), + 5, + response_c, + ) + .await; + + // Deactivate leaf C. + deactivate_leaf(&mut virtual_overseer, leaf_c.hash).await; + + // Candidate C should be gone. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 0); + assert_eq!(view.candidate_storage.len(), 0); +} + +// Introduce a candidate to multiple forks, see how the membership is returned. +#[test] +fn introduce_candidate_on_multiple_forks() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(99, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), + ], + }; + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: get_parent_hash(leaf_b.hash), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + + // Candidate built on leaf A. + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + let response_a = vec![(candidate_hash_a, leaf_a.hash)]; + + // Introduce candidate. Should be present on both leaves A and B. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; + + // Check candidate tree membership.
+ get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + response_a.clone(), + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + response_a.clone(), + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 2); + assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); +} + +#[test] +fn unconnected_candidates_become_connected() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidates A, B, C and D all form a chain, but we'll first introduce A, C and D. + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 1, - vec![], + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let (candidate_b, pvd_b) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let (candidate_c, pvd_c) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![2]), + HeadData(vec![3]), + test_state.validation_code_hash, + ); + let (candidate_d, pvd_d) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![3]), + HeadData(vec![4]), + test_state.validation_code_hash, + ); + + // Introduce candidates A, C and D. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_d.clone(), pvd_d.clone()) + .await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + back_candidate(&mut virtual_overseer, &candidate_c, candidate_c.hash()).await; + back_candidate(&mut virtual_overseer, &candidate_d, candidate_d.hash()).await; + + // Check candidate tree membership. Only A should be returned. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![(candidate_a.hash(), leaf_a.hash)], + ) + .await; + + // Introduce B and check membership. Full chain should be returned.
+ introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) + .await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![ + (candidate_a.hash(), leaf_a.hash), + (candidate_b.hash(), leaf_a.hash), + (candidate_c.hash(), leaf_a.hash), + (candidate_d.hash(), leaf_a.hash), + ], ) .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); +} + +// Backs some candidates and tests `GetBackableCandidates` when requesting a single candidate. +#[test] +fn check_backable_query_single_candidate() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidate A + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + + // Candidate B + let (mut candidate_b, pvd_b) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + // Set a field to make this candidate unique. + candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000); + let candidate_hash_b = candidate_b.hash(); + + // Introduce candidates. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + + // Should not get any backable candidates. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), vec![candidate_hash_a].into_iter().collect(), - 0, + 1, vec![], ) .await; @@ -917,23 +1361,17 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - Ancestors::new(), + vec![candidate_hash_a].into_iter().collect(), 0, vec![], ) .await; - - // Second candidates. - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; - - // Should not get any backable candidates. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 1, + Ancestors::new(), + 0, vec![], ) .await; @@ -1019,392 +1457,327 @@ fn check_backable_query_multiple_candidates() { // Set a field to make this candidate unique. 
candidate.descriptor.para_head = Hash::from_low_u64_le($index); let candidate_hash = candidate.hash(); - introduce_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await; - second_candidate(&mut $virtual_overseer, candidate.clone()).await; + introduce_seconded_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await; back_candidate(&mut $virtual_overseer, &candidate, candidate_hash).await; (candidate, candidate_hash) }}; } - // Parachain 1 looks like this: - // +---A----+ - // | | - // +----B---+ C - // | | | | - // D E F H - // | | - // G I - // | - // J - { - let test_state = TestState::default(); - let view = test_harness(|mut virtual_overseer| async move { - // Leaf A - let leaf_a = TestLeaf { - number: 100, - hash: Hash::from_low_u64_be(130), - para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), - (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), - ], - }; - - // Activate leaves. - activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - - // Candidate A - let (candidate_a, pvd_a) = make_candidate( - leaf_a.hash, - leaf_a.number, - 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![1]), - test_state.validation_code_hash, - ); - let candidate_hash_a = candidate_a.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - - let (candidate_b, candidate_hash_b) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 2); - let (candidate_c, candidate_hash_c) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 3); - let (_candidate_d, candidate_hash_d) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 4); - let (_candidate_e, candidate_hash_e) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 5); - let (candidate_f, candidate_hash_f) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 6); - let (_candidate_g, candidate_hash_g) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_f, 7); - let (candidate_h, candidate_hash_h) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 8); - let (candidate_i, candidate_hash_i) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_h, 9); - let (_candidate_j, candidate_hash_j) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); - - // Should not get any backable candidates for the other para. - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 2.into(), - Ancestors::new(), - 1, - vec![], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 2.into(), - Ancestors::new(), - 5, - vec![], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 2.into(), - vec![candidate_hash_a].into_iter().collect(), - 1, - vec![], - ) - .await; - - // Test various scenarios with various counts. 
- - // empty required_path - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - Ancestors::new(), - 1, - vec![(candidate_hash_a, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - Ancestors::new(), - 4, - vec![ - (candidate_hash_a, leaf_a.hash), - (candidate_hash_b, leaf_a.hash), - (candidate_hash_f, leaf_a.hash), - (candidate_hash_g, leaf_a.hash), - ], - ) - .await; - } - - // required path of 1 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 1, - vec![(candidate_hash_b, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 3, - vec![ - (candidate_hash_b, leaf_a.hash), - (candidate_hash_f, leaf_a.hash), - (candidate_hash_g, leaf_a.hash), - ], - ) - .await; + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 5..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a].into_iter().collect(), - count, - vec![ - (candidate_hash_c, leaf_a.hash), - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], - ) - .await; - } - } + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - // required path of 2 and higher - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_i, candidate_hash_h, candidate_hash_c] - .into_iter() - .collect(), - 1, - vec![(candidate_hash_j, leaf_a.hash)], - ) - .await; + // Candidate A + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), - 1, - vec![(candidate_hash_d, leaf_a.hash)], - ) - .await; + let (candidate_b, candidate_hash_b) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 2); + let (candidate_c, candidate_hash_c) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 3); + let (_candidate_d, candidate_hash_d) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 4); - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 4..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c].into_iter().collect(), - count, - vec![ - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], - ) - .await; - } - } + // Should not get any backable candidates for the other para. 
+ get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::new(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), + 1, + vec![], + ) + .await; - // No more candidates in any chain. - { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_b, candidate_hash_e] - .into_iter() - .collect(), - count, - vec![], - ) - .await; - - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ] - .into_iter() - .collect(), - count, - vec![], - ) - .await; - } - } + // Test various scenarios with various counts. - // Wrong paths. + // empty ancestors + { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b].into_iter().collect(), + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) .await; + for count in 4..10 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::new(), + count, + vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_c, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ], + ) + .await; + } + } + + // ancestors of size 1 + { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_f].into_iter().collect(), - 3, - vec![ - (candidate_hash_a, leaf_a.hash), - (candidate_hash_b, leaf_a.hash), - (candidate_hash_d, leaf_a.hash), - ], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_h].into_iter().collect(), - 4, - vec![ - (candidate_hash_c, leaf_a.hash), - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], + vec![candidate_hash_a].into_iter().collect(), + 1, + vec![(candidate_hash_b, leaf_a.hash)], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_e, candidate_hash_h].into_iter().collect(), + vec![candidate_hash_a].into_iter().collect(), 2, - vec![(candidate_hash_a, leaf_a.hash), (candidate_hash_b, leaf_a.hash)], + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash)], ) .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(), - 2, - vec![(candidate_hash_h, leaf_a.hash), (candidate_hash_i, leaf_a.hash)], - ) - .await; + // If the requested count exceeds the largest chain, return the longest + // chain we can get. + for count in 3..10 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a].into_iter().collect(), + count, + vec![ + (candidate_hash_b, leaf_a.hash), + (candidate_hash_c, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ], + ) + .await; + } + } - // Parachain fork. + // ancestor count 2 and higher + { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), vec![candidate_hash_a, candidate_hash_b, candidate_hash_c].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_d, leaf_a.hash)], ) .await; - // Non-existent candidate. 
get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))] - .into_iter() - .collect(), - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + 1, + vec![(candidate_hash_c, leaf_a.hash)], ) .await; - // Requested count is zero. - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - Ancestors::new(), - 0, - vec![], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 0, - vec![], - ) - .await; + // If the requested count exceeds the largest chain, return the longest + // chain we can get. + for count in 3..10 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + count, + vec![(candidate_hash_c, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], + ) + .await; + } + } + + // No more candidates in the chain. + for count in 1..4 { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), - 0, + vec![candidate_hash_a, candidate_hash_b, candidate_hash_c, candidate_hash_d] + .into_iter() + .collect(), + count, vec![], ) .await; + } + + // Wrong paths. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_b].into_iter().collect(), + 1, + vec![(candidate_hash_a, leaf_a.hash)], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_b, candidate_hash_c].into_iter().collect(), + 3, + vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_c, leaf_a.hash), + ], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(), + 2, + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash)], + ) + .await; - virtual_overseer - }); + // Non-existent candidate. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))] + .into_iter() + .collect(), + 2, + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash)], + ) + .await; - assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // 10 candidates and 7 parents on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (7, 10)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); - } + // Requested count is zero. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::new(), + 0, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a].into_iter().collect(), + 0, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + 0, + vec![], + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); + // 4 candidates on para 1. + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } -// Test depth query. 
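The scenarios above pin down the `Ancestors` semantics of `GetBackableCandidates`: the supplied hashes are matched as an unordered set against the prefix of the best backable chain, and up to `count` of the candidates that follow are returned; ancestors that lie off the chain are simply ignored. A minimal standalone sketch of that prefix rule (hypothetical helper name, not the subsystem's actual implementation):

```rust
use std::collections::HashSet;

/// Return up to `count` candidates that follow the longest prefix of `chain`
/// whose members are all contained in the unordered `ancestors` set.
fn backable_after_ancestors<H: Copy + Eq + std::hash::Hash>(
    chain: &[H],
    ancestors: &HashSet<H>,
    count: usize,
) -> Vec<H> {
    let matched = chain.iter().take_while(|h| ancestors.contains(*h)).count();
    chain[matched..].iter().take(count).copied().collect()
}

fn main() {
    // Chain a -> b -> c -> d, mirroring the test above.
    let chain = ["a", "b", "c", "d"];
    // Ancestors {a, b}: the next backable candidates are c and d.
    let acked: HashSet<_> = ["a", "b"].into_iter().collect();
    assert_eq!(backable_after_ancestors(&chain, &acked, 5), vec!["c", "d"]);
    // A wrong path {b}: no prefix matches, so the chain restarts at a.
    let wrong: HashSet<_> = ["b"].into_iter().collect();
    assert_eq!(backable_after_ancestors(&chain, &wrong, 1), vec!["a"]);
}
```

Under this rule the "Wrong paths" and "Non-existent candidate" cases above fall out naturally: an ancestor set that does not cover the chain's start leaves the matched prefix empty, so candidates are returned from the first one in the chain.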
+// Test hypothetical membership query. #[test] -fn check_hypothetical_frontier_query() { +fn check_hypothetical_membership_query() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; // Leaf A let leaf_a = TestLeaf { number: 100, - hash: Hash::from_low_u64_be(130), + hash: get_parent_hash(leaf_b.hash), para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (1.into(), PerParaData::new(98, HeadData(vec![1, 2, 3]))), (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), ], }; // Activate leaves. - activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_a, + &test_state, + AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 }, + ) + .await; + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_b, + &test_state, + AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 }, + ) + .await; + + // Candidates will be valid on both leaves. // Candidate A. let (candidate_a, pvd_a) = make_candidate( @@ -1415,7 +1788,6 @@ fn check_hypothetical_frontier_query() { HeadData(vec![1]), test_state.validation_code_hash, ); - let candidate_hash_a = candidate_a.hash(); // Candidate B. let (candidate_b, pvd_b) = make_candidate( @@ -1426,7 +1798,6 @@ fn check_hypothetical_frontier_query() { HeadData(vec![2]), test_state.validation_code_hash, ); - let candidate_hash_b = candidate_b.hash(); // Candidate C. let (candidate_c, pvd_c) = make_candidate( @@ -1437,127 +1808,99 @@ fn check_hypothetical_frontier_query() { HeadData(vec![3]), test_state.validation_code_hash, ); - let candidate_hash_c = candidate_c.hash(); - // Get hypothetical frontier of candidate A before adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_a, - candidate_a.clone(), - pvd_a.clone(), - leaf_a.hash, - false, - vec![0], - ) - .await; - // Should work with `backed_in_path_only: true`, too. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_a, - candidate_a.clone(), - pvd_a.clone(), - leaf_a.hash, - true, - vec![0], - ) - .await; + // Get hypothetical membership of candidates before adding candidate A. + // Candidate A can be added directly, candidates B and C are potential candidates. + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + get_hypothetical_membership( + &mut virtual_overseer, + candidate.hash(), + candidate, + pvd, + vec![leaf_a.hash, leaf_b.hash], + ) + .await; + } // Add candidate A. - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await; - - // Get frontier of candidate A after adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_a, - candidate_a.clone(), - pvd_a.clone(), - leaf_a.hash, - false, - vec![0], - ) - .await; - - // Get hypothetical frontier of candidate B before adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_b, - candidate_b.clone(), - pvd_b.clone(), - leaf_a.hash, - false, - vec![1], - ) - .await; - - // Add candidate B. 
- introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; - // Get frontier of candidate B after adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_b, - candidate_b, - pvd_b.clone(), - leaf_a.hash, - false, - vec![1], - ) - .await; + // Get membership of candidates after adding A. C is not a potential candidate because we + // may only add one more candidate, which must be a connected candidate. + for (candidate, pvd) in + [(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())] + { + get_hypothetical_membership( + &mut virtual_overseer, + candidate.hash(), + candidate, + pvd, + vec![leaf_a.hash, leaf_b.hash], + ) + .await; + } - // Get hypothetical frontier of candidate C before adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_c, - candidate_c.clone(), - pvd_c.clone(), - leaf_a.hash, - false, - vec![2], - ) - .await; - // Should be empty with `backed_in_path_only` because we haven't backed anything. - get_hypothetical_frontier( + get_hypothetical_membership( &mut virtual_overseer, - candidate_hash_c, + candidate_c.hash(), candidate_c.clone(), pvd_c.clone(), - leaf_a.hash, - true, vec![], ) .await; - // Add candidate C. - introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()).await; + // Candidate D has invalid relay parent. + let (candidate_d, pvd_d) = make_candidate( + Hash::from_low_u64_be(200), + leaf_a.number, + 1.into(), + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_d, pvd_d).await; - // Get frontier of candidate C after adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_c, - candidate_c.clone(), - pvd_c.clone(), - leaf_a.hash, - false, - vec![2], - ) - .await; - // Should be empty with `backed_in_path_only` because we haven't backed anything. - get_hypothetical_frontier( + // Add candidate B. + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) + .await; + + // Get membership of candidates after adding B. + for (candidate, pvd) in + [(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())] + { + get_hypothetical_membership( + &mut virtual_overseer, + candidate.hash(), + candidate, + pvd, + vec![leaf_a.hash, leaf_b.hash], + ) + .await; + } + + get_hypothetical_membership( &mut virtual_overseer, - candidate_hash_c, + candidate_c.hash(), candidate_c.clone(), pvd_c.clone(), - leaf_a.hash, - true, vec![], ) .await; + // Add candidate C. It will fail because we have enough candidates for the configured depth. + introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c).await; + virtual_overseer }); - assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.active_leaves.len(), 2); assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); } #[test] @@ -1618,7 +1961,8 @@ fn check_pvd_query() { .await; // Add candidate A. - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; // Get pvd of candidate A after adding it. 
@@ -1642,7 +1986,7 @@ fn check_pvd_query() { .await; // Add candidate B. - introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone()).await; // Get pvd of candidate B after adding it. get_pvd( @@ -1665,7 +2009,7 @@ fn check_pvd_query() { .await; // Add candidate C. - introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await; // Get pvd of candidate C after adding it. get_pvd( @@ -1849,8 +2193,7 @@ fn persists_pending_availability_candidate() { ); let candidate_hash_b = candidate_b.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; let candidate_a_pending_av = CandidatePendingAvailability { @@ -1874,8 +2217,7 @@ fn persists_pending_availability_candidate() { }; activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; get_backable_candidates( @@ -1942,8 +2284,7 @@ fn backwards_compatible() { ); let candidate_hash_a = candidate_a.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; get_backable_candidates( diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index ec1a4abb3ece0a3dc8a01e090f9bf3302dde3294..d197832126442cd7bf67236395cfcc5800f00153 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -19,7 +19,7 @@ polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } futures-timer = "3.0.2" -fatality = "0.0.6" +fatality = "0.1.1" schnellru = "0.2.1" [dev-dependencies] diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 5cfcb96dc2bc710c42400fa49fa888195c79aa48..fa16b38d28bda4e364c5b6d8b74bb85ca727036d 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -877,7 +877,7 @@ async fn get_block_number_under_construction( } /// Requests backable candidates from Prospective Parachains based on -/// the given ancestors in the fragment tree. The ancestors may not be ordered. +/// the given ancestors in the fragment chain. The ancestors may not be ordered. 
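+/// Unordered here means they are matched as a set against the prefix of the backable chain; see the prospective-parachains tests above for the exact semantics.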
async fn get_backable_candidates( relay_parent: Hash, para_id: ParaId, diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 9666206b1e7dcb053c39353c46c1613f062866be..ba9954a10668e154ee44cccc9888a6e3d3c5ed09 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -25,7 +25,7 @@ tempfile = "3.3.0" thiserror = { workspace = true } tokio = { version = "1.24.2", features = ["fs", "process"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { version = "3.6.12", default-features = false, features = [ "derive", ] } diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index e1ce6e79cb99038bcdd183c32096432e8e8428be..5ad7409cc6c78d16cf292c1bf1c1542417abeb5d 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -14,10 +14,10 @@ cpu-time = "1.0.0" futures = "0.3.30" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.152" -nix = { version = "0.27.1", features = ["resource", "sched"] } +nix = { version = "0.28.0", features = ["resource", "sched"] } thiserror = { workspace = true } -parity-scale-codec = { version = "3.6.1", default-features = false, features = [ +parity-scale-codec = { version = "3.6.12", default-features = false, features = [ "derive", ] } diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 04a620573b2eceb311bdce67006e282778442191..ac90fac4d57ad69db5bcf00df1fb9af15cdbd2e2 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -13,10 +13,10 @@ workspace = true cpu-time = "1.0.0" gum = { package = "tracing-gum", path = "../../../gum" } cfg-if = "1.0" -nix = { version = "0.27.1", features = ["process", "resource", "sched"] } +nix = { version = "0.28.0", features = ["process", "resource", "sched"] } libc = "0.2.152" -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } polkadot-node-core-pvf-common = { path = "../common" } polkadot-parachain-primitives = { path = "../../../../parachain" } diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 9ecf1c8af501130822fed983f4105c23f01c89a6..1850a204890720e697b9e505d8656597dd946844 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -18,9 +18,9 @@ rayon = "1.5.1" tracking-allocator = { package = "staging-tracking-allocator", path = "../../../tracking-allocator" } tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tikv-jemallocator = { version = "0.5.0", optional = true } -nix = { version = "0.27.1", features = ["process", "resource", "sched"] } +nix = { version = "0.28.0", features = ["process", "resource", "sched"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } polkadot-node-core-pvf-common = { path = "../common" } polkadot-primitives = { path = "../../../../primitives" } diff --git a/polkadot/node/gum/src/lib.rs b/polkadot/node/gum/src/lib.rs index dad5887af224382ec384b4d354fd74fb2d9c31f5..f78e20cdecfca214b4d936d55cfc8ea5bdd60e15 100644 --- a/polkadot/node/gum/src/lib.rs +++ 
b/polkadot/node/gum/src/lib.rs @@ -40,7 +40,7 @@ //! //! ### Log levels //! -//! All of the the [`tracing` macros](https://docs.rs/tracing/latest/tracing/index.html#macros) are available. +//! All of the [`tracing` macros](https://docs.rs/tracing/latest/tracing/index.html#macros) are available. //! In decreasing order of priority they are: //! //! - `error!` diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml index bee725c0876f0cd57692d754738219dd731f8d62..f879f9550d014c0571558f5238f13ce9dad5fe4e 100644 --- a/polkadot/node/jaeger/Cargo.toml +++ b/polkadot/node/jaeger/Cargo.toml @@ -21,4 +21,4 @@ sp-core = { path = "../../../substrate/primitives/core" } thiserror = { workspace = true } tokio = "1.37" log = { workspace = true, default-features = true } -parity-scale-codec = { version = "3.6.1", default-features = false } +parity-scale-codec = { version = "3.6.12", default-features = false } diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs index 68fa57e2ca14f859b7bcfcf02884deaa34d7d156..fcee8be9a50f504b666ea796c5ac9a63509da0d8 100644 --- a/polkadot/node/jaeger/src/spans.rs +++ b/polkadot/node/jaeger/src/spans.rs @@ -85,7 +85,9 @@ use parity_scale_codec::Encode; use polkadot_node_primitives::PoV; -use polkadot_primitives::{BlakeTwo256, CandidateHash, Hash, HashT, Id as ParaId, ValidatorIndex}; +use polkadot_primitives::{ + BlakeTwo256, CandidateHash, ChunkIndex, Hash, HashT, Id as ParaId, ValidatorIndex, +}; use sc_network_types::PeerId; use std::{fmt, sync::Arc}; @@ -338,8 +340,8 @@ impl Span { } #[inline(always)] - pub fn with_chunk_index(self, chunk_index: u32) -> Self { - self.with_string_tag("chunk-index", chunk_index) + pub fn with_chunk_index(self, chunk_index: ChunkIndex) -> Self { + self.with_string_tag("chunk-index", &chunk_index.0) } #[inline(always)] diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index fbf0abf829e136a139f3de0db3aef216b2e49679..e3a53cc6df1b435d3e2361d388624252c2df9d3d 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -21,7 +21,7 @@ sc-cli = { path = "../../../substrate/client/cli" } substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } sc-tracing = { path = "../../../substrate/client/tracing" } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } primitives = { package = "polkadot-primitives", path = "../../primitives" } bs58 = { version = "0.5.0", features = ["alloc"] } log = { workspace = true, default-features = true } diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index b5636203f166efdbc2d871df778d148c0f9ce3e4..01b208421d793965422b76bd9d6e2a77221838ab 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -12,20 +12,21 @@ workspace = true [dependencies] futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } -parity-scale-codec = { version = "3.6.1", features = ["std"] } +parity-scale-codec = { version = "3.6.12", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } polkadot-erasure-coding = { path = "../../../erasure-coding" } polkadot-node-network-protocol = { path = "../protocol" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } 
polkadot-node-primitives = { path = "../../primitives" } +sc-network = { path = "../../../../substrate/client/network" } sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } sp-keystore = { path = "../../../../substrate/primitives/keystore" } thiserror = { workspace = true } rand = "0.8.5" derive_more = "0.99.17" schnellru = "0.2.1" -fatality = "0.0.6" +fatality = "0.1.1" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } @@ -36,6 +37,7 @@ sc-network = { path = "../../../../substrate/client/network" } futures-timer = "3.0.2" assert_matches = "1.4.0" polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +rstest = "0.18.2" polkadot-subsystem-bench = { path = "../../subsystem-bench" } diff --git a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs index 5e3072be3a8c13d08bfec7fc5840320fcc33c6c5..6083a90e48126bbf5747880835f6753176be4559 100644 --- a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs @@ -53,11 +53,7 @@ fn main() -> Result<(), String> { polkadot_subsystem_bench::availability::TestDataAvailability::Write, false, ); - env.runtime().block_on(benchmark_availability_write( - "data_availability_write", - &mut env, - &state, - )) + env.runtime().block_on(benchmark_availability_write(&mut env, &state)) }) .collect(); println!("\rDone!{}", " ".repeat(BENCH_COUNT)); @@ -77,9 +73,9 @@ fn main() -> Result<(), String> { ("Sent to peers", 18479.9000, 0.001), ])); messages.extend(average_usage.check_cpu_usage(&[ - ("availability-distribution", 0.0123, 0.1), - ("availability-store", 0.1597, 0.1), - ("bitfield-distribution", 0.0223, 0.1), + ("availability-distribution", 0.0127, 0.1), + ("availability-store", 0.1626, 0.1), + ("bitfield-distribution", 0.0224, 0.1), ])); if messages.is_empty() { diff --git a/polkadot/node/network/availability-distribution/src/error.rs b/polkadot/node/network/availability-distribution/src/error.rs index c547a1abbc27604862cebef2790223a0b54fac37..72a809dd114080f185c8aee45f87ff8655c236a4 100644 --- a/polkadot/node/network/availability-distribution/src/error.rs +++ b/polkadot/node/network/availability-distribution/src/error.rs @@ -49,7 +49,7 @@ pub enum Error { #[fatal] #[error("Oneshot for receiving response from Chain API got cancelled")] - ChainApiSenderDropped(#[source] oneshot::Canceled), + ChainApiSenderDropped(#[from] oneshot::Canceled), #[fatal] #[error("Retrieving response from Chain API unexpectedly failed with error: {0}")] @@ -82,6 +82,9 @@ pub enum Error { #[error("Given validator index could not be found in current session")] InvalidValidatorIndex, + + #[error("Erasure coding error: {0}")] + ErasureCoding(#[from] polkadot_erasure_coding::Error), } /// General result abbreviation type alias. @@ -104,7 +107,8 @@ pub fn log_error( JfyiError::InvalidValidatorIndex | JfyiError::NoSuchCachedSession { .. 
} | JfyiError::QueryAvailableDataResponseChannel(_) | - JfyiError::QueryChunkResponseChannel(_) => gum::warn!(target: LOG_TARGET, error = %jfyi, ctx), + JfyiError::QueryChunkResponseChannel(_) | + JfyiError::ErasureCoding(_) => gum::warn!(target: LOG_TARGET, error = %jfyi, ctx), JfyiError::FetchPoV(_) | JfyiError::SendResponse | JfyiError::NoSuchPoV | diff --git a/polkadot/node/network/availability-distribution/src/lib.rs b/polkadot/node/network/availability-distribution/src/lib.rs index c62ce1dd981a9ce8c4fe6e2b7c451a401e609b25..ec2c01f99b0186f80a42ad635426ace626139ffc 100644 --- a/polkadot/node/network/availability-distribution/src/lib.rs +++ b/polkadot/node/network/availability-distribution/src/lib.rs @@ -18,7 +18,9 @@ use futures::{future::Either, FutureExt, StreamExt, TryFutureExt}; use sp_keystore::KeystorePtr; -use polkadot_node_network_protocol::request_response::{v1, IncomingRequestReceiver}; +use polkadot_node_network_protocol::request_response::{ + v1, v2, IncomingRequestReceiver, ReqProtocolNames, +}; use polkadot_node_subsystem::{ jaeger, messages::AvailabilityDistributionMessage, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, @@ -41,7 +43,7 @@ mod pov_requester; /// Responding to erasure chunk requests: mod responder; -use responder::{run_chunk_receiver, run_pov_receiver}; +use responder::{run_chunk_receivers, run_pov_receiver}; mod metrics; /// Prometheus `Metrics` for availability distribution. @@ -58,6 +60,8 @@ pub struct AvailabilityDistributionSubsystem { runtime: RuntimeInfo, /// Receivers to receive messages from. recvs: IncomingRequestReceivers, + /// Mapping of the req-response protocols to the full protocol names. + req_protocol_names: ReqProtocolNames, /// Prometheus metrics. metrics: Metrics, } @@ -66,8 +70,10 @@ pub struct AvailabilityDistributionSubsystem { pub struct IncomingRequestReceivers { /// Receiver for incoming PoV requests. pub pov_req_receiver: IncomingRequestReceiver, - /// Receiver for incoming availability chunk requests. - pub chunk_req_receiver: IncomingRequestReceiver, + /// Receiver for incoming v1 availability chunk requests. + pub chunk_req_v1_receiver: IncomingRequestReceiver, + /// Receiver for incoming v2 availability chunk requests. + pub chunk_req_v2_receiver: IncomingRequestReceiver, } #[overseer::subsystem(AvailabilityDistribution, error=SubsystemError, prefix=self::overseer)] @@ -85,18 +91,27 @@ impl AvailabilityDistributionSubsystem { #[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)] impl AvailabilityDistributionSubsystem { /// Create a new instance of the availability distribution. - pub fn new(keystore: KeystorePtr, recvs: IncomingRequestReceivers, metrics: Metrics) -> Self { + pub fn new( + keystore: KeystorePtr, + recvs: IncomingRequestReceivers, + req_protocol_names: ReqProtocolNames, + metrics: Metrics, + ) -> Self { let runtime = RuntimeInfo::new(Some(keystore)); - Self { runtime, recvs, metrics } + Self { runtime, recvs, req_protocol_names, metrics } } /// Start processing work as passed on from the Overseer. 
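A side note on the `error.rs` hunk above: switching `ChainApiSenderDropped` from `#[source]` to `#[from]`, and giving the new `ErasureCoding` variant `#[from]` as well, lets `?` convert those errors without explicit `map_err` calls. A minimal sketch of the difference, assuming plain `thiserror` and `futures` (the real enum uses `fatality`, which layers fatal/informational tagging over the same attribute syntax):

```rust
use futures::channel::oneshot;
use thiserror::Error;

#[derive(Debug, Error)]
enum Error {
    // `#[from]` implies `#[source]` and additionally generates
    // `impl From<oneshot::Canceled> for Error`, so `?` converts for us.
    #[error("Oneshot for receiving response from Chain API got cancelled")]
    ChainApiSenderDropped(#[from] oneshot::Canceled),
}

async fn receive(rx: oneshot::Receiver<u32>) -> Result<u32, Error> {
    // With only `#[source]`, this line would instead need
    // `rx.await.map_err(Error::ChainApiSenderDropped)?`.
    Ok(rx.await?)
}

fn main() {
    let (tx, rx) = oneshot::channel::<u32>();
    drop(tx); // cancel the sender, so the receiver errors out
    let res = futures::executor::block_on(receive(rx));
    assert!(matches!(res, Err(Error::ChainApiSenderDropped(_))));
}
```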
async fn run(self, mut ctx: Context) -> std::result::Result<(), FatalError> { - let Self { mut runtime, recvs, metrics } = self; + let Self { mut runtime, recvs, metrics, req_protocol_names } = self; let mut spans: HashMap = HashMap::new(); - let IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver } = recvs; - let mut requester = Requester::new(metrics.clone()).fuse(); + let IncomingRequestReceivers { + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + } = recvs; + let mut requester = Requester::new(req_protocol_names, metrics.clone()).fuse(); let mut warn_freq = gum::Freq::new(); { @@ -109,7 +124,13 @@ impl AvailabilityDistributionSubsystem { ctx.spawn( "chunk-receiver", - run_chunk_receiver(sender, chunk_req_receiver, metrics.clone()).boxed(), + run_chunk_receivers( + sender, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + metrics.clone(), + ) + .boxed(), ) .map_err(FatalError::SpawnTask)?; } diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs index f478defcaa96530d695eace0acad11057609d8e9..7bd36709bc5f3265a8e3c6f88f3ad913766a2000 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs @@ -22,10 +22,12 @@ use futures::{ FutureExt, SinkExt, }; +use parity_scale_codec::Decode; use polkadot_erasure_coding::branch_hash; use polkadot_node_network_protocol::request_response::{ outgoing::{OutgoingRequest, Recipient, RequestError, Requests}, - v1::{ChunkFetchingRequest, ChunkFetchingResponse}, + v1::{self, ChunkResponse}, + v2, }; use polkadot_node_primitives::ErasureChunk; use polkadot_node_subsystem::{ @@ -34,9 +36,10 @@ use polkadot_node_subsystem::{ overseer, }; use polkadot_primitives::{ - AuthorityDiscoveryId, BlakeTwo256, CandidateHash, GroupIndex, Hash, HashT, OccupiedCore, - SessionIndex, + AuthorityDiscoveryId, BlakeTwo256, CandidateHash, ChunkIndex, GroupIndex, Hash, HashT, + OccupiedCore, SessionIndex, }; +use sc_network::ProtocolName; use crate::{ error::{FatalError, Result}, @@ -111,8 +114,8 @@ struct RunningTask { /// This vector gets drained during execution of the task (it will be empty afterwards). group: Vec, - /// The request to send. - request: ChunkFetchingRequest, + /// The request to send. We can store it as either v1 or v2, they have the same payload. + request: v2::ChunkFetchingRequest, /// Root hash, for verifying the chunks validity. erasure_root: Hash, @@ -128,6 +131,16 @@ struct RunningTask { /// Span tracking the fetching of this chunk. span: jaeger::Span, + + /// Expected chunk index. We'll validate that the remote did send us the correct chunk (only + /// important for v2 requests). + chunk_index: ChunkIndex, + + /// Full protocol name for ChunkFetchingV1. + req_v1_protocol_name: ProtocolName, + + /// Full protocol name for ChunkFetchingV2. 
+ req_v2_protocol_name: ProtocolName, } impl FetchTaskConfig { @@ -140,13 +153,17 @@ impl FetchTaskConfig { sender: mpsc::Sender, metrics: Metrics, session_info: &SessionInfo, + chunk_index: ChunkIndex, span: jaeger::Span, + req_v1_protocol_name: ProtocolName, + req_v2_protocol_name: ProtocolName, ) -> Self { let span = span .child("fetch-task-config") .with_trace_id(core.candidate_hash) .with_string_tag("leaf", format!("{:?}", leaf)) .with_validator_index(session_info.our_index) + .with_chunk_index(chunk_index) .with_uint_tag("group-index", core.group_responsible.0 as u64) .with_relay_parent(core.candidate_descriptor.relay_parent) .with_string_tag("pov-hash", format!("{:?}", core.candidate_descriptor.pov_hash)) @@ -165,7 +182,7 @@ impl FetchTaskConfig { group: session_info.validator_groups.get(core.group_responsible.0 as usize) .expect("The responsible group of a candidate should be available in the corresponding session. qed.") .clone(), - request: ChunkFetchingRequest { + request: v2::ChunkFetchingRequest { candidate_hash: core.candidate_hash, index: session_info.our_index, }, @@ -174,6 +191,9 @@ impl FetchTaskConfig { metrics, sender, span, + chunk_index, + req_v1_protocol_name, + req_v2_protocol_name }; FetchTaskConfig { live_in, prepared_running: Some(prepared_running) } } @@ -271,7 +291,8 @@ impl RunningTask { count += 1; let _chunk_fetch_span = span .child("fetch-chunk-request") - .with_chunk_index(self.request.index.0) + .with_validator_index(self.request.index) + .with_chunk_index(self.chunk_index) .with_stage(jaeger::Stage::AvailabilityDistribution); // Send request: let resp = match self @@ -296,11 +317,12 @@ impl RunningTask { drop(_chunk_fetch_span); let _chunk_recombine_span = span .child("recombine-chunk") - .with_chunk_index(self.request.index.0) + .with_validator_index(self.request.index) + .with_chunk_index(self.chunk_index) .with_stage(jaeger::Stage::AvailabilityDistribution); let chunk = match resp { - ChunkFetchingResponse::Chunk(resp) => resp.recombine_into_chunk(&self.request), - ChunkFetchingResponse::NoSuchChunk => { + Some(chunk) => chunk, + None => { gum::debug!( target: LOG_TARGET, validator = ?validator, @@ -320,11 +342,12 @@ impl RunningTask { drop(_chunk_recombine_span); let _chunk_validate_and_store_span = span .child("validate-and-store-chunk") - .with_chunk_index(self.request.index.0) + .with_validator_index(self.request.index) + .with_chunk_index(self.chunk_index) .with_stage(jaeger::Stage::AvailabilityDistribution); // Data genuine? - if !self.validate_chunk(&validator, &chunk) { + if !self.validate_chunk(&validator, &chunk, self.chunk_index) { bad_validators.push(validator); continue } @@ -350,7 +373,7 @@ impl RunningTask { validator: &AuthorityDiscoveryId, network_error_freq: &mut gum::Freq, canceled_freq: &mut gum::Freq, - ) -> std::result::Result { + ) -> std::result::Result, TaskError> { gum::trace!( target: LOG_TARGET, origin = ?validator, @@ -362,9 +385,13 @@ impl RunningTask { "Starting chunk request", ); - let (full_request, response_recv) = - OutgoingRequest::new(Recipient::Authority(validator.clone()), self.request); - let requests = Requests::ChunkFetchingV1(full_request); + let (full_request, response_recv) = OutgoingRequest::new_with_fallback( + Recipient::Authority(validator.clone()), + self.request, + // Fallback to v1, for backwards compatibility. 
+ v1::ChunkFetchingRequest::from(self.request), + ); + let requests = Requests::ChunkFetching(full_request); self.sender .send(FromFetchTask::Message( @@ -378,7 +405,58 @@ impl RunningTask { .map_err(|_| TaskError::ShuttingDown)?; match response_recv.await { - Ok(resp) => Ok(resp), + Ok((bytes, protocol)) => match protocol { + _ if protocol == self.req_v2_protocol_name => + match v2::ChunkFetchingResponse::decode(&mut &bytes[..]) { + Ok(chunk_response) => Ok(Option::::from(chunk_response)), + Err(e) => { + gum::warn!( + target: LOG_TARGET, + origin = ?validator, + relay_parent = ?self.relay_parent, + group_index = ?self.group_index, + session_index = ?self.session_index, + chunk_index = ?self.request.index, + candidate_hash = ?self.request.candidate_hash, + err = ?e, + "Peer sent us invalid erasure chunk data (v2)" + ); + Err(TaskError::PeerError) + }, + }, + _ if protocol == self.req_v1_protocol_name => + match v1::ChunkFetchingResponse::decode(&mut &bytes[..]) { + Ok(chunk_response) => Ok(Option::::from(chunk_response) + .map(|c| c.recombine_into_chunk(&self.request.into()))), + Err(e) => { + gum::warn!( + target: LOG_TARGET, + origin = ?validator, + relay_parent = ?self.relay_parent, + group_index = ?self.group_index, + session_index = ?self.session_index, + chunk_index = ?self.request.index, + candidate_hash = ?self.request.candidate_hash, + err = ?e, + "Peer sent us invalid erasure chunk data" + ); + Err(TaskError::PeerError) + }, + }, + _ => { + gum::warn!( + target: LOG_TARGET, + origin = ?validator, + relay_parent = ?self.relay_parent, + group_index = ?self.group_index, + session_index = ?self.session_index, + chunk_index = ?self.request.index, + candidate_hash = ?self.request.candidate_hash, + "Peer sent us invalid erasure chunk data - unknown protocol" + ); + Err(TaskError::PeerError) + }, + }, Err(RequestError::InvalidResponse(err)) => { gum::warn!( target: LOG_TARGET, @@ -427,7 +505,23 @@ impl RunningTask { } } - fn validate_chunk(&self, validator: &AuthorityDiscoveryId, chunk: &ErasureChunk) -> bool { + fn validate_chunk( + &self, + validator: &AuthorityDiscoveryId, + chunk: &ErasureChunk, + expected_chunk_index: ChunkIndex, + ) -> bool { + if chunk.index != expected_chunk_index { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?self.request.candidate_hash, + origin = ?validator, + chunk_index = ?chunk.index, + expected_chunk_index = ?expected_chunk_index, + "Validator sent the wrong chunk", + ); + return false + } let anticipated_hash = match branch_hash(&self.erasure_root, chunk.proof(), chunk.index.0 as usize) { Ok(hash) => hash, @@ -459,6 +553,7 @@ impl RunningTask { AvailabilityStoreMessage::StoreChunk { candidate_hash: self.request.candidate_hash, chunk, + validator_index: self.request.index, tx, } .into(), diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs index a5a81082e39ad8897845363960120956b1599a95..25fae37f725aaa5a89f6c4ac9d8d019d929c362b 100644 --- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs @@ -24,21 +24,26 @@ use futures::{ task::{noop_waker, Context, Poll}, Future, FutureExt, StreamExt, }; +use rstest::rstest; use sc_network::{self as network, ProtocolName}; use sp_keyring::Sr25519Keyring; -use polkadot_node_network_protocol::request_response::{v1, Recipient}; +use 
polkadot_node_network_protocol::request_response::{ + v1::{self, ChunkResponse}, + Protocol, Recipient, ReqProtocolNames, +}; use polkadot_node_primitives::{BlockData, PoV, Proof}; use polkadot_node_subsystem::messages::AllMessages; -use polkadot_primitives::{CandidateHash, ValidatorIndex}; +use polkadot_primitives::{CandidateHash, ChunkIndex, ValidatorIndex}; use super::*; use crate::{metrics::Metrics, tests::mock::get_valid_chunk_data}; #[test] fn task_can_be_canceled() { - let (task, _rx) = get_test_running_task(); + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + let (task, _rx) = get_test_running_task(&req_protocol_names, 0.into(), 0.into()); let (handle, kill) = oneshot::channel(); std::mem::drop(handle); let running_task = task.run(kill); @@ -49,96 +54,130 @@ fn task_can_be_canceled() { } /// Make sure task won't accept a chunk that is invalid. -#[test] -fn task_does_not_accept_invalid_chunk() { - let (mut task, rx) = get_test_running_task(); +#[rstest] +#[case(Protocol::ChunkFetchingV1)] +#[case(Protocol::ChunkFetchingV2)] +fn task_does_not_accept_invalid_chunk(#[case] protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + let chunk_index = ChunkIndex(1); + let validator_index = ValidatorIndex(0); + let (mut task, rx) = get_test_running_task(&req_protocol_names, validator_index, chunk_index); let validators = vec![Sr25519Keyring::Alice.public().into()]; task.group = validators; + let protocol_name = req_protocol_names.get_name(protocol); let test = TestRun { chunk_responses: { - let mut m = HashMap::new(); - m.insert( + [( Recipient::Authority(Sr25519Keyring::Alice.public().into()), - ChunkFetchingResponse::Chunk(v1::ChunkResponse { - chunk: vec![1, 2, 3], - proof: Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(), - }), - ); - m + get_response( + protocol, + protocol_name.clone(), + Some(( + vec![1, 2, 3], + Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(), + chunk_index, + )), + ), + )] + .into_iter() + .collect() }, valid_chunks: HashSet::new(), + req_protocol_names, }; test.run(task, rx); } -#[test] -fn task_stores_valid_chunk() { - let (mut task, rx) = get_test_running_task(); +#[rstest] +#[case(Protocol::ChunkFetchingV1)] +#[case(Protocol::ChunkFetchingV2)] +fn task_stores_valid_chunk(#[case] protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + // In order for protocol version 1 to work, the chunk index needs to be equal to the validator + // index. 
+ let chunk_index = ChunkIndex(0); + let validator_index = + if protocol == Protocol::ChunkFetchingV1 { ValidatorIndex(0) } else { ValidatorIndex(1) }; + let (mut task, rx) = get_test_running_task(&req_protocol_names, validator_index, chunk_index); + let validators = vec![Sr25519Keyring::Alice.public().into()]; let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; - let (root_hash, chunk) = get_valid_chunk_data(pov); + let (root_hash, chunk) = get_valid_chunk_data(pov, 10, chunk_index); task.erasure_root = root_hash; - task.request.index = chunk.index; - - let validators = vec![Sr25519Keyring::Alice.public().into()]; task.group = validators; + let protocol_name = req_protocol_names.get_name(protocol); let test = TestRun { chunk_responses: { - let mut m = HashMap::new(); - m.insert( + [( Recipient::Authority(Sr25519Keyring::Alice.public().into()), - ChunkFetchingResponse::Chunk(v1::ChunkResponse { - chunk: chunk.chunk.clone(), - proof: chunk.proof, - }), - ); - m - }, - valid_chunks: { - let mut s = HashSet::new(); - s.insert(chunk.chunk); - s + get_response( + protocol, + protocol_name.clone(), + Some((chunk.chunk.clone(), chunk.proof, chunk_index)), + ), + )] + .into_iter() + .collect() }, + valid_chunks: [(chunk.chunk)].into_iter().collect(), + req_protocol_names, }; test.run(task, rx); } -#[test] -fn task_does_not_accept_wrongly_indexed_chunk() { - let (mut task, rx) = get_test_running_task(); - let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; - let (root_hash, chunk) = get_valid_chunk_data(pov); - task.erasure_root = root_hash; - task.request.index = ValidatorIndex(chunk.index.0 + 1); +#[rstest] +#[case(Protocol::ChunkFetchingV1)] +#[case(Protocol::ChunkFetchingV2)] +fn task_does_not_accept_wrongly_indexed_chunk(#[case] protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + // In order for protocol version 1 to work, the chunk index needs to be equal to the validator + // index. + let chunk_index = ChunkIndex(0); + let validator_index = + if protocol == Protocol::ChunkFetchingV1 { ValidatorIndex(0) } else { ValidatorIndex(1) }; + let (mut task, rx) = get_test_running_task(&req_protocol_names, validator_index, chunk_index); let validators = vec![Sr25519Keyring::Alice.public().into()]; + let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; + let (_, other_chunk) = get_valid_chunk_data(pov.clone(), 10, ChunkIndex(3)); + let (root_hash, chunk) = get_valid_chunk_data(pov, 10, ChunkIndex(0)); + task.erasure_root = root_hash; + task.request.index = chunk.index.into(); task.group = validators; + let protocol_name = req_protocol_names.get_name(protocol); let test = TestRun { chunk_responses: { - let mut m = HashMap::new(); - m.insert( + [( Recipient::Authority(Sr25519Keyring::Alice.public().into()), - ChunkFetchingResponse::Chunk(v1::ChunkResponse { - chunk: chunk.chunk.clone(), - proof: chunk.proof, - }), - ); - m + get_response( + protocol, + protocol_name.clone(), + Some((other_chunk.chunk.clone(), chunk.proof, other_chunk.index)), + ), + )] + .into_iter() + .collect() }, valid_chunks: HashSet::new(), + req_protocol_names, }; test.run(task, rx); } /// Task stores chunk, if there is at least one validator having a valid chunk. 
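The rstest cases above hand the task raw `(bytes, protocol_name)` pairs, and the task must pick a decoder by comparing the protocol name against the known v1/v2 names, as `do_request` does. A self-contained sketch of that dispatch, with `ProtocolName` simplified to `&str` and the response types reduced to hypothetical stand-ins:

```rust
use parity_scale_codec::{Decode, Encode};

// Reduced stand-ins for the real v1/v2 responses; illustration only.
#[derive(Encode, Decode, Debug, PartialEq)]
enum ResponseV1 {
    NoSuchChunk,
    Chunk(Vec<u8>),
}

#[derive(Encode, Decode, Debug, PartialEq)]
enum ResponseV2 {
    NoSuchChunk,
    Chunk { index: u32, data: Vec<u8> },
}

/// Decode a raw response by protocol name. A v1 answer carries no chunk
/// index, so the caller can only assume it equals the validator index it
/// asked with; v2 returns the index explicitly, which is what makes the
/// ValidatorIndex-to-ChunkIndex mapping verifiable.
fn dispatch(
    bytes: &[u8],
    protocol: &str,
    v1_name: &str,
    v2_name: &str,
    asked_validator_index: u32,
) -> Option<(u32, Vec<u8>)> {
    if protocol == v2_name {
        match ResponseV2::decode(&mut &bytes[..]).ok()? {
            ResponseV2::Chunk { index, data } => Some((index, data)),
            ResponseV2::NoSuchChunk => None,
        }
    } else if protocol == v1_name {
        match ResponseV1::decode(&mut &bytes[..]).ok()? {
            ResponseV1::Chunk(data) => Some((asked_validator_index, data)),
            ResponseV1::NoSuchChunk => None,
        }
    } else {
        None // unknown protocol: treated as a peer error in the real code
    }
}

fn main() {
    let bytes = ResponseV2::Chunk { index: 7, data: vec![1, 2, 3] }.encode();
    assert_eq!(
        dispatch(&bytes, "/req_chunk/2", "/req_chunk/1", "/req_chunk/2", 0),
        Some((7, vec![1, 2, 3])),
    );
}
```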
-#[test] -fn task_stores_valid_chunk_if_there_is_one() { - let (mut task, rx) = get_test_running_task(); +#[rstest] +#[case(Protocol::ChunkFetchingV1)] +#[case(Protocol::ChunkFetchingV2)] +fn task_stores_valid_chunk_if_there_is_one(#[case] protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + // In order for protocol version 1 to work, the chunk index needs to be equal to the validator + // index. + let chunk_index = ChunkIndex(1); + let validator_index = + if protocol == Protocol::ChunkFetchingV1 { ValidatorIndex(1) } else { ValidatorIndex(2) }; + let (mut task, rx) = get_test_running_task(&req_protocol_names, validator_index, chunk_index); let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; - let (root_hash, chunk) = get_valid_chunk_data(pov); - task.erasure_root = root_hash; - task.request.index = chunk.index; let validators = [ // Only Alice has valid chunk - should succeed, even though she is tried last. @@ -151,37 +190,45 @@ fn task_stores_valid_chunk_if_there_is_one() { .iter() .map(|v| v.public().into()) .collect::>(); + + let (root_hash, chunk) = get_valid_chunk_data(pov, 10, chunk_index); + task.erasure_root = root_hash; task.group = validators; + let protocol_name = req_protocol_names.get_name(protocol); let test = TestRun { chunk_responses: { - let mut m = HashMap::new(); - m.insert( - Recipient::Authority(Sr25519Keyring::Alice.public().into()), - ChunkFetchingResponse::Chunk(v1::ChunkResponse { - chunk: chunk.chunk.clone(), - proof: chunk.proof, - }), - ); - m.insert( - Recipient::Authority(Sr25519Keyring::Bob.public().into()), - ChunkFetchingResponse::NoSuchChunk, - ); - m.insert( - Recipient::Authority(Sr25519Keyring::Charlie.public().into()), - ChunkFetchingResponse::Chunk(v1::ChunkResponse { - chunk: vec![1, 2, 3], - proof: Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(), - }), - ); - - m - }, - valid_chunks: { - let mut s = HashSet::new(); - s.insert(chunk.chunk); - s + [ + ( + Recipient::Authority(Sr25519Keyring::Alice.public().into()), + get_response( + protocol, + protocol_name.clone(), + Some((chunk.chunk.clone(), chunk.proof, chunk_index)), + ), + ), + ( + Recipient::Authority(Sr25519Keyring::Bob.public().into()), + get_response(protocol, protocol_name.clone(), None), + ), + ( + Recipient::Authority(Sr25519Keyring::Charlie.public().into()), + get_response( + protocol, + protocol_name.clone(), + Some(( + vec![1, 2, 3], + Proof::try_from(vec![vec![9, 8, 2], vec![2, 3, 4]]).unwrap(), + chunk_index, + )), + ), + ), + ] + .into_iter() + .collect() }, + valid_chunks: [(chunk.chunk)].into_iter().collect(), + req_protocol_names, }; test.run(task, rx); } @@ -189,14 +236,16 @@ fn task_stores_valid_chunk_if_there_is_one() { struct TestRun { /// Response to deliver for a given validator index. /// None means, answer with `NetworkError`. 
- chunk_responses: HashMap, + chunk_responses: HashMap, ProtocolName)>, /// Set of chunks that should be considered valid: valid_chunks: HashSet>, + /// Request protocol names + req_protocol_names: ReqProtocolNames, } impl TestRun { fn run(self, task: RunningTask, rx: mpsc::Receiver) { - sp_tracing::try_init_simple(); + sp_tracing::init_for_tests(); let mut rx = rx.fuse(); let task = task.run_inner().fuse(); futures::pin_mut!(task); @@ -240,20 +289,41 @@ impl TestRun { let mut valid_responses = 0; for req in reqs { let req = match req { - Requests::ChunkFetchingV1(req) => req, + Requests::ChunkFetching(req) => req, _ => panic!("Unexpected request"), }; let response = self.chunk_responses.get(&req.peer).ok_or(network::RequestFailure::Refused); - if let Ok(ChunkFetchingResponse::Chunk(resp)) = &response { - if self.valid_chunks.contains(&resp.chunk) { - valid_responses += 1; + if let Ok((resp, protocol)) = response { + let chunk = if protocol == + &self.req_protocol_names.get_name(Protocol::ChunkFetchingV1) + { + Into::>::into( + v1::ChunkFetchingResponse::decode(&mut &resp[..]).unwrap(), + ) + .map(|c| c.chunk) + } else if protocol == + &self.req_protocol_names.get_name(Protocol::ChunkFetchingV2) + { + Into::>::into( + v2::ChunkFetchingResponse::decode(&mut &resp[..]).unwrap(), + ) + .map(|c| c.chunk) + } else { + unreachable!() + }; + + if let Some(chunk) = chunk { + if self.valid_chunks.contains(&chunk) { + valid_responses += 1; + } } + + req.pending_response + .send(response.cloned()) + .expect("Sending response should succeed"); } - req.pending_response - .send(response.map(|r| (r.encode(), ProtocolName::from("")))) - .expect("Sending response should succeed"); } return (valid_responses == 0) && self.valid_chunks.is_empty() }, @@ -274,8 +344,12 @@ impl TestRun { } } -/// Get a `RunningTask` filled with dummy values. -fn get_test_running_task() -> (RunningTask, mpsc::Receiver) { +/// Get a `RunningTask` filled with (mostly) dummy values. +fn get_test_running_task( + req_protocol_names: &ReqProtocolNames, + validator_index: ValidatorIndex, + chunk_index: ChunkIndex, +) -> (RunningTask, mpsc::Receiver) { let (tx, rx) = mpsc::channel(0); ( @@ -283,16 +357,45 @@ fn get_test_running_task() -> (RunningTask, mpsc::Receiver) { session_index: 0, group_index: GroupIndex(0), group: Vec::new(), - request: ChunkFetchingRequest { + request: v2::ChunkFetchingRequest { candidate_hash: CandidateHash([43u8; 32].into()), - index: ValidatorIndex(0), + index: validator_index, }, erasure_root: Hash::repeat_byte(99), relay_parent: Hash::repeat_byte(71), sender: tx, metrics: Metrics::new_dummy(), span: jaeger::Span::Disabled, + req_v1_protocol_name: req_protocol_names.get_name(Protocol::ChunkFetchingV1), + req_v2_protocol_name: req_protocol_names.get_name(Protocol::ChunkFetchingV2), + chunk_index, }, rx, ) } + +/// Make a versioned ChunkFetchingResponse. 
+fn get_response( + protocol: Protocol, + protocol_name: ProtocolName, + chunk: Option<(Vec, Proof, ChunkIndex)>, +) -> (Vec, ProtocolName) { + ( + match protocol { + Protocol::ChunkFetchingV1 => if let Some((chunk, proof, _)) = chunk { + v1::ChunkFetchingResponse::Chunk(ChunkResponse { chunk, proof }) + } else { + v1::ChunkFetchingResponse::NoSuchChunk + } + .encode(), + Protocol::ChunkFetchingV2 => if let Some((chunk, proof, index)) = chunk { + v2::ChunkFetchingResponse::Chunk(ErasureChunk { chunk, index, proof }) + } else { + v2::ChunkFetchingResponse::NoSuchChunk + } + .encode(), + _ => unreachable!(), + }, + protocol_name, + ) +} diff --git a/polkadot/node/network/availability-distribution/src/requester/mod.rs b/polkadot/node/network/availability-distribution/src/requester/mod.rs index 97e80d696e7ef2adabdbc24dda76172603e462f0..efbdceb43bddc6c9501fb5ed6dc3c2e42100df7b 100644 --- a/polkadot/node/network/availability-distribution/src/requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/mod.rs @@ -18,10 +18,7 @@ //! availability. use std::{ - collections::{ - hash_map::{Entry, HashMap}, - hash_set::HashSet, - }, + collections::{hash_map::HashMap, hash_set::HashSet}, iter::IntoIterator, pin::Pin, }; @@ -32,13 +29,17 @@ use futures::{ Stream, }; +use polkadot_node_network_protocol::request_response::{v1, v2, IsRequest, ReqProtocolNames}; use polkadot_node_subsystem::{ jaeger, messages::{ChainApiMessage, RuntimeApiMessage}, overseer, ActivatedLeaf, ActiveLeavesUpdate, }; -use polkadot_node_subsystem_util::runtime::{get_occupied_cores, RuntimeInfo}; -use polkadot_primitives::{CandidateHash, Hash, OccupiedCore, SessionIndex}; +use polkadot_node_subsystem_util::{ + availability_chunks::availability_chunk_index, + runtime::{get_occupied_cores, RuntimeInfo}, +}; +use polkadot_primitives::{CandidateHash, CoreIndex, Hash, OccupiedCore, SessionIndex}; use super::{FatalError, Metrics, Result, LOG_TARGET}; @@ -77,6 +78,9 @@ pub struct Requester { /// Prometheus Metrics metrics: Metrics, + + /// Mapping of the req-response protocols to the full protocol names. + req_protocol_names: ReqProtocolNames, } #[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)] @@ -88,9 +92,16 @@ impl Requester { /// /// You must feed it with `ActiveLeavesUpdate` via `update_fetching_heads` and make it progress /// by advancing the stream. - pub fn new(metrics: Metrics) -> Self { + pub fn new(req_protocol_names: ReqProtocolNames, metrics: Metrics) -> Self { let (tx, rx) = mpsc::channel(1); - Requester { fetches: HashMap::new(), session_cache: SessionCache::new(), tx, rx, metrics } + Requester { + fetches: HashMap::new(), + session_cache: SessionCache::new(), + tx, + rx, + metrics, + req_protocol_names, + } } /// Update heads that need availability distribution. 
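In the `requester` hunk that follows, protocol names are resolved through `ReqProtocolNames` and, further down, via the requests' `PROTOCOL` associated constant (`v1::ChunkFetchingRequest::PROTOCOL`). The shape of that association can be sketched as a trait with an associated constant; the names below are stand-ins, not the real `IsRequest` definition:

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Protocol {
    ChunkFetchingV1,
    ChunkFetchingV2,
}

// Each request type knows its own wire protocol at the type level.
trait IsRequest {
    const PROTOCOL: Protocol;
}

struct ChunkFetchingRequestV1;
struct ChunkFetchingRequestV2;

impl IsRequest for ChunkFetchingRequestV1 {
    const PROTOCOL: Protocol = Protocol::ChunkFetchingV1;
}
impl IsRequest for ChunkFetchingRequestV2 {
    const PROTOCOL: Protocol = Protocol::ChunkFetchingV2;
}

/// Resolve the full on-the-wire name for a request type generically,
/// mirroring `req_protocol_names.get_name(R::PROTOCOL)`.
fn full_name<R: IsRequest>(names: &HashMap<Protocol, String>) -> Option<&str> {
    names.get(&R::PROTOCOL).map(String::as_str)
}

fn main() {
    let mut names = HashMap::new();
    names.insert(Protocol::ChunkFetchingV1, "/genesis/req_chunk/1".to_string());
    names.insert(Protocol::ChunkFetchingV2, "/genesis/req_chunk/2".to_string());
    assert_eq!(full_name::<ChunkFetchingRequestV2>(&names), Some("/genesis/req_chunk/2"));
}
```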
@@ -197,56 +208,76 @@ impl Requester { runtime: &mut RuntimeInfo, leaf: Hash, leaf_session_index: SessionIndex, - cores: impl IntoIterator, + cores: impl IntoIterator, span: jaeger::Span, ) -> Result<()> { - for core in cores { + for (core_index, core) in cores { let mut span = span .child("check-fetch-candidate") .with_trace_id(core.candidate_hash) .with_string_tag("leaf", format!("{:?}", leaf)) .with_candidate(core.candidate_hash) .with_stage(jaeger::Stage::AvailabilityDistribution); - match self.fetches.entry(core.candidate_hash) { - Entry::Occupied(mut e) => + + if let Some(e) = self.fetches.get_mut(&core.candidate_hash) { // Just book keeping - we are already requesting that chunk: - { - span.add_string_tag("already-requested-chunk", "true"); - e.get_mut().add_leaf(leaf); - }, - Entry::Vacant(e) => { - span.add_string_tag("already-requested-chunk", "false"); - let tx = self.tx.clone(); - let metrics = self.metrics.clone(); - - let task_cfg = self - .session_cache - .with_session_info( - context, - runtime, - // We use leaf here, the relay_parent must be in the same session as - // the leaf. This is guaranteed by runtime which ensures that cores are - // cleared at session boundaries. At the same time, only leaves are - // guaranteed to be fetchable by the state trie. - leaf, - leaf_session_index, - |info| FetchTaskConfig::new(leaf, &core, tx, metrics, info, span), - ) - .await - .map_err(|err| { - gum::warn!( - target: LOG_TARGET, - error = ?err, - "Failed to spawn a fetch task" - ); - err + span.add_string_tag("already-requested-chunk", "true"); + e.add_leaf(leaf); + } else { + span.add_string_tag("already-requested-chunk", "false"); + let tx = self.tx.clone(); + let metrics = self.metrics.clone(); + + let session_info = self + .session_cache + .get_session_info( + context, + runtime, + // We use leaf here, the relay_parent must be in the same session as + // the leaf. This is guaranteed by runtime which ensures that cores are + // cleared at session boundaries. At the same time, only leaves are + // guaranteed to be fetchable by the state trie. + leaf, + leaf_session_index, + ) + .await + .map_err(|err| { + gum::warn!( + target: LOG_TARGET, + error = ?err, + "Failed to spawn a fetch task" + ); + err + })?; + + if let Some(session_info) = session_info { + let n_validators = + session_info.validator_groups.iter().fold(0usize, |mut acc, group| { + acc = acc.saturating_add(group.len()); + acc }); - - if let Ok(Some(task_cfg)) = task_cfg { - e.insert(FetchTask::start(task_cfg, context).await?); - } - // Not a validator, nothing to do. 
- }, + let chunk_index = availability_chunk_index( + session_info.node_features.as_ref(), + n_validators, + core_index, + session_info.our_index, + )?; + + let task_cfg = FetchTaskConfig::new( + leaf, + &core, + tx, + metrics, + session_info, + chunk_index, + span, + self.req_protocol_names.get_name(v1::ChunkFetchingRequest::PROTOCOL), + self.req_protocol_names.get_name(v2::ChunkFetchingRequest::PROTOCOL), + ); + + self.fetches + .insert(core.candidate_hash, FetchTask::start(task_cfg, context).await?); + } } } Ok(()) diff --git a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs index 8a48e19c2827d13fe2d15ea4cc5ded50f058ca03..a762c262dba3ec1df0c7609017b704d867f11141 100644 --- a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs +++ b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs @@ -20,8 +20,10 @@ use rand::{seq::SliceRandom, thread_rng}; use schnellru::{ByLength, LruMap}; use polkadot_node_subsystem::overseer; -use polkadot_node_subsystem_util::runtime::RuntimeInfo; -use polkadot_primitives::{AuthorityDiscoveryId, GroupIndex, Hash, SessionIndex, ValidatorIndex}; +use polkadot_node_subsystem_util::runtime::{request_node_features, RuntimeInfo}; +use polkadot_primitives::{ + AuthorityDiscoveryId, GroupIndex, Hash, NodeFeatures, SessionIndex, ValidatorIndex, +}; use crate::{ error::{Error, Result}, @@ -62,6 +64,9 @@ pub struct SessionInfo { /// /// `None`, if we are not in fact part of any group. pub our_group: Option, + + /// Node features. + pub node_features: Option, } /// Report of bad validators. @@ -87,39 +92,29 @@ impl SessionCache { } } - /// Tries to retrieve `SessionInfo` and calls `with_info` if successful. - /// + /// Tries to retrieve `SessionInfo`. /// If this node is not a validator, the function will return `None`. - /// - /// Use this function over any `fetch_session_info` if all you need is a reference to - /// `SessionInfo`, as it avoids an expensive clone. - pub async fn with_session_info( - &mut self, + pub async fn get_session_info<'a, Context>( + &'a mut self, ctx: &mut Context, runtime: &mut RuntimeInfo, parent: Hash, session_index: SessionIndex, - with_info: F, - ) -> Result> - where - F: FnOnce(&SessionInfo) -> R, - { - if let Some(o_info) = self.session_info_cache.get(&session_index) { - gum::trace!(target: LOG_TARGET, session_index, "Got session from lru"); - return Ok(Some(with_info(o_info))) + ) -> Result> { + gum::trace!(target: LOG_TARGET, session_index, "Calling `get_session_info`"); + + if self.session_info_cache.get(&session_index).is_none() { + if let Some(info) = + Self::query_info_from_runtime(ctx, runtime, parent, session_index).await? + { + gum::trace!(target: LOG_TARGET, session_index, "Storing session info in lru!"); + self.session_info_cache.insert(session_index, info); + } else { + return Ok(None) + } } - if let Some(info) = - self.query_info_from_runtime(ctx, runtime, parent, session_index).await? - { - gum::trace!(target: LOG_TARGET, session_index, "Calling `with_info`"); - let r = with_info(&info); - gum::trace!(target: LOG_TARGET, session_index, "Storing session info in lru!"); - self.session_info_cache.insert(session_index, info); - Ok(Some(r)) - } else { - Ok(None) - } + Ok(self.session_info_cache.get(&session_index).map(|i| &*i)) } /// Variant of `report_bad` that never fails, but just logs errors. 
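The `availability_chunk_index` call above is the point where `ValidatorIndex` stops being synonymous with `ChunkIndex`. Its implementation is not part of this diff; the sketch below is an assumption about its shape, following the per-core rotation idea (the authoritative code lives in the `availability_chunks` module of `polkadot-node-subsystem-util`):

```rust
/// Hedged sketch, not the authoritative implementation: map a validator to
/// the chunk index it should hold for a given core.
fn availability_chunk_index(
    mapping_enabled: bool, // the AvailabilityChunkMapping node feature bit
    n_validators: u32,
    core_index: u32,
    validator_index: u32,
) -> u32 {
    if !mapping_enabled {
        // Legacy behaviour: ChunkIndex == ValidatorIndex.
        return validator_index;
    }
    // Roughly a third of the validators hold systematic chunks; rotating the
    // starting position per core spreads systematic chunks across the set.
    let systematic_threshold = (n_validators - 1) / 3 + 1;
    (core_index * systematic_threshold + validator_index) % n_validators
}

fn main() {
    // With the feature off, the mapping is the identity.
    assert_eq!(availability_chunk_index(false, 10, 1, 4), 4);
    // With it on, core 1 shifts validator 4 to chunk (1 * 4 + 4) % 10 = 8.
    assert_eq!(availability_chunk_index(true, 10, 1, 4), 8);
}
```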
@@ -171,7 +166,6 @@ impl SessionCache { /// /// Returns: `None` if not a validator. async fn query_info_from_runtime( - &self, ctx: &mut Context, runtime: &mut RuntimeInfo, relay_parent: Hash, @@ -181,6 +175,9 @@ impl SessionCache { .get_session_info_by_index(ctx.sender(), relay_parent, session_index) .await?; + let node_features = + request_node_features(relay_parent, session_index, ctx.sender()).await?; + let discovery_keys = info.session_info.discovery_keys.clone(); let mut validator_groups = info.session_info.validator_groups.clone(); @@ -208,7 +205,13 @@ impl SessionCache { }) .collect(); - let info = SessionInfo { validator_groups, our_index, session_index, our_group }; + let info = SessionInfo { + validator_groups, + our_index, + session_index, + our_group, + node_features, + }; return Ok(Some(info)) } return Ok(None) diff --git a/polkadot/node/network/availability-distribution/src/requester/tests.rs b/polkadot/node/network/availability-distribution/src/requester/tests.rs index 0dedd4f091acd692c5b319f5669c1356bf335e2b..09567a8f87d322f4befca53c14712b72b9b0e0fc 100644 --- a/polkadot/node/network/availability-distribution/src/requester/tests.rs +++ b/polkadot/node/network/availability-distribution/src/requester/tests.rs @@ -14,21 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::collections::HashMap; - -use std::future::Future; - use futures::FutureExt; +use std::{collections::HashMap, future::Future}; -use polkadot_node_network_protocol::jaeger; +use polkadot_node_network_protocol::{jaeger, request_response::ReqProtocolNames}; use polkadot_node_primitives::{BlockData, ErasureChunk, PoV}; -use polkadot_node_subsystem_test_helpers::mock::new_leaf; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - BlockNumber, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, + BlockNumber, ChunkIndex, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, ScheduledCore, SessionIndex, SessionInfo, }; -use sp_core::traits::SpawnNamed; +use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; use polkadot_node_subsystem::{ messages::{ @@ -38,19 +34,21 @@ use polkadot_node_subsystem::{ ActiveLeavesUpdate, SpawnGlue, }; use polkadot_node_subsystem_test_helpers::{ - make_subsystem_context, mock::make_ferdie_keystore, TestSubsystemContext, - TestSubsystemContextHandle, + make_subsystem_context, + mock::{make_ferdie_keystore, new_leaf}, + TestSubsystemContext, TestSubsystemContextHandle, }; -use sp_core::testing::TaskExecutor; - -use crate::tests::mock::{get_valid_chunk_data, make_session_info, OccupiedCoreBuilder}; +use crate::tests::{ + mock::{get_valid_chunk_data, make_session_info, OccupiedCoreBuilder}, + node_features_with_mapping_enabled, +}; use super::Requester; fn get_erasure_chunk() -> ErasureChunk { let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; - get_valid_chunk_data(pov).1 + get_valid_chunk_data(pov, 10, ChunkIndex(0)).1 } #[derive(Clone)] @@ -126,7 +124,7 @@ fn spawn_virtual_overseer( .expect("Receiver should be alive."); }, RuntimeApiRequest::NodeFeatures(_, tx) => { - tx.send(Ok(NodeFeatures::EMPTY)) + tx.send(Ok(node_features_with_mapping_enabled())) .expect("Receiver should be alive."); }, RuntimeApiRequest::AvailabilityCores(tx) => { @@ -146,6 +144,8 @@ fn spawn_virtual_overseer( group_responsible: GroupIndex(1), para_id, relay_parent: hash, + n_validators: 10, + chunk_index: ChunkIndex(0), } .build() .0, @@ -201,7 +201,8 @@ fn test_harness>( 
#[test] fn check_ancestry_lookup_in_same_session() { let test_state = TestState::new(); - let mut requester = Requester::new(Default::default()); + let mut requester = + Requester::new(ReqProtocolNames::new(&Hash::repeat_byte(0xff), None), Default::default()); let keystore = make_ferdie_keystore(); let mut runtime = RuntimeInfo::new(Some(keystore)); @@ -268,7 +269,8 @@ fn check_ancestry_lookup_in_same_session() { #[test] fn check_ancestry_lookup_in_different_sessions() { let mut test_state = TestState::new(); - let mut requester = Requester::new(Default::default()); + let mut requester = + Requester::new(ReqProtocolNames::new(&Hash::repeat_byte(0xff), None), Default::default()); let keystore = make_ferdie_keystore(); let mut runtime = RuntimeInfo::new(Some(keystore)); diff --git a/polkadot/node/network/availability-distribution/src/responder.rs b/polkadot/node/network/availability-distribution/src/responder.rs index 54b188f7f01fc7e22b4ea7679be0a67f8d9d0d37..2c1885d277275c56772147d4ddd4cb29ccdb4cdc 100644 --- a/polkadot/node/network/availability-distribution/src/responder.rs +++ b/polkadot/node/network/availability-distribution/src/responder.rs @@ -18,11 +18,12 @@ use std::sync::Arc; -use futures::channel::oneshot; +use futures::{channel::oneshot, select, FutureExt}; use fatality::Nested; +use parity_scale_codec::{Decode, Encode}; use polkadot_node_network_protocol::{ - request_response::{v1, IncomingRequest, IncomingRequestReceiver}, + request_response::{v1, v2, IncomingRequest, IncomingRequestReceiver, IsRequest}, UnifiedReputationChange as Rep, }; use polkadot_node_primitives::{AvailableData, ErasureChunk}; @@ -66,33 +67,66 @@ pub async fn run_pov_receiver( } /// Receiver task to be forked as a separate task to handle chunk requests. -pub async fn run_chunk_receiver( +pub async fn run_chunk_receivers( mut sender: Sender, - mut receiver: IncomingRequestReceiver, + mut receiver_v1: IncomingRequestReceiver, + mut receiver_v2: IncomingRequestReceiver, metrics: Metrics, ) where Sender: SubsystemSender, { + let make_resp_v1 = |chunk: Option| match chunk { + None => v1::ChunkFetchingResponse::NoSuchChunk, + Some(chunk) => v1::ChunkFetchingResponse::Chunk(chunk.into()), + }; + + let make_resp_v2 = |chunk: Option| match chunk { + None => v2::ChunkFetchingResponse::NoSuchChunk, + Some(chunk) => v2::ChunkFetchingResponse::Chunk(chunk.into()), + }; + loop { - match receiver.recv(|| vec![COST_INVALID_REQUEST]).await.into_nested() { - Ok(Ok(msg)) => { - answer_chunk_request_log(&mut sender, msg, &metrics).await; - }, - Err(fatal) => { - gum::debug!( - target: LOG_TARGET, - error = ?fatal, - "Shutting down chunk receiver." - ); - return - }, - Ok(Err(jfyi)) => { - gum::debug!( - target: LOG_TARGET, - error = ?jfyi, - "Error decoding incoming chunk request." - ); + select! { + res = receiver_v1.recv(|| vec![COST_INVALID_REQUEST]).fuse() => match res.into_nested() { + Ok(Ok(msg)) => { + answer_chunk_request_log(&mut sender, msg, make_resp_v1, &metrics).await; + }, + Err(fatal) => { + gum::debug!( + target: LOG_TARGET, + error = ?fatal, + "Shutting down chunk receiver." + ); + return + }, + Ok(Err(jfyi)) => { + gum::debug!( + target: LOG_TARGET, + error = ?jfyi, + "Error decoding incoming chunk request." 
+ ); + } }, + res = receiver_v2.recv(|| vec![COST_INVALID_REQUEST]).fuse() => match res.into_nested() { + Ok(Ok(msg)) => { + answer_chunk_request_log(&mut sender, msg.into(), make_resp_v2, &metrics).await; + }, + Err(fatal) => { + gum::debug!( + target: LOG_TARGET, + error = ?fatal, + "Shutting down chunk receiver." + ); + return + }, + Ok(Err(jfyi)) => { + gum::debug!( + target: LOG_TARGET, + error = ?jfyi, + "Error decoding incoming chunk request." + ); + } + } } } } @@ -124,15 +158,18 @@ pub async fn answer_pov_request_log( /// Variant of `answer_chunk_request` that does Prometheus metric and logging on errors. /// /// Any errors of `answer_request` will simply be logged. -pub async fn answer_chunk_request_log( +pub async fn answer_chunk_request_log( sender: &mut Sender, - req: IncomingRequest, + req: IncomingRequest, + make_response: MakeResp, metrics: &Metrics, -) -> () -where +) where + Req: IsRequest + Decode + Encode + Into, + Req::Response: Encode, Sender: SubsystemSender, + MakeResp: Fn(Option) -> Req::Response, { - let res = answer_chunk_request(sender, req).await; + let res = answer_chunk_request(sender, req, make_response).await; match res { Ok(result) => metrics.on_served_chunk(if result { SUCCEEDED } else { NOT_FOUND }), Err(err) => { @@ -177,39 +214,46 @@ where /// Answer an incoming chunk request by querying the av store. /// /// Returns: `Ok(true)` if chunk was found and served. -pub async fn answer_chunk_request( +pub async fn answer_chunk_request( sender: &mut Sender, - req: IncomingRequest, + req: IncomingRequest, + make_response: MakeResp, ) -> Result where Sender: SubsystemSender, + Req: IsRequest + Decode + Encode + Into, + Req::Response: Encode, + MakeResp: Fn(Option) -> Req::Response, { - let span = jaeger::Span::new(req.payload.candidate_hash, "answer-chunk-request"); + // V1 and V2 requests have the same payload, so decoding into either one will work. It's the + // responses that differ, hence the `MakeResp` generic. 
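That comment is the crux of the responder refactor: one generic `answer_chunk_request` performs the store lookup, while each receiver loop supplies a `MakeResp` closure that turns the optional chunk into a version-specific response. A reduced, self-contained sketch of the shape (all types here are stand-ins):

```rust
#[derive(Debug, PartialEq)]
enum RespV1 {
    NoSuchChunk,
    Chunk(Vec<u8>),
}

#[derive(Debug, PartialEq)]
enum RespV2 {
    NoSuchChunk,
    Chunk { index: u32, data: Vec<u8> },
}

/// Shared handler: the lookup and metrics logic is written once; only the
/// response construction differs per protocol version.
fn answer<Resp>(
    chunk: Option<(u32, Vec<u8>)>,
    make_response: impl Fn(Option<(u32, Vec<u8>)>) -> Resp,
) -> Resp {
    // ... query the availability store here ...
    make_response(chunk)
}

fn main() {
    let chunk = Some((0, vec![1, 2, 3]));
    let v1 = answer(chunk.clone(), |c| match c {
        Some((_, data)) => RespV1::Chunk(data), // v1 has no index on the wire
        None => RespV1::NoSuchChunk,
    });
    let v2 = answer(chunk, |c| match c {
        Some((index, data)) => RespV2::Chunk { index, data },
        None => RespV2::NoSuchChunk,
    });
    assert_eq!(v1, RespV1::Chunk(vec![1, 2, 3]));
    assert_eq!(v2, RespV2::Chunk { index: 0, data: vec![1, 2, 3] });
}
```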
+ let payload: v1::ChunkFetchingRequest = req.payload.into(); + let span = jaeger::Span::new(payload.candidate_hash, "answer-chunk-request"); let _child_span = span .child("answer-chunk-request") - .with_trace_id(req.payload.candidate_hash) - .with_chunk_index(req.payload.index.0); + .with_trace_id(payload.candidate_hash) + .with_validator_index(payload.index); - let chunk = query_chunk(sender, req.payload.candidate_hash, req.payload.index).await?; + let chunk = query_chunk(sender, payload.candidate_hash, payload.index).await?; let result = chunk.is_some(); gum::trace!( target: LOG_TARGET, - hash = ?req.payload.candidate_hash, - index = ?req.payload.index, + hash = ?payload.candidate_hash, + index = ?payload.index, peer = ?req.peer, has_data = ?chunk.is_some(), "Serving chunk", ); - let response = match chunk { - None => v1::ChunkFetchingResponse::NoSuchChunk, - Some(chunk) => v1::ChunkFetchingResponse::Chunk(chunk.into()), - }; + let response = make_response(chunk); + + req.pending_response + .send_response(response) + .map_err(|_| JfyiError::SendResponse)?; - req.send_response(response).map_err(|_| JfyiError::SendResponse)?; Ok(result) } diff --git a/polkadot/node/network/availability-distribution/src/tests/mock.rs b/polkadot/node/network/availability-distribution/src/tests/mock.rs index 3df662fe546c07f7a7f58d02d0ceafd98a7e1b6c..b41c493a10721bbdd988125d1c355ad9e4cc4824 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mock.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mock.rs @@ -23,9 +23,9 @@ use sp_keyring::Sr25519Keyring; use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV, Proof}; use polkadot_primitives::{ - CandidateCommitments, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, - GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, OccupiedCore, PersistedValidationData, - SessionInfo, ValidatorIndex, + CandidateCommitments, CandidateDescriptor, CandidateHash, ChunkIndex, + CommittedCandidateReceipt, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, OccupiedCore, + PersistedValidationData, SessionInfo, ValidatorIndex, }; use polkadot_primitives_test_helpers::{ dummy_collator, dummy_collator_signature, dummy_hash, dummy_validation_code, @@ -75,13 +75,16 @@ pub struct OccupiedCoreBuilder { pub group_responsible: GroupIndex, pub para_id: ParaId, pub relay_parent: Hash, + pub n_validators: usize, + pub chunk_index: ChunkIndex, } impl OccupiedCoreBuilder { pub fn build(self) -> (OccupiedCore, (CandidateHash, ErasureChunk)) { let pov = PoV { block_data: BlockData(vec![45, 46, 47]) }; let pov_hash = pov.hash(); - let (erasure_root, chunk) = get_valid_chunk_data(pov.clone()); + let (erasure_root, chunk) = + get_valid_chunk_data(pov.clone(), self.n_validators, self.chunk_index); let candidate_receipt = TestCandidateBuilder { para_id: self.para_id, pov_hash, @@ -133,8 +136,11 @@ impl TestCandidateBuilder { } // Get chunk for index 0 -pub fn get_valid_chunk_data(pov: PoV) -> (Hash, ErasureChunk) { - let fake_validator_count = 10; +pub fn get_valid_chunk_data( + pov: PoV, + n_validators: usize, + chunk_index: ChunkIndex, +) -> (Hash, ErasureChunk) { let persisted = PersistedValidationData { parent_head: HeadData(vec![7, 8, 9]), relay_parent_number: Default::default(), @@ -142,17 +148,17 @@ pub fn get_valid_chunk_data(pov: PoV) -> (Hash, ErasureChunk) { relay_parent_storage_root: Default::default(), }; let available_data = AvailableData { 
validation_data: persisted, pov: Arc::new(pov) }; - let chunks = obtain_chunks(fake_validator_count, &available_data).unwrap(); + let chunks = obtain_chunks(n_validators, &available_data).unwrap(); let branches = branches(chunks.as_ref()); let root = branches.root(); let chunk = branches .enumerate() .map(|(index, (proof, chunk))| ErasureChunk { chunk: chunk.to_vec(), - index: ValidatorIndex(index as _), + index: ChunkIndex(index as _), proof: Proof::try_from(proof).unwrap(), }) - .next() - .expect("There really should be 10 chunks."); + .nth(chunk_index.0 as usize) + .expect("There really should be enough chunks."); (root, chunk) } diff --git a/polkadot/node/network/availability-distribution/src/tests/mod.rs b/polkadot/node/network/availability-distribution/src/tests/mod.rs index 214498979fb68307ce45705e396a7e306d53da87..b30e11a293c8d54c6b275c8759d3502ffa0323f0 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mod.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mod.rs @@ -17,9 +17,12 @@ use std::collections::HashSet; use futures::{executor, future, Future}; +use rstest::rstest; -use polkadot_node_network_protocol::request_response::{IncomingRequest, ReqProtocolNames}; -use polkadot_primitives::{Block, CoreState, Hash}; +use polkadot_node_network_protocol::request_response::{ + IncomingRequest, Protocol, ReqProtocolNames, +}; +use polkadot_primitives::{node_features, Block, CoreState, Hash, NodeFeatures}; use sp_keystore::KeystorePtr; use polkadot_node_subsystem_test_helpers as test_helpers; @@ -35,67 +38,129 @@ pub(crate) mod mock; fn test_harness>( keystore: KeystorePtr, + req_protocol_names: ReqProtocolNames, test_fx: impl FnOnce(TestHarness) -> T, -) { - sp_tracing::try_init_simple(); +) -> std::result::Result<(), FatalError> { + sp_tracing::init_for_tests(); let pool = sp_core::testing::TaskExecutor::new(); let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); - let genesis_hash = Hash::repeat_byte(0xff); - let req_protocol_names = ReqProtocolNames::new(&genesis_hash, None); let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, >(&req_protocol_names); - let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver::< + let (chunk_req_v1_receiver, chunk_req_v1_cfg) = IncomingRequest::get_config_receiver::< + Block, + sc_network::NetworkWorker, + >(&req_protocol_names); + let (chunk_req_v2_receiver, chunk_req_v2_cfg) = IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, >(&req_protocol_names); let subsystem = AvailabilityDistributionSubsystem::new( keystore, - IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, + IncomingRequestReceivers { pov_req_receiver, chunk_req_v1_receiver, chunk_req_v2_receiver }, + req_protocol_names, Default::default(), ); let subsystem = subsystem.run(context); - let test_fut = test_fx(TestHarness { virtual_overseer, pov_req_cfg, chunk_req_cfg, pool }); + let test_fut = test_fx(TestHarness { + virtual_overseer, + pov_req_cfg, + chunk_req_v1_cfg, + chunk_req_v2_cfg, + pool, + }); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); - executor::block_on(future::join(test_fut, subsystem)).1.unwrap(); + executor::block_on(future::join(test_fut, subsystem)).1 +} + +pub fn node_features_with_mapping_enabled() -> NodeFeatures { + let mut node_features = NodeFeatures::new(); + node_features.resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false); + 
node_features.set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true); + node_features } /// Simple basic check, whether the subsystem works as expected. /// /// Exceptional cases are tested as unit tests in `fetch_task`. -#[test] -fn check_basic() { - let state = TestState::default(); - test_harness(state.keystore.clone(), move |harness| state.run(harness)); +#[rstest] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV1)] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV2)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV1)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV2)] +fn check_basic(#[case] node_features: NodeFeatures, #[case] chunk_resp_protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + let state = + TestState::new(node_features.clone(), req_protocol_names.clone(), chunk_resp_protocol); + + if node_features == node_features_with_mapping_enabled() && + chunk_resp_protocol == Protocol::ChunkFetchingV1 + { + // For this specific case, chunk fetching is not possible, because the ValidatorIndex is not + // equal to the ChunkIndex and the peer does not send back the actual ChunkIndex. + let _ = test_harness(state.keystore.clone(), req_protocol_names, move |harness| { + state.run_assert_timeout(harness) + }); + } else { + test_harness(state.keystore.clone(), req_protocol_names, move |harness| state.run(harness)) + .unwrap(); + } } /// Check whether requester tries all validators in group. -#[test] -fn check_fetch_tries_all() { - let mut state = TestState::default(); +#[rstest] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV1)] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV2)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV1)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV2)] +fn check_fetch_tries_all( + #[case] node_features: NodeFeatures, + #[case] chunk_resp_protocol: Protocol, +) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + let mut state = + TestState::new(node_features.clone(), req_protocol_names.clone(), chunk_resp_protocol); for (_, v) in state.chunks.iter_mut() { // 4 validators in group, so this should still succeed: v.push(None); v.push(None); v.push(None); } - test_harness(state.keystore.clone(), move |harness| state.run(harness)); + + if node_features == node_features_with_mapping_enabled() && + chunk_resp_protocol == Protocol::ChunkFetchingV1 + { + // For this specific case, chunk fetching is not possible, because the ValidatorIndex is not + // equal to the ChunkIndex and the peer does not send back the actual ChunkIndex. + let _ = test_harness(state.keystore.clone(), req_protocol_names, move |harness| { + state.run_assert_timeout(harness) + }); + } else { + test_harness(state.keystore.clone(), req_protocol_names, move |harness| state.run(harness)) + .unwrap(); + } } /// Check whether requester tries all validators in group /// /// Check that requester will retry the fetch on error on the next block still pending /// availability. 
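`node_features_with_mapping_enabled` builds the feature bitvec by hand: grow the vector until it covers the feature's index, then set the bit. A self-contained sketch using the `bitvec` crate that backs `NodeFeatures` (the concrete index below is a placeholder, not the real `FeatureIndex` constant):

```rust
use bitvec::prelude::*;

// Placeholder for `node_features::FeatureIndex::AvailabilityChunkMapping`.
const AVAILABILITY_CHUNK_MAPPING: usize = 2;

fn mapping_enabled(features: &BitVec<u8, Lsb0>) -> bool {
    features
        .get(AVAILABILITY_CHUNK_MAPPING)
        .map(|bit| *bit)
        .unwrap_or(false)
}

fn main() {
    let mut features: BitVec<u8, Lsb0> = BitVec::new();
    // An empty bitvec means "feature off" for every index...
    assert!(!mapping_enabled(&features));
    // ...so the vector must first grow to cover the index, then set the bit.
    features.resize(AVAILABILITY_CHUNK_MAPPING + 1, false);
    features.set(AVAILABILITY_CHUNK_MAPPING, true);
    assert!(mapping_enabled(&features));
}
```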
-#[test] -fn check_fetch_retry() { - let mut state = TestState::default(); +#[rstest] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV1)] +#[case(NodeFeatures::EMPTY, Protocol::ChunkFetchingV2)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV1)] +#[case(node_features_with_mapping_enabled(), Protocol::ChunkFetchingV2)] +fn check_fetch_retry(#[case] node_features: NodeFeatures, #[case] chunk_resp_protocol: Protocol) { + let req_protocol_names = ReqProtocolNames::new(&Hash::repeat_byte(0xff), None); + let mut state = + TestState::new(node_features.clone(), req_protocol_names.clone(), chunk_resp_protocol); state .cores .insert(state.relay_chain[2], state.cores.get(&state.relay_chain[1]).unwrap().clone()); @@ -126,5 +191,17 @@ fn check_fetch_retry() { v.push(None); v.push(None); } - test_harness(state.keystore.clone(), move |harness| state.run(harness)); + + if node_features == node_features_with_mapping_enabled() && + chunk_resp_protocol == Protocol::ChunkFetchingV1 + { + // For this specific case, chunk fetching is not possible, because the ValidatorIndex is not + // equal to the ChunkIndex and the peer does not send back the actual ChunkIndex. + let _ = test_harness(state.keystore.clone(), req_protocol_names, move |harness| { + state.run_assert_timeout(harness) + }); + } else { + test_harness(state.keystore.clone(), req_protocol_names, move |harness| state.run(harness)) + .unwrap(); + } } diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs index 93411511e763af437d7813c9e5d83bac4609959e..ecc3eefbf3da30e0bfd5275be30318ddaa2192ba 100644 --- a/polkadot/node/network/availability-distribution/src/tests/state.rs +++ b/polkadot/node/network/availability-distribution/src/tests/state.rs @@ -19,9 +19,9 @@ use std::{ time::Duration, }; -use network::ProtocolName; +use network::{request_responses::OutgoingResponse, ProtocolName, RequestFailure}; use polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; -use polkadot_node_subsystem_util::TimeoutExt; +use polkadot_node_subsystem_util::{availability_chunks::availability_chunk_index, TimeoutExt}; use futures::{ channel::{mpsc, oneshot}, @@ -35,7 +35,7 @@ use sp_core::{testing::TaskExecutor, traits::SpawnNamed}; use sp_keystore::KeystorePtr; use polkadot_node_network_protocol::request_response::{ - v1, IncomingRequest, OutgoingRequest, Requests, + v1, v2, IncomingRequest, OutgoingRequest, Protocol, ReqProtocolNames, Requests, }; use polkadot_node_primitives::ErasureChunk; use polkadot_node_subsystem::{ @@ -47,8 +47,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateHash, CoreState, ExecutorParams, GroupIndex, Hash, Id as ParaId, NodeFeatures, - ScheduledCore, SessionInfo, ValidatorIndex, + CandidateHash, ChunkIndex, CoreIndex, CoreState, ExecutorParams, GroupIndex, Hash, + Id as ParaId, NodeFeatures, ScheduledCore, SessionInfo, ValidatorIndex, }; use test_helpers::mock::{make_ferdie_keystore, new_leaf}; @@ -59,7 +59,8 @@ type VirtualOverseer = test_helpers::TestSubsystemContextHandle>, pub keystore: KeystorePtr, + pub node_features: NodeFeatures, + pub chunk_response_protocol: Protocol, + pub req_protocol_names: ReqProtocolNames, + pub our_chunk_index: ChunkIndex, } -impl Default for TestState { - fn default() -> Self { +impl TestState { + /// Initialize a default test state. 
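A note on `new` below (descriptive only): `our_chunk_index` comes from the `availability_chunk_index` util in subsystem-util, i.e. the `ChunkIndex` that `ValidatorIndex(0)` is assigned for `CoreIndex(1)` under the given `node_features`. With the mapping feature off this is the identity assignment (chunk 0); with it on, the assignment is permuted and generally differs per core, which is what the `StoreChunk` assertion further down checks against.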
+ pub fn new( + node_features: NodeFeatures, + req_protocol_names: ReqProtocolNames, + chunk_response_protocol: Protocol, + ) -> Self { let relay_chain: Vec<_> = (1u8..10).map(Hash::repeat_byte).collect(); let chain_a = ParaId::from(1); let chain_b = ParaId::from(2); @@ -97,6 +107,14 @@ impl Default for TestState { let session_info = make_session_info(); + let our_chunk_index = availability_chunk_index( + Some(&node_features), + session_info.validators.len(), + CoreIndex(1), + ValidatorIndex(0), + ) + .unwrap(); + let (cores, chunks) = { let mut cores = HashMap::new(); let mut chunks = HashMap::new(); @@ -123,6 +141,8 @@ impl Default for TestState { group_responsible: GroupIndex(i as _), para_id: *para_id, relay_parent: *relay_parent, + n_validators: session_info.validators.len(), + chunk_index: our_chunk_index, } .build(); (CoreState::Occupied(core), chunk) @@ -132,8 +152,8 @@ impl Default for TestState { // Skip chunks for our own group (won't get fetched): let mut chunks_other_groups = p_chunks.into_iter(); chunks_other_groups.next(); - for (validator_index, chunk) in chunks_other_groups { - chunks.insert((validator_index, chunk.index), vec![Some(chunk)]); + for (candidate, chunk) in chunks_other_groups { + chunks.insert((candidate, ValidatorIndex(0)), vec![Some(chunk)]); } } (cores, chunks) @@ -145,18 +165,27 @@ impl Default for TestState { session_info, cores, keystore, + node_features, + chunk_response_protocol, + req_protocol_names, + our_chunk_index, } } -} -impl TestState { /// Run, but fail after some timeout. pub async fn run(self, harness: TestHarness) { // Make sure test won't run forever. - let f = self.run_inner(harness).timeout(Duration::from_secs(10)); + let f = self.run_inner(harness).timeout(Duration::from_secs(5)); assert!(f.await.is_some(), "Test ran into timeout"); } + /// Run, and assert an expected timeout. + pub async fn run_assert_timeout(self, harness: TestHarness) { + // Make sure test won't run forever. + let f = self.run_inner(harness).timeout(Duration::from_secs(5)); + assert!(f.await.is_none(), "Test should have run into timeout"); + } + /// Run tests with the given mock values in `TestState`. /// /// This will simply advance through the simulated chain and examines whether the subsystem @@ -214,15 +243,41 @@ impl TestState { )) => { for req in reqs { // Forward requests: - let in_req = to_incoming_req(&harness.pool, req); - harness - .chunk_req_cfg - .inbound_queue - .as_mut() - .unwrap() - .send(in_req.into_raw()) - .await - .unwrap(); + match self.chunk_response_protocol { + Protocol::ChunkFetchingV1 => { + let in_req = to_incoming_req_v1( + &harness.pool, + req, + self.req_protocol_names.get_name(Protocol::ChunkFetchingV1), + ); + + harness + .chunk_req_v1_cfg + .inbound_queue + .as_mut() + .unwrap() + .send(in_req.into_raw()) + .await + .unwrap(); + }, + Protocol::ChunkFetchingV2 => { + let in_req = to_incoming_req_v2( + &harness.pool, + req, + self.req_protocol_names.get_name(Protocol::ChunkFetchingV2), + ); + + harness + .chunk_req_v2_cfg + .inbound_queue + .as_mut() + .unwrap() + .send(in_req.into_raw()) + .await + .unwrap(); + }, + _ => panic!("Unexpected protocol"), + } } }, AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryChunk( @@ -240,13 +295,16 @@ impl TestState { AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk, + validator_index, tx, .. 
}) => { assert!( - self.valid_chunks.contains(&(candidate_hash, chunk.index)), + self.valid_chunks.contains(&(candidate_hash, validator_index)), "Only valid chunks should ever get stored." ); + assert_eq!(self.our_chunk_index, chunk.index); + tx.send(Ok(())).expect("Receiver is expected to be alive"); gum::trace!(target: LOG_TARGET, "'Stored' fetched chunk."); remaining_stores -= 1; @@ -265,14 +323,15 @@ impl TestState { tx.send(Ok(Some(ExecutorParams::default()))) .expect("Receiver should be alive."); }, - RuntimeApiRequest::NodeFeatures(_, si_tx) => { - si_tx.send(Ok(NodeFeatures::EMPTY)).expect("Receiver should be alive."); - }, RuntimeApiRequest::AvailabilityCores(tx) => { gum::trace!(target: LOG_TARGET, cores= ?self.cores[&hash], hash = ?hash, "Sending out cores for hash"); tx.send(Ok(self.cores[&hash].clone())) .expect("Receiver should still be alive"); }, + RuntimeApiRequest::NodeFeatures(_, tx) => { + tx.send(Ok(self.node_features.clone())) + .expect("Receiver should still be alive"); + }, _ => { panic!("Unexpected runtime request: {:?}", req); }, @@ -286,7 +345,10 @@ impl TestState { .unwrap_or_default(); response_channel.send(Ok(ancestors)).expect("Receiver is expected to be alive"); }, - _ => {}, + + _ => { + panic!("Received unexpected message") + }, } } @@ -310,30 +372,47 @@ async fn overseer_recv(rx: &mut mpsc::UnboundedReceiver) -> AllMess rx.next().await.expect("Test subsystem no longer live") } -fn to_incoming_req( +fn to_incoming_req_v1( executor: &TaskExecutor, outgoing: Requests, + protocol_name: ProtocolName, ) -> IncomingRequest { match outgoing { - Requests::ChunkFetchingV1(OutgoingRequest { payload, pending_response, .. }) => { - let (tx, rx): (oneshot::Sender, oneshot::Receiver<_>) = - oneshot::channel(); - executor.spawn( - "message-forwarding", - None, - async { - let response = rx.await; - let payload = response.expect("Unexpected canceled request").result; - pending_response - .send( - payload - .map_err(|_| network::RequestFailure::Refused) - .map(|r| (r, ProtocolName::from(""))), - ) - .expect("Sending response is expected to work"); - } - .boxed(), - ); + Requests::ChunkFetching(OutgoingRequest { + pending_response, + fallback_request: Some((fallback_request, fallback_protocol)), + .. + }) => { + assert_eq!(fallback_protocol, Protocol::ChunkFetchingV1); + + let tx = spawn_message_forwarding(executor, protocol_name, pending_response); + + IncomingRequest::new( + // We don't really care: + network::PeerId::random().into(), + fallback_request, + tx, + ) + }, + _ => panic!("Unexpected request!"), + } +} + +fn to_incoming_req_v2( + executor: &TaskExecutor, + outgoing: Requests, + protocol_name: ProtocolName, +) -> IncomingRequest { + match outgoing { + Requests::ChunkFetching(OutgoingRequest { + payload, + pending_response, + fallback_request: Some((_, fallback_protocol)), + .. 
+ }) => { + assert_eq!(fallback_protocol, Protocol::ChunkFetchingV1); + + let tx = spawn_message_forwarding(executor, protocol_name, pending_response); IncomingRequest::new( // We don't really care: @@ -345,3 +424,26 @@ fn to_incoming_req( _ => panic!("Unexpected request!"), } } + +fn spawn_message_forwarding( + executor: &TaskExecutor, + protocol_name: ProtocolName, + pending_response: oneshot::Sender, ProtocolName), RequestFailure>>, +) -> oneshot::Sender { + let (tx, rx): (oneshot::Sender, oneshot::Receiver<_>) = + oneshot::channel(); + executor.spawn( + "message-forwarding", + None, + async { + let response = rx.await; + let payload = response.expect("Unexpected canceled request").result; + pending_response + .send(payload.map_err(|_| RequestFailure::Refused).map(|r| (r, protocol_name))) + .expect("Sending response is expected to work"); + } + .boxed(), + ); + + tx +} diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index dd0e0c432345ed7006fa4a18fbb05bc9efca8538..1c2b5f4968ad2e52776aff5c992895d026836eda 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.30" tokio = "1.37" schnellru = "0.2.1" rand = "0.8.5" -fatality = "0.0.6" +fatality = "0.1.1" thiserror = { workspace = true } async-trait = "0.1.79" gum = { package = "tracing-gum", path = "../../gum" } @@ -25,15 +25,16 @@ polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-network-protocol = { path = "../protocol" } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sc-network = { path = "../../../../substrate/client/network" } [dev-dependencies] assert_matches = "1.4.0" -env_logger = "0.11" futures-timer = "3.0.2" +rstest = "0.18.2" log = { workspace = true, default-features = true } +sp-tracing = { path = "../../../../substrate/primitives/tracing" } sp-core = { path = "../../../../substrate/primitives/core" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index d9bdc1a2d944d71231d0679fd5e5ef79a52cc782..c734ac99e870df128c5443e9cb3b83371bc1156f 100644 --- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -23,7 +23,7 @@ use polkadot_subsystem_bench::{ availability::{ - benchmark_availability_read, prepare_test, DataAvailabilityReadOptions, + benchmark_availability_read, prepare_test, DataAvailabilityReadOptions, Strategy, TestDataAvailability, TestState, }, configuration::TestConfiguration, @@ -37,7 +37,7 @@ const BENCH_COUNT: usize = 10; fn main() -> Result<(), String> { let mut messages = vec![]; - let options = DataAvailabilityReadOptions { fetch_from_backers: true }; + let options = DataAvailabilityReadOptions { strategy: Strategy::FullFromBackers }; let mut config = TestConfiguration::default(); config.num_blocks = 3; 
config.generate_pov_sizes(); @@ -51,11 +51,7 @@ fn main() -> Result<(), String> { std::io::stdout().flush().unwrap(); let (mut env, _cfgs) = prepare_test(&state, TestDataAvailability::Read(options.clone()), false); - env.runtime().block_on(benchmark_availability_read( - "data_availability_read", - &mut env, - &state, - )) + env.runtime().block_on(benchmark_availability_read(&mut env, &state)) }) .collect(); println!("\rDone!{}", " ".repeat(BENCH_COUNT)); @@ -74,7 +70,7 @@ fn main() -> Result<(), String> { ("Received from peers", 307203.0000, 0.001), ("Sent to peers", 1.6667, 0.001), ])); - messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 12.8338, 0.1)])); + messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 12.8412, 0.1)])); if messages.is_empty() { Ok(()) diff --git a/polkadot/node/network/availability-recovery/src/error.rs index 47277a521b81ee72261ab8d71ff9f0cf97bf56ad..eaec4cbc9d9426fce260a4fdc2744506bf34c830 100644 --- a/polkadot/node/network/availability-recovery/src/error.rs +++ b/polkadot/node/network/availability-recovery/src/error.rs @@ -16,20 +16,34 @@ //! The `Error` and `Result` types used by the subsystem. +use crate::LOG_TARGET; +use fatality::{fatality, Nested}; use futures::channel::oneshot; -use thiserror::Error; +use polkadot_node_network_protocol::request_response::incoming; +use polkadot_node_subsystem::{RecoveryError, SubsystemError}; +use polkadot_primitives::Hash; /// Error type used by the Availability Recovery subsystem. -#[derive(Debug, Error)] +#[fatality(splitable)] pub enum Error { - #[error(transparent)] - Subsystem(#[from] polkadot_node_subsystem::SubsystemError), + #[fatal] + #[error("Spawning subsystem task failed: {0}")] + SpawnTask(#[source] SubsystemError), + + /// Receiving subsystem message from overseer failed. + #[fatal] + #[error("Receiving message from overseer failed: {0}")] + SubsystemReceive(#[source] SubsystemError), + #[fatal] #[error("failed to query full data from store")] CanceledQueryFullData(#[source] oneshot::Canceled), - #[error("failed to query session info")] - CanceledSessionInfo(#[source] oneshot::Canceled), + #[error("`SessionInfo` is `None` at {0}")] + SessionInfoUnavailable(Hash), + + #[error("failed to query node features from runtime")] + RequestNodeFeatures(#[source] polkadot_node_subsystem_util::runtime::Error), #[error("failed to send response")] CanceledResponseSender, @@ -40,8 +54,38 @@ pub enum Error { #[error(transparent)] Erasure(#[from] polkadot_erasure_coding::Error), + #[fatal] #[error(transparent)] - Util(#[from] polkadot_node_subsystem_util::Error), + Oneshot(#[from] oneshot::Canceled), + + #[fatal(forward)] + #[error("Error during recovery: {0}")] + Recovery(#[from] RecoveryError), + + #[fatal(forward)] + #[error("Retrieving next incoming request failed: {0}")] + IncomingRequest(#[from] incoming::Error), } pub type Result = std::result::Result; + +/// Utility for eating top-level errors and logging them. +/// +/// We almost always want to continue on error, unless the error is fatal for the entire +/// subsystem. +pub fn log_error(result: Result<()>) -> std::result::Result<(), FatalError> { + match result.into_nested()? { + Ok(()) => Ok(()), + Err(jfyi) => { + jfyi.log(); + Ok(()) + }, + } +} + +impl JfyiError { + /// Log a `JfyiError`.
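To make the fatal/JFYI split above concrete, here is a self-contained model (a plain enum and `String` errors standing in for the real `fatality`-generated types) of what `log_error` does for its caller:

enum TaskError {
    Fatal(String),
    Jfyi(String),
}

fn log_error(result: Result<(), TaskError>) -> Result<(), String> {
    match result {
        Ok(()) => Ok(()),
        Err(TaskError::Jfyi(msg)) => {
            eprintln!("jfyi: {msg}"); // log it and keep the subsystem running
            Ok(())
        },
        Err(TaskError::Fatal(msg)) => Err(msg), // bubble up; the subsystem stops
    }
}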
+ pub fn log(self) { + gum::warn!(target: LOG_TARGET, "{}", self); + } +} diff --git a/polkadot/node/network/availability-recovery/src/lib.rs index 94b9d9546cdecd4a244bc3de9e10c1db4c3c066d..167125f987ab8f9af4f1d8b91ebd33d80bd05039 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -19,7 +19,7 @@ #![warn(missing_docs)] use std::{ - collections::{HashMap, VecDeque}, + collections::{BTreeMap, VecDeque}, iter::Iterator, num::NonZeroUsize, pin::Pin, @@ -34,31 +34,41 @@ use futures::{ stream::{FuturesUnordered, StreamExt}, task::{Context, Poll}, }; +use sc_network::ProtocolName; use schnellru::{ByLength, LruMap}; -use task::{FetchChunks, FetchChunksParams, FetchFull, FetchFullParams}; +use task::{ + FetchChunks, FetchChunksParams, FetchFull, FetchFullParams, FetchSystematicChunks, + FetchSystematicChunksParams, +}; -use fatality::Nested; use polkadot_erasure_coding::{ - branch_hash, branches, obtain_chunks_v1, recovery_threshold, Error as ErasureEncodingError, + branches, obtain_chunks_v1, recovery_threshold, systematic_recovery_threshold, + Error as ErasureEncodingError, }; use task::{RecoveryParams, RecoveryStrategy, RecoveryTask}; +use error::{log_error, Error, FatalError, Result}; use polkadot_node_network_protocol::{ - request_response::{v1 as request_v1, IncomingRequestReceiver}, + request_response::{ + v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, IsRequest, ReqProtocolNames, + }, UnifiedReputationChange as Rep, }; -use polkadot_node_primitives::{AvailableData, ErasureChunk}; +use polkadot_node_primitives::AvailableData; use polkadot_node_subsystem::{ errors::RecoveryError, jaeger, messages::{AvailabilityRecoveryMessage, AvailabilityStoreMessage}, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, - SubsystemContext, SubsystemError, SubsystemResult, + SubsystemContext, SubsystemError, +}; +use polkadot_node_subsystem_util::{ + availability_chunks::availability_chunk_indices, + runtime::{ExtendedSessionInfo, RuntimeInfo}, }; -use polkadot_node_subsystem_util::request_session_info; use polkadot_primitives::{ - BlakeTwo256, BlockNumber, CandidateHash, CandidateReceipt, GroupIndex, Hash, HashT, - SessionIndex, SessionInfo, ValidatorIndex, + node_features, BlockNumber, CandidateHash, CandidateReceipt, ChunkIndex, CoreIndex, GroupIndex, + Hash, SessionIndex, ValidatorIndex, }; mod error; @@ -70,6 +80,8 @@ pub use metrics::Metrics; #[cfg(test)] mod tests; +type RecoveryResult = std::result::Result; + const LOG_TARGET: &str = "parachain::availability-recovery"; // Size of the LRU cache where we keep recovered data. @@ -77,19 +89,35 @@ const LRU_SIZE: u32 = 16; const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Peer sent unparsable request"); -/// PoV size limit in bytes for which prefer fetching from backers. -const SMALL_POV_LIMIT: usize = 128 * 1024; +/// PoV size limit in bytes below which we prefer fetching from backers (conservative; Polkadot for now). +pub(crate) const CONSERVATIVE_FETCH_CHUNKS_THRESHOLD: usize = 1 * 1024 * 1024; +/// PoV size limit in bytes below which we prefer fetching from backers (Kusama and all testnets). +pub const FETCH_CHUNKS_THRESHOLD: usize = 4 * 1024 * 1024; #[derive(Clone, PartialEq)] /// The strategy we use to recover the PoV. pub enum RecoveryStrategyKind { - /// We always try the backing group first, then fallback to validator chunks.
- BackersFirstAlways, /// We try the backing group first if PoV size is lower than specified, then fallback to /// validator chunks. BackersFirstIfSizeLower(usize), + /// We try the backing group first if PoV size is lower than specified, then fallback to + /// systematic chunks. Regular chunk recovery as a last resort. + BackersFirstIfSizeLowerThenSystematicChunks(usize), + + /// The following variants are only helpful for integration tests. + /// + /// We always try the backing group first, then fallback to validator chunks. + #[allow(dead_code)] + BackersFirstAlways, /// We always recover using validator chunks. + #[allow(dead_code)] ChunksAlways, + /// First try the backing group. Then systematic chunks. + #[allow(dead_code)] + BackersThenSystematicChunks, + /// Always recover using systematic chunks, fall back to regular chunks. + #[allow(dead_code)] + SystematicChunks, } /// The Availability Recovery Subsystem. @@ -107,11 +135,15 @@ pub struct AvailabilityRecoverySubsystem { metrics: Metrics, /// The type of check to perform after available data was recovered. post_recovery_check: PostRecoveryCheck, + /// Full protocol name for ChunkFetchingV1. + req_v1_protocol_name: ProtocolName, + /// Full protocol name for ChunkFetchingV2. + req_v2_protocol_name: ProtocolName, } #[derive(Clone, PartialEq, Debug)] /// The type of check to perform after available data was recovered. -pub enum PostRecoveryCheck { +enum PostRecoveryCheck { /// Reencode the data and check erasure root. For validators. Reencode, /// Only check the pov hash. For collators only. @@ -119,56 +151,18 @@ pub enum PostRecoveryCheck { } /// Expensive erasure coding computations that we want to run on a blocking thread. -pub enum ErasureTask { +enum ErasureTask { /// Reconstructs `AvailableData` from chunks given `n_validators`. Reconstruct( usize, - HashMap, - oneshot::Sender>, + BTreeMap>, + oneshot::Sender>, ), /// Re-encode `AvailableData` into erasure chunks in order to verify the provided root hash of /// the Merkle tree. Reencode(usize, Hash, AvailableData, oneshot::Sender>), } -const fn is_unavailable( - received_chunks: usize, - requesting_chunks: usize, - unrequested_validators: usize, - threshold: usize, -) -> bool { - received_chunks + requesting_chunks + unrequested_validators < threshold -} - -/// Check validity of a chunk. -fn is_chunk_valid(params: &RecoveryParams, chunk: &ErasureChunk) -> bool { - let anticipated_hash = - match branch_hash(¶ms.erasure_root, chunk.proof(), chunk.index.0 as usize) { - Ok(hash) => hash, - Err(e) => { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - error = ?e, - "Invalid Merkle proof", - ); - return false - }, - }; - let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk); - if anticipated_hash != erasure_chunk_hash { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - "Merkle proof mismatch" - ); - return false - } - true -} - /// Re-encode the data into erasure chunks in order to verify /// the root hash of the provided Merkle tree, which is built /// on-top of the encoded chunks. @@ -212,12 +206,12 @@ fn reconstructed_data_matches_root( /// Accumulate all awaiting sides for some particular `AvailableData`. 
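The recovery handle documented just above fans one recovery result out to every caller that asked for the same candidate while the task ran. A minimal std-only model of that idea (`fan_out` is my name for it):

use std::sync::mpsc::Sender;

fn fan_out<T: Clone>(result: T, awaiting: Vec<Sender<T>>) {
    for tx in awaiting {
        // A receiver that has since been dropped is fine to ignore.
        let _ = tx.send(result.clone());
    }
}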
struct RecoveryHandle { candidate_hash: CandidateHash, - remote: RemoteHandle>, - awaiting: Vec>>, + remote: RemoteHandle, + awaiting: Vec>, } impl Future for RecoveryHandle { - type Output = Option<(CandidateHash, Result)>; + type Output = Option<(CandidateHash, RecoveryResult)>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut indices_to_remove = Vec::new(); @@ -271,7 +265,7 @@ enum CachedRecovery { impl CachedRecovery { /// Convert back to `Result` to deliver responses. - fn into_result(self) -> Result { + fn into_result(self) -> RecoveryResult { match self { Self::Valid(d) => Ok(d), Self::Invalid => Err(RecoveryError::Invalid), @@ -279,9 +273,9 @@ impl CachedRecovery { } } -impl TryFrom> for CachedRecovery { +impl TryFrom for CachedRecovery { type Error = (); - fn try_from(o: Result) -> Result { + fn try_from(o: RecoveryResult) -> std::result::Result { match o { Ok(d) => Ok(Self::Valid(d)), Err(RecoveryError::Invalid) => Ok(Self::Invalid), @@ -303,6 +297,9 @@ struct State { /// An LRU cache of recently recovered data. availability_lru: LruMap, + + /// Cached runtime info. + runtime_info: RuntimeInfo, } impl Default for State { @@ -311,6 +308,7 @@ impl Default for State { ongoing_recoveries: FuturesUnordered::new(), live_block: (0, Hash::default()), availability_lru: LruMap::new(ByLength::new(LRU_SIZE)), + runtime_info: RuntimeInfo::new(None), } } } @@ -327,9 +325,10 @@ impl AvailabilityRecoverySubsystem { } /// Handles a signal from the overseer. -async fn handle_signal(state: &mut State, signal: OverseerSignal) -> SubsystemResult { +/// Returns true if subsystem receives a deadly signal. +async fn handle_signal(state: &mut State, signal: OverseerSignal) -> bool { match signal { - OverseerSignal::Conclude => Ok(true), + OverseerSignal::Conclude => true, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, .. 
}) => { // if activated is non-empty, set state.live_block to the highest block in `activated` if let Some(activated) = activated { @@ -338,9 +337,9 @@ async fn handle_signal(state: &mut State, signal: OverseerSignal) -> SubsystemRe } } - Ok(false) + false }, - OverseerSignal::BlockFinalized(_, _) => Ok(false), + OverseerSignal::BlockFinalized(_, _) => false, } } @@ -349,27 +348,11 @@ async fn handle_signal(state: &mut State, signal: OverseerSignal) -> SubsystemRe async fn launch_recovery_task( state: &mut State, ctx: &mut Context, - session_info: SessionInfo, - receipt: CandidateReceipt, - response_sender: oneshot::Sender>, - metrics: &Metrics, + response_sender: oneshot::Sender, recovery_strategies: VecDeque::Sender>>>, - bypass_availability_store: bool, - post_recovery_check: PostRecoveryCheck, -) -> error::Result<()> { - let candidate_hash = receipt.hash(); - let params = RecoveryParams { - validator_authority_keys: session_info.discovery_keys.clone(), - n_validators: session_info.validators.len(), - threshold: recovery_threshold(session_info.validators.len())?, - candidate_hash, - erasure_root: receipt.descriptor.erasure_root, - metrics: metrics.clone(), - bypass_availability_store, - post_recovery_check, - pov_hash: receipt.descriptor.pov_hash, - }; - + params: RecoveryParams, +) -> Result<()> { + let candidate_hash = params.candidate_hash; let recovery_task = RecoveryTask::new(ctx.sender().clone(), params, recovery_strategies); let (remote, remote_handle) = recovery_task.run().remote_handle(); @@ -380,15 +363,8 @@ async fn launch_recovery_task( awaiting: vec![response_sender], }); - if let Err(e) = ctx.spawn("recovery-task", Box::pin(remote)) { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Failed to spawn a recovery task", - ); - } - - Ok(()) + ctx.spawn("recovery-task", Box::pin(remote)) + .map_err(|err| Error::SpawnTask(err)) } /// Handles an availability recovery request. 
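`handle_recover` (below) assembles an ordered queue of recovery strategies from the `RecoveryStrategyKind`. A simplified model of that ordering, with local unit structs standing in for the real strategy objects (the real code additionally requires a known backing group for the first step and a core index plus the mapping feature for the second):

use std::collections::VecDeque;

trait Strategy {
    fn name(&self) -> &'static str;
}
struct FullFromBackers;
struct SystematicChunks;
struct RegularChunks;
impl Strategy for FullFromBackers {
    fn name(&self) -> &'static str { "full_from_backers" }
}
impl Strategy for SystematicChunks {
    fn name(&self) -> &'static str { "systematic_chunks" }
}
impl Strategy for RegularChunks {
    fn name(&self) -> &'static str { "regular_chunks" }
}

fn ordering(small_pov: bool, can_do_systematic: bool) -> VecDeque<Box<dyn Strategy>> {
    let mut strategies: VecDeque<Box<dyn Strategy>> = VecDeque::with_capacity(3);
    if small_pov {
        strategies.push_back(Box::new(FullFromBackers));
    }
    if can_do_systematic {
        strategies.push_back(Box::new(SystematicChunks));
    }
    // Regular chunk recovery is always the last resort.
    strategies.push_back(Box::new(RegularChunks));
    strategies
}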
@@ -399,13 +375,16 @@ async fn handle_recover( receipt: CandidateReceipt, session_index: SessionIndex, backing_group: Option, - response_sender: oneshot::Sender>, + response_sender: oneshot::Sender, metrics: &Metrics, erasure_task_tx: futures::channel::mpsc::Sender, recovery_strategy_kind: RecoveryStrategyKind, bypass_availability_store: bool, post_recovery_check: PostRecoveryCheck, -) -> error::Result<()> { + maybe_core_index: Option, + req_v1_protocol_name: ProtocolName, + req_v2_protocol_name: ProtocolName, +) -> Result<()> { let candidate_hash = receipt.hash(); let span = jaeger::Span::new(candidate_hash, "availability-recovery") @@ -414,14 +393,7 @@ async fn handle_recover( if let Some(result) = state.availability_lru.get(&candidate_hash).cloned().map(|v| v.into_result()) { - if let Err(e) = response_sender.send(result) { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Error responding with an availability recovery result", - ); - } - return Ok(()) + return response_sender.send(result).map_err(|_| Error::CanceledResponseSender) } if let Some(i) = @@ -432,100 +404,182 @@ async fn handle_recover( } let _span = span.child("not-cached"); - let session_info = request_session_info(state.live_block.1, session_index, ctx.sender()) - .await - .await - .map_err(error::Error::CanceledSessionInfo)??; + let session_info_res = state + .runtime_info + .get_session_info_by_index(ctx.sender(), state.live_block.1, session_index) + .await; let _span = span.child("session-info-ctx-received"); - match session_info { - Some(session_info) => { + match session_info_res { + Ok(ExtendedSessionInfo { session_info, node_features, .. }) => { + let mut backer_group = None; + let n_validators = session_info.validators.len(); + let systematic_threshold = systematic_recovery_threshold(n_validators)?; let mut recovery_strategies: VecDeque< Box::Sender>>, - > = VecDeque::with_capacity(2); + > = VecDeque::with_capacity(3); if let Some(backing_group) = backing_group { if let Some(backing_validators) = session_info.validator_groups.get(backing_group) { let mut small_pov_size = true; - if let RecoveryStrategyKind::BackersFirstIfSizeLower(small_pov_limit) = - recovery_strategy_kind - { - // Get our own chunk size to get an estimate of the PoV size. - let chunk_size: Result, error::Error> = - query_chunk_size(ctx, candidate_hash).await; - if let Ok(Some(chunk_size)) = chunk_size { - let pov_size_estimate = - chunk_size.saturating_mul(session_info.validators.len()) / 3; - small_pov_size = pov_size_estimate < small_pov_limit; - - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - pov_size_estimate, - small_pov_limit, - enabled = small_pov_size, - "Prefer fetch from backing group", - ); - } else { - // we have a POV limit but were not able to query the chunk size, so - // don't use the backing group. - small_pov_size = false; - } + match recovery_strategy_kind { + RecoveryStrategyKind::BackersFirstIfSizeLower(fetch_chunks_threshold) | + RecoveryStrategyKind::BackersFirstIfSizeLowerThenSystematicChunks( + fetch_chunks_threshold, + ) => { + // Get our own chunk size to get an estimate of the PoV size. 
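In standalone form (the helper name is mine; the logic mirrors the check implemented in the lines that follow): estimate the PoV size from our own chunk size and prefer the backers only when the estimate stays below the threshold:

fn prefer_fetch_from_backers(
    chunk_size: usize,
    systematic_threshold: usize,
    fetch_chunks_threshold: usize,
) -> bool {
    // A PoV is roughly `systematic_threshold` data chunks of `chunk_size` bytes.
    let pov_size_estimate = chunk_size * systematic_threshold;
    pov_size_estimate < fetch_chunks_threshold
}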
+ let chunk_size: Result> = + query_chunk_size(ctx, candidate_hash).await; + if let Ok(Some(chunk_size)) = chunk_size { + let pov_size_estimate = chunk_size * systematic_threshold; + small_pov_size = pov_size_estimate < fetch_chunks_threshold; + + if small_pov_size { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + pov_size_estimate, + fetch_chunks_threshold, + "Prefer fetch from backing group", + ); + } + } else { + // we have a POV limit but were not able to query the chunk size, so + // don't use the backing group. + small_pov_size = false; + } + }, + _ => {}, }; match (&recovery_strategy_kind, small_pov_size) { (RecoveryStrategyKind::BackersFirstAlways, _) | - (RecoveryStrategyKind::BackersFirstIfSizeLower(_), true) => recovery_strategies.push_back( - Box::new(FetchFull::new(FetchFullParams { - validators: backing_validators.to_vec(), - erasure_task_tx: erasure_task_tx.clone(), - })), - ), + (RecoveryStrategyKind::BackersFirstIfSizeLower(_), true) | + ( + RecoveryStrategyKind::BackersFirstIfSizeLowerThenSystematicChunks(_), + true, + ) | + (RecoveryStrategyKind::BackersThenSystematicChunks, _) => + recovery_strategies.push_back(Box::new(FetchFull::new( + FetchFullParams { validators: backing_validators.to_vec() }, + ))), _ => {}, }; + + backer_group = Some(backing_validators); + } + } + + let chunk_mapping_enabled = if let Some(&true) = node_features + .get(usize::from(node_features::FeatureIndex::AvailabilityChunkMapping as u8)) + .as_deref() + { + true + } else { + false + }; + + // We can only attempt systematic recovery if we received the core index of the + // candidate and chunk mapping is enabled. + if let Some(core_index) = maybe_core_index { + if matches!( + recovery_strategy_kind, + RecoveryStrategyKind::BackersThenSystematicChunks | + RecoveryStrategyKind::SystematicChunks | + RecoveryStrategyKind::BackersFirstIfSizeLowerThenSystematicChunks(_) + ) && chunk_mapping_enabled + { + let chunk_indices = + availability_chunk_indices(Some(node_features), n_validators, core_index)?; + + let chunk_indices: VecDeque<_> = chunk_indices + .iter() + .enumerate() + .map(|(v_index, c_index)| { + ( + *c_index, + ValidatorIndex( + u32::try_from(v_index) + .expect("validator count should not exceed u32"), + ), + ) + }) + .collect(); + + // Only get the validators according to the threshold. 
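That is, in standalone form (`systematic_holders` is my name; plain `u32`s stand in for `ChunkIndex` and `ValidatorIndex`): invert the validator-to-chunk assignment returned by `availability_chunk_indices` and keep only the holders of the first `systematic_threshold` chunk indices:

fn systematic_holders(
    validator_to_chunk: &[u32], // validator_to_chunk[v] = chunk index held by validator v
    systematic_threshold: usize,
) -> Vec<(u32, u32)> {
    validator_to_chunk
        .iter()
        .enumerate()
        .map(|(v, &c)| (c, v as u32)) // (chunk index, validator index)
        .filter(|&(c, _)| (c as usize) < systematic_threshold)
        .collect()
}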
+ let validators = chunk_indices + .clone() + .into_iter() + .filter(|(c_index, _)| { + usize::try_from(c_index.0) + .expect("usize is at least u32 bytes on all modern targets.") < + systematic_threshold + }) + .collect(); + + recovery_strategies.push_back(Box::new(FetchSystematicChunks::new( + FetchSystematicChunksParams { + validators, + backers: backer_group.map(|v| v.to_vec()).unwrap_or_else(|| vec![]), + }, + ))); } } recovery_strategies.push_back(Box::new(FetchChunks::new(FetchChunksParams { n_validators: session_info.validators.len(), - erasure_task_tx, }))); + let session_info = session_info.clone(); + + let n_validators = session_info.validators.len(); + launch_recovery_task( state, ctx, - session_info, - receipt, response_sender, - metrics, recovery_strategies, - bypass_availability_store, - post_recovery_check, + RecoveryParams { + validator_authority_keys: session_info.discovery_keys.clone(), + n_validators, + threshold: recovery_threshold(n_validators)?, + systematic_threshold, + candidate_hash, + erasure_root: receipt.descriptor.erasure_root, + metrics: metrics.clone(), + bypass_availability_store, + post_recovery_check, + pov_hash: receipt.descriptor.pov_hash, + req_v1_protocol_name, + req_v2_protocol_name, + chunk_mapping_enabled, + erasure_task_tx, + }, ) .await }, - None => { - gum::warn!(target: LOG_TARGET, "SessionInfo is `None` at {:?}", state.live_block); + Err(_) => { response_sender .send(Err(RecoveryError::Unavailable)) - .map_err(|_| error::Error::CanceledResponseSender)?; - Ok(()) + .map_err(|_| Error::CanceledResponseSender)?; + + Err(Error::SessionInfoUnavailable(state.live_block.1)) }, } } -/// Queries a chunk from av-store. +/// Queries the full `AvailableData` from av-store. #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] async fn query_full_data( ctx: &mut Context, candidate_hash: CandidateHash, -) -> error::Result> { +) -> Result> { let (tx, rx) = oneshot::channel(); ctx.send_message(AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx)) .await; - rx.await.map_err(error::Error::CanceledQueryFullData) + rx.await.map_err(Error::CanceledQueryFullData) } /// Queries a chunk from av-store. @@ -533,12 +587,12 @@ async fn query_full_data( async fn query_chunk_size( ctx: &mut Context, candidate_hash: CandidateHash, -) -> error::Result> { +) -> Result> { let (tx, rx) = oneshot::channel(); ctx.send_message(AvailabilityStoreMessage::QueryChunkSize(candidate_hash, tx)) .await; - rx.await.map_err(error::Error::CanceledQueryFullData) + rx.await.map_err(Error::CanceledQueryFullData) } #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] @@ -547,64 +601,80 @@ impl AvailabilityRecoverySubsystem { /// which never requests the `AvailabilityStoreSubsystem` subsystem and only checks the POV hash /// instead of reencoding the available data. 
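The `chunk_mapping_enabled` flag threaded into `RecoveryParams` above boils down to reading one bit of `NodeFeatures`. Modelled here with a plain bool slice (the real type is a bitvec; a missing bit reads as disabled):

fn feature_enabled(features: &[bool], index: usize) -> bool {
    // Features introduced after a runtime upgrade may simply be absent; treat
    // absent the same as explicitly disabled.
    features.get(index).copied().unwrap_or(false)
}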
pub fn for_collator( + fetch_chunks_threshold: Option, req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, metrics: Metrics, ) -> Self { Self { - recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), + recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower( + fetch_chunks_threshold.unwrap_or(CONSERVATIVE_FETCH_CHUNKS_THRESHOLD), + ), bypass_availability_store: true, post_recovery_check: PostRecoveryCheck::PovHash, req_receiver, metrics, + req_v1_protocol_name: req_protocol_names + .get_name(request_v1::ChunkFetchingRequest::PROTOCOL), + req_v2_protocol_name: req_protocol_names + .get_name(request_v2::ChunkFetchingRequest::PROTOCOL), } } - /// Create a new instance of `AvailabilityRecoverySubsystem` which starts with a fast path to - /// request data from backers. - pub fn with_fast_path( + /// Create an optimised instance of `AvailabilityRecoverySubsystem` suitable for validator + /// nodes, which: + /// - for small PoVs (below `fetch_chunks_threshold`, defaulting to + /// `CONSERVATIVE_FETCH_CHUNKS_THRESHOLD`), it attempts full recovery from backers, if a backing + /// group was supplied. + /// - for large PoVs, it attempts systematic recovery, if a `core_index` was supplied and the + /// AvailabilityChunkMapping node feature is enabled. + /// - as a last resort, it attempts regular chunk recovery from all validators. + pub fn for_validator( + fetch_chunks_threshold: Option, req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, metrics: Metrics, ) -> Self { Self { - recovery_strategy_kind: RecoveryStrategyKind::BackersFirstAlways, + recovery_strategy_kind: + RecoveryStrategyKind::BackersFirstIfSizeLowerThenSystematicChunks( + fetch_chunks_threshold.unwrap_or(CONSERVATIVE_FETCH_CHUNKS_THRESHOLD), + ), bypass_availability_store: false, post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, + req_v1_protocol_name: req_protocol_names + .get_name(request_v1::ChunkFetchingRequest::PROTOCOL), + req_v2_protocol_name: req_protocol_names + .get_name(request_v2::ChunkFetchingRequest::PROTOCOL), } } - /// Create a new instance of `AvailabilityRecoverySubsystem` which requests only chunks - pub fn with_chunks_only( + /// Customise the recovery strategy kind. + /// Currently only useful for tests. + #[cfg(any(test, feature = "subsystem-benchmarks"))] + pub fn with_recovery_strategy_kind( req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, metrics: Metrics, + recovery_strategy_kind: RecoveryStrategyKind, ) -> Self { Self { - recovery_strategy_kind: RecoveryStrategyKind::ChunksAlways, - bypass_availability_store: false, - post_recovery_check: PostRecoveryCheck::Reencode, - req_receiver, - metrics, - } - } - - /// Create a new instance of `AvailabilityRecoverySubsystem` which requests chunks if PoV is - /// above a threshold. - pub fn with_chunks_if_pov_large( - req_receiver: IncomingRequestReceiver, - metrics: Metrics, - ) -> Self { - Self { - recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), + recovery_strategy_kind, bypass_availability_store: false, post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, + req_v1_protocol_name: req_protocol_names + .get_name(request_v1::ChunkFetchingRequest::PROTOCOL), + req_v2_protocol_name: req_protocol_names + .get_name(request_v2::ChunkFetchingRequest::PROTOCOL), } } /// Starts the inner subsystem loop.
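Both constructors above resolve the fetch-from-backers threshold the same way; restated standalone (the constant value is the one introduced earlier in this file):

const CONSERVATIVE_FETCH_CHUNKS_THRESHOLD: usize = 1024 * 1024; // 1 MiB

fn effective_threshold(fetch_chunks_threshold: Option<usize>) -> usize {
    // A caller-supplied threshold wins; otherwise fall back to the conservative default.
    fetch_chunks_threshold.unwrap_or(CONSERVATIVE_FETCH_CHUNKS_THRESHOLD)
}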
- pub async fn run(self, mut ctx: Context) -> SubsystemResult<()> { + pub async fn run(self, mut ctx: Context) -> std::result::Result<(), FatalError> { let mut state = State::default(); let Self { mut req_receiver, @@ -612,6 +682,8 @@ impl AvailabilityRecoverySubsystem { recovery_strategy_kind, bypass_availability_store, post_recovery_check, + req_v1_protocol_name, + req_v2_protocol_name, } = self; let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16); @@ -647,53 +719,44 @@ impl AvailabilityRecoverySubsystem { loop { let recv_req = req_receiver.recv(|| vec![COST_INVALID_REQUEST]).fuse(); pin_mut!(recv_req); - futures::select! { + let res = futures::select! { erasure_task = erasure_task_rx.next() => { match erasure_task { Some(task) => { - let send_result = to_pool + to_pool .next() .expect("Pool size is `NonZeroUsize`; qed") .send(task) .await - .map_err(|_| RecoveryError::ChannelClosed); - - if let Err(err) = send_result { - gum::warn!( - target: LOG_TARGET, - ?err, - "Failed to send erasure coding task", - ); - } + .map_err(|_| RecoveryError::ChannelClosed) }, None => { - gum::debug!( - target: LOG_TARGET, - "Erasure task channel closed", - ); - - return Err(SubsystemError::with_origin("availability-recovery", RecoveryError::ChannelClosed)) + Err(RecoveryError::ChannelClosed) } - } + }.map_err(Into::into) } - v = ctx.recv().fuse() => { - match v? { - FromOrchestra::Signal(signal) => if handle_signal( - &mut state, - signal, - ).await? { - gum::debug!(target: LOG_TARGET, "subsystem concluded"); - return Ok(()); - } - FromOrchestra::Communication { msg } => { - match msg { - AvailabilityRecoveryMessage::RecoverAvailableData( - receipt, - session_index, - maybe_backing_group, - response_sender, - ) => { - if let Err(e) = handle_recover( + signal = ctx.recv().fuse() => { + match signal { + Ok(signal) => { + match signal { + FromOrchestra::Signal(signal) => if handle_signal( + &mut state, + signal, + ).await { + gum::debug!(target: LOG_TARGET, "subsystem concluded"); + return Ok(()); + } else { + Ok(()) + }, + FromOrchestra::Communication { + msg: AvailabilityRecoveryMessage::RecoverAvailableData( + receipt, + session_index, + maybe_backing_group, + maybe_core_index, + response_sender, + ) + } => handle_recover( &mut state, &mut ctx, receipt, @@ -704,21 +767,18 @@ impl AvailabilityRecoverySubsystem { erasure_task_tx.clone(), recovery_strategy_kind.clone(), bypass_availability_store, - post_recovery_check.clone() - ).await { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Error handling a recovery request", - ); - } - } + post_recovery_check.clone(), + maybe_core_index, + req_v1_protocol_name.clone(), + req_v2_protocol_name.clone(), + ).await } - } + }, + Err(e) => Err(Error::SubsystemReceive(e)) } } in_req = recv_req => { - match in_req.into_nested().map_err(|fatal| SubsystemError::with_origin("availability-recovery", fatal))? 
{ + match in_req { Ok(req) => { if bypass_availability_store { gum::debug!( @@ -726,40 +786,42 @@ impl AvailabilityRecoverySubsystem { "Skipping request to availability-store.", ); let _ = req.send_response(None.into()); - continue - } - match query_full_data(&mut ctx, req.payload.candidate_hash).await { - Ok(res) => { - let _ = req.send_response(res.into()); - } - Err(e) => { - gum::debug!( - target: LOG_TARGET, - err = ?e, - "Failed to query available data.", - ); - - let _ = req.send_response(None.into()); + Ok(()) + } else { + match query_full_data(&mut ctx, req.payload.candidate_hash).await { + Ok(res) => { + let _ = req.send_response(res.into()); + Ok(()) + } + Err(e) => { + let _ = req.send_response(None.into()); + Err(e) + } } } } - Err(jfyi) => { - gum::debug!( - target: LOG_TARGET, - error = ?jfyi, - "Decoding incoming request failed" - ); - continue - } + Err(e) => Err(Error::IncomingRequest(e)) } } output = state.ongoing_recoveries.select_next_some() => { + let mut res = Ok(()); if let Some((candidate_hash, result)) = output { + if let Err(ref e) = result { + res = Err(Error::Recovery(e.clone())); + } + if let Ok(recovery) = CachedRecovery::try_from(result) { state.availability_lru.insert(candidate_hash, recovery); } } + + res } + }; + + // Only bubble up fatal errors, but log all of them. + if let Err(e) = res { + log_error(Err(e))?; } } } @@ -827,7 +889,13 @@ async fn erasure_task_thread( Some(ErasureTask::Reconstruct(n_validators, chunks, sender)) => { let _ = sender.send(polkadot_erasure_coding::reconstruct_v1( n_validators, - chunks.values().map(|c| (&c.chunk[..], c.index.0 as usize)), + chunks.iter().map(|(c_index, chunk)| { + ( + &chunk[..], + usize::try_from(c_index.0) + .expect("usize is at least u32 bytes on all modern targets."), + ) + }), )); }, Some(ErasureTask::Reencode(n_validators, root, available_data, sender)) => { diff --git a/polkadot/node/network/availability-recovery/src/metrics.rs b/polkadot/node/network/availability-recovery/src/metrics.rs index 9f4cddc57e43a93089fbb08ec4a18dfd77ffea8d..4e269df55027b69fbff7604ac85d980f4fd36c6f 100644 --- a/polkadot/node/network/availability-recovery/src/metrics.rs +++ b/polkadot/node/network/availability-recovery/src/metrics.rs @@ -14,9 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use polkadot_node_subsystem::prometheus::HistogramVec; use polkadot_node_subsystem_util::metrics::{ self, - prometheus::{self, Counter, CounterVec, Histogram, Opts, PrometheusError, Registry, U64}, + prometheus::{ + self, prometheus::HistogramTimer, Counter, CounterVec, Histogram, Opts, PrometheusError, + Registry, U64, + }, }; /// Availability Distribution metrics. @@ -28,26 +32,61 @@ struct MetricsInner { /// Number of sent chunk requests. /// /// Gets incremented on each sent chunk requests. - chunk_requests_issued: Counter, + /// + /// Split by chunk type: + /// - `regular_chunks` + /// - `systematic_chunks` + chunk_requests_issued: CounterVec, + /// Total number of bytes recovered /// /// Gets incremented on each successful recovery recovered_bytes_total: Counter, + /// A counter for finished chunk requests. /// - /// Split by result: + /// Split by the chunk type (`regular_chunks` or `systematic_chunks`) + /// + /// Also split by result: /// - `no_such_chunk` ... peer did not have the requested chunk /// - `timeout` ... request timed out. - /// - `network_error` ... Some networking issue except timeout + /// - `error` ... 
Some networking issue except timeout /// - `invalid` ... Chunk was received, but not valid. /// - `success` chunk_requests_finished: CounterVec, + /// A counter for successful chunk requests, split by the network protocol version. + chunk_request_protocols: CounterVec, + + /// Number of sent available data requests. + full_data_requests_issued: Counter, + + /// Counter for finished available data requests. + /// + /// Split by the result type: + /// + /// - `no_such_data` ... peer did not have the requested data + /// - `timeout` ... request timed out. + /// - `error` ... Some networking issue except timeout + /// - `invalid` ... data was received, but not valid. + /// - `success` + full_data_requests_finished: CounterVec, + /// The duration of request to response. - time_chunk_request: Histogram, + /// + /// Split by chunk type (`regular_chunks` or `systematic_chunks`). + time_chunk_request: HistogramVec, /// The duration between the pure recovery and verification. - time_erasure_recovery: Histogram, + /// + /// Split by recovery type (`regular_chunks`, `systematic_chunks` or `full_from_backers`). + time_erasure_recovery: HistogramVec, + + /// How much time it takes to reconstruct the available data from chunks. + /// + /// Split by chunk type (`regular_chunks` or `systematic_chunks`), as the algorithms are + /// different. + time_erasure_reconstruct: HistogramVec, /// How much time it takes to re-encode the data into erasure chunks in order to verify /// the root hash of the provided Merkle tree. See `reconstructed_data_matches_root`. @@ -58,6 +97,10 @@ struct MetricsInner { time_full_recovery: Histogram, /// Number of full recoveries that have been finished one way or the other. + /// + /// Split by recovery `strategy_type` (`full_from_backers, systematic_chunks, regular_chunks, + /// all`). `all` is used for failed recoveries that tried all available strategies. + /// Also split by `result` type. full_recoveries_finished: CounterVec, /// Number of full recoveries that have been started on this subsystem. @@ -73,87 +116,175 @@ impl Metrics { Metrics(None) } - /// Increment counter on fetched labels. - pub fn on_chunk_request_issued(&self) { + /// Increment counter for chunk requests. + pub fn on_chunk_request_issued(&self, chunk_type: &str) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_issued.inc() + metrics.chunk_requests_issued.with_label_values(&[chunk_type]).inc() + } + } + + /// Increment counter for full data requests. + pub fn on_full_request_issued(&self) { + if let Some(metrics) = &self.0 { + metrics.full_data_requests_issued.inc() } } /// A chunk request timed out. - pub fn on_chunk_request_timeout(&self) { + pub fn on_chunk_request_timeout(&self, chunk_type: &str) { + if let Some(metrics) = &self.0 { + metrics + .chunk_requests_finished + .with_label_values(&[chunk_type, "timeout"]) + .inc() + } + } + + /// A full data request timed out. + pub fn on_full_request_timeout(&self) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["timeout"]).inc() + metrics.full_data_requests_finished.with_label_values(&["timeout"]).inc() } } /// A chunk request failed because validator did not have its chunk. - pub fn on_chunk_request_no_such_chunk(&self) { + pub fn on_chunk_request_no_such_chunk(&self, chunk_type: &str) { + if let Some(metrics) = &self.0 { + metrics + .chunk_requests_finished + .with_label_values(&[chunk_type, "no_such_chunk"]) + .inc() + } + } + + /// A full data request failed because the validator did not have it. 
+ pub fn on_full_request_no_such_data(&self) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["no_such_chunk"]).inc() + metrics.full_data_requests_finished.with_label_values(&["no_such_data"]).inc() } } /// A chunk request failed for some non timeout related network error. - pub fn on_chunk_request_error(&self) { + pub fn on_chunk_request_error(&self, chunk_type: &str) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["error"]).inc() + metrics.chunk_requests_finished.with_label_values(&[chunk_type, "error"]).inc() + } + } + + /// A full data request failed for some non timeout related network error. + pub fn on_full_request_error(&self) { + if let Some(metrics) = &self.0 { + metrics.full_data_requests_finished.with_label_values(&["error"]).inc() } } /// A chunk request succeeded, but was not valid. - pub fn on_chunk_request_invalid(&self) { + pub fn on_chunk_request_invalid(&self, chunk_type: &str) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["invalid"]).inc() + metrics + .chunk_requests_finished + .with_label_values(&[chunk_type, "invalid"]) + .inc() + } + } + + /// A full data request succeeded, but was not valid. + pub fn on_full_request_invalid(&self) { + if let Some(metrics) = &self.0 { + metrics.full_data_requests_finished.with_label_values(&["invalid"]).inc() } } /// A chunk request succeeded. - pub fn on_chunk_request_succeeded(&self) { + pub fn on_chunk_request_succeeded(&self, chunk_type: &str) { if let Some(metrics) = &self.0 { - metrics.chunk_requests_finished.with_label_values(&["success"]).inc() + metrics + .chunk_requests_finished + .with_label_values(&[chunk_type, "success"]) + .inc() + } + } + + /// A chunk response was received on the v1 protocol. + pub fn on_chunk_response_v1(&self) { + if let Some(metrics) = &self.0 { + metrics.chunk_request_protocols.with_label_values(&["v1"]).inc() + } + } + + /// A chunk response was received on the v2 protocol. + pub fn on_chunk_response_v2(&self) { + if let Some(metrics) = &self.0 { + metrics.chunk_request_protocols.with_label_values(&["v2"]).inc() + } + } + + /// A full data request succeeded. + pub fn on_full_request_succeeded(&self) { + if let Some(metrics) = &self.0 { + metrics.full_data_requests_finished.with_label_values(&["success"]).inc() } } /// Get a timer to time request/response duration. - pub fn time_chunk_request(&self) -> Option { - self.0.as_ref().map(|metrics| metrics.time_chunk_request.start_timer()) + pub fn time_chunk_request(&self, chunk_type: &str) -> Option { + self.0.as_ref().map(|metrics| { + metrics.time_chunk_request.with_label_values(&[chunk_type]).start_timer() + }) } /// Get a timer to time erasure code recover. - pub fn time_erasure_recovery(&self) -> Option { - self.0.as_ref().map(|metrics| metrics.time_erasure_recovery.start_timer()) + pub fn time_erasure_recovery(&self, chunk_type: &str) -> Option { + self.0.as_ref().map(|metrics| { + metrics.time_erasure_recovery.with_label_values(&[chunk_type]).start_timer() + }) + } + + /// Get a timer for available data reconstruction. + pub fn time_erasure_reconstruct(&self, chunk_type: &str) -> Option { + self.0.as_ref().map(|metrics| { + metrics.time_erasure_reconstruct.with_label_values(&[chunk_type]).start_timer() + }) } /// Get a timer to time chunk encoding. 
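A self-contained example of the labelled-counter scheme these helpers wrap, using the same `prometheus` API as the registrations further below (the metric name here is illustrative only):

use prometheus::{CounterVec, Opts, Registry};

fn register_example(registry: &Registry) -> prometheus::Result<CounterVec> {
    // One counter, split by result and chunk type, mirroring `chunk_requests_finished`.
    let finished = CounterVec::new(
        Opts::new("example_chunk_requests_finished", "Finished chunk requests."),
        &["result", "type"],
    )?;
    registry.register(Box::new(finished.clone()))?;
    finished.with_label_values(&["success", "systematic_chunks"]).inc();
    Ok(finished)
}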
- pub fn time_reencode_chunks(&self) -> Option { + pub fn time_reencode_chunks(&self) -> Option { self.0.as_ref().map(|metrics| metrics.time_reencode_chunks.start_timer()) } /// Get a timer to measure the time of the complete recovery process. - pub fn time_full_recovery(&self) -> Option { + pub fn time_full_recovery(&self) -> Option { self.0.as_ref().map(|metrics| metrics.time_full_recovery.start_timer()) } /// A full recovery succeeded. - pub fn on_recovery_succeeded(&self, bytes: usize) { + pub fn on_recovery_succeeded(&self, strategy_type: &str, bytes: usize) { if let Some(metrics) = &self.0 { - metrics.full_recoveries_finished.with_label_values(&["success"]).inc(); + metrics + .full_recoveries_finished + .with_label_values(&["success", strategy_type]) + .inc(); metrics.recovered_bytes_total.inc_by(bytes as u64) } } /// A full recovery failed (data not available). - pub fn on_recovery_failed(&self) { + pub fn on_recovery_failed(&self, strategy_type: &str) { if let Some(metrics) = &self.0 { - metrics.full_recoveries_finished.with_label_values(&["failure"]).inc() + metrics + .full_recoveries_finished + .with_label_values(&["failure", strategy_type]) + .inc() } } /// A full recovery failed (data was recovered, but invalid). - pub fn on_recovery_invalid(&self) { + pub fn on_recovery_invalid(&self, strategy_type: &str) { if let Some(metrics) = &self.0 { - metrics.full_recoveries_finished.with_label_values(&["invalid"]).inc() + metrics + .full_recoveries_finished + .with_label_values(&["invalid", strategy_type]) + .inc() } } @@ -169,9 +300,17 @@ impl metrics::Metrics for Metrics { fn try_register(registry: &Registry) -> Result { let metrics = MetricsInner { chunk_requests_issued: prometheus::register( + CounterVec::new( + Opts::new("polkadot_parachain_availability_recovery_chunk_requests_issued", + "Total number of issued chunk requests."), + &["type"] + )?, + registry, + )?, + full_data_requests_issued: prometheus::register( Counter::new( - "polkadot_parachain_availability_recovery_chunk_requests_issued", - "Total number of issued chunk requests.", + "polkadot_parachain_availability_recovery_full_data_requests_issued", + "Total number of issued full data requests.", )?, registry, )?, @@ -188,22 +327,49 @@ impl metrics::Metrics for Metrics { "polkadot_parachain_availability_recovery_chunk_requests_finished", "Total number of chunk requests finished.", ), + &["result", "type"], + )?, + registry, + )?, + chunk_request_protocols: prometheus::register( + CounterVec::new( + Opts::new( + "polkadot_parachain_availability_recovery_chunk_request_protocols", + "Total number of successful chunk requests, mapped by the protocol version (v1 or v2).", + ), + &["protocol"], + )?, + registry, + )?, + full_data_requests_finished: prometheus::register( + CounterVec::new( + Opts::new( + "polkadot_parachain_availability_recovery_full_data_requests_finished", + "Total number of full data requests finished.", + ), &["result"], )?, registry, )?, time_chunk_request: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + prometheus::HistogramVec::new(prometheus::HistogramOpts::new( "polkadot_parachain_availability_recovery_time_chunk_request", "Time spent waiting for a response to a chunk request", - ))?, + ), &["type"])?, registry, )?, time_erasure_recovery: prometheus::register( - prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + prometheus::HistogramVec::new(prometheus::HistogramOpts::new( "polkadot_parachain_availability_recovery_time_erasure_recovery", "Time 
spent to recover the erasure code and verify the merkle root by re-encoding as erasure chunks", - ))?, + ), &["type"])?, + registry, + )?, + time_erasure_reconstruct: prometheus::register( + prometheus::HistogramVec::new(prometheus::HistogramOpts::new( + "polkadot_parachain_availability_recovery_time_erasure_reconstruct", + "Time spent to reconstruct the data from chunks", + ), &["type"])?, registry, )?, time_reencode_chunks: prometheus::register( @@ -226,7 +392,7 @@ impl metrics::Metrics for Metrics { "polkadot_parachain_availability_recovery_recoveries_finished", "Total number of recoveries that finished.", ), - &["result"], + &["result", "strategy_type"], )?, registry, )?, diff --git a/polkadot/node/network/availability-recovery/src/task.rs b/polkadot/node/network/availability-recovery/src/task.rs deleted file mode 100644 index c300c221da5c6da8f40e8a6db3dede59ba207a58..0000000000000000000000000000000000000000 --- a/polkadot/node/network/availability-recovery/src/task.rs +++ /dev/null @@ -1,861 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Recovery task and associated strategies. - -#![warn(missing_docs)] - -use crate::{ - futures_undead::FuturesUndead, is_chunk_valid, is_unavailable, metrics::Metrics, ErasureTask, - PostRecoveryCheck, LOG_TARGET, -}; -use futures::{channel::oneshot, SinkExt}; -use parity_scale_codec::Encode; -#[cfg(not(test))] -use polkadot_node_network_protocol::request_response::CHUNK_REQUEST_TIMEOUT; -use polkadot_node_network_protocol::request_response::{ - self as req_res, outgoing::RequestError, OutgoingRequest, Recipient, Requests, -}; -use polkadot_node_primitives::{AvailableData, ErasureChunk}; -use polkadot_node_subsystem::{ - messages::{AvailabilityStoreMessage, NetworkBridgeTxMessage}, - overseer, RecoveryError, -}; -use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash, ValidatorIndex}; -use rand::seq::SliceRandom; -use sc_network::{IfDisconnected, OutboundFailure, RequestFailure}; -use std::{ - collections::{HashMap, VecDeque}, - time::Duration, -}; - -// How many parallel recovery tasks should be running at once. -const N_PARALLEL: usize = 50; - -/// Time after which we consider a request to have failed -/// -/// and we should try more peers. Note in theory the request times out at the network level, -/// measurements have shown, that in practice requests might actually take longer to fail in -/// certain occasions. (The very least, authority discovery is not part of the timeout.) -/// -/// For the time being this value is the same as the timeout on the networking layer, but as this -/// timeout is more soft than the networking one, it might make sense to pick different values as -/// well. 
-#[cfg(not(test))] -const TIMEOUT_START_NEW_REQUESTS: Duration = CHUNK_REQUEST_TIMEOUT; -#[cfg(test)] -const TIMEOUT_START_NEW_REQUESTS: Duration = Duration::from_millis(100); - -#[async_trait::async_trait] -/// Common trait for runnable recovery strategies. -pub trait RecoveryStrategy: Send { - /// Main entry point of the strategy. - async fn run( - &mut self, - state: &mut State, - sender: &mut Sender, - common_params: &RecoveryParams, - ) -> Result; - - /// Return the name of the strategy for logging purposes. - fn display_name(&self) -> &'static str; -} - -/// Recovery parameters common to all strategies in a `RecoveryTask`. -pub struct RecoveryParams { - /// Discovery ids of `validators`. - pub validator_authority_keys: Vec, - - /// Number of validators. - pub n_validators: usize, - - /// The number of chunks needed. - pub threshold: usize, - - /// A hash of the relevant candidate. - pub candidate_hash: CandidateHash, - - /// The root of the erasure encoding of the candidate. - pub erasure_root: Hash, - - /// Metrics to report. - pub metrics: Metrics, - - /// Do not request data from availability-store. Useful for collators. - pub bypass_availability_store: bool, - - /// The type of check to perform after available data was recovered. - pub post_recovery_check: PostRecoveryCheck, - - /// The blake2-256 hash of the PoV. - pub pov_hash: Hash, -} - -/// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the -/// same `RecoveryTask`. -pub struct State { - /// Chunks received so far. - received_chunks: HashMap, -} - -impl State { - fn new() -> Self { - Self { received_chunks: HashMap::new() } - } - - fn insert_chunk(&mut self, validator: ValidatorIndex, chunk: ErasureChunk) { - self.received_chunks.insert(validator, chunk); - } - - fn chunk_count(&self) -> usize { - self.received_chunks.len() - } - - /// Retrieve the local chunks held in the av-store (either 0 or 1). - async fn populate_from_av_store( - &mut self, - params: &RecoveryParams, - sender: &mut Sender, - ) -> Vec { - let (tx, rx) = oneshot::channel(); - sender - .send_message(AvailabilityStoreMessage::QueryAllChunks(params.candidate_hash, tx)) - .await; - - match rx.await { - Ok(chunks) => { - // This should either be length 1 or 0. If we had the whole data, - // we wouldn't have reached this stage. - let chunk_indices: Vec<_> = chunks.iter().map(|c| c.index).collect(); - - for chunk in chunks { - if is_chunk_valid(params, &chunk) { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - "Found valid chunk on disk" - ); - self.insert_chunk(chunk.index, chunk); - } else { - gum::error!( - target: LOG_TARGET, - "Loaded invalid chunk from disk! Disk/Db corruption _very_ likely - please fix ASAP!" - ); - }; - } - - chunk_indices - }, - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - "Failed to reach the availability store" - ); - - vec![] - }, - } - } - - /// Launch chunk requests in parallel, according to the parameters. 
- async fn launch_parallel_chunk_requests( - &mut self, - params: &RecoveryParams, - sender: &mut Sender, - desired_requests_count: usize, - validators: &mut VecDeque, - requesting_chunks: &mut FuturesUndead< - Result, (ValidatorIndex, RequestError)>, - >, - ) where - Sender: overseer::AvailabilityRecoverySenderTrait, - { - let candidate_hash = ¶ms.candidate_hash; - let already_requesting_count = requesting_chunks.len(); - - let mut requests = Vec::with_capacity(desired_requests_count - already_requesting_count); - - while requesting_chunks.len() < desired_requests_count { - if let Some(validator_index) = validators.pop_back() { - let validator = params.validator_authority_keys[validator_index.0 as usize].clone(); - gum::trace!( - target: LOG_TARGET, - ?validator, - ?validator_index, - ?candidate_hash, - "Requesting chunk", - ); - - // Request data. - let raw_request = req_res::v1::ChunkFetchingRequest { - candidate_hash: params.candidate_hash, - index: validator_index, - }; - - let (req, res) = OutgoingRequest::new(Recipient::Authority(validator), raw_request); - requests.push(Requests::ChunkFetchingV1(req)); - - params.metrics.on_chunk_request_issued(); - let timer = params.metrics.time_chunk_request(); - - requesting_chunks.push(Box::pin(async move { - let _timer = timer; - match res.await { - Ok(req_res::v1::ChunkFetchingResponse::Chunk(chunk)) => - Ok(Some(chunk.recombine_into_chunk(&raw_request))), - Ok(req_res::v1::ChunkFetchingResponse::NoSuchChunk) => Ok(None), - Err(e) => Err((validator_index, e)), - } - })); - } else { - break - } - } - - sender - .send_message(NetworkBridgeTxMessage::SendRequests( - requests, - IfDisconnected::TryConnect, - )) - .await; - } - - /// Wait for a sufficient amount of chunks to reconstruct according to the provided `params`. - async fn wait_for_chunks( - &mut self, - params: &RecoveryParams, - validators: &mut VecDeque, - requesting_chunks: &mut FuturesUndead< - Result, (ValidatorIndex, RequestError)>, - >, - can_conclude: impl Fn(usize, usize, usize, &RecoveryParams, usize) -> bool, - ) -> (usize, usize) { - let metrics = ¶ms.metrics; - - let mut total_received_responses = 0; - let mut error_count = 0; - - // Wait for all current requests to conclude or time-out, or until we reach enough chunks. - // We also declare requests undead, once `TIMEOUT_START_NEW_REQUESTS` is reached and will - // return in that case for `launch_parallel_requests` to fill up slots again. 
- while let Some(request_result) = - requesting_chunks.next_with_timeout(TIMEOUT_START_NEW_REQUESTS).await - { - total_received_responses += 1; - - match request_result { - Ok(Some(chunk)) => - if is_chunk_valid(params, &chunk) { - metrics.on_chunk_request_succeeded(); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - validator_index = ?chunk.index, - "Received valid chunk", - ); - self.insert_chunk(chunk.index, chunk); - } else { - metrics.on_chunk_request_invalid(); - error_count += 1; - }, - Ok(None) => { - metrics.on_chunk_request_no_such_chunk(); - error_count += 1; - }, - Err((validator_index, e)) => { - error_count += 1; - - gum::trace!( - target: LOG_TARGET, - candidate_hash= ?params.candidate_hash, - err = ?e, - ?validator_index, - "Failure requesting chunk", - ); - - match e { - RequestError::InvalidResponse(_) => { - metrics.on_chunk_request_invalid(); - - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - err = ?e, - ?validator_index, - "Chunk fetching response was invalid", - ); - }, - RequestError::NetworkError(err) => { - // No debug logs on general network errors - that became very spammy - // occasionally. - if let RequestFailure::Network(OutboundFailure::Timeout) = err { - metrics.on_chunk_request_timeout(); - } else { - metrics.on_chunk_request_error(); - } - - validators.push_front(validator_index); - }, - RequestError::Canceled(_) => { - metrics.on_chunk_request_error(); - - validators.push_front(validator_index); - }, - } - }, - } - - // Stop waiting for requests when we either can already recover the data - // or have gotten firm 'No' responses from enough validators. - if can_conclude( - validators.len(), - requesting_chunks.total_len(), - self.chunk_count(), - params, - error_count, - ) { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?params.candidate_hash, - received_chunks_count = ?self.chunk_count(), - requested_chunks_count = ?requesting_chunks.len(), - threshold = ?params.threshold, - "Can conclude availability for a candidate", - ); - break - } - } - - (total_received_responses, error_count) - } -} - -/// A stateful reconstruction of availability data in reference to -/// a candidate hash. -pub struct RecoveryTask { - sender: Sender, - params: RecoveryParams, - strategies: VecDeque>>, - state: State, -} - -impl RecoveryTask -where - Sender: overseer::AvailabilityRecoverySenderTrait, -{ - /// Instantiate a new recovery task. - pub fn new( - sender: Sender, - params: RecoveryParams, - strategies: VecDeque>>, - ) -> Self { - Self { sender, params, strategies, state: State::new() } - } - - async fn in_availability_store(&mut self) -> Option { - if !self.params.bypass_availability_store { - let (tx, rx) = oneshot::channel(); - self.sender - .send_message(AvailabilityStoreMessage::QueryAvailableData( - self.params.candidate_hash, - tx, - )) - .await; - - match rx.await { - Ok(Some(data)) => return Some(data), - Ok(None) => {}, - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?self.params.candidate_hash, - "Failed to reach the availability store", - ) - }, - } - } - - None - } - - /// Run this recovery task to completion. It will loop through the configured strategies - /// in-order and return whenever the first one recovers the full `AvailableData`. 
- pub async fn run(mut self) -> Result { - if let Some(data) = self.in_availability_store().await { - return Ok(data) - } - - self.params.metrics.on_recovery_started(); - - let _timer = self.params.metrics.time_full_recovery(); - - while let Some(mut current_strategy) = self.strategies.pop_front() { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?self.params.candidate_hash, - "Starting `{}` strategy", - current_strategy.display_name(), - ); - - let res = current_strategy.run(&mut self.state, &mut self.sender, &self.params).await; - - match res { - Err(RecoveryError::Unavailable) => - if self.strategies.front().is_some() { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?self.params.candidate_hash, - "Recovery strategy `{}` did not conclude. Trying the next one.", - current_strategy.display_name(), - ); - continue - }, - Err(err) => { - match &err { - RecoveryError::Invalid => self.params.metrics.on_recovery_invalid(), - _ => self.params.metrics.on_recovery_failed(), - } - return Err(err) - }, - Ok(data) => { - self.params.metrics.on_recovery_succeeded(data.encoded_size()); - return Ok(data) - }, - } - } - - // We have no other strategies to try. - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?self.params.candidate_hash, - "Recovery of available data failed.", - ); - self.params.metrics.on_recovery_failed(); - - Err(RecoveryError::Unavailable) - } -} - -/// `RecoveryStrategy` that sequentially tries to fetch the full `AvailableData` from -/// already-connected validators in the configured validator set. -pub struct FetchFull { - params: FetchFullParams, -} - -pub struct FetchFullParams { - /// Validators that will be used for fetching the data. - pub validators: Vec, - /// Channel to the erasure task handler. - pub erasure_task_tx: futures::channel::mpsc::Sender, -} - -impl FetchFull { - /// Create a new `FetchFull` recovery strategy. - pub fn new(mut params: FetchFullParams) -> Self { - params.validators.shuffle(&mut rand::thread_rng()); - Self { params } - } -} - -#[async_trait::async_trait] -impl RecoveryStrategy for FetchFull { - fn display_name(&self) -> &'static str { - "Full recovery from backers" - } - - async fn run( - &mut self, - _: &mut State, - sender: &mut Sender, - common_params: &RecoveryParams, - ) -> Result { - loop { - // Pop the next validator, and proceed to next fetch_chunks_task if we're out. - let validator_index = - self.params.validators.pop().ok_or_else(|| RecoveryError::Unavailable)?; - - // Request data. - let (req, response) = OutgoingRequest::new( - Recipient::Authority( - common_params.validator_authority_keys[validator_index.0 as usize].clone(), - ), - req_res::v1::AvailableDataFetchingRequest { - candidate_hash: common_params.candidate_hash, - }, - ); - - sender - .send_message(NetworkBridgeTxMessage::SendRequests( - vec![Requests::AvailableDataFetchingV1(req)], - IfDisconnected::ImmediateError, - )) - .await; - - match response.await { - Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { - let maybe_data = match common_params.post_recovery_check { - PostRecoveryCheck::Reencode => { - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.params - .erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)? 
- }, - PostRecoveryCheck::PovHash => - (data.pov.hash() == common_params.pov_hash).then_some(data), - }; - - match maybe_data { - Some(data) => { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - "Received full data", - ); - - return Ok(data) - }, - None => { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - ?validator_index, - "Invalid data response", - ); - - // it doesn't help to report the peer with req/res. - // we'll try the next backer. - }, - }; - }, - Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {}, - Err(e) => gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - ?validator_index, - err = ?e, - "Error fetching full available data." - ), - } - } - } -} - -/// `RecoveryStrategy` that requests chunks from validators, in parallel. -pub struct FetchChunks { - /// How many requests have been unsuccessful so far. - error_count: usize, - /// Total number of responses that have been received, including failed ones. - total_received_responses: usize, - /// Collection of in-flight requests. - requesting_chunks: FuturesUndead, (ValidatorIndex, RequestError)>>, - /// A random shuffling of the validators which indicates the order in which we connect to the - /// validators and request the chunk from them. - validators: VecDeque, - /// Channel to the erasure task handler. - erasure_task_tx: futures::channel::mpsc::Sender, -} - -/// Parameters specific to the `FetchChunks` strategy. -pub struct FetchChunksParams { - /// Total number of validators. - pub n_validators: usize, - /// Channel to the erasure task handler. - pub erasure_task_tx: futures::channel::mpsc::Sender, -} - -impl FetchChunks { - /// Instantiate a new strategy. - pub fn new(params: FetchChunksParams) -> Self { - let mut shuffling: Vec<_> = (0..params.n_validators) - .map(|i| ValidatorIndex(i.try_into().expect("number of validators must fit in a u32"))) - .collect(); - shuffling.shuffle(&mut rand::thread_rng()); - - Self { - error_count: 0, - total_received_responses: 0, - requesting_chunks: FuturesUndead::new(), - validators: shuffling.into(), - erasure_task_tx: params.erasure_task_tx, - } - } - - fn is_unavailable( - unrequested_validators: usize, - in_flight_requests: usize, - chunk_count: usize, - threshold: usize, - ) -> bool { - is_unavailable(chunk_count, in_flight_requests, unrequested_validators, threshold) - } - - /// Desired number of parallel requests. - /// - /// For the given threshold (total required number of chunks) get the desired number of - /// requests we want to have running in parallel at this time. - fn get_desired_request_count(&self, chunk_count: usize, threshold: usize) -> usize { - // Upper bound for parallel requests. - // We want to limit this, so requests can be processed within the timeout and we limit the - // following feedback loop: - // 1. Requests fail due to timeout - // 2. We request more chunks to make up for it - // 3. Bandwidth is spread out even more, so we get even more timeouts - // 4. We request more chunks to make up for it ... - let max_requests_boundary = std::cmp::min(N_PARALLEL, threshold); - // How many chunks are still needed? - let remaining_chunks = threshold.saturating_sub(chunk_count); - // What is the current error rate, so we can make up for it? 
- let inv_error_rate = - self.total_received_responses.checked_div(self.error_count).unwrap_or(0); - // Actual number of requests we want to have in flight in parallel: - std::cmp::min( - max_requests_boundary, - remaining_chunks + remaining_chunks.checked_div(inv_error_rate).unwrap_or(0), - ) - } - - async fn attempt_recovery( - &mut self, - state: &mut State, - common_params: &RecoveryParams, - ) -> Result { - let recovery_duration = common_params.metrics.time_erasure_recovery(); - - // Send request to reconstruct available data from chunks. - let (avilable_data_tx, available_data_rx) = oneshot::channel(); - self.erasure_task_tx - .send(ErasureTask::Reconstruct( - common_params.n_validators, - // Safe to leave an empty vec in place, as we're stopping the recovery process if - // this reconstruct fails. - std::mem::take(&mut state.received_chunks), - avilable_data_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let available_data_response = - available_data_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - match available_data_response { - Ok(data) => { - let maybe_data = match common_params.post_recovery_check { - PostRecoveryCheck::Reencode => { - // Send request to re-encode the chunks and check merkle root. - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?.or_else(|| { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - "Data recovery error - root mismatch", - ); - None - }) - }, - PostRecoveryCheck::PovHash => - (data.pov.hash() == common_params.pov_hash).then_some(data).or_else(|| { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - pov_hash = ?common_params.pov_hash, - "Data recovery error - PoV hash mismatch", - ); - None - }), - }; - - if let Some(data) = maybe_data { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - "Data recovery from chunks complete", - ); - - Ok(data) - } else { - recovery_duration.map(|rd| rd.stop_and_discard()); - - Err(RecoveryError::Invalid) - } - }, - Err(err) => { - recovery_duration.map(|rd| rd.stop_and_discard()); - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - ?err, - "Data recovery error ", - ); - - Err(RecoveryError::Invalid) - }, - } - } -} - -#[async_trait::async_trait] -impl RecoveryStrategy for FetchChunks { - fn display_name(&self) -> &'static str { - "Fetch chunks" - } - - async fn run( - &mut self, - state: &mut State, - sender: &mut Sender, - common_params: &RecoveryParams, - ) -> Result { - // First query the store for any chunks we've got. - if !common_params.bypass_availability_store { - let local_chunk_indices = state.populate_from_av_store(common_params, sender).await; - self.validators.retain(|i| !local_chunk_indices.contains(i)); - } - - // No need to query the validators that have the chunks we already received. - self.validators.retain(|i| !state.received_chunks.contains_key(i)); - - loop { - // If received_chunks has more than threshold entries, attempt to recover the data. 
- // If that fails, or a re-encoding of it doesn't match the expected erasure root, - // return Err(RecoveryError::Invalid). - // Do this before requesting any chunks because we may have enough of them coming from - // past RecoveryStrategies. - if state.chunk_count() >= common_params.threshold { - return self.attempt_recovery(state, common_params).await - } - - if Self::is_unavailable( - self.validators.len(), - self.requesting_chunks.total_len(), - state.chunk_count(), - common_params.threshold, - ) { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - erasure_root = ?common_params.erasure_root, - received = %state.chunk_count(), - requesting = %self.requesting_chunks.len(), - total_requesting = %self.requesting_chunks.total_len(), - n_validators = %common_params.n_validators, - "Data recovery from chunks is not possible", - ); - - return Err(RecoveryError::Unavailable) - } - - let desired_requests_count = - self.get_desired_request_count(state.chunk_count(), common_params.threshold); - let already_requesting_count = self.requesting_chunks.len(); - gum::debug!( - target: LOG_TARGET, - ?common_params.candidate_hash, - ?desired_requests_count, - error_count= ?self.error_count, - total_received = ?self.total_received_responses, - threshold = ?common_params.threshold, - ?already_requesting_count, - "Requesting availability chunks for a candidate", - ); - state - .launch_parallel_chunk_requests( - common_params, - sender, - desired_requests_count, - &mut self.validators, - &mut self.requesting_chunks, - ) - .await; - - let (total_responses, error_count) = state - .wait_for_chunks( - common_params, - &mut self.validators, - &mut self.requesting_chunks, - |unrequested_validators, reqs, chunk_count, params, _error_count| { - chunk_count >= params.threshold || - Self::is_unavailable( - unrequested_validators, - reqs, - chunk_count, - params.threshold, - ) - }, - ) - .await; - - self.total_received_responses += total_responses; - self.error_count += error_count; - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use polkadot_erasure_coding::recovery_threshold; - - #[test] - fn parallel_request_calculation_works_as_expected() { - let num_validators = 100; - let threshold = recovery_threshold(num_validators).unwrap(); - let (erasure_task_tx, _erasure_task_rx) = futures::channel::mpsc::channel(16); - - let mut fetch_chunks_task = - FetchChunks::new(FetchChunksParams { n_validators: 100, erasure_task_tx }); - assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold); - fetch_chunks_task.error_count = 1; - fetch_chunks_task.total_received_responses = 1; - // We saturate at threshold (34): - assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold); - - fetch_chunks_task.total_received_responses = 2; - // With given error rate - still saturating: - assert_eq!(fetch_chunks_task.get_desired_request_count(1, threshold), threshold); - fetch_chunks_task.total_received_responses += 8; - // error rate: 1/10 - // remaining chunks needed: threshold (34) - 9 - // expected: 24 * (1+ 1/10) = (next greater integer) = 27 - assert_eq!(fetch_chunks_task.get_desired_request_count(9, threshold), 27); - fetch_chunks_task.error_count = 0; - // With error count zero - we should fetch exactly as needed: - assert_eq!(fetch_chunks_task.get_desired_request_count(10, threshold), threshold - 10); - } -} diff --git a/polkadot/node/network/availability-recovery/src/task/mod.rs b/polkadot/node/network/availability-recovery/src/task/mod.rs new 
file mode 100644 index 0000000000000000000000000000000000000000..800a82947d6f30e0073646aa1760b405f637ec8f --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/mod.rs @@ -0,0 +1,197 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Main recovery task logic. Runs recovery strategies. + +#![warn(missing_docs)] + +mod strategy; + +pub use self::strategy::{ + FetchChunks, FetchChunksParams, FetchFull, FetchFullParams, FetchSystematicChunks, + FetchSystematicChunksParams, RecoveryStrategy, State, +}; + +#[cfg(test)] +pub use self::strategy::{REGULAR_CHUNKS_REQ_RETRY_LIMIT, SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT}; + +use crate::{metrics::Metrics, ErasureTask, PostRecoveryCheck, LOG_TARGET}; + +use parity_scale_codec::Encode; +use polkadot_node_primitives::AvailableData; +use polkadot_node_subsystem::{messages::AvailabilityStoreMessage, overseer, RecoveryError}; +use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash}; +use sc_network::ProtocolName; + +use futures::channel::{mpsc, oneshot}; +use std::collections::VecDeque; + +/// Recovery parameters common to all strategies in a `RecoveryTask`. +#[derive(Clone)] +pub struct RecoveryParams { + /// Discovery ids of `validators`. + pub validator_authority_keys: Vec, + + /// Number of validators. + pub n_validators: usize, + + /// The number of regular chunks needed. + pub threshold: usize, + + /// The number of systematic chunks needed. + pub systematic_threshold: usize, + + /// A hash of the relevant candidate. + pub candidate_hash: CandidateHash, + + /// The root of the erasure encoding of the candidate. + pub erasure_root: Hash, + + /// Metrics to report. + pub metrics: Metrics, + + /// Do not request data from availability-store. Useful for collators. + pub bypass_availability_store: bool, + + /// The type of check to perform after available data was recovered. + pub post_recovery_check: PostRecoveryCheck, + + /// The blake2-256 hash of the PoV. + pub pov_hash: Hash, + + /// Protocol name for ChunkFetchingV1. + pub req_v1_protocol_name: ProtocolName, + + /// Protocol name for ChunkFetchingV2. + pub req_v2_protocol_name: ProtocolName, + + /// Whether or not chunk mapping is enabled. + pub chunk_mapping_enabled: bool, + + /// Channel to the erasure task handler. + pub erasure_task_tx: mpsc::Sender, +} + +/// A stateful reconstruction of availability data in reference to +/// a candidate hash. +pub struct RecoveryTask { + sender: Sender, + params: RecoveryParams, + strategies: VecDeque>>, + state: State, +} + +impl RecoveryTask +where + Sender: overseer::AvailabilityRecoverySenderTrait, +{ + /// Instantiate a new recovery task. 
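For context on the two thresholds carried in `RecoveryParams` above: the regular threshold follows the `f + 1` rule used by `polkadot-erasure-coding`, which is why the tests later in this diff expect 34 out of 100 validators. A hedged sketch of that arithmetic (reimplemented here purely for illustration; the real code obtains the value from `polkadot_erasure_coding::recovery_threshold`):

```rust
// Illustrative only: the f + 1 rule, with f = (n - 1) / 3 tolerated faults.
fn recovery_threshold(n_validators: usize) -> Option<usize> {
    if n_validators < 2 {
        return None; // erasure coding needs more than one validator
    }
    let faulty = (n_validators - 1) / 3;
    Some(faulty + 1)
}

fn main() {
    // Matches the "threshold (34)" the unit tests in this diff rely on.
    assert_eq!(recovery_threshold(100), Some(34));
}
```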
+ pub fn new( + sender: Sender, + params: RecoveryParams, + strategies: VecDeque>>, + ) -> Self { + Self { sender, params, strategies, state: State::new() } + } + + async fn in_availability_store(&mut self) -> Option { + if !self.params.bypass_availability_store { + let (tx, rx) = oneshot::channel(); + self.sender + .send_message(AvailabilityStoreMessage::QueryAvailableData( + self.params.candidate_hash, + tx, + )) + .await; + + match rx.await { + Ok(Some(data)) => return Some(data), + Ok(None) => {}, + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Failed to reach the availability store", + ) + }, + } + } + + None + } + + /// Run this recovery task to completion. It will loop through the configured strategies + /// in-order and return whenever the first one recovers the full `AvailableData`. + pub async fn run(mut self) -> Result { + if let Some(data) = self.in_availability_store().await { + return Ok(data) + } + + self.params.metrics.on_recovery_started(); + + let _timer = self.params.metrics.time_full_recovery(); + + while let Some(current_strategy) = self.strategies.pop_front() { + let display_name = current_strategy.display_name(); + let strategy_type = current_strategy.strategy_type(); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Starting `{}` strategy", + display_name + ); + + let res = current_strategy.run(&mut self.state, &mut self.sender, &self.params).await; + + match res { + Err(RecoveryError::Unavailable) => + if self.strategies.front().is_some() { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Recovery strategy `{}` did not conclude. Trying the next one.", + display_name + ); + continue + }, + Err(err) => { + match &err { + RecoveryError::Invalid => + self.params.metrics.on_recovery_invalid(strategy_type), + _ => self.params.metrics.on_recovery_failed(strategy_type), + } + return Err(err) + }, + Ok(data) => { + self.params.metrics.on_recovery_succeeded(strategy_type, data.encoded_size()); + return Ok(data) + }, + } + } + + // We have no other strategies to try. + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?self.params.candidate_hash, + "Recovery of available data failed.", + ); + + self.params.metrics.on_recovery_failed("all"); + + Err(RecoveryError::Unavailable) + } +} diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs new file mode 100644 index 0000000000000000000000000000000000000000..b6376a5b543ed75d511571ec48d2fee8cac6a7d4 --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs @@ -0,0 +1,335 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
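Before the chunk-fetching strategy below, the `RecoveryTask::run` loop above rewards a second look: it is a sequential strategy chain in which only `RecoveryError::Unavailable` falls through to the next strategy, while `Invalid` and success conclude the whole task immediately. A compact stand-alone model of that control flow (the types here are simplified stand-ins, not the real subsystem API):

```rust
use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum RecoveryError {
    Unavailable,
    Invalid,
}

trait Strategy {
    fn run(&mut self) -> Result<Vec<u8>, RecoveryError>;
}

fn run_chain(mut strategies: VecDeque<Box<dyn Strategy>>) -> Result<Vec<u8>, RecoveryError> {
    while let Some(mut strategy) = strategies.pop_front() {
        match strategy.run() {
            // Only "data not found here" moves on to the next strategy.
            Err(RecoveryError::Unavailable) if !strategies.is_empty() => continue,
            // Success and `Invalid` both conclude the whole task.
            other => return other,
        }
    }
    Err(RecoveryError::Unavailable)
}

struct AlwaysUnavailable;
impl Strategy for AlwaysUnavailable {
    fn run(&mut self) -> Result<Vec<u8>, RecoveryError> {
        Err(RecoveryError::Unavailable)
    }
}

fn main() {
    let chain: VecDeque<Box<dyn Strategy>> = VecDeque::from([
        Box::new(AlwaysUnavailable) as Box<dyn Strategy>,
        Box::new(AlwaysUnavailable),
    ]);
    assert_eq!(run_chain(chain), Err(RecoveryError::Unavailable));
}
```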
+ +use crate::{ + futures_undead::FuturesUndead, + task::{ + strategy::{ + do_post_recovery_check, is_unavailable, OngoingRequests, N_PARALLEL, + REGULAR_CHUNKS_REQ_RETRY_LIMIT, + }, + RecoveryParams, State, + }, + ErasureTask, RecoveryStrategy, LOG_TARGET, +}; + +use polkadot_node_primitives::AvailableData; +use polkadot_node_subsystem::{overseer, RecoveryError}; +use polkadot_primitives::ValidatorIndex; + +use futures::{channel::oneshot, SinkExt}; +use rand::seq::SliceRandom; +use std::collections::VecDeque; + +/// Parameters specific to the `FetchChunks` strategy. +pub struct FetchChunksParams { + pub n_validators: usize, +} + +/// `RecoveryStrategy` that requests chunks from validators, in parallel. +pub struct FetchChunks { + /// How many requests have been unsuccessful so far. + error_count: usize, + /// Total number of responses that have been received, including failed ones. + total_received_responses: usize, + /// A shuffled array of validator indices. + validators: VecDeque, + /// Collection of in-flight requests. + requesting_chunks: OngoingRequests, +} + +impl FetchChunks { + /// Instantiate a new strategy. + pub fn new(params: FetchChunksParams) -> Self { + // Shuffle the validators to make sure that we don't request chunks from the same + // validators over and over. + let mut validators: VecDeque = + (0..params.n_validators).map(|i| ValidatorIndex(i as u32)).collect(); + validators.make_contiguous().shuffle(&mut rand::thread_rng()); + + Self { + error_count: 0, + total_received_responses: 0, + validators, + requesting_chunks: FuturesUndead::new(), + } + } + + fn is_unavailable( + unrequested_validators: usize, + in_flight_requests: usize, + chunk_count: usize, + threshold: usize, + ) -> bool { + is_unavailable(chunk_count, in_flight_requests, unrequested_validators, threshold) + } + + /// Desired number of parallel requests. + /// + /// For the given threshold (total required number of chunks) get the desired number of + /// requests we want to have running in parallel at this time. + fn get_desired_request_count(&self, chunk_count: usize, threshold: usize) -> usize { + // Upper bound for parallel requests. + // We want to limit this, so requests can be processed within the timeout and we limit the + // following feedback loop: + // 1. Requests fail due to timeout + // 2. We request more chunks to make up for it + // 3. Bandwidth is spread out even more, so we get even more timeouts + // 4. We request more chunks to make up for it ... + let max_requests_boundary = std::cmp::min(N_PARALLEL, threshold); + // How many chunks are still needed? + let remaining_chunks = threshold.saturating_sub(chunk_count); + // What is the current error rate, so we can make up for it? + let inv_error_rate = + self.total_received_responses.checked_div(self.error_count).unwrap_or(0); + // Actual number of requests we want to have in flight in parallel: + std::cmp::min( + max_requests_boundary, + remaining_chunks + remaining_chunks.checked_div(inv_error_rate).unwrap_or(0), + ) + } + + async fn attempt_recovery( + &mut self, + state: &mut State, + common_params: &RecoveryParams, + ) -> Result { + let recovery_duration = common_params + .metrics + .time_erasure_recovery(RecoveryStrategy::::strategy_type(self)); + + // Send request to reconstruct available data from chunks. 
+		let (available_data_tx, available_data_rx) = oneshot::channel();
+
+		let mut erasure_task_tx = common_params.erasure_task_tx.clone();
+		erasure_task_tx
+			.send(ErasureTask::Reconstruct(
+				common_params.n_validators,
+				// Safe to leave an empty vec in place, as we're stopping the recovery process if
+				// this reconstruct fails.
+				std::mem::take(&mut state.received_chunks)
+					.into_iter()
+					.map(|(c_index, chunk)| (c_index, chunk.chunk))
+					.collect(),
+				available_data_tx,
+			))
+			.await
+			.map_err(|_| RecoveryError::ChannelClosed)?;
+
+		let available_data_response =
+			available_data_rx.await.map_err(|_| RecoveryError::ChannelClosed)?;
+
+		match available_data_response {
+			// Attempt post-recovery check.
+			Ok(data) => do_post_recovery_check(common_params, data)
+				.await
+				.map_err(|e| {
+					recovery_duration.map(|rd| rd.stop_and_discard());
+					e
+				})
+				.map(|data| {
+					gum::trace!(
+						target: LOG_TARGET,
+						candidate_hash = ?common_params.candidate_hash,
+						erasure_root = ?common_params.erasure_root,
+						"Data recovery from chunks complete",
+					);
+					data
+				}),
+			Err(err) => {
+				recovery_duration.map(|rd| rd.stop_and_discard());
+				gum::debug!(
+					target: LOG_TARGET,
+					candidate_hash = ?common_params.candidate_hash,
+					erasure_root = ?common_params.erasure_root,
+					?err,
+					"Data recovery error",
+				);
+
+				Err(RecoveryError::Invalid)
+			},
+		}
+	}
+}
+
+#[async_trait::async_trait]
+impl<Sender: overseer::AvailabilityRecoverySenderTrait> RecoveryStrategy<Sender> for FetchChunks {
+	fn display_name(&self) -> &'static str {
+		"Fetch chunks"
+	}
+
+	fn strategy_type(&self) -> &'static str {
+		"regular_chunks"
+	}
+
+	async fn run(
+		mut self: Box<Self>,
+		state: &mut State,
+		sender: &mut Sender,
+		common_params: &RecoveryParams,
+	) -> Result<AvailableData, RecoveryError> {
+		// First query the store for any chunks we've got.
+		if !common_params.bypass_availability_store {
+			let local_chunk_indices = state.populate_from_av_store(common_params, sender).await;
+			self.validators.retain(|validator_index| {
+				!local_chunk_indices.iter().any(|(v_index, _)| v_index == validator_index)
+			});
+		}
+
+		// No need to query the validators that have the chunks we already received or that we know
+		// don't have the data from previous strategies.
+		self.validators.retain(|v_index| {
+			!state.received_chunks.values().any(|c| v_index == &c.validator_index) &&
+				state.can_retry_request(
+					&(common_params.validator_authority_keys[v_index.0 as usize].clone(), *v_index),
+					REGULAR_CHUNKS_REQ_RETRY_LIMIT,
+				)
+		});
+
+		// Safe to `take` here, as we're consuming `self` anyway and we're not using the
+		// `validators` field in other methods.
+		let mut validators_queue: VecDeque<_> = std::mem::take(&mut self.validators)
+			.into_iter()
+			.map(|validator_index| {
+				(
+					common_params.validator_authority_keys[validator_index.0 as usize].clone(),
+					validator_index,
+				)
+			})
+			.collect();
+
+		loop {
+			// If received_chunks has more than threshold entries, attempt to recover the data.
+			// If that fails, or a re-encoding of it doesn't match the expected erasure root,
+			// return Err(RecoveryError::Invalid).
+			// Do this before requesting any chunks because we may have enough of them coming from
+			// past RecoveryStrategies.
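The `ErasureTask::Reconstruct` hand-off above follows a common request/reply shape: the CPU-heavy reconstruction is shipped to a worker over an `mpsc` channel and the result comes back on a `oneshot`. A self-contained sketch of just that plumbing, assuming only the `futures` crate (the `Task` type and the summing "reconstruction" are stand-ins, not the real `ErasureTask`):

```rust
use futures::{
    channel::{mpsc, oneshot},
    SinkExt, StreamExt,
};

enum Task {
    Reconstruct(Vec<u32>, oneshot::Sender<u32>),
}

async fn demo() {
    let (mut task_tx, mut task_rx) = mpsc::channel::<Task>(16);

    // The "erasure task handler" side, normally running on a dedicated pool:
    let handler = async move {
        while let Some(Task::Reconstruct(chunks, reply)) = task_rx.next().await {
            // Summing stands in for the actual erasure reconstruction.
            let _ = reply.send(chunks.iter().sum());
        }
    };

    // The strategy side: send the work, await the reply on the oneshot.
    let strategy = async move {
        let (reply_tx, reply_rx) = oneshot::channel();
        task_tx.send(Task::Reconstruct(vec![1, 2, 3], reply_tx)).await.unwrap();
        assert_eq!(reply_rx.await.unwrap(), 6);
    };

    futures::join!(handler, strategy);
}

fn main() {
    futures::executor::block_on(demo());
}
```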
+ if state.chunk_count() >= common_params.threshold { + return self.attempt_recovery::(state, common_params).await + } + + if Self::is_unavailable( + validators_queue.len(), + self.requesting_chunks.total_len(), + state.chunk_count(), + common_params.threshold, + ) { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + received = %state.chunk_count(), + requesting = %self.requesting_chunks.len(), + total_requesting = %self.requesting_chunks.total_len(), + n_validators = %common_params.n_validators, + "Data recovery from chunks is not possible", + ); + + return Err(RecoveryError::Unavailable) + } + + let desired_requests_count = + self.get_desired_request_count(state.chunk_count(), common_params.threshold); + let already_requesting_count = self.requesting_chunks.len(); + gum::debug!( + target: LOG_TARGET, + ?common_params.candidate_hash, + ?desired_requests_count, + error_count= ?self.error_count, + total_received = ?self.total_received_responses, + threshold = ?common_params.threshold, + ?already_requesting_count, + "Requesting availability chunks for a candidate", + ); + + let strategy_type = RecoveryStrategy::::strategy_type(&*self); + + state + .launch_parallel_chunk_requests( + strategy_type, + common_params, + sender, + desired_requests_count, + &mut validators_queue, + &mut self.requesting_chunks, + ) + .await; + + let (total_responses, error_count) = state + .wait_for_chunks( + strategy_type, + common_params, + REGULAR_CHUNKS_REQ_RETRY_LIMIT, + &mut validators_queue, + &mut self.requesting_chunks, + &mut vec![], + |unrequested_validators, + in_flight_reqs, + chunk_count, + _systematic_chunk_count| { + chunk_count >= common_params.threshold || + Self::is_unavailable( + unrequested_validators, + in_flight_reqs, + chunk_count, + common_params.threshold, + ) + }, + ) + .await; + + self.total_received_responses += total_responses; + self.error_count += error_count; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_erasure_coding::recovery_threshold; + + #[test] + fn test_get_desired_request_count() { + let n_validators = 100; + let threshold = recovery_threshold(n_validators).unwrap(); + + let mut fetch_chunks_task = FetchChunks::new(FetchChunksParams { n_validators }); + assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold); + fetch_chunks_task.error_count = 1; + fetch_chunks_task.total_received_responses = 1; + // We saturate at threshold (34): + assert_eq!(fetch_chunks_task.get_desired_request_count(0, threshold), threshold); + + // We saturate at the parallel limit. + assert_eq!(fetch_chunks_task.get_desired_request_count(0, N_PARALLEL + 2), N_PARALLEL); + + fetch_chunks_task.total_received_responses = 2; + // With given error rate - still saturating: + assert_eq!(fetch_chunks_task.get_desired_request_count(1, threshold), threshold); + fetch_chunks_task.total_received_responses = 10; + // error rate: 1/10 + // remaining chunks needed: threshold (34) - 9 + // expected: 24 * (1+ 1/10) = (next greater integer) = 27 + assert_eq!(fetch_chunks_task.get_desired_request_count(9, threshold), 27); + // We saturate at the parallel limit. 
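The `27` in the assertion above is easy to misread, so spelling out the integer arithmetic: with 1 error in 10 responses, `inv_error_rate = 10 / 1 = 10`; with 9 of 34 chunks received, `remaining_chunks = 34 - 9 = 25`; the formula then yields `25 + 25 / 10 = 25 + 2 = 27`. The same computation, reimplemented for illustration:

```rust
fn desired_requests(total_responses: usize, errors: usize, received: usize, threshold: usize) -> usize {
    const N_PARALLEL: usize = 50; // upper bound on requests in flight
    let max_requests = std::cmp::min(N_PARALLEL, threshold);
    let remaining = threshold.saturating_sub(received);
    // "Inverse error rate": how many responses arrive per error.
    let inv_error_rate = total_responses.checked_div(errors).unwrap_or(0);
    std::cmp::min(max_requests, remaining + remaining.checked_div(inv_error_rate).unwrap_or(0))
}

fn main() {
    // 10 responses, 1 error, 9 of 34 chunks received: 25 + 25 / 10 = 27.
    assert_eq!(desired_requests(10, 1, 9, 34), 27);
}
```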
+ assert_eq!(fetch_chunks_task.get_desired_request_count(9, N_PARALLEL + 9), N_PARALLEL); + + fetch_chunks_task.error_count = 0; + // With error count zero - we should fetch exactly as needed: + assert_eq!(fetch_chunks_task.get_desired_request_count(10, threshold), threshold - 10); + } +} diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/full.rs b/polkadot/node/network/availability-recovery/src/task/strategy/full.rs new file mode 100644 index 0000000000000000000000000000000000000000..1d7fbe8ea3c8da42ab3227750852c02d328b6dd8 --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/strategy/full.rs @@ -0,0 +1,174 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{ + task::{RecoveryParams, RecoveryStrategy, State}, + ErasureTask, PostRecoveryCheck, LOG_TARGET, +}; + +use polkadot_node_network_protocol::request_response::{ + self as req_res, outgoing::RequestError, OutgoingRequest, Recipient, Requests, +}; +use polkadot_node_primitives::AvailableData; +use polkadot_node_subsystem::{messages::NetworkBridgeTxMessage, overseer, RecoveryError}; +use polkadot_primitives::ValidatorIndex; +use sc_network::{IfDisconnected, OutboundFailure, RequestFailure}; + +use futures::{channel::oneshot, SinkExt}; +use rand::seq::SliceRandom; + +/// Parameters specific to the `FetchFull` strategy. +pub struct FetchFullParams { + /// Validators that will be used for fetching the data. + pub validators: Vec, +} + +/// `RecoveryStrategy` that sequentially tries to fetch the full `AvailableData` from +/// already-connected validators in the configured validator set. +pub struct FetchFull { + params: FetchFullParams, +} + +impl FetchFull { + /// Create a new `FetchFull` recovery strategy. + pub fn new(mut params: FetchFullParams) -> Self { + params.validators.shuffle(&mut rand::thread_rng()); + Self { params } + } +} + +#[async_trait::async_trait] +impl RecoveryStrategy for FetchFull { + fn display_name(&self) -> &'static str { + "Full recovery from backers" + } + + fn strategy_type(&self) -> &'static str { + "full_from_backers" + } + + async fn run( + mut self: Box, + _: &mut State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result { + let strategy_type = RecoveryStrategy::::strategy_type(&*self); + + loop { + // Pop the next validator. + let validator_index = + self.params.validators.pop().ok_or_else(|| RecoveryError::Unavailable)?; + + // Request data. 
+ let (req, response) = OutgoingRequest::new( + Recipient::Authority( + common_params.validator_authority_keys[validator_index.0 as usize].clone(), + ), + req_res::v1::AvailableDataFetchingRequest { + candidate_hash: common_params.candidate_hash, + }, + ); + + sender + .send_message(NetworkBridgeTxMessage::SendRequests( + vec![Requests::AvailableDataFetchingV1(req)], + IfDisconnected::ImmediateError, + )) + .await; + + common_params.metrics.on_full_request_issued(); + + match response.await { + Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { + let recovery_duration = + common_params.metrics.time_erasure_recovery(strategy_type); + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + let (reencode_tx, reencode_rx) = oneshot::channel(); + let mut erasure_task_tx = common_params.erasure_task_tx.clone(); + + erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)? + }, + PostRecoveryCheck::PovHash => + (data.pov.hash() == common_params.pov_hash).then_some(data), + }; + + match maybe_data { + Some(data) => { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + "Received full data", + ); + + common_params.metrics.on_full_request_succeeded(); + return Ok(data) + }, + None => { + common_params.metrics.on_full_request_invalid(); + recovery_duration.map(|rd| rd.stop_and_discard()); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + "Invalid data response", + ); + + // it doesn't help to report the peer with req/res. + // we'll try the next backer. + }, + } + }, + Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => { + common_params.metrics.on_full_request_no_such_data(); + }, + Err(e) => { + match &e { + RequestError::Canceled(_) => common_params.metrics.on_full_request_error(), + RequestError::InvalidResponse(_) => + common_params.metrics.on_full_request_invalid(), + RequestError::NetworkError(req_failure) => { + if let RequestFailure::Network(OutboundFailure::Timeout) = req_failure { + common_params.metrics.on_full_request_timeout(); + } else { + common_params.metrics.on_full_request_error(); + } + }, + }; + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + err = ?e, + "Error fetching full available data." + ); + }, + } + } + } +} diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs b/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..fb31ff6aa77920ff49d22571898e03fe93d9654b --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs @@ -0,0 +1,1558 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Recovery strategies. + +mod chunks; +mod full; +mod systematic; + +pub use self::{ + chunks::{FetchChunks, FetchChunksParams}, + full::{FetchFull, FetchFullParams}, + systematic::{FetchSystematicChunks, FetchSystematicChunksParams}, +}; +use crate::{ + futures_undead::FuturesUndead, ErasureTask, PostRecoveryCheck, RecoveryParams, LOG_TARGET, +}; + +use futures::{channel::oneshot, SinkExt}; +use parity_scale_codec::Decode; +use polkadot_erasure_coding::branch_hash; +#[cfg(not(test))] +use polkadot_node_network_protocol::request_response::CHUNK_REQUEST_TIMEOUT; +use polkadot_node_network_protocol::request_response::{ + self as req_res, outgoing::RequestError, OutgoingRequest, Recipient, Requests, +}; +use polkadot_node_primitives::{AvailableData, ErasureChunk}; +use polkadot_node_subsystem::{ + messages::{AvailabilityStoreMessage, NetworkBridgeTxMessage}, + overseer, RecoveryError, +}; +use polkadot_primitives::{AuthorityDiscoveryId, BlakeTwo256, ChunkIndex, HashT, ValidatorIndex}; +use sc_network::{IfDisconnected, OutboundFailure, ProtocolName, RequestFailure}; +use std::{ + collections::{BTreeMap, HashMap, VecDeque}, + time::Duration, +}; + +// How many parallel chunk fetching requests should be running at once. +const N_PARALLEL: usize = 50; + +/// Time after which we consider a request to have failed +/// +/// and we should try more peers. Note in theory the request times out at the network level, +/// measurements have shown, that in practice requests might actually take longer to fail in +/// certain occasions. (The very least, authority discovery is not part of the timeout.) +/// +/// For the time being this value is the same as the timeout on the networking layer, but as this +/// timeout is more soft than the networking one, it might make sense to pick different values as +/// well. +#[cfg(not(test))] +const TIMEOUT_START_NEW_REQUESTS: Duration = CHUNK_REQUEST_TIMEOUT; +#[cfg(test)] +const TIMEOUT_START_NEW_REQUESTS: Duration = Duration::from_millis(100); + +/// The maximum number of times systematic chunk recovery will try making a request for a given +/// (validator,chunk) pair, if the error was not fatal. Added so that we don't get stuck in an +/// infinite retry loop. +pub const SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT: u32 = 2; +/// The maximum number of times regular chunk recovery will try making a request for a given +/// (validator,chunk) pair, if the error was not fatal. Added so that we don't get stuck in an +/// infinite retry loop. +pub const REGULAR_CHUNKS_REQ_RETRY_LIMIT: u32 = 5; + +// Helpful type alias for tracking ongoing chunk requests. +type OngoingRequests = FuturesUndead<( + AuthorityDiscoveryId, + ValidatorIndex, + Result<(Option, ProtocolName), RequestError>, +)>; + +const fn is_unavailable( + received_chunks: usize, + requesting_chunks: usize, + unrequested_validators: usize, + threshold: usize, +) -> bool { + received_chunks + requesting_chunks + unrequested_validators < threshold +} + +/// Check validity of a chunk. 
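An aside on the give-up condition just defined (the chunk-validity check follows below): recovery is abandoned as soon as even the best case cannot reach the threshold, i.e. when every in-flight request and every not-yet-contacted validator together could no longer supply enough chunks. Concretely:

```rust
// The give-up test in words: even if every pending request and every
// unrequested validator returned a chunk, could we still reach `threshold`?
const fn is_unavailable(
    received: usize,
    in_flight: usize,
    unrequested: usize,
    threshold: usize,
) -> bool {
    received + in_flight + unrequested < threshold
}

fn main() {
    // 10 chunks held, 5 requests pending, 15 validators left, threshold 34:
    // 10 + 5 + 15 = 30 < 34, so this recovery can already be abandoned.
    assert!(is_unavailable(10, 5, 15, 34));
}
```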
+fn is_chunk_valid(params: &RecoveryParams, chunk: &ErasureChunk) -> bool { + let anticipated_hash = + match branch_hash(¶ms.erasure_root, chunk.proof(), chunk.index.0 as usize) { + Ok(hash) => hash, + Err(e) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + chunk_index = ?chunk.index, + error = ?e, + "Invalid Merkle proof", + ); + return false + }, + }; + let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk); + if anticipated_hash != erasure_chunk_hash { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + chunk_index = ?chunk.index, + "Merkle proof mismatch" + ); + return false + } + true +} + +/// Perform the validity checks after recovery. +async fn do_post_recovery_check( + params: &RecoveryParams, + data: AvailableData, +) -> Result { + let mut erasure_task_tx = params.erasure_task_tx.clone(); + match params.post_recovery_check { + PostRecoveryCheck::Reencode => { + // Send request to re-encode the chunks and check merkle root. + let (reencode_tx, reencode_rx) = oneshot::channel(); + erasure_task_tx + .send(ErasureTask::Reencode( + params.n_validators, + params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?.ok_or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + erasure_root = ?params.erasure_root, + "Data recovery error - root mismatch", + ); + RecoveryError::Invalid + }) + }, + PostRecoveryCheck::PovHash => { + let pov = data.pov.clone(); + (pov.hash() == params.pov_hash).then_some(data).ok_or_else(|| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + expected_pov_hash = ?params.pov_hash, + actual_pov_hash = ?pov.hash(), + "Data recovery error - PoV hash mismatch", + ); + RecoveryError::Invalid + }) + }, + } +} + +#[async_trait::async_trait] +/// Common trait for runnable recovery strategies. +pub trait RecoveryStrategy: Send { + /// Main entry point of the strategy. + async fn run( + mut self: Box, + state: &mut State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result; + + /// Return the name of the strategy for logging purposes. + fn display_name(&self) -> &'static str; + + /// Return the strategy type for use as a metric label. + fn strategy_type(&self) -> &'static str; +} + +/// Utility type used for recording the result of requesting a chunk from a validator. +enum ErrorRecord { + NonFatal(u32), + Fatal, +} + +/// Helper struct used for the `received_chunks` mapping. +/// Compared to `ErasureChunk`, it doesn't need to hold the `ChunkIndex` (because it's the key used +/// for the map) and proof, but needs to hold the `ValidatorIndex` instead. +struct Chunk { + /// The erasure-encoded chunk of data belonging to the candidate block. + chunk: Vec, + /// The validator index that corresponds to this chunk. Not always the same as the chunk index. + validator_index: ValidatorIndex, +} + +/// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the +/// same `RecoveryTask`. +pub struct State { + /// Chunks received so far. + /// This MUST be a `BTreeMap` in order for systematic recovery to work (the algorithm assumes + /// that chunks are ordered by their index). If we ever switch this to some non-ordered + /// collection, we need to add a sort step to the systematic recovery. 
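The ordering requirement in the doc comment above is load-bearing: systematic recovery consumes the first `systematic_threshold` chunk indices in ascending order, which a `BTreeMap` range yields for free. A miniature of that access pattern, with `u32` keys standing in for `ChunkIndex`:

```rust
use std::collections::BTreeMap;

fn main() {
    // Chunks arrive in arbitrary order...
    let mut received: BTreeMap<u32, &str> = BTreeMap::new();
    for (index, chunk) in [(7u32, "g"), (0, "a"), (2, "c"), (1, "b")] {
        received.insert(index, chunk);
    }
    // ...but a BTreeMap range walks them in index order, which is exactly
    // what systematic recovery needs for the first `systematic_threshold` chunks.
    let systematic_threshold = 3u32;
    let systematic: Vec<&str> =
        received.range(0..systematic_threshold).map(|(_, c)| *c).collect();
    assert_eq!(systematic, ["a", "b", "c"]);
}
```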
+ received_chunks: BTreeMap, + + /// A record of errors returned when requesting a chunk from a validator. + recorded_errors: HashMap<(AuthorityDiscoveryId, ValidatorIndex), ErrorRecord>, +} + +impl State { + pub fn new() -> Self { + Self { received_chunks: BTreeMap::new(), recorded_errors: HashMap::new() } + } + + fn insert_chunk(&mut self, chunk_index: ChunkIndex, chunk: Chunk) { + self.received_chunks.insert(chunk_index, chunk); + } + + fn chunk_count(&self) -> usize { + self.received_chunks.len() + } + + fn systematic_chunk_count(&self, systematic_threshold: usize) -> usize { + self.received_chunks + .range(ChunkIndex(0)..ChunkIndex(systematic_threshold as u32)) + .count() + } + + fn record_error_fatal( + &mut self, + authority_id: AuthorityDiscoveryId, + validator_index: ValidatorIndex, + ) { + self.recorded_errors.insert((authority_id, validator_index), ErrorRecord::Fatal); + } + + fn record_error_non_fatal( + &mut self, + authority_id: AuthorityDiscoveryId, + validator_index: ValidatorIndex, + ) { + self.recorded_errors + .entry((authority_id, validator_index)) + .and_modify(|record| { + if let ErrorRecord::NonFatal(ref mut count) = record { + *count = count.saturating_add(1); + } + }) + .or_insert(ErrorRecord::NonFatal(1)); + } + + fn can_retry_request( + &self, + key: &(AuthorityDiscoveryId, ValidatorIndex), + retry_threshold: u32, + ) -> bool { + match self.recorded_errors.get(key) { + None => true, + Some(entry) => match entry { + ErrorRecord::Fatal => false, + ErrorRecord::NonFatal(count) if *count < retry_threshold => true, + ErrorRecord::NonFatal(_) => false, + }, + } + } + + /// Retrieve the local chunks held in the av-store (should be either 0 or 1). + async fn populate_from_av_store( + &mut self, + params: &RecoveryParams, + sender: &mut Sender, + ) -> Vec<(ValidatorIndex, ChunkIndex)> { + let (tx, rx) = oneshot::channel(); + sender + .send_message(AvailabilityStoreMessage::QueryAllChunks(params.candidate_hash, tx)) + .await; + + match rx.await { + Ok(chunks) => { + // This should either be length 1 or 0. If we had the whole data, + // we wouldn't have reached this stage. + let chunk_indices: Vec<_> = chunks + .iter() + .map(|(validator_index, chunk)| (*validator_index, chunk.index)) + .collect(); + + for (validator_index, chunk) in chunks { + if is_chunk_valid(params, &chunk) { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + chunk_index = ?chunk.index, + "Found valid chunk on disk" + ); + self.insert_chunk( + chunk.index, + Chunk { chunk: chunk.chunk, validator_index }, + ); + } else { + gum::error!( + target: LOG_TARGET, + "Loaded invalid chunk from disk! Disk/Db corruption _very_ likely - please fix ASAP!" + ); + }; + } + + chunk_indices + }, + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + "Failed to reach the availability store" + ); + + vec![] + }, + } + } + + /// Launch chunk requests in parallel, according to the parameters. 
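To make the retry policy above concrete: a fatal record (an invalid chunk, or a firm "no such chunk") permanently excludes the `(authority, validator)` pair, while non-fatal errors (timeouts, cancellations) are counted against a per-strategy limit such as `REGULAR_CHUNKS_REQ_RETRY_LIMIT`. A condensed model of that bookkeeping, with `u32` keys standing in for the real pair:

```rust
use std::collections::HashMap;

enum ErrorRecord {
    NonFatal(u32),
    Fatal,
}

// `u32` keys stand in for the (AuthorityDiscoveryId, ValidatorIndex) pair.
struct Errors(HashMap<u32, ErrorRecord>);

impl Errors {
    fn record_fatal(&mut self, key: u32) {
        self.0.insert(key, ErrorRecord::Fatal);
    }
    fn record_non_fatal(&mut self, key: u32) {
        self.0
            .entry(key)
            .and_modify(|record| {
                if let ErrorRecord::NonFatal(count) = record {
                    *count = count.saturating_add(1);
                }
            })
            .or_insert(ErrorRecord::NonFatal(1));
    }
    fn can_retry(&self, key: u32, retry_threshold: u32) -> bool {
        match self.0.get(&key) {
            None => true,
            Some(ErrorRecord::Fatal) => false,
            Some(ErrorRecord::NonFatal(count)) => *count < retry_threshold,
        }
    }
}

fn main() {
    let mut errors = Errors(HashMap::new());
    errors.record_non_fatal(7); // e.g. a timeout
    assert!(errors.can_retry(7, 2)); // below the retry threshold: ask again
    errors.record_non_fatal(7);
    assert!(!errors.can_retry(7, 2)); // threshold reached: give up on this pair
    errors.record_fatal(9); // e.g. an invalid chunk
    assert!(!errors.can_retry(9, 2)); // fatal errors are never retried
}
```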
+ async fn launch_parallel_chunk_requests( + &mut self, + strategy_type: &str, + params: &RecoveryParams, + sender: &mut Sender, + desired_requests_count: usize, + validators: &mut VecDeque<(AuthorityDiscoveryId, ValidatorIndex)>, + requesting_chunks: &mut OngoingRequests, + ) where + Sender: overseer::AvailabilityRecoverySenderTrait, + { + let candidate_hash = params.candidate_hash; + let already_requesting_count = requesting_chunks.len(); + + let to_launch = desired_requests_count - already_requesting_count; + let mut requests = Vec::with_capacity(to_launch); + + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + "Attempting to launch {} requests", + to_launch + ); + + while requesting_chunks.len() < desired_requests_count { + if let Some((authority_id, validator_index)) = validators.pop_back() { + gum::trace!( + target: LOG_TARGET, + ?authority_id, + ?validator_index, + ?candidate_hash, + "Requesting chunk", + ); + + // Request data. + let raw_request_v2 = + req_res::v2::ChunkFetchingRequest { candidate_hash, index: validator_index }; + let raw_request_v1 = req_res::v1::ChunkFetchingRequest::from(raw_request_v2); + + let (req, res) = OutgoingRequest::new_with_fallback( + Recipient::Authority(authority_id.clone()), + raw_request_v2, + raw_request_v1, + ); + requests.push(Requests::ChunkFetching(req)); + + params.metrics.on_chunk_request_issued(strategy_type); + let timer = params.metrics.time_chunk_request(strategy_type); + let v1_protocol_name = params.req_v1_protocol_name.clone(); + let v2_protocol_name = params.req_v2_protocol_name.clone(); + + let chunk_mapping_enabled = params.chunk_mapping_enabled; + let authority_id_clone = authority_id.clone(); + + requesting_chunks.push(Box::pin(async move { + let _timer = timer; + let res = match res.await { + Ok((bytes, protocol)) => + if v2_protocol_name == protocol { + match req_res::v2::ChunkFetchingResponse::decode(&mut &bytes[..]) { + Ok(req_res::v2::ChunkFetchingResponse::Chunk(chunk)) => + Ok((Some(chunk.into()), protocol)), + Ok(req_res::v2::ChunkFetchingResponse::NoSuchChunk) => + Ok((None, protocol)), + Err(e) => Err(RequestError::InvalidResponse(e)), + } + } else if v1_protocol_name == protocol { + // V1 protocol version must not be used when chunk mapping node + // feature is enabled, because we can't know the real index of the + // returned chunk. + // This case should never be reached as long as the + // `AvailabilityChunkMapping` feature is only enabled after the + // v1 version is removed. Still, log this. + if chunk_mapping_enabled { + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + authority_id = ?authority_id_clone, + "Another validator is responding on /req_chunk/1 protocol while the availability chunk \ + mapping feature is enabled in the runtime. All validators must switch to /req_chunk/2." 
+ ); + } + + match req_res::v1::ChunkFetchingResponse::decode(&mut &bytes[..]) { + Ok(req_res::v1::ChunkFetchingResponse::Chunk(chunk)) => Ok(( + Some(chunk.recombine_into_chunk(&raw_request_v1)), + protocol, + )), + Ok(req_res::v1::ChunkFetchingResponse::NoSuchChunk) => + Ok((None, protocol)), + Err(e) => Err(RequestError::InvalidResponse(e)), + } + } else { + Err(RequestError::NetworkError(RequestFailure::UnknownProtocol)) + }, + + Err(e) => Err(e), + }; + + (authority_id, validator_index, res) + })); + } else { + break + } + } + + if requests.len() != 0 { + sender + .send_message(NetworkBridgeTxMessage::SendRequests( + requests, + IfDisconnected::TryConnect, + )) + .await; + } + } + + /// Wait for a sufficient amount of chunks to reconstruct according to the provided `params`. + async fn wait_for_chunks( + &mut self, + strategy_type: &str, + params: &RecoveryParams, + retry_threshold: u32, + validators: &mut VecDeque<(AuthorityDiscoveryId, ValidatorIndex)>, + requesting_chunks: &mut OngoingRequests, + // If supplied, these validators will be used as a backup for requesting chunks. They + // should hold all chunks. Each of them will only be used to query one chunk. + backup_validators: &mut Vec, + // Function that returns `true` when this strategy can conclude. Either if we got enough + // chunks or if it's impossible. + mut can_conclude: impl FnMut( + // Number of validators left in the queue + usize, + // Number of in flight requests + usize, + // Number of valid chunks received so far + usize, + // Number of valid systematic chunks received so far + usize, + ) -> bool, + ) -> (usize, usize) { + let metrics = ¶ms.metrics; + + let mut total_received_responses = 0; + let mut error_count = 0; + + // Wait for all current requests to conclude or time-out, or until we reach enough chunks. + // We also declare requests undead, once `TIMEOUT_START_NEW_REQUESTS` is reached and will + // return in that case for `launch_parallel_requests` to fill up slots again. + while let Some(res) = requesting_chunks.next_with_timeout(TIMEOUT_START_NEW_REQUESTS).await + { + total_received_responses += 1; + + let (authority_id, validator_index, request_result) = res; + + let mut is_error = false; + + match request_result { + Ok((maybe_chunk, protocol)) => { + match protocol { + name if name == params.req_v1_protocol_name => + params.metrics.on_chunk_response_v1(), + name if name == params.req_v2_protocol_name => + params.metrics.on_chunk_response_v2(), + _ => {}, + } + + match maybe_chunk { + Some(chunk) => + if is_chunk_valid(params, &chunk) { + metrics.on_chunk_request_succeeded(strategy_type); + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + ?authority_id, + ?validator_index, + "Received valid chunk", + ); + self.insert_chunk( + chunk.index, + Chunk { chunk: chunk.chunk, validator_index }, + ); + } else { + metrics.on_chunk_request_invalid(strategy_type); + error_count += 1; + // Record that we got an invalid chunk so that subsequent strategies + // don't try requesting this again. + self.record_error_fatal(authority_id.clone(), validator_index); + is_error = true; + }, + None => { + metrics.on_chunk_request_no_such_chunk(strategy_type); + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + ?authority_id, + ?validator_index, + "Validator did not have the chunk", + ); + error_count += 1; + // Record that the validator did not have this chunk so that subsequent + // strategies don't try requesting this again. 
+ self.record_error_fatal(authority_id.clone(), validator_index); + is_error = true; + }, + } + }, + Err(err) => { + error_count += 1; + + gum::trace!( + target: LOG_TARGET, + candidate_hash= ?params.candidate_hash, + ?err, + ?authority_id, + ?validator_index, + "Failure requesting chunk", + ); + + is_error = true; + + match err { + RequestError::InvalidResponse(_) => { + metrics.on_chunk_request_invalid(strategy_type); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?params.candidate_hash, + ?err, + ?authority_id, + ?validator_index, + "Chunk fetching response was invalid", + ); + + // Record that we got an invalid chunk so that this or + // subsequent strategies don't try requesting this again. + self.record_error_fatal(authority_id.clone(), validator_index); + }, + RequestError::NetworkError(err) => { + // No debug logs on general network errors - that became very + // spammy occasionally. + if let RequestFailure::Network(OutboundFailure::Timeout) = err { + metrics.on_chunk_request_timeout(strategy_type); + } else { + metrics.on_chunk_request_error(strategy_type); + } + + // Record that we got a non-fatal error so that this or + // subsequent strategies will retry requesting this only a + // limited number of times. + self.record_error_non_fatal(authority_id.clone(), validator_index); + }, + RequestError::Canceled(_) => { + metrics.on_chunk_request_error(strategy_type); + + // Record that we got a non-fatal error so that this or + // subsequent strategies will retry requesting this only a + // limited number of times. + self.record_error_non_fatal(authority_id.clone(), validator_index); + }, + } + }, + } + + if is_error { + // First, see if we can retry the request. + if self.can_retry_request(&(authority_id.clone(), validator_index), retry_threshold) + { + validators.push_front((authority_id, validator_index)); + } else { + // Otherwise, try requesting from a backer as a backup, if we've not already + // requested the same chunk from it. + + let position = backup_validators.iter().position(|v| { + !self.recorded_errors.contains_key(&(v.clone(), validator_index)) + }); + if let Some(position) = position { + // Use swap_remove because it's faster and we don't care about order here. 
+ let backer = backup_validators.swap_remove(position); + validators.push_front((backer, validator_index)); + } + } + } + + if can_conclude( + validators.len(), + requesting_chunks.total_len(), + self.chunk_count(), + self.systematic_chunk_count(params.systematic_threshold), + ) { + gum::debug!( + target: LOG_TARGET, + validators_len = validators.len(), + candidate_hash = ?params.candidate_hash, + received_chunks_count = ?self.chunk_count(), + requested_chunks_count = ?requesting_chunks.len(), + threshold = ?params.threshold, + "Can conclude availability recovery strategy", + ); + break + } + } + + (total_received_responses, error_count) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{tests::*, Metrics, RecoveryStrategy, RecoveryTask}; + use assert_matches::assert_matches; + use futures::{ + channel::mpsc::{self, UnboundedReceiver}, + executor, future, Future, FutureExt, StreamExt, + }; + use parity_scale_codec::Error as DecodingError; + use polkadot_erasure_coding::{recovery_threshold, systematic_recovery_threshold}; + use polkadot_node_network_protocol::request_response::Protocol; + use polkadot_node_primitives::{BlockData, PoV}; + use polkadot_node_subsystem::{AllMessages, TimeoutExt}; + use polkadot_node_subsystem_test_helpers::{ + derive_erasure_chunks_with_proofs_and_root, sender_receiver, TestSubsystemSender, + }; + use polkadot_primitives::{CandidateHash, HeadData, PersistedValidationData}; + use polkadot_primitives_test_helpers::dummy_hash; + use sp_keyring::Sr25519Keyring; + use std::sync::Arc; + + const TIMEOUT: Duration = Duration::from_secs(1); + + impl Default for RecoveryParams { + fn default() -> Self { + let validators = vec![ + Sr25519Keyring::Ferdie, + Sr25519Keyring::Alice.into(), + Sr25519Keyring::Bob.into(), + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::One, + Sr25519Keyring::Two, + ]; + let (erasure_task_tx, _erasure_task_rx) = mpsc::channel(10); + + Self { + validator_authority_keys: validator_authority_id(&validators), + n_validators: validators.len(), + threshold: recovery_threshold(validators.len()).unwrap(), + systematic_threshold: systematic_recovery_threshold(validators.len()).unwrap(), + candidate_hash: CandidateHash(dummy_hash()), + erasure_root: dummy_hash(), + metrics: Metrics::new_dummy(), + bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, + pov_hash: dummy_hash(), + req_v1_protocol_name: "/req_chunk/1".into(), + req_v2_protocol_name: "/req_chunk/2".into(), + chunk_mapping_enabled: true, + erasure_task_tx, + } + } + } + + impl RecoveryParams { + fn create_chunks(&mut self) -> Vec { + let available_data = dummy_available_data(); + let (chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + self.n_validators, + &available_data, + |_, _| {}, + ); + + self.erasure_root = erasure_root; + self.pov_hash = available_data.pov.hash(); + + chunks + } + } + + fn dummy_available_data() -> AvailableData { + let validation_data = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: Default::default(), + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }; + + AvailableData { + validation_data, + pov: Arc::new(PoV { block_data: BlockData(vec![42; 64]) }), + } + } + + fn test_harness, TestFut: Future>( + receiver_future: impl FnOnce(UnboundedReceiver) -> RecvFut, + test: impl FnOnce(TestSubsystemSender) -> TestFut, + ) { + let (sender, receiver) = sender_receiver(); + + let test_fut = test(sender); + let receiver_future = 
receiver_future(receiver); + + futures::pin_mut!(test_fut); + futures::pin_mut!(receiver_future); + + executor::block_on(future::join(test_fut, receiver_future)).1 + } + + #[test] + fn test_recorded_errors() { + let retry_threshold = 2; + let mut state = State::new(); + + let alice = Sr25519Keyring::Alice.public(); + let bob = Sr25519Keyring::Bob.public(); + let eve = Sr25519Keyring::Eve.public(); + + assert!(state.can_retry_request(&(alice.into(), 0.into()), retry_threshold)); + assert!(state.can_retry_request(&(alice.into(), 0.into()), 0)); + state.record_error_non_fatal(alice.into(), 0.into()); + assert!(state.can_retry_request(&(alice.into(), 0.into()), retry_threshold)); + state.record_error_non_fatal(alice.into(), 0.into()); + assert!(!state.can_retry_request(&(alice.into(), 0.into()), retry_threshold)); + state.record_error_non_fatal(alice.into(), 0.into()); + assert!(!state.can_retry_request(&(alice.into(), 0.into()), retry_threshold)); + + assert!(state.can_retry_request(&(alice.into(), 0.into()), 5)); + + state.record_error_fatal(bob.into(), 1.into()); + assert!(!state.can_retry_request(&(bob.into(), 1.into()), retry_threshold)); + state.record_error_non_fatal(bob.into(), 1.into()); + assert!(!state.can_retry_request(&(bob.into(), 1.into()), retry_threshold)); + + assert!(state.can_retry_request(&(eve.into(), 4.into()), 0)); + assert!(state.can_retry_request(&(eve.into(), 4.into()), retry_threshold)); + } + + #[test] + fn test_populate_from_av_store() { + let params = RecoveryParams::default(); + + // Failed to reach the av store + { + let params = params.clone(); + let candidate_hash = params.candidate_hash; + let mut state = State::new(); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + assert_matches!( + receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(), + AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAllChunks(hash, tx)) => { + assert_eq!(hash, candidate_hash); + drop(tx); + }); + }, + |mut sender| async move { + let local_chunk_indices = + state.populate_from_av_store(¶ms, &mut sender).await; + + assert_eq!(state.chunk_count(), 0); + assert_eq!(local_chunk_indices.len(), 0); + }, + ); + } + + // Found invalid chunk + { + let mut params = params.clone(); + let candidate_hash = params.candidate_hash; + let mut state = State::new(); + let chunks = params.create_chunks(); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + assert_matches!( + receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(), + AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAllChunks(hash, tx)) => { + assert_eq!(hash, candidate_hash); + let mut chunk = chunks[0].clone(); + chunk.index = 3.into(); + tx.send(vec![(2.into(), chunk)]).unwrap(); + }); + }, + |mut sender| async move { + let local_chunk_indices = + state.populate_from_av_store(¶ms, &mut sender).await; + + assert_eq!(state.chunk_count(), 0); + assert_eq!(local_chunk_indices.len(), 1); + }, + ); + } + + // Found valid chunk + { + let mut params = params.clone(); + let candidate_hash = params.candidate_hash; + let mut state = State::new(); + let chunks = params.create_chunks(); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + assert_matches!( + receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(), + AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAllChunks(hash, tx)) => { + assert_eq!(hash, candidate_hash); + tx.send(vec![(4.into(), chunks[1].clone())]).unwrap(); + }); + }, + |mut sender| async move { + let local_chunk_indices = + 
state.populate_from_av_store(¶ms, &mut sender).await; + + assert_eq!(state.chunk_count(), 1); + assert_eq!(local_chunk_indices.len(), 1); + }, + ); + } + } + + #[test] + fn test_launch_parallel_chunk_requests() { + let params = RecoveryParams::default(); + let alice: AuthorityDiscoveryId = Sr25519Keyring::Alice.public().into(); + let bob: AuthorityDiscoveryId = Sr25519Keyring::Bob.public().into(); + let eve: AuthorityDiscoveryId = Sr25519Keyring::Eve.public().into(); + + // No validators to request from. + { + let params = params.clone(); + let mut state = State::new(); + let mut ongoing_reqs = OngoingRequests::new(); + let mut validators = VecDeque::new(); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + // Shouldn't send any requests. + assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none()); + }, + |mut sender| async move { + state + .launch_parallel_chunk_requests( + "regular", + ¶ms, + &mut sender, + 3, + &mut validators, + &mut ongoing_reqs, + ) + .await; + + assert_eq!(ongoing_reqs.total_len(), 0); + }, + ); + } + + // Has validators but no need to request more. + { + let params = params.clone(); + let mut state = State::new(); + let mut ongoing_reqs = OngoingRequests::new(); + let mut validators = VecDeque::new(); + validators.push_back((alice.clone(), ValidatorIndex(1))); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + // Shouldn't send any requests. + assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none()); + }, + |mut sender| async move { + state + .launch_parallel_chunk_requests( + "regular", + ¶ms, + &mut sender, + 0, + &mut validators, + &mut ongoing_reqs, + ) + .await; + + assert_eq!(ongoing_reqs.total_len(), 0); + }, + ); + } + + // Has validators but no need to request more. + { + let params = params.clone(); + let mut state = State::new(); + let mut ongoing_reqs = OngoingRequests::new(); + ongoing_reqs.push(async { todo!() }.boxed()); + ongoing_reqs.soft_cancel(); + let mut validators = VecDeque::new(); + validators.push_back((alice.clone(), ValidatorIndex(1))); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + // Shouldn't send any requests. + assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none()); + }, + |mut sender| async move { + state + .launch_parallel_chunk_requests( + "regular", + ¶ms, + &mut sender, + 0, + &mut validators, + &mut ongoing_reqs, + ) + .await; + + assert_eq!(ongoing_reqs.total_len(), 1); + assert_eq!(ongoing_reqs.len(), 0); + }, + ); + } + + // Needs to request more. + { + let params = params.clone(); + let mut state = State::new(); + let mut ongoing_reqs = OngoingRequests::new(); + ongoing_reqs.push(async { todo!() }.boxed()); + ongoing_reqs.soft_cancel(); + ongoing_reqs.push(async { todo!() }.boxed()); + let mut validators = VecDeque::new(); + validators.push_back((alice.clone(), 0.into())); + validators.push_back((bob, 1.into())); + validators.push_back((eve, 2.into())); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + assert_matches!( + receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(), + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(requests, _)) if requests.len() +== 3 ); + }, + |mut sender| async move { + state + .launch_parallel_chunk_requests( + "regular", + ¶ms, + &mut sender, + 10, + &mut validators, + &mut ongoing_reqs, + ) + .await; + + assert_eq!(ongoing_reqs.total_len(), 5); + assert_eq!(ongoing_reqs.len(), 4); + }, + ); + } + + // Check network protocol versioning. 
+ { + let params = params.clone(); + let mut state = State::new(); + let mut ongoing_reqs = OngoingRequests::new(); + let mut validators = VecDeque::new(); + validators.push_back((alice, 0.into())); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + match receiver.next().timeout(TIMEOUT).await.unwrap().unwrap() { + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests( + mut requests, + _, + )) => { + assert_eq!(requests.len(), 1); + // By default, we should use the new protocol version with a fallback on + // the older one. + let (protocol, request) = requests.remove(0).encode_request(); + assert_eq!(protocol, Protocol::ChunkFetchingV2); + assert_eq!( + request.fallback_request.unwrap().1, + Protocol::ChunkFetchingV1 + ); + }, + _ => unreachable!(), + } + }, + |mut sender| async move { + state + .launch_parallel_chunk_requests( + "regular", + ¶ms, + &mut sender, + 10, + &mut validators, + &mut ongoing_reqs, + ) + .await; + + assert_eq!(ongoing_reqs.total_len(), 1); + assert_eq!(ongoing_reqs.len(), 1); + }, + ); + } + } + + #[test] + fn test_wait_for_chunks() { + let params = RecoveryParams::default(); + let retry_threshold = 2; + + // No ongoing requests. + { + let params = params.clone(); + let mut state = State::new(); + let mut ongoing_reqs = OngoingRequests::new(); + let mut validators = VecDeque::new(); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + // Shouldn't send any requests. + assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none()); + }, + |_| async move { + let (total_responses, error_count) = state + .wait_for_chunks( + "regular", + ¶ms, + retry_threshold, + &mut validators, + &mut ongoing_reqs, + &mut vec![], + |_, _, _, _| false, + ) + .await; + assert_eq!(total_responses, 0); + assert_eq!(error_count, 0); + assert_eq!(state.chunk_count(), 0); + }, + ); + } + + // Complex scenario. + { + let mut params = params.clone(); + let chunks = params.create_chunks(); + let mut state = State::new(); + let mut ongoing_reqs = OngoingRequests::new(); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[0].clone(), + 0.into(), + Ok((Some(chunks[0].clone()), "".into())), + )) + .boxed(), + ); + ongoing_reqs.soft_cancel(); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[1].clone(), + 1.into(), + Ok((Some(chunks[1].clone()), "".into())), + )) + .boxed(), + ); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[2].clone(), + 2.into(), + Ok((None, "".into())), + )) + .boxed(), + ); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[3].clone(), + 3.into(), + Err(RequestError::from(DecodingError::from("err"))), + )) + .boxed(), + ); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[4].clone(), + 4.into(), + Err(RequestError::NetworkError(RequestFailure::NotConnected)), + )) + .boxed(), + ); + + let mut validators: VecDeque<_> = (5..params.n_validators as u32) + .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into())) + .collect(); + validators.push_back(( + Sr25519Keyring::AliceStash.public().into(), + ValidatorIndex(params.n_validators as u32), + )); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + // Shouldn't send any requests. 
+ assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none()); + }, + |_| async move { + let (total_responses, error_count) = state + .wait_for_chunks( + "regular", + ¶ms, + retry_threshold, + &mut validators, + &mut ongoing_reqs, + &mut vec![], + |_, _, _, _| false, + ) + .await; + assert_eq!(total_responses, 5); + assert_eq!(error_count, 3); + assert_eq!(state.chunk_count(), 2); + + let mut expected_validators: VecDeque<_> = (4..params.n_validators as u32) + .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into())) + .collect(); + expected_validators.push_back(( + Sr25519Keyring::AliceStash.public().into(), + ValidatorIndex(params.n_validators as u32), + )); + + assert_eq!(validators, expected_validators); + + // This time we'll go over the recoverable error threshold. + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[4].clone(), + 4.into(), + Err(RequestError::NetworkError(RequestFailure::NotConnected)), + )) + .boxed(), + ); + + let (total_responses, error_count) = state + .wait_for_chunks( + "regular", + ¶ms, + retry_threshold, + &mut validators, + &mut ongoing_reqs, + &mut vec![], + |_, _, _, _| false, + ) + .await; + assert_eq!(total_responses, 1); + assert_eq!(error_count, 1); + assert_eq!(state.chunk_count(), 2); + + validators.pop_front(); + let mut expected_validators: VecDeque<_> = (5..params.n_validators as u32) + .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into())) + .collect(); + expected_validators.push_back(( + Sr25519Keyring::AliceStash.public().into(), + ValidatorIndex(params.n_validators as u32), + )); + + assert_eq!(validators, expected_validators); + + // Check that can_conclude returning true terminates the loop. + let (total_responses, error_count) = state + .wait_for_chunks( + "regular", + ¶ms, + retry_threshold, + &mut validators, + &mut ongoing_reqs, + &mut vec![], + |_, _, _, _| true, + ) + .await; + assert_eq!(total_responses, 0); + assert_eq!(error_count, 0); + assert_eq!(state.chunk_count(), 2); + + assert_eq!(validators, expected_validators); + }, + ); + } + + // Complex scenario with backups in the backing group. 
+ { + let mut params = params.clone(); + let chunks = params.create_chunks(); + let mut state = State::new(); + let mut ongoing_reqs = OngoingRequests::new(); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[0].clone(), + 0.into(), + Ok((Some(chunks[0].clone()), "".into())), + )) + .boxed(), + ); + ongoing_reqs.soft_cancel(); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[1].clone(), + 1.into(), + Ok((Some(chunks[1].clone()), "".into())), + )) + .boxed(), + ); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[2].clone(), + 2.into(), + Ok((None, "".into())), + )) + .boxed(), + ); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[3].clone(), + 3.into(), + Err(RequestError::from(DecodingError::from("err"))), + )) + .boxed(), + ); + ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[4].clone(), + 4.into(), + Err(RequestError::NetworkError(RequestFailure::NotConnected)), + )) + .boxed(), + ); + + let mut validators: VecDeque<_> = (5..params.n_validators as u32) + .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into())) + .collect(); + validators.push_back(( + Sr25519Keyring::Eve.public().into(), + ValidatorIndex(params.n_validators as u32), + )); + + let mut backup_backers = vec![ + params.validator_authority_keys[2].clone(), + params.validator_authority_keys[0].clone(), + params.validator_authority_keys[4].clone(), + params.validator_authority_keys[3].clone(), + Sr25519Keyring::AliceStash.public().into(), + Sr25519Keyring::BobStash.public().into(), + ]; + + test_harness( + |mut receiver: UnboundedReceiver| async move { + // Shouldn't send any requests. + assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none()); + }, + |_| async move { + let (total_responses, error_count) = state + .wait_for_chunks( + "regular", + ¶ms, + retry_threshold, + &mut validators, + &mut ongoing_reqs, + &mut backup_backers, + |_, _, _, _| false, + ) + .await; + assert_eq!(total_responses, 5); + assert_eq!(error_count, 3); + assert_eq!(state.chunk_count(), 2); + + let mut expected_validators: VecDeque<_> = (5..params.n_validators as u32) + .map(|i| (params.validator_authority_keys[i as usize].clone(), i.into())) + .collect(); + expected_validators.push_back(( + Sr25519Keyring::Eve.public().into(), + ValidatorIndex(params.n_validators as u32), + )); + // We picked a backer as a backup for chunks 2 and 3. + expected_validators + .push_front((params.validator_authority_keys[0].clone(), 2.into())); + expected_validators + .push_front((params.validator_authority_keys[2].clone(), 3.into())); + expected_validators + .push_front((params.validator_authority_keys[4].clone(), 4.into())); + + assert_eq!(validators, expected_validators); + + // This time we'll go over the recoverable error threshold for chunk 4. 
+ ongoing_reqs.push( + future::ready(( + params.validator_authority_keys[4].clone(), + 4.into(), + Err(RequestError::NetworkError(RequestFailure::NotConnected)), + )) + .boxed(), + ); + + validators.pop_front(); + + let (total_responses, error_count) = state + .wait_for_chunks( + "regular", + ¶ms, + retry_threshold, + &mut validators, + &mut ongoing_reqs, + &mut backup_backers, + |_, _, _, _| false, + ) + .await; + assert_eq!(total_responses, 1); + assert_eq!(error_count, 1); + assert_eq!(state.chunk_count(), 2); + + expected_validators.pop_front(); + expected_validators + .push_front((Sr25519Keyring::AliceStash.public().into(), 4.into())); + + assert_eq!(validators, expected_validators); + }, + ); + } + } + + #[test] + fn test_recovery_strategy_run() { + let params = RecoveryParams::default(); + + struct GoodStrategy; + #[async_trait::async_trait] + impl RecoveryStrategy for GoodStrategy { + fn display_name(&self) -> &'static str { + "GoodStrategy" + } + + fn strategy_type(&self) -> &'static str { + "good_strategy" + } + + async fn run( + mut self: Box, + _state: &mut State, + _sender: &mut Sender, + _common_params: &RecoveryParams, + ) -> Result { + Ok(dummy_available_data()) + } + } + + struct UnavailableStrategy; + #[async_trait::async_trait] + impl RecoveryStrategy + for UnavailableStrategy + { + fn display_name(&self) -> &'static str { + "UnavailableStrategy" + } + + fn strategy_type(&self) -> &'static str { + "unavailable_strategy" + } + + async fn run( + mut self: Box, + _state: &mut State, + _sender: &mut Sender, + _common_params: &RecoveryParams, + ) -> Result { + Err(RecoveryError::Unavailable) + } + } + + struct InvalidStrategy; + #[async_trait::async_trait] + impl RecoveryStrategy + for InvalidStrategy + { + fn display_name(&self) -> &'static str { + "InvalidStrategy" + } + + fn strategy_type(&self) -> &'static str { + "invalid_strategy" + } + + async fn run( + mut self: Box, + _state: &mut State, + _sender: &mut Sender, + _common_params: &RecoveryParams, + ) -> Result { + Err(RecoveryError::Invalid) + } + } + + // No recovery strategies. + { + let mut params = params.clone(); + let strategies = VecDeque::new(); + params.bypass_availability_store = true; + + test_harness( + |mut receiver: UnboundedReceiver| async move { + // Shouldn't send any requests. + assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none()); + }, + |sender| async move { + let task = RecoveryTask::new(sender, params, strategies); + + assert_eq!(task.run().await.unwrap_err(), RecoveryError::Unavailable); + }, + ); + } + + // If we have the data in av-store, returns early. + { + let params = params.clone(); + let strategies = VecDeque::new(); + let candidate_hash = params.candidate_hash; + + test_harness( + |mut receiver: UnboundedReceiver| async move { + assert_matches!( + receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(), + AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAvailableData(hash, tx)) => { + assert_eq!(hash, candidate_hash); + tx.send(Some(dummy_available_data())).unwrap(); + }); + }, + |sender| async move { + let task = RecoveryTask::new(sender, params, strategies); + + assert_eq!(task.run().await.unwrap(), dummy_available_data()); + }, + ); + } + + // Strategy returning `RecoveryError::Invalid`` will short-circuit the entire task. 
+ { + let mut params = params.clone(); + params.bypass_availability_store = true; + let mut strategies: VecDeque>> = + VecDeque::new(); + strategies.push_back(Box::new(InvalidStrategy)); + strategies.push_back(Box::new(GoodStrategy)); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + // Shouldn't send any requests. + assert!(receiver.next().timeout(TIMEOUT).await.unwrap().is_none()); + }, + |sender| async move { + let task = RecoveryTask::new(sender, params, strategies); + + assert_eq!(task.run().await.unwrap_err(), RecoveryError::Invalid); + }, + ); + } + + // Strategy returning `Unavailable` will fall back to the next one. + { + let params = params.clone(); + let candidate_hash = params.candidate_hash; + let mut strategies: VecDeque>> = + VecDeque::new(); + strategies.push_back(Box::new(UnavailableStrategy)); + strategies.push_back(Box::new(GoodStrategy)); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + assert_matches!( + receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(), + AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAvailableData(hash, tx)) => { + assert_eq!(hash, candidate_hash); + tx.send(Some(dummy_available_data())).unwrap(); + }); + }, + |sender| async move { + let task = RecoveryTask::new(sender, params, strategies); + + assert_eq!(task.run().await.unwrap(), dummy_available_data()); + }, + ); + } + + // More complex scenario. + { + let params = params.clone(); + let candidate_hash = params.candidate_hash; + let mut strategies: VecDeque>> = + VecDeque::new(); + strategies.push_back(Box::new(UnavailableStrategy)); + strategies.push_back(Box::new(UnavailableStrategy)); + strategies.push_back(Box::new(GoodStrategy)); + strategies.push_back(Box::new(InvalidStrategy)); + + test_harness( + |mut receiver: UnboundedReceiver| async move { + assert_matches!( + receiver.next().timeout(TIMEOUT).await.unwrap().unwrap(), + AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryAvailableData(hash, tx)) => { + assert_eq!(hash, candidate_hash); + tx.send(Some(dummy_available_data())).unwrap(); + }); + }, + |sender| async move { + let task = RecoveryTask::new(sender, params, strategies); + + assert_eq!(task.run().await.unwrap(), dummy_available_data()); + }, + ); + } + } + + #[test] + fn test_is_unavailable() { + assert_eq!(is_unavailable(0, 0, 0, 0), false); + assert_eq!(is_unavailable(2, 2, 2, 0), false); + // Already reached the threshold. + assert_eq!(is_unavailable(3, 0, 10, 3), false); + assert_eq!(is_unavailable(3, 2, 0, 3), false); + assert_eq!(is_unavailable(3, 2, 10, 3), false); + // It's still possible to reach the threshold + assert_eq!(is_unavailable(0, 0, 10, 3), false); + assert_eq!(is_unavailable(0, 0, 3, 3), false); + assert_eq!(is_unavailable(1, 1, 1, 3), false); + // Not possible to reach the threshold + assert_eq!(is_unavailable(0, 0, 0, 3), true); + assert_eq!(is_unavailable(2, 3, 2, 10), true); + } +} diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs new file mode 100644 index 0000000000000000000000000000000000000000..677bc2d1375aa75900123c39f1fbe59c8902ea69 --- /dev/null +++ b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs @@ -0,0 +1,343 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{ + futures_undead::FuturesUndead, + task::{ + strategy::{ + do_post_recovery_check, is_unavailable, OngoingRequests, N_PARALLEL, + SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT, + }, + RecoveryParams, RecoveryStrategy, State, + }, + LOG_TARGET, +}; + +use polkadot_node_primitives::AvailableData; +use polkadot_node_subsystem::{overseer, RecoveryError}; +use polkadot_primitives::{ChunkIndex, ValidatorIndex}; + +use std::collections::VecDeque; + +/// Parameters needed for fetching systematic chunks. +pub struct FetchSystematicChunksParams { + /// Validators that hold the systematic chunks. + pub validators: Vec<(ChunkIndex, ValidatorIndex)>, + /// Validators in the backing group, to be used as a backup for requesting systematic chunks. + pub backers: Vec, +} + +/// `RecoveryStrategy` that attempts to recover the systematic chunks from the validators that +/// hold them, in order to bypass the erasure code reconstruction step, which is costly. +pub struct FetchSystematicChunks { + /// Systematic recovery threshold. + threshold: usize, + /// Validators that hold the systematic chunks. + validators: Vec<(ChunkIndex, ValidatorIndex)>, + /// Backers to be used as a backup. + backers: Vec, + /// Collection of in-flight requests. + requesting_chunks: OngoingRequests, +} + +impl FetchSystematicChunks { + /// Instantiate a new systematic chunks strategy. + pub fn new(params: FetchSystematicChunksParams) -> Self { + Self { + threshold: params.validators.len(), + validators: params.validators, + backers: params.backers, + requesting_chunks: FuturesUndead::new(), + } + } + + fn is_unavailable( + unrequested_validators: usize, + in_flight_requests: usize, + systematic_chunk_count: usize, + threshold: usize, + ) -> bool { + is_unavailable( + systematic_chunk_count, + in_flight_requests, + unrequested_validators, + threshold, + ) + } + + /// Desired number of parallel requests. + /// + /// For the given threshold (total required number of chunks) get the desired number of + /// requests we want to have running in parallel at this time. + fn get_desired_request_count(&self, chunk_count: usize, threshold: usize) -> usize { + // Upper bound for parallel requests. + let max_requests_boundary = std::cmp::min(N_PARALLEL, threshold); + // How many chunks are still needed? + let remaining_chunks = threshold.saturating_sub(chunk_count); + // Actual number of requests we want to have in flight in parallel: + // We don't have to make up for any error rate, as an error fetching a systematic chunk + // results in failure of the entire strategy. 
+ std::cmp::min(max_requests_boundary, remaining_chunks) + } + + async fn attempt_systematic_recovery( + &mut self, + state: &mut State, + common_params: &RecoveryParams, + ) -> Result { + let strategy_type = RecoveryStrategy::::strategy_type(self); + let recovery_duration = common_params.metrics.time_erasure_recovery(strategy_type); + let reconstruct_duration = common_params.metrics.time_erasure_reconstruct(strategy_type); + let chunks = state + .received_chunks + .range( + ChunkIndex(0).. + ChunkIndex( + u32::try_from(self.threshold) + .expect("validator count should not exceed u32"), + ), + ) + .map(|(_, chunk)| chunk.chunk.clone()) + .collect::>(); + + let available_data = polkadot_erasure_coding::reconstruct_from_systematic_v1( + common_params.n_validators, + chunks, + ); + + match available_data { + Ok(data) => { + drop(reconstruct_duration); + + // Attempt post-recovery check. + do_post_recovery_check(common_params, data) + .await + .map_err(|e| { + recovery_duration.map(|rd| rd.stop_and_discard()); + e + }) + .map(|data| { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + "Data recovery from systematic chunks complete", + ); + data + }) + }, + Err(err) => { + reconstruct_duration.map(|rd| rd.stop_and_discard()); + recovery_duration.map(|rd| rd.stop_and_discard()); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + ?err, + "Systematic data recovery error", + ); + + Err(RecoveryError::Invalid) + }, + } + } +} + +#[async_trait::async_trait] +impl RecoveryStrategy + for FetchSystematicChunks +{ + fn display_name(&self) -> &'static str { + "Fetch systematic chunks" + } + + fn strategy_type(&self) -> &'static str { + "systematic_chunks" + } + + async fn run( + mut self: Box, + state: &mut State, + sender: &mut Sender, + common_params: &RecoveryParams, + ) -> Result { + // First query the store for any chunks we've got. + if !common_params.bypass_availability_store { + let local_chunk_indices = state.populate_from_av_store(common_params, sender).await; + + for (_, our_c_index) in &local_chunk_indices { + // If we are among the systematic validators but hold an invalid chunk, we cannot + // perform the systematic recovery. Fall through to the next strategy. + if self.validators.iter().any(|(c_index, _)| c_index == our_c_index) && + !state.received_chunks.contains_key(our_c_index) + { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + requesting = %self.requesting_chunks.len(), + total_requesting = %self.requesting_chunks.total_len(), + n_validators = %common_params.n_validators, + chunk_index = ?our_c_index, + "Systematic chunk recovery is not possible. We are among the systematic validators but hold an invalid chunk", + ); + return Err(RecoveryError::Unavailable) + } + } + } + + // No need to query the validators that have the chunks we already received or that we know + // don't have the data from previous strategies. 
+ self.validators.retain(|(c_index, v_index)| { + !state.received_chunks.contains_key(c_index) && + state.can_retry_request( + &(common_params.validator_authority_keys[v_index.0 as usize].clone(), *v_index), + SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT, + ) + }); + + let mut systematic_chunk_count = state + .received_chunks + .range(ChunkIndex(0)..ChunkIndex(self.threshold as u32)) + .count(); + + // Safe to `take` here, as we're consuming `self` anyway and we're not using the + // `validators` or `backers` fields in other methods. + let mut validators_queue: VecDeque<_> = std::mem::take(&mut self.validators) + .into_iter() + .map(|(_, validator_index)| { + ( + common_params.validator_authority_keys[validator_index.0 as usize].clone(), + validator_index, + ) + }) + .collect(); + let mut backers: Vec<_> = std::mem::take(&mut self.backers) + .into_iter() + .map(|validator_index| { + common_params.validator_authority_keys[validator_index.0 as usize].clone() + }) + .collect(); + + loop { + // If received_chunks has `systematic_chunk_threshold` entries, attempt to recover the + // data. + if systematic_chunk_count >= self.threshold { + return self.attempt_systematic_recovery::(state, common_params).await + } + + if Self::is_unavailable( + validators_queue.len(), + self.requesting_chunks.total_len(), + systematic_chunk_count, + self.threshold, + ) { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + erasure_root = ?common_params.erasure_root, + %systematic_chunk_count, + requesting = %self.requesting_chunks.len(), + total_requesting = %self.requesting_chunks.total_len(), + n_validators = %common_params.n_validators, + systematic_threshold = ?self.threshold, + "Data recovery from systematic chunks is not possible", + ); + + return Err(RecoveryError::Unavailable) + } + + let desired_requests_count = + self.get_desired_request_count(systematic_chunk_count, self.threshold); + let already_requesting_count = self.requesting_chunks.len(); + gum::debug!( + target: LOG_TARGET, + ?common_params.candidate_hash, + ?desired_requests_count, + total_received = ?systematic_chunk_count, + systematic_threshold = ?self.threshold, + ?already_requesting_count, + "Requesting systematic availability chunks for a candidate", + ); + + let strategy_type = RecoveryStrategy::::strategy_type(&*self); + + state + .launch_parallel_chunk_requests( + strategy_type, + common_params, + sender, + desired_requests_count, + &mut validators_queue, + &mut self.requesting_chunks, + ) + .await; + + let _ = state + .wait_for_chunks( + strategy_type, + common_params, + SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT, + &mut validators_queue, + &mut self.requesting_chunks, + &mut backers, + |unrequested_validators, + in_flight_reqs, + // Don't use this chunk count, as it may contain non-systematic chunks. 
+ _chunk_count, + new_systematic_chunk_count| { + systematic_chunk_count = new_systematic_chunk_count; + + let is_unavailable = Self::is_unavailable( + unrequested_validators, + in_flight_reqs, + systematic_chunk_count, + self.threshold, + ); + + systematic_chunk_count >= self.threshold || is_unavailable + }, + ) + .await; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_erasure_coding::systematic_recovery_threshold; + + #[test] + fn test_get_desired_request_count() { + let num_validators = 100; + let threshold = systematic_recovery_threshold(num_validators).unwrap(); + + let systematic_chunks_task = FetchSystematicChunks::new(FetchSystematicChunksParams { + validators: vec![(1.into(), 1.into()); num_validators], + backers: vec![], + }); + assert_eq!(systematic_chunks_task.get_desired_request_count(0, threshold), threshold); + assert_eq!(systematic_chunks_task.get_desired_request_count(5, threshold), threshold - 5); + assert_eq!( + systematic_chunks_task.get_desired_request_count(num_validators * 2, threshold), + 0 + ); + assert_eq!(systematic_chunks_task.get_desired_request_count(0, N_PARALLEL * 2), N_PARALLEL); + assert_eq!(systematic_chunks_task.get_desired_request_count(N_PARALLEL, N_PARALLEL + 2), 2); + } +} diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs index 909f6a25f46b1eca3074095b6f54f0415d62c562..d0a4a2d8b60e8a969b8d8bf73731afd28de83093 100644 --- a/polkadot/node/network/availability-recovery/src/tests.rs +++ b/polkadot/node/network/availability-recovery/src/tests.rs @@ -14,38 +14,133 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::{sync::Arc, time::Duration}; +use crate::task::{REGULAR_CHUNKS_REQ_RETRY_LIMIT, SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT}; + +use super::*; +use std::{result::Result, sync::Arc, time::Duration}; use assert_matches::assert_matches; use futures::{executor, future}; use futures_timer::Delay; +use rstest::rstest; use parity_scale_codec::Encode; use polkadot_node_network_protocol::request_response::{ - self as req_res, v1::AvailableDataFetchingRequest, IncomingRequest, Protocol, Recipient, - ReqProtocolNames, Requests, + self as req_res, + v1::{AvailableDataFetchingRequest, ChunkResponse}, + IncomingRequest, Protocol, Recipient, ReqProtocolNames, Requests, }; -use polkadot_node_subsystem_test_helpers::derive_erasure_chunks_with_proofs_and_root; - -use super::*; -use sc_network::{IfDisconnected, OutboundFailure, ProtocolName, RequestFailure}; - -use polkadot_node_primitives::{BlockData, PoV, Proof}; +use polkadot_node_primitives::{BlockData, ErasureChunk, PoV, Proof}; use polkadot_node_subsystem::messages::{ AllMessages, NetworkBridgeTxMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers::{ - make_subsystem_context, mock::new_leaf, TestSubsystemContextHandle, + derive_erasure_chunks_with_proofs_and_root, make_subsystem_context, mock::new_leaf, + TestSubsystemContextHandle, }; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::{ - AuthorityDiscoveryId, Block, Hash, HeadData, IndexedVec, PersistedValidationData, ValidatorId, + node_features, AuthorityDiscoveryId, Block, ExecutorParams, Hash, HeadData, IndexedVec, + NodeFeatures, PersistedValidationData, SessionInfo, ValidatorId, }; use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; +use sc_network::{IfDisconnected, OutboundFailure, ProtocolName, RequestFailure}; 
+use sp_keyring::Sr25519Keyring; type VirtualOverseer = TestSubsystemContextHandle; +// Implement some helper constructors for the AvailabilityRecoverySubsystem + +/// Create a new instance of `AvailabilityRecoverySubsystem` which starts with a fast path to +/// request data from backers. +fn with_fast_path( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::BackersFirstAlways, + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which requests only chunks +fn with_chunks_only( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::ChunksAlways, + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which requests chunks if PoV is +/// above a threshold. +fn with_chunks_if_pov_large( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::BackersFirstIfSizeLower(FETCH_CHUNKS_THRESHOLD), + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which requests systematic chunks if +/// PoV is above a threshold. +fn with_systematic_chunks_if_pov_large( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::for_validator( + Some(FETCH_CHUNKS_THRESHOLD), + req_receiver, + req_protocol_names, + metrics, + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which first requests full data +/// from backers, with a fallback to recover from systematic chunks. +fn with_fast_path_then_systematic_chunks( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::BackersThenSystematicChunks, + ) +} + +/// Create a new instance of `AvailabilityRecoverySubsystem` which first attempts to request +/// systematic chunks, with a fallback to requesting regular chunks. 
+fn with_systematic_chunks( + req_receiver: IncomingRequestReceiver, + req_protocol_names: &ReqProtocolNames, + metrics: Metrics, +) -> AvailabilityRecoverySubsystem { + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + req_receiver, + req_protocol_names, + metrics, + RecoveryStrategyKind::SystematicChunks, + ) +} + // Deterministic genesis hash for protocol names const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); @@ -61,14 +156,11 @@ fn request_receiver( receiver.0 } -fn test_harness>( +fn test_harness>( subsystem: AvailabilityRecoverySubsystem, - test: impl FnOnce(VirtualOverseer) -> T, + test: impl FnOnce(VirtualOverseer) -> Fut, ) { - let _ = env_logger::builder() - .is_test(true) - .filter(Some("polkadot_availability_recovery"), log::LevelFilter::Trace) - .try_init(); + sp_tracing::init_for_tests(); let pool = sp_core::testing::TaskExecutor::new(); @@ -138,8 +230,6 @@ async fn overseer_recv( msg } -use sp_keyring::Sr25519Keyring; - #[derive(Debug)] enum Has { No, @@ -163,27 +253,127 @@ struct TestState { validators: Vec, validator_public: IndexedVec, validator_authority_id: Vec, + validator_groups: IndexedVec>, current: Hash, candidate: CandidateReceipt, session_index: SessionIndex, + core_index: CoreIndex, + node_features: NodeFeatures, persisted_validation_data: PersistedValidationData, available_data: AvailableData, - chunks: Vec, - invalid_chunks: Vec, + chunks: IndexedVec, + invalid_chunks: IndexedVec, } impl TestState { + fn new(node_features: NodeFeatures) -> Self { + let validators = vec![ + Sr25519Keyring::Ferdie, // <- this node, role: validator + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::One, + Sr25519Keyring::Two, + ]; + + let validator_public = validator_pubkeys(&validators); + let validator_authority_id = validator_authority_id(&validators); + let validator_groups = vec![ + vec![1.into(), 0.into(), 3.into(), 4.into()], + vec![5.into(), 6.into()], + vec![2.into()], + ]; + + let current = Hash::repeat_byte(1); + + let mut candidate = dummy_candidate_receipt(dummy_hash()); + + let session_index = 10; + + let persisted_validation_data = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: Default::default(), + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }; + + let pov = PoV { block_data: BlockData(vec![42; 64]) }; + + let available_data = AvailableData { + validation_data: persisted_validation_data.clone(), + pov: Arc::new(pov), + }; + + let core_index = CoreIndex(2); + + let (chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + validators.len(), + &available_data, + |_, _| {}, + ); + let chunks = map_chunks(chunks, &node_features, validators.len(), core_index); + + // Mess around: + let invalid_chunks = chunks + .iter() + .cloned() + .map(|mut chunk| { + if chunk.chunk.len() >= 2 && chunk.chunk[0] != chunk.chunk[1] { + chunk.chunk[0] = chunk.chunk[1]; + } else if chunk.chunk.len() >= 1 { + chunk.chunk[0] = !chunk.chunk[0]; + } else { + chunk.proof = Proof::dummy_proof(); + } + chunk + }) + .collect(); + debug_assert_ne!(chunks, invalid_chunks); + + candidate.descriptor.erasure_root = erasure_root; + candidate.descriptor.relay_parent = Hash::repeat_byte(10); + candidate.descriptor.pov_hash = Hash::repeat_byte(3); + + Self { + validators, + validator_public, + validator_authority_id, + validator_groups: IndexedVec::>::try_from( + validator_groups, + ) + .unwrap(), + current, + candidate, + session_index, + 
core_index, + node_features, + persisted_validation_data, + available_data, + chunks, + invalid_chunks, + } + } + + fn with_empty_node_features() -> Self { + Self::new(NodeFeatures::EMPTY) + } + fn threshold(&self) -> usize { recovery_threshold(self.validators.len()).unwrap() } + fn systematic_threshold(&self) -> usize { + systematic_recovery_threshold(self.validators.len()).unwrap() + } + fn impossibility_threshold(&self) -> usize { self.validators.len() - self.threshold() + 1 } - async fn test_runtime_api(&self, virtual_overseer: &mut VirtualOverseer) { + async fn test_runtime_api_session_info(&self, virtual_overseer: &mut VirtualOverseer) { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -199,8 +389,7 @@ impl TestState { tx.send(Ok(Some(SessionInfo { validators: self.validator_public.clone(), discovery_keys: self.validator_authority_id.clone(), - // all validators in the same group. - validator_groups: IndexedVec::>::from(vec![(0..self.validators.len()).map(|i| ValidatorIndex(i as _)).collect()]), + validator_groups: self.validator_groups.clone(), assignment_keys: vec![], n_cores: 0, zeroth_delay_tranche_width: 0, @@ -214,6 +403,38 @@ impl TestState { }))).unwrap(); } ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionExecutorParams( + session_index, + tx, + ) + )) => { + assert_eq!(relay_parent, self.current); + assert_eq!(session_index, self.session_index); + + tx.send(Ok(Some(ExecutorParams::new()))).unwrap(); + } + ); + } + + async fn test_runtime_api_node_features(&self, virtual_overseer: &mut VirtualOverseer) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _relay_parent, + RuntimeApiRequest::NodeFeatures( + _, + tx, + ) + )) => { + tx.send(Ok( + self.node_features.clone() + )).unwrap(); + } + ); } async fn respond_to_available_data_query( @@ -239,16 +460,19 @@ impl TestState { async fn respond_to_query_all_request( &self, virtual_overseer: &mut VirtualOverseer, - send_chunk: impl Fn(usize) -> bool, + send_chunk: impl Fn(ValidatorIndex) -> bool, ) { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryAllChunks(_, tx) ) => { - let v = self.chunks.iter() - .filter(|c| send_chunk(c.index.0 as usize)) - .cloned() + let v = self.chunks.iter().enumerate() + .filter_map(|(val_idx, c)| if send_chunk(ValidatorIndex(val_idx as u32)) { + Some((ValidatorIndex(val_idx as u32), c.clone())) + } else { + None + }) .collect(); let _ = tx.send(v); @@ -259,16 +483,19 @@ impl TestState { async fn respond_to_query_all_request_invalid( &self, virtual_overseer: &mut VirtualOverseer, - send_chunk: impl Fn(usize) -> bool, + send_chunk: impl Fn(ValidatorIndex) -> bool, ) { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryAllChunks(_, tx) ) => { - let v = self.invalid_chunks.iter() - .filter(|c| send_chunk(c.index.0 as usize)) - .cloned() + let v = self.invalid_chunks.iter().enumerate() + .filter_map(|(val_idx, c)| if send_chunk(ValidatorIndex(val_idx as u32)) { + Some((ValidatorIndex(val_idx as u32), c.clone())) + } else { + None + }) .collect(); let _ = tx.send(v); @@ -276,14 +503,16 @@ impl TestState { ) } - async fn test_chunk_requests( + async fn test_chunk_requests_inner( &self, req_protocol_names: &ReqProtocolNames, 
candidate_hash: CandidateHash, virtual_overseer: &mut VirtualOverseer, n: usize, - who_has: impl Fn(usize) -> Has, - ) -> Vec, ProtocolName), RequestFailure>>> { + mut who_has: impl FnMut(ValidatorIndex) -> Has, + systematic_recovery: bool, + protocol: Protocol, + ) -> Vec, ProtocolName), RequestFailure>>> { // arbitrary order. let mut i = 0; let mut senders = Vec::new(); @@ -301,13 +530,19 @@ impl TestState { i += 1; assert_matches!( req, - Requests::ChunkFetchingV1(req) => { + Requests::ChunkFetching(req) => { assert_eq!(req.payload.candidate_hash, candidate_hash); - let validator_index = req.payload.index.0 as usize; + let validator_index = req.payload.index; + let chunk = self.chunks.get(validator_index).unwrap().clone(); + + if systematic_recovery { + assert!(chunk.index.0 as usize <= self.systematic_threshold(), "requested non-systematic chunk"); + } + let available_data = match who_has(validator_index) { Has::No => Ok(None), - Has::Yes => Ok(Some(self.chunks[validator_index].clone().into())), + Has::Yes => Ok(Some(chunk)), Has::NetworkError(e) => Err(e), Has::DoesNotReturn => { senders.push(req.pending_response); @@ -315,11 +550,29 @@ impl TestState { } }; - let _ = req.pending_response.send( + req.pending_response.send( available_data.map(|r| - (req_res::v1::ChunkFetchingResponse::from(r).encode(), req_protocol_names.get_name(Protocol::ChunkFetchingV1)) + ( + match protocol { + Protocol::ChunkFetchingV1 => + match r { + None => req_res::v1::ChunkFetchingResponse::NoSuchChunk, + Some(c) => req_res::v1::ChunkFetchingResponse::Chunk( + ChunkResponse { + chunk: c.chunk, + proof: c.proof + } + ) + }.encode(), + Protocol::ChunkFetchingV2 => + req_res::v2::ChunkFetchingResponse::from(r).encode(), + + _ => unreachable!() + }, + req_protocol_names.get_name(protocol) + ) ) - ); + ).unwrap(); } ) } @@ -329,16 +582,61 @@ impl TestState { senders } + async fn test_chunk_requests( + &self, + req_protocol_names: &ReqProtocolNames, + candidate_hash: CandidateHash, + virtual_overseer: &mut VirtualOverseer, + n: usize, + who_has: impl FnMut(ValidatorIndex) -> Has, + systematic_recovery: bool, + ) -> Vec, ProtocolName), RequestFailure>>> { + self.test_chunk_requests_inner( + req_protocol_names, + candidate_hash, + virtual_overseer, + n, + who_has, + systematic_recovery, + Protocol::ChunkFetchingV2, + ) + .await + } + + // Use legacy network protocol version. + async fn test_chunk_requests_v1( + &self, + req_protocol_names: &ReqProtocolNames, + candidate_hash: CandidateHash, + virtual_overseer: &mut VirtualOverseer, + n: usize, + who_has: impl FnMut(ValidatorIndex) -> Has, + systematic_recovery: bool, + ) -> Vec, ProtocolName), RequestFailure>>> { + self.test_chunk_requests_inner( + req_protocol_names, + candidate_hash, + virtual_overseer, + n, + who_has, + systematic_recovery, + Protocol::ChunkFetchingV1, + ) + .await + } + async fn test_full_data_requests( &self, req_protocol_names: &ReqProtocolNames, candidate_hash: CandidateHash, virtual_overseer: &mut VirtualOverseer, who_has: impl Fn(usize) -> Has, - ) -> Vec, ProtocolName), RequestFailure>>> { + group_index: GroupIndex, + ) -> Vec, ProtocolName), RequestFailure>>> { let mut senders = Vec::new(); - for _ in 0..self.validators.len() { - // Receive a request for a chunk. + let expected_validators = self.validator_groups.get(group_index).unwrap(); + for _ in 0..expected_validators.len() { + // Receive a request for the full `AvailableData`. 
assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::NetworkBridgeTx( @@ -357,6 +655,7 @@ impl TestState { .iter() .position(|a| Recipient::Authority(a.clone()) == req.peer) .unwrap(); + assert!(expected_validators.contains(&ValidatorIndex(validator_index as u32))); let available_data = match who_has(validator_index) { Has::No => Ok(None), @@ -387,95 +686,67 @@ impl TestState { } } +impl Default for TestState { + fn default() -> Self { + // Enable the chunk mapping node feature. + let mut node_features = NodeFeatures::new(); + node_features + .resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false); + node_features + .set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true); + + Self::new(node_features) + } +} + fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> IndexedVec<ValidatorIndex, ValidatorId> { val_ids.iter().map(|v| v.public().into()).collect() } -fn validator_authority_id(val_ids: &[Sr25519Keyring]) -> Vec<AuthorityDiscoveryId> { +pub fn validator_authority_id(val_ids: &[Sr25519Keyring]) -> Vec<AuthorityDiscoveryId> { val_ids.iter().map(|v| v.public().into()).collect() } -impl Default for TestState { - fn default() -> Self { - let validators = vec![ - Sr25519Keyring::Ferdie, // <- this node, role: validator - Sr25519Keyring::Alice, - Sr25519Keyring::Bob, - Sr25519Keyring::Charlie, - Sr25519Keyring::Dave, - ]; - - let validator_public = validator_pubkeys(&validators); - let validator_authority_id = validator_authority_id(&validators); - - let current = Hash::repeat_byte(1); - - let mut candidate = dummy_candidate_receipt(dummy_hash()); - - let session_index = 10; - - let persisted_validation_data = PersistedValidationData { - parent_head: HeadData(vec![7, 8, 9]), - relay_parent_number: Default::default(), - max_pov_size: 1024, - relay_parent_storage_root: Default::default(), - }; - - let pov = PoV { block_data: BlockData(vec![42; 64]) }; - - let available_data = AvailableData { - validation_data: persisted_validation_data.clone(), - pov: Arc::new(pov), - }; - - let (chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( - validators.len(), - &available_data, - |_, _| {}, - ); - // Mess around: - let invalid_chunks = chunks - .iter() - .cloned() - .map(|mut chunk| { - if chunk.chunk.len() >= 2 && chunk.chunk[0] != chunk.chunk[1] { - chunk.chunk[0] = chunk.chunk[1]; - } else if chunk.chunk.len() >= 1 { - chunk.chunk[0] = !chunk.chunk[0]; - } else { - chunk.proof = Proof::dummy_proof(); - } - chunk - }) - .collect(); - debug_assert_ne!(chunks, invalid_chunks); - - candidate.descriptor.erasure_root = erasure_root; - candidate.descriptor.relay_parent = Hash::repeat_byte(10); - - Self { - validators, - validator_public, - validator_authority_id, - current, - candidate, - session_index, - persisted_validation_data, - available_data, - chunks, - invalid_chunks, - } - } -} +/// Map the chunks to the validators according to the availability chunk mapping algorithm.
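+/// With the `AvailabilityChunkMapping` feature enabled, the assignment is expected to be +/// (roughly) `chunk_index = (core_index * systematic_threshold + validator_index) % n_validators`, +/// so that the systematic chunks of different cores land on different validator subsets. See +/// `availability_chunk_indices` for the authoritative implementation.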
+fn map_chunks( + chunks: Vec<ErasureChunk>, + node_features: &NodeFeatures, + n_validators: usize, + core_index: CoreIndex, +) -> IndexedVec<ValidatorIndex, ErasureChunk> { + let chunk_indices = + availability_chunk_indices(Some(node_features), n_validators, core_index).unwrap(); + + (0..n_validators) + .map(|val_idx| chunks[chunk_indices[val_idx].0 as usize].clone()) + .collect::<Vec<_>>() + .into() +} -#[test] -fn availability_is_recovered_from_chunks_if_no_group_provided() { +#[rstest] +#[case(true)] +#[case(false)] +fn availability_is_recovered_from_chunks_if_no_group_provided(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_fast_path_then_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -495,12 +766,15 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -512,8 +786,9 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -533,16 +808,31 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { new_candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + threshold, + |_| Has::No, + systematic_recovery, + ) + .await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + + // Even if the recovery is systematic, we'll always fall back to regular recovery, so keep + // this around.
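+ // Regular recovery only concludes `Unavailable` once `impossibility_threshold()` validators + // have failed to return a chunk, since only then can fewer than `threshold()` chunks remain.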
test_state .test_chunk_requests( &req_protocol_names, @@ -550,6 +840,7 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { &mut virtual_overseer, test_state.impossibility_threshold(), |_| Has::No, + false, ) .await; @@ -559,16 +850,34 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() { }); } -#[test] -fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunks_only() { - let test_state = TestState::default(); +#[rstest] +#[case(true)] +#[case(false)] +fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunks_only( + #[case] systematic_recovery: bool, +) { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); - - test_harness(subsystem, |mut virtual_overseer| async move { + let test_state = TestState::default(); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( &mut virtual_overseer, OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( @@ -586,12 +895,15 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -603,8 +915,9 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -623,41 +936,80 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk AvailabilityRecoveryMessage::RecoverAvailableData( new_candidate.clone(), test_state.session_index, - None, + Some(GroupIndex(1)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; - test_state - .test_chunk_requests( - &req_protocol_names, - new_candidate.hash(), - &mut virtual_overseer, - test_state.impossibility_threshold(), - |_| Has::No, - ) - .await; + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + threshold * SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT as usize, + |_| Has::No, + systematic_recovery, + ) + .await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + // Even if the recovery is systematic, we'll always fall back to regular recovery, so + // keep this around. 
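+ // The `threshold` validators that already failed during the systematic phase count towards + // the impossibility tally, which is why only `impossibility_threshold() - threshold` further + // negative replies are expected below.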
+ test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.impossibility_threshold() - threshold, + |_| Has::No, + false, + ) + .await; + + // A request times out with `Unavailable` error. + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + } else { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.impossibility_threshold(), + |_| Has::No, + false, + ) + .await; - // A request times out with `Unavailable` error. - assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + // A request times out with `Unavailable` error. + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + } virtual_overseer }); } -#[test] -fn bad_merkle_path_leads_to_recovery_error() { - let mut test_state = TestState::default(); +#[rstest] +#[case(true)] +#[case(false)] +fn bad_merkle_path_leads_to_recovery_error(#[case] systematic_recovery: bool) { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let mut test_state = TestState::default(); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -677,25 +1029,40 @@ fn bad_merkle_path_leads_to_recovery_error() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); // Create some faulty chunks. 
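+ // Overwriting the chunk data makes every returned chunk fail the Merkle proof check against + // the candidate's erasure root.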
- test_state.chunks[0].chunk = vec![0; 32]; - test_state.chunks[1].chunk = vec![1; 32]; - test_state.chunks[2].chunk = vec![2; 32]; - test_state.chunks[3].chunk = vec![3; 32]; - test_state.chunks[4].chunk = vec![4; 32]; + for chunk in test_state.chunks.iter_mut() { + chunk.chunk = vec![0; 32]; + } test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.systematic_threshold(), + |_| Has::No, + systematic_recovery, + ) + .await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + test_state .test_chunk_requests( &req_protocol_names, @@ -703,6 +1070,7 @@ fn bad_merkle_path_leads_to_recovery_error() { &mut virtual_overseer, test_state.impossibility_threshold(), |_| Has::Yes, + false, ) .await; @@ -712,14 +1080,24 @@ fn bad_merkle_path_leads_to_recovery_error() { }); } -#[test] -fn wrong_chunk_index_leads_to_recovery_error() { +#[rstest] +#[case(true)] +#[case(false)] +fn wrong_chunk_index_leads_to_recovery_error(#[case] systematic_recovery: bool) { let mut test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( &mut virtual_overseer, @@ -739,32 +1117,55 @@ fn wrong_chunk_index_leads_to_recovery_error() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; - let candidate_hash = test_state.candidate.hash(); + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; - // These chunks should fail the index check as they don't have the correct index for - // validator. - test_state.chunks[1] = test_state.chunks[0].clone(); - test_state.chunks[2] = test_state.chunks[0].clone(); - test_state.chunks[3] = test_state.chunks[0].clone(); - test_state.chunks[4] = test_state.chunks[0].clone(); + let candidate_hash = test_state.candidate.hash(); test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + // Chunks should fail the index check as they don't have the correct index. + let first_chunk = test_state.chunks.get(0.into()).unwrap().clone(); + for c_index in 1..test_state.chunks.len() { + *(test_state.chunks.get_mut(ValidatorIndex(c_index as u32)).unwrap()) = + first_chunk.clone(); + } + + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.systematic_threshold(), + |_| Has::Yes, + // We set this to false, as we know we will be requesting the wrong indices.
+ false, + ) + .await; + + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + test_state .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.impossibility_threshold(), - |_| Has::No, + test_state.chunks.len() - 1, + |_| Has::Yes, + false, ) .await; @@ -774,14 +1175,30 @@ fn wrong_chunk_index_leads_to_recovery_error() { }); } -#[test] -fn invalid_erasure_coding_leads_to_invalid_error() { +#[rstest] +#[case(true)] +#[case(false)] +fn invalid_erasure_coding_leads_to_invalid_error(#[case] systematic_recovery: bool) { let mut test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_fast_path_then_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { let pov = PoV { block_data: BlockData(vec![69; 64]) }; @@ -795,7 +1212,12 @@ fn invalid_erasure_coding_leads_to_invalid_error() { |i, chunk| *chunk = vec![i as u8; 32], ); - test_state.chunks = bad_chunks; + test_state.chunks = map_chunks( + bad_chunks, + &test_state.node_features, + test_state.validators.len(), + test_state.core_index, + ); test_state.candidate.descriptor.erasure_root = bad_erasure_root; let candidate_hash = test_state.candidate.hash(); @@ -817,12 +1239,15 @@ fn invalid_erasure_coding_leads_to_invalid_error() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; @@ -832,8 +1257,9 @@ fn invalid_erasure_coding_leads_to_invalid_error() { &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -843,12 +1269,74 @@ fn invalid_erasure_coding_leads_to_invalid_error() { }); } +#[test] +fn invalid_pov_hash_leads_to_invalid_error() { + let mut test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let subsystem = AvailabilityRecoverySubsystem::for_collator( + None, + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ); + + test_harness(subsystem, |mut virtual_overseer| async move { + let pov = PoV { block_data: BlockData(vec![69; 64]) }; + + test_state.candidate.descriptor.pov_hash = pov.hash(); + + let candidate_hash = test_state.candidate.hash(); + + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + 
test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.threshold(), + |_| Has::Yes, + false, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Invalid); + virtual_overseer + }); +} + #[test] fn fast_path_backing_group_recovers() { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( + let subsystem = with_fast_path( request_receiver(&req_protocol_names), + &req_protocol_names, Metrics::new_dummy(), ); @@ -870,12 +1358,14 @@ fn fast_path_backing_group_recovers() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -892,6 +1382,7 @@ fn fast_path_backing_group_recovers() { candidate_hash, &mut virtual_overseer, who_has, + GroupIndex(0), ) .await; @@ -901,14 +1392,47 @@ fn fast_path_backing_group_recovers() { }); } -#[test] -fn recovers_from_only_chunks_if_pov_large() { - let test_state = TestState::default(); +#[rstest] +#[case(true, false)] +#[case(false, true)] +#[case(false, false)] +fn recovers_from_only_chunks_if_pov_large( + #[case] systematic_recovery: bool, + #[case] for_collator: bool, +) { + let mut test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_if_pov_large( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let (subsystem, threshold) = match (systematic_recovery, for_collator) { + (true, false) => ( + with_systematic_chunks_if_pov_large( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + (false, false) => ( + with_chunks_if_pov_large( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + (false, true) => { + test_state.candidate.descriptor.pov_hash = test_state.available_data.pov.hash(); + ( + AvailabilityRecoverySubsystem::for_collator( + None, + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ) + }, + (_, _) => unreachable!(), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -928,12 +1452,15 @@ fn recovers_from_only_chunks_if_pov_large() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -942,20 +1469,23 @@ fn recovers_from_only_chunks_if_pov_large() { AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryChunkSize(_, tx) ) => { - let _ = tx.send(Some(1000000)); + let _ = 
tx.send(Some(crate::FETCH_CHUNKS_THRESHOLD + 1)); } ); - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; - test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + if !for_collator { + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } test_state .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -974,35 +1504,64 @@ fn recovers_from_only_chunks_if_pov_large() { AvailabilityRecoveryMessage::RecoverAvailableData( new_candidate.clone(), test_state.session_index, - Some(GroupIndex(0)), + Some(GroupIndex(1)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; - assert_matches!( overseer_recv(&mut virtual_overseer).await, AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryChunkSize(_, tx) ) => { - let _ = tx.send(Some(1000000)); + let _ = tx.send(Some(crate::FETCH_CHUNKS_THRESHOLD + 1)); } ); - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; - test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + if !for_collator { + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } - test_state - .test_chunk_requests( - &req_protocol_names, - new_candidate.hash(), - &mut virtual_overseer, - test_state.impossibility_threshold(), - |_| Has::No, - ) - .await; + if systematic_recovery { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.systematic_threshold() * SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT as usize, + |_| Has::No, + systematic_recovery, + ) + .await; + if !for_collator { + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + // Even if the recovery is systematic, we'll always fall back to regular recovery. + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.impossibility_threshold() - threshold, + |_| Has::No, + false, + ) + .await; + } else { + test_state + .test_chunk_requests( + &req_protocol_names, + new_candidate.hash(), + &mut virtual_overseer, + test_state.impossibility_threshold(), + |_| Has::No, + false, + ) + .await; + } // A request times out with `Unavailable` error. 
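+ // `Unavailable` means that not enough valid chunks could be fetched; it is distinct from + // `Invalid`, which would mean the reconstructed data failed validation.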
assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); @@ -1010,14 +1569,40 @@ fn recovers_from_only_chunks_if_pov_large() { }); } -#[test] -fn fast_path_backing_group_recovers_if_pov_small() { - let test_state = TestState::default(); +#[rstest] +#[case(true, false)] +#[case(false, true)] +#[case(false, false)] +fn fast_path_backing_group_recovers_if_pov_small( + #[case] systematic_recovery: bool, + #[case] for_collator: bool, +) { + let mut test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_if_pov_large( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + + let subsystem = match (systematic_recovery, for_collator) { + (true, false) => with_systematic_chunks_if_pov_large( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + + (false, false) => with_chunks_if_pov_large( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + (false, true) => { + test_state.candidate.descriptor.pov_hash = test_state.available_data.pov.hash(); + AvailabilityRecoverySubsystem::for_collator( + None, + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ) + }, + (_, _) => unreachable!(), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1037,12 +1622,15 @@ fn fast_path_backing_group_recovers_if_pov_small() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -1060,7 +1648,9 @@ fn fast_path_backing_group_recovers_if_pov_small() { } ); - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + if !for_collator { + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + } test_state .test_full_data_requests( @@ -1068,6 +1658,7 @@ fn fast_path_backing_group_recovers_if_pov_small() { candidate_hash, &mut virtual_overseer, who_has, + GroupIndex(0), ) .await; @@ -1077,14 +1668,31 @@ fn fast_path_backing_group_recovers_if_pov_small() { }); } -#[test] -fn no_answers_in_fast_path_causes_chunk_requests() { +#[rstest] +#[case(true)] +#[case(false)] +fn no_answers_in_fast_path_causes_chunk_requests(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_fast_path( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_fast_path_then_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1104,12 +1712,15 @@ fn no_answers_in_fast_path_causes_chunk_requests() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; 
- test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -1127,6 +1738,7 @@ fn no_answers_in_fast_path_causes_chunk_requests() { candidate_hash, &mut virtual_overseer, who_has, + GroupIndex(0), ) .await; @@ -1137,8 +1749,9 @@ fn no_answers_in_fast_path_causes_chunk_requests() { &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold(), + threshold, |_| Has::Yes, + systematic_recovery, ) .await; @@ -1148,14 +1761,25 @@ fn no_answers_in_fast_path_causes_chunk_requests() { }); } -#[test] -fn task_canceled_when_receivers_dropped() { +#[rstest] +#[case(true)] +#[case(false)] +fn task_canceled_when_receivers_dropped(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1175,12 +1799,15 @@ fn task_canceled_when_receivers_dropped() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; for _ in 0..test_state.validators.len() { match virtual_overseer.recv().timeout(TIMEOUT).await { @@ -1193,14 +1820,24 @@ fn task_canceled_when_receivers_dropped() { }); } -#[test] -fn chunks_retry_until_all_nodes_respond() { +#[rstest] +#[case(true)] +#[case(false)] +fn chunks_retry_until_all_nodes_respond(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1219,30 +1856,51 @@ fn chunks_retry_until_all_nodes_respond() { AvailabilityRecoveryMessage::RecoverAvailableData( test_state.candidate.clone(), test_state.session_index, - Some(GroupIndex(0)), + None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + if 
systematic_recovery { + for _ in 0..SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT { + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.systematic_threshold(), + |_| Has::timeout(), + true, + ) + .await; + } + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + test_state .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.validators.len() - test_state.threshold(), + test_state.impossibility_threshold(), |_| Has::timeout(), + false, ) .await; - // we get to go another round! + // We get to go another round! Actually, we get to go `REGULAR_CHUNKS_REQ_RETRY_LIMIT` + // number of times. test_state .test_chunk_requests( &req_protocol_names, @@ -1250,21 +1908,23 @@ fn chunks_retry_until_all_nodes_respond() { &mut virtual_overseer, test_state.impossibility_threshold(), |_| Has::No, + false, ) .await; - // Recovered data should match the original one. + // Recovery is impossible. assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); virtual_overseer }); } #[test] -fn not_returning_requests_wont_stall_retrieval() { +fn network_bridge_not_returning_responses_wont_stall_retrieval() { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( + let subsystem = with_chunks_only( request_receiver(&req_protocol_names), + &req_protocol_names, Metrics::new_dummy(), ); @@ -1286,12 +1946,15 @@ fn not_returning_requests_wont_stall_retrieval() { test_state.candidate.clone(), test_state.session_index, Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); @@ -1309,6 +1972,7 @@ fn not_returning_requests_wont_stall_retrieval() { &mut virtual_overseer, not_returning_count, |_| Has::DoesNotReturn, + false, ) .await; @@ -1320,6 +1984,7 @@ fn not_returning_requests_wont_stall_retrieval() { // Should start over: test_state.validators.len() + 3, |_| Has::timeout(), + false, ) .await; @@ -1331,6 +1996,7 @@ fn not_returning_requests_wont_stall_retrieval() { &mut virtual_overseer, test_state.threshold(), |_| Has::Yes, + false, ) .await; @@ -1340,14 +2006,24 @@ fn not_returning_requests_wont_stall_retrieval() { }); } -#[test] -fn all_not_returning_requests_still_recovers_on_return() { +#[rstest] +#[case(true)] +#[case(false)] +fn all_not_returning_requests_still_recovers_on_return(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1366,46 +2042,64 @@ fn all_not_returning_requests_still_recovers_on_return() { AvailabilityRecoveryMessage::RecoverAvailableData( test_state.candidate.clone(), 
test_state.session_index, - Some(GroupIndex(0)), + None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + let n = if systematic_recovery { + test_state.systematic_threshold() + } else { + test_state.validators.len() + }; let senders = test_state .test_chunk_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.validators.len(), + n, |_| Has::DoesNotReturn, + systematic_recovery, ) .await; future::join( async { Delay::new(Duration::from_millis(10)).await; - // Now retrieval should be able to recover. + // Now retrieval should be able to progress. std::mem::drop(senders); }, - test_state.test_chunk_requests( - &req_protocol_names, - candidate_hash, - &mut virtual_overseer, - // Should start over: - test_state.validators.len() + 3, - |_| Has::timeout(), - ), + async { + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + // Should start over: + n, + |_| Has::timeout(), + systematic_recovery, + ) + .await + }, ) .await; + if systematic_recovery { + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + // we get to go another round! test_state .test_chunk_requests( @@ -1414,6 +2108,7 @@ fn all_not_returning_requests_still_recovers_on_return() { &mut virtual_overseer, test_state.threshold(), |_| Has::Yes, + false, ) .await; @@ -1423,14 +2118,24 @@ fn all_not_returning_requests_still_recovers_on_return() { }); } -#[test] -fn returns_early_if_we_have_the_data() { +#[rstest] +#[case(true)] +#[case(false)] +fn returns_early_if_we_have_the_data(#[case] systematic_recovery: bool) { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( - request_receiver(&req_protocol_names), - Metrics::new_dummy(), - ); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; test_harness(subsystem, |mut virtual_overseer| async move { overseer_signal( @@ -1450,12 +2155,15 @@ fn returns_early_if_we_have_the_data() { test_state.candidate.clone(), test_state.session_index, None, + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; test_state.respond_to_available_data_query(&mut virtual_overseer, true).await; assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); @@ -1464,11 +2172,12 @@ fn returns_early_if_we_have_the_data() { } #[test] -fn does_not_query_local_validator() { +fn returns_early_if_present_in_the_subsystem_cache() { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( + let subsystem =
with_fast_path( request_receiver(&req_protocol_names), + &req_protocol_names, Metrics::new_dummy(), ); @@ -1489,50 +2198,812 @@ fn does_not_query_local_validator() { AvailabilityRecoveryMessage::RecoverAvailableData( test_state.candidate.clone(), test_state.session_index, - None, + Some(GroupIndex(0)), + Some(test_state.core_index), tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; - test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; - test_state.respond_to_query_all_request(&mut virtual_overseer, |i| i == 0).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; let candidate_hash = test_state.candidate.hash(); + let who_has = |i| match i { + 3 => Has::Yes, + _ => Has::No, + }; + + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state - .test_chunk_requests( + .test_full_data_requests( &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.validators.len(), - |i| if i == 0 { panic!("requested from local validator") } else { Has::timeout() }, + who_has, + GroupIndex(0), ) .await; - // second round, make sure it uses the local chunk. - test_state - .test_chunk_requests( - &req_protocol_names, + // Recovered data should match the original one. + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + + // A second recovery for the same candidate will return early as it'll be present in the + // cache. + let (tx, rx) = oneshot::channel(); + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + Some(GroupIndex(0)), + Some(test_state.core_index), + tx, + ), + ) + .await; + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + + virtual_overseer + }); +} + +#[rstest] +#[case(true)] +#[case(false)] +fn does_not_query_local_validator(#[case] systematic_recovery: bool) { + let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state + .respond_to_query_all_request(&mut virtual_overseer, |i| i.0 == 0) + .await; + + let candidate_hash = test_state.candidate.hash(); + + // second round, make sure it uses the local chunk. 
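+ // Only `threshold - 1` network requests are expected here, because the chunk we hold + // locally (validator 0) already counts towards the recovery threshold.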
+ test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + threshold - 1, + |i| if i.0 == 0 { panic!("requested from local validator") } else { Has::Yes }, + systematic_recovery, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + virtual_overseer + }); +} + +#[rstest] +#[case(true)] +#[case(false)] +fn invalid_local_chunk(#[case] systematic_recovery: bool) { + let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + + let validator_index_for_first_chunk = test_state + .chunks + .iter() + .enumerate() + .find_map(|(val_idx, chunk)| if chunk.index.0 == 0 { Some(val_idx) } else { None }) + .unwrap() as u32; + + test_state + .respond_to_query_all_request_invalid(&mut virtual_overseer, |i| { + i.0 == validator_index_for_first_chunk + }) + .await; + + let candidate_hash = test_state.candidate.hash(); + + // If systematic recovery detects that the chunk we hold locally is invalid, it goes + // straight to regular recovery. + if systematic_recovery { + test_state + .respond_to_query_all_request_invalid(&mut virtual_overseer, |i| { + i.0 == validator_index_for_first_chunk + }) + .await; + } + + test_state + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.threshold(), + |i| { + if i.0 == validator_index_for_first_chunk { + panic!("requested from local validator") + } else { + Has::Yes + } + }, + false, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + virtual_overseer + }); +} + +#[test] +fn systematic_chunks_are_not_requested_again_in_regular_recovery() { + // Run this test multiple times, as the order in which requests are made is random and we want + // to make sure that we catch regressions.
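+ // The property under test: a systematic chunk that was already fetched must not be + // requested a second time once recovery falls back to the regular strategy.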
+ for _ in 0..TestState::default().chunks.len() { + let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let subsystem = with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ); + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + let validator_index_for_first_chunk = test_state + .chunks + .iter() + .enumerate() + .find_map(|(val_idx, chunk)| if chunk.index.0 == 0 { Some(val_idx) } else { None }) + .unwrap() as u32; + + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + test_state.systematic_threshold(), + |i| if i.0 == validator_index_for_first_chunk { Has::No } else { Has::Yes }, + true, + ) + .await; + + // Falls back to regular recovery, since one validator returned a fatal error. + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + 1, + |i| { + if (test_state.chunks.get(i).unwrap().index.0 as usize) < + test_state.systematic_threshold() + { + panic!("Already requested") + } else { + Has::Yes + } + }, + false, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + virtual_overseer + }); + } +} + +#[rstest] +#[case(true, true)] +#[case(true, false)] +#[case(false, true)] +#[case(false, false)] +fn chunk_indices_are_mapped_to_different_validators( + #[case] systematic_recovery: bool, + #[case] mapping_enabled: bool, +) { + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let test_state = match mapping_enabled { + true => TestState::default(), + false => TestState::with_empty_node_features(), + }; + let subsystem = match systematic_recovery { + true => with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + false => with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, _rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + 
test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + let mut chunk_indices: Vec<(u32, u32)> = vec![]; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::SendRequests( + requests, + _if_disconnected, + ) + ) => { + for req in requests { + assert_matches!( + req, + Requests::ChunkFetching(req) => { + assert_eq!(req.payload.candidate_hash, test_state.candidate.hash()); + + let validator_index = req.payload.index; + let chunk_index = test_state.chunks.get(validator_index).unwrap().index; + + if systematic_recovery && mapping_enabled { + assert!((chunk_index.0 as usize) <= test_state.systematic_threshold(), "requested non-systematic chunk"); + } + + chunk_indices.push((chunk_index.0, validator_index.0)); + } + ) + } + } + ); + + if mapping_enabled { + assert!(!chunk_indices.iter().any(|(c_index, v_index)| c_index == v_index)); + } else { + assert!(chunk_indices.iter().all(|(c_index, v_index)| c_index == v_index)); + } + + virtual_overseer + }); +} + +#[rstest] +#[case(true, false)] +#[case(false, true)] +#[case(false, false)] +fn number_of_request_retries_is_bounded( + #[case] systematic_recovery: bool, + #[case] should_fail: bool, +) { + let mut test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + // We need the number of validators to be evenly divisible by the threshold for this test to be + // easier to write. + let n_validators = 6; + test_state.validators.truncate(n_validators); + test_state.validator_authority_id.truncate(n_validators); + let mut temp = test_state.validator_public.to_vec(); + temp.truncate(n_validators); + test_state.validator_public = temp.into(); + + let (chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + n_validators, + &test_state.available_data, + |_, _| {}, + ); + test_state.chunks = + map_chunks(chunks, &test_state.node_features, n_validators, test_state.core_index); + test_state.candidate.descriptor.erasure_root = erasure_root; + + let (subsystem, retry_limit) = match systematic_recovery { + false => ( + with_chunks_only( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + REGULAR_CHUNKS_REQ_RETRY_LIMIT, + ), + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT, + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + let validator_count_per_iteration = if systematic_recovery { + test_state.systematic_threshold() + } else { + test_state.chunks.len() + }; + + // Network errors are considered non-fatal but 
should be retried a limited number of times. + for _ in 1..retry_limit { + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + validator_count_per_iteration, + |_| Has::timeout(), + systematic_recovery, + ) + .await; + } + + if should_fail { + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + validator_count_per_iteration, + |_| Has::timeout(), + systematic_recovery, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + } else { + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + test_state.threshold(), + |_| Has::Yes, + systematic_recovery, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + } + + virtual_overseer + }); +} + +#[test] +fn systematic_recovery_retries_from_backers() { + let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let subsystem = with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ); + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + let group_index = GroupIndex(2); + let group_size = test_state.validator_groups.get(group_index).unwrap().len(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + Some(group_index), + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + let mut cnt = 0; + + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + test_state.systematic_threshold(), + |_| { + let res = if cnt < group_size { Has::timeout() } else { Has::Yes }; + cnt += 1; + res + }, + true, + ) + .await; + + // Exhaust retries. + for _ in 0..(SYSTEMATIC_CHUNKS_REQ_RETRY_LIMIT - 1) { + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + group_size, + |_| Has::No, + true, + ) + .await; + } + + // Now, final chance is to try from a backer. + test_state + .test_chunk_requests( + &req_protocol_names, + test_state.candidate.hash(), + &mut virtual_overseer, + group_size, + |_| Has::Yes, + true, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); + virtual_overseer + }); +} + +#[rstest] +#[case(true)] +#[case(false)] +fn test_legacy_network_protocol_with_mapping_disabled(#[case] systematic_recovery: bool) { + // In this case, when the mapping is disabled, recovery will work with both v2 and v1 requests, + // under the assumption that ValidatorIndex is always equal to ChunkIndex. However, systematic + // recovery will not be possible, it will fall back to regular recovery. 
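+ // A v1 `ChunkFetchingResponse` carries no chunk index, so it can only be trusted while + // `ValidatorIndex` and `ChunkIndex` coincide, i.e. while the mapping feature is off.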
+ let test_state = TestState::with_empty_node_features(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + let candidate_hash = test_state.candidate.hash(); + + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + test_state + .test_chunk_requests_v1( + &req_protocol_names, candidate_hash, &mut virtual_overseer, - test_state.threshold() - 1, - |i| if i == 0 { panic!("requested from local validator") } else { Has::Yes }, + threshold, + |_| Has::Yes, + false, ) .await; + // Recovered data should match the original one. assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data); virtual_overseer }); } +#[rstest] +#[case(true)] +#[case(false)] +fn test_legacy_network_protocol_with_mapping_enabled(#[case] systematic_recovery: bool) { + // In this case, when the mapping is enabled, we MUST only use v2. Recovery should fail for v1. 
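+ // With the mapping enabled, a v1 response would be attributed to the wrong chunk index, + // so v1 answers must be discarded and recovery is expected to end in `Unavailable`.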
+ let test_state = TestState::default(); + let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); + let (subsystem, threshold) = match systematic_recovery { + true => ( + with_systematic_chunks( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.systematic_threshold(), + ), + false => ( + with_fast_path( + request_receiver(&req_protocol_names), + &req_protocol_names, + Metrics::new_dummy(), + ), + test_state.threshold(), + ), + }; + + test_harness(subsystem, |mut virtual_overseer| async move { + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf( + test_state.current, + 1, + ))), + ) + .await; + + let (tx, rx) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + AvailabilityRecoveryMessage::RecoverAvailableData( + test_state.candidate.clone(), + test_state.session_index, + None, + Some(test_state.core_index), + tx, + ), + ) + .await; + + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + let candidate_hash = test_state.candidate.hash(); + + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + if systematic_recovery { + test_state + .test_chunk_requests_v1( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + threshold, + |_| Has::Yes, + systematic_recovery, + ) + .await; + + // Systematic recovery failed, trying regular recovery. + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + } + + test_state + .test_chunk_requests_v1( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.validators.len() - test_state.threshold(), + |_| Has::Yes, + false, + ) + .await; + + assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable); + virtual_overseer + }); +} + #[test] -fn invalid_local_chunk_is_ignored() { +fn test_systematic_recovery_skipped_if_no_core_index() { let test_state = TestState::default(); let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); - let subsystem = AvailabilityRecoverySubsystem::with_chunks_only( + let subsystem = with_systematic_chunks( request_receiver(&req_protocol_names), + &req_protocol_names, Metrics::new_dummy(), ); @@ -1554,30 +3025,99 @@ fn invalid_local_chunk_is_ignored() { test_state.candidate.clone(), test_state.session_index, None, + None, tx, ), ) .await; - test_state.test_runtime_api(&mut virtual_overseer).await; + test_state.test_runtime_api_session_info(&mut virtual_overseer).await; + + test_state.test_runtime_api_node_features(&mut virtual_overseer).await; + + let candidate_hash = test_state.candidate.hash(); + test_state.respond_to_available_data_query(&mut virtual_overseer, false).await; + test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await; + + // Systematic recovery not possible without core index, falling back to regular recovery. test_state - .respond_to_query_all_request_invalid(&mut virtual_overseer, |i| i == 0) + .test_chunk_requests( + &req_protocol_names, + candidate_hash, + &mut virtual_overseer, + test_state.validators.len() - test_state.threshold(), + |_| Has::No, + false, + ) .await; + // Make it fail, in order to assert that indeed regular recovery was attempted. If it were + // systematic recovery, we would have had one more attempt for regular reconstruction. 
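+	// (Context for the assertion below: the counts above are chosen so that regular
+	// recovery runs out of chunks to fetch and must give up. A systematic attempt would
+	// have been observable as an extra round of requests against the holders of the first
+	// `systematic_threshold()` chunks before any fallback.)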
+		assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable);
+		virtual_overseer
+	});
+}
+
+#[test]
+fn test_systematic_recovery_skipped_if_mapping_disabled() {
+	let test_state = TestState::with_empty_node_features();
+	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+	let subsystem = AvailabilityRecoverySubsystem::for_validator(
+		None,
+		request_receiver(&req_protocol_names),
+		&req_protocol_names,
+		Metrics::new_dummy(),
+	);
+
+	test_harness(subsystem, |mut virtual_overseer| async move {
+		overseer_signal(
+			&mut virtual_overseer,
+			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
+				test_state.current,
+				1,
+			))),
+		)
+		.await;
+
+		let (tx, rx) = oneshot::channel();
+
+		overseer_send(
+			&mut virtual_overseer,
+			AvailabilityRecoveryMessage::RecoverAvailableData(
+				test_state.candidate.clone(),
+				test_state.session_index,
+				None,
+				Some(test_state.core_index),
+				tx,
+			),
+		)
+		.await;
+
+		test_state.test_runtime_api_session_info(&mut virtual_overseer).await;
+
+		test_state.test_runtime_api_node_features(&mut virtual_overseer).await;
+		let candidate_hash = test_state.candidate.hash();
+		test_state.respond_to_available_data_query(&mut virtual_overseer, false).await;
+		test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await;
+
+		// Systematic recovery not possible when the mapping is disabled, falling back to
+		// regular recovery.
 		test_state
 			.test_chunk_requests(
 				&req_protocol_names,
 				candidate_hash,
 				&mut virtual_overseer,
-				test_state.threshold() - 1,
-				|i| if i == 0 { panic!("requested from local validator") } else { Has::Yes },
+				test_state.validators.len() - test_state.threshold(),
+				|_| Has::No,
+				false,
 			)
 			.await;
 
-		assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data);
+		// Make it fail, in order to assert that indeed regular recovery was attempted. If it were
+		// systematic recovery, we would have had one more attempt for regular reconstruction.
+		assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable);
 		virtual_overseer
 	});
 }
diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml
index 9c2423e7e5810c3406ab4f40c219dbd8001245bb..b609fb1e071957868c4bff91e02ca5d110aae9ee 100644
--- a/polkadot/node/network/bridge/Cargo.toml
+++ b/polkadot/node/network/bridge/Cargo.toml
@@ -15,7 +15,7 @@ async-trait = "0.1.79"
 futures = "0.3.30"
 gum = { package = "tracing-gum", path = "../../gum" }
 polkadot-primitives = { path = "../../../primitives" }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }
 sc-network = { path = "../../../../substrate/client/network" }
 sp-consensus = { path = "../../../../substrate/primitives/consensus/common" }
 polkadot-node-metrics = { path = "../../metrics" }
@@ -24,7 +24,7 @@ polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-overseer = { path = "../../overseer" }
 parking_lot = "0.12.1"
 bytes = "1"
-fatality = "0.0.6"
+fatality = "0.1.1"
 thiserror = { workspace = true }
 
 [dev-dependencies]
diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs
index 5691c8413ad997daf80fcd551b4265031d563d88..17d6676b8430d8252cf5a8816a8117a80d0411df 100644
--- a/polkadot/node/network/bridge/src/network.rs
+++ b/polkadot/node/network/bridge/src/network.rs
@@ -177,7 +177,7 @@ fn send_message(
 	// network used `Bytes` this would not be necessary.
 	//
 	// peer may have gotten disconnected by the time `send_message()` is called
-	// at which point the the sink is not available.
+	// at which point the sink is not available.
 	let last_peer = peers.pop();
 	peers.into_iter().for_each(|peer| {
 		if let Some(sink) = notification_sinks.get(&(peer_set, peer)) {
diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs
index d5be6f01c33737a2b09bd39f40640d29c99ca94c..7b6dea748572ba1c24633b047227b92a9890c12c 100644
--- a/polkadot/node/network/bridge/src/tx/mod.rs
+++ b/polkadot/node/network/bridge/src/tx/mod.rs
@@ -301,7 +301,15 @@ where
 
 		for req in reqs {
 			match req {
-				Requests::ChunkFetchingV1(_) => metrics.on_message("chunk_fetching_v1"),
+				Requests::ChunkFetching(ref req) => {
+					// This is not the actual request that will succeed, as we don't know yet
+					// what that will be. It's only the primary request we tried.
+					if req.fallback_request.is_some() {
+						metrics.on_message("chunk_fetching_v2")
+					} else {
+						metrics.on_message("chunk_fetching_v1")
+					}
+				},
 				Requests::AvailableDataFetchingV1(_) =>
 					metrics.on_message("available_data_fetching_v1"),
 				Requests::CollationFetchingV1(_) => metrics.on_message("collation_fetching_v1"),
diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml
index 2c7135742f56890a7ad147e95c545cbebde04c9b..d7291552738dbf4f6dd86e44bba6dd953beca55f 100644
--- a/polkadot/node/network/collator-protocol/Cargo.toml
+++ b/polkadot/node/network/collator-protocol/Cargo.toml
@@ -24,7 +24,7 @@ polkadot-node-network-protocol = { path = "../protocol" }
 polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 polkadot-node-subsystem = { path = "../../subsystem" }
-fatality = "0.0.6"
+fatality = "0.1.1"
 thiserror = { workspace = true }
 tokio-util = "0.7.1"
 
@@ -32,16 +32,16 @@ tokio-util = "0.7.1"
 log = { workspace = true, default-features = true }
 env_logger = "0.11"
 assert_matches = "1.4.0"
+rstest = "0.18.2"
 sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] }
 sp-keyring = { path = "../../../../substrate/primitives/keyring" }
 sc-keystore = { path = "../../../../substrate/client/keystore" }
 sc-network = { path = "../../../../substrate/client/network" }
-parity-scale-codec = { version = "3.6.1", features = ["std"] }
+parity-scale-codec = { version = "3.6.12", features = ["std"] }
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
 polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" }
 
 [features]
 default = []
-elastic-scaling-experimental = []
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
index 879caf923285b3341a0bc28bc52371fb9c81ff69..80a85420b392b930f451689f22edffb4667a0588 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
@@ -261,10 +261,12 @@ struct State {
 	/// `active_leaves`, the opposite doesn't hold true.
 	///
 	/// Relay-chain blocks which don't support prospective parachains are
-	/// never included in the fragment trees of active leaves which do. In
+	/// never included in the fragment chains of active leaves which do. In
 	/// particular, this means that if a given relay parent belongs to implicit
 	/// ancestry of some active leaf, then it does support prospective parachains.
-	implicit_view: ImplicitView,
+	///
+	/// It's `None` if the collator is not yet collating for a paraid.
+	implicit_view: Option<ImplicitView>,
 
 	/// All active leaves observed by us, including both that do and do not
 	/// support prospective parachains. This mapping works as a replacement for
@@ -334,7 +336,7 @@ impl State {
 			metrics,
 			collating_on: Default::default(),
 			peer_data: Default::default(),
-			implicit_view: Default::default(),
+			implicit_view: None,
 			active_leaves: Default::default(),
 			per_relay_parent: Default::default(),
 			span_per_relay_parent: Default::default(),
@@ -531,7 +533,7 @@ async fn distribute_collation(
 	// Otherwise, it should be present in allowed ancestry of some leaf.
 	//
 	// It's collation-producer responsibility to verify that there exists
-	// a hypothetical membership in a fragment tree for candidate.
+	// a hypothetical membership in a fragment chain for the candidate.
 	let interested =
 		state
 			.peer_data
 			.filter(|(_, PeerData { view: v, .. })| match relay_parent_mode {
 				ProspectiveParachainsMode::Disabled => v.contains(&candidate_relay_parent),
 				ProspectiveParachainsMode::Enabled { .. } => v.iter().any(|block_hash| {
-					state
-						.implicit_view
-						.known_allowed_relay_parents_under(block_hash, Some(id))
-						.unwrap_or_default()
-						.contains(&candidate_relay_parent)
+					state.implicit_view.as_ref().map(|implicit_view| {
+						implicit_view
+							.known_allowed_relay_parents_under(block_hash, Some(id))
+							.unwrap_or_default()
+							.contains(&candidate_relay_parent)
+					}) == Some(true)
 				}),
 			});
 
@@ -830,6 +833,7 @@ async fn process_msg(
 	match msg {
 		CollateOn(id) => {
 			state.collating_on = Some(id);
+			state.implicit_view = Some(ImplicitView::new(Some(id)));
 		},
 		DistributeCollation {
 			candidate_receipt,
@@ -894,7 +898,7 @@ async fn process_msg(
 				);
 			}
 		},
-		msg @ (ReportCollator(..) | Invalid(..) | Seconded(..) | Backed { .. }) => {
+		msg @ (ReportCollator(..) | Invalid(..) | Seconded(..)) => {
 			gum::warn!(
 				target: LOG_TARGET,
 				"{:?} message is not expected on the collator side of the protocol",
@@ -920,7 +924,6 @@ async fn send_collation(
 	let peer_id = request.peer_id();
 	let candidate_hash = receipt.hash();
 
-	#[cfg(feature = "elastic-scaling-experimental")]
 	let result = match parent_head_data {
 		ParentHeadData::WithData { head_data, .. } =>
 			Ok(request_v2::CollationFetchingResponse::CollationWithParentHeadData {
@@ -931,13 +934,6 @@ async fn send_collation(
 		ParentHeadData::OnlyHash(_) =>
 			Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)),
 	};
-	#[cfg(not(feature = "elastic-scaling-experimental"))]
-	let result = {
-		// suppress unused warning
-		let _parent_head_data = parent_head_data;
-
-		Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov))
-	};
 
 	let response =
 		OutgoingResponse { result, reputation_changes: Vec::new(), sent_feedback: Some(tx) };
@@ -1215,7 +1211,10 @@ async fn handle_peer_view_change(
 		Some(ProspectiveParachainsMode::Disabled) => std::slice::from_ref(&added),
 		Some(ProspectiveParachainsMode::Enabled { ..
}) => state .implicit_view - .known_allowed_relay_parents_under(&added, state.collating_on) + .as_ref() + .and_then(|implicit_view| { + implicit_view.known_allowed_relay_parents_under(&added, state.collating_on) + }) .unwrap_or_default(), None => { gum::trace!( @@ -1353,21 +1352,22 @@ where state.per_relay_parent.insert(*leaf, PerRelayParent::new(mode)); if mode.is_enabled() { - state - .implicit_view - .activate_leaf(sender, *leaf) - .await - .map_err(Error::ImplicitViewFetchError)?; + if let Some(ref mut implicit_view) = state.implicit_view { + implicit_view + .activate_leaf(sender, *leaf) + .await + .map_err(Error::ImplicitViewFetchError)?; - let allowed_ancestry = state - .implicit_view - .known_allowed_relay_parents_under(leaf, state.collating_on) - .unwrap_or_default(); - for block_hash in allowed_ancestry { - state - .per_relay_parent - .entry(*block_hash) - .or_insert_with(|| PerRelayParent::new(mode)); + let allowed_ancestry = implicit_view + .known_allowed_relay_parents_under(leaf, state.collating_on) + .unwrap_or_default(); + + for block_hash in allowed_ancestry { + state + .per_relay_parent + .entry(*block_hash) + .or_insert_with(|| PerRelayParent::new(mode)); + } } } } @@ -1378,7 +1378,11 @@ where // of implicit ancestry. Only update the state after the hash is actually // pruned from the block info storage. let pruned = if mode.is_enabled() { - state.implicit_view.deactivate_leaf(*leaf) + state + .implicit_view + .as_mut() + .map(|view| view.deactivate_leaf(*leaf)) + .unwrap_or_default() } else { vec![*leaf] }; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index 689e03ce4737bcdd39d0651009f212e3cb364eea..412792bbecfbabb1644537e2b5a470d6f32ee0b4 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -144,7 +144,6 @@ impl Default for TestState { impl TestState { /// Adds a few more scheduled cores to the state for the same para id /// compared to the default. - #[cfg(feature = "elastic-scaling-experimental")] pub fn with_elastic_scaling() -> Self { let mut state = Self::default(); let para_id = state.para_id; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 707053545630a0a11603ccda030200eed7b5cfe2..0a0a85fb1f2750a54c909b523ab54564652d3354 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -18,7 +18,7 @@ use super::*; -use polkadot_node_subsystem::messages::{ChainApiMessage, ProspectiveParachainsMessage}; +use polkadot_node_subsystem::messages::ChainApiMessage; use polkadot_primitives::{AsyncBackingParams, Header, OccupiedCore}; const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = @@ -31,7 +31,6 @@ fn get_parent_hash(hash: Hash) -> Hash { /// Handle a view update. async fn update_view( virtual_overseer: &mut VirtualOverseer, - test_state: &TestState, new_view: Vec<(Hash, u32)>, // Hash and block number. activated: u8, // How many new heads does this update contain? 
) { @@ -61,21 +60,88 @@ async fn update_view( let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len); - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx), - ) if parent == leaf_hash => { - tx.send(vec![(test_state.para_id, min_number)]).unwrap(); - } - ); - let ancestry_len = leaf_number + 1 - min_number; let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) .take(ancestry_len as usize); let ancestry_numbers = (min_number..=leaf_number).rev(); let mut ancestry_iter = ancestry_hashes.clone().zip(ancestry_numbers).peekable(); + if let Some((hash, number)) = ancestry_iter.next() { + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::ChainApi(ChainApiMessage::BlockHeader(.., tx)) => { + let header = Header { + parent_hash: get_parent_hash(hash), + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + .., + RuntimeApiRequest::AsyncBackingParams( + tx + ) + ) + ) => { + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); + } + ); + + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + .., + RuntimeApiRequest::SessionIndexForChild( + tx + ) + ) + ) => { + tx.send(Ok(1)).unwrap(); + } + ); + + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::ChainApi( + ChainApiMessage::Ancestors { + k, + response_channel: tx, + .. + } + ) => { + assert_eq!(k, ASYNC_BACKING_PARAMETERS.allowed_ancestry_len as usize); + + tx.send(Ok(ancestry_hashes.clone().skip(1).into_iter().collect())).unwrap(); + } + ); + } + + for _ in ancestry_iter.clone() { + assert_matches!( + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + .., + RuntimeApiRequest::SessionIndexForChild( + tx + ) + ) + ) => { + tx.send(Ok(1)).unwrap(); + } + ); + } + while let Some((hash, number)) = ancestry_iter.next() { // May be `None` for the last element. let parent_hash = @@ -195,7 +261,7 @@ fn distribute_collation_from_implicit_view() { overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; // Activated leaf is `b`, but the collation will be based on `c`. - update_view(virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(virtual_overseer, vec![(head_b, head_b_num)], 1).await; let validator_peer_ids = test_state.current_group_validator_peer_ids(); for (val, peer) in test_state @@ -258,7 +324,7 @@ fn distribute_collation_from_implicit_view() { // Head `c` goes out of view. // Build a different candidate for this relay parent and attempt to distribute it. 
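 	// (Note on the helper used below: with `GetMinimumRelayParents` gone, the rewritten
 	// `update_view` above answers the implicit-view handshake directly - per new leaf one
 	// `ChainApiMessage::BlockHeader`, one `RuntimeApiRequest::AsyncBackingParams` and one
 	// `RuntimeApiRequest::SessionIndexForChild`, followed by a `ChainApiMessage::Ancestors`
 	// for the allowed ancestry and a `SessionIndexForChild` per ancestry entry.)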
- update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await; let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; let parent_head_data_hash = Hash::repeat_byte(0xBB); @@ -318,7 +384,7 @@ fn distribute_collation_up_to_limit() { overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; // Activated leaf is `a`, but the collation will be based on `b`. - update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await; for i in 0..(ASYNC_BACKING_PARAMETERS.max_candidate_depth + 1) { let pov = PoV { block_data: BlockData(vec![i as u8]) }; @@ -377,7 +443,6 @@ fn distribute_collation_up_to_limit() { /// Tests that collator send the parent head data in /// case the para is assigned to multiple cores (elastic scaling). #[test] -#[cfg(feature = "elastic-scaling-experimental")] fn send_parent_head_data_for_elastic_scaling() { let test_state = TestState::with_elastic_scaling(); @@ -402,7 +467,7 @@ fn send_parent_head_data_for_elastic_scaling() { CollatorProtocolMessage::CollateOn(test_state.para_id), ) .await; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, vec![(head_b, head_b_num)], 1).await; let pov_data = PoV { block_data: BlockData(vec![1 as u8]) }; let candidate = TestCandidateBuilder { @@ -517,8 +582,8 @@ fn advertise_and_send_collation_by_hash() { CollatorProtocolMessage::CollateOn(test_state.para_id), ) .await; - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; - update_view(&mut virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + update_view(&mut virtual_overseer, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, vec![(head_a, head_a_num)], 1).await; let candidates: Vec<_> = (0..2) .map(|i| { @@ -638,7 +703,7 @@ fn advertise_core_occupied() { overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) .await; // Activated leaf is `a`, but the collation will be based on `b`. - update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; let candidate = TestCandidateBuilder { diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 8c3889a3554865c919f2eb33a8f86cce15317ff3..001df1fb3da9b24a3c1acffc049cc7433903aea8 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -121,19 +121,15 @@ impl PendingCollation { } } -/// v2 or v3 advertisement that was rejected by the backing -/// subsystem. Validator may fetch it later if its fragment -/// membership gets recognized before relay parent goes out of view. -#[derive(Debug, Clone)] -pub struct BlockedAdvertisement { - /// Peer that advertised the collation. - pub peer_id: PeerId, - /// Collator id. - pub collator_id: CollatorId, - /// The relay-parent of the candidate. - pub candidate_relay_parent: Hash, - /// Hash of the candidate. 
-	pub candidate_hash: CandidateHash,
+/// An identifier for a fetched collation that was blocked from being seconded because we don't have
+/// access to the parent's HeadData. Can be retried once the candidate outputting this head data is
+/// seconded.
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
+pub struct BlockedCollationId {
+	/// Para id.
+	pub para_id: ParaId,
+	/// Hash of the parent head data.
+	pub parent_head_data_hash: Hash,
 }
 
 /// Performs a sanity check between advertised and fetched collations.
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index ac8c060827f5a4519cd8e4930d1447a8bf4cbe47..9f037a983e51c33cb734ee65fc496541d1082bf2 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -59,15 +59,17 @@ use polkadot_primitives::{
 
 use crate::error::{Error, FetchError, Result, SecondingError};
 
+use self::collation::BlockedCollationId;
+
 use super::{modify_reputation, tick_stream, LOG_TARGET};
 
 mod collation;
 mod metrics;
 
 use collation::{
-	fetched_collation_sanity_check, BlockedAdvertisement, CollationEvent, CollationFetchError,
-	CollationFetchRequest, CollationStatus, Collations, FetchedCollation, PendingCollation,
-	PendingCollationFetch, ProspectiveCandidate,
+	fetched_collation_sanity_check, CollationEvent, CollationFetchError, CollationFetchRequest,
+	CollationStatus, Collations, FetchedCollation, PendingCollation, PendingCollationFetch,
+	ProspectiveCandidate,
 };
 
 #[cfg(test)]
@@ -388,7 +390,7 @@ struct State {
 	/// `active_leaves`, the opposite doesn't hold true.
 	///
 	/// Relay-chain blocks which don't support prospective parachains are
-	/// never included in the fragment trees of active leaves which do. In
+	/// never included in the fragment chains of active leaves which do. In
 	/// particular, this means that if a given relay parent belongs to implicit
 	/// ancestry of some active leaf, then it does support prospective parachains.
 	implicit_view: ImplicitView,
@@ -421,14 +423,6 @@ struct State {
 	/// Span per relay parent.
 	span_per_relay_parent: HashMap<Hash, PerLeafSpan>,
 
-	/// Advertisements that were accepted as valid by collator protocol but rejected by backing.
-	///
-	/// It's only legal to fetch collations that are either built on top of the root
-	/// of some fragment tree or have a parent node which represents backed candidate.
-	/// Otherwise, a validator will keep such advertisement in the memory and re-trigger
-	/// requests to backing on new backed candidates and activations.
-	blocked_advertisements: HashMap<(ParaId, Hash), Vec<BlockedAdvertisement>>,
-
 	/// When a timer in this `FuturesUnordered` triggers, we should dequeue the next request
 	/// attempt in the corresponding `collations_per_relay_parent`.
 	///
@@ -441,6 +435,12 @@ struct State {
 	/// on validation.
 	fetched_candidates: HashMap<FetchedCollation, CollationEvent>,
 
+	/// Collations which we haven't been able to second due to their parent not being known by
+	/// prospective-parachains. Mapped from the paraid and parent_head_hash to the fetched
+	/// collation data. Only needed for async backing. For elastic scaling, the fetched collation
+	/// must contain the full parent head data.
+	blocked_from_seconding: HashMap<BlockedCollationId, Vec<PendingCollationFetch>>,
+
 	/// Aggregated reputation change
 	reputation: ReputationAggregator,
}
@@ -953,6 +953,8 @@ enum AdvertisementError {
 	/// Advertisement is invalid.
 	#[allow(dead_code)]
 	Invalid(InsertAdvertisementError),
+	/// Seconding not allowed by backing subsystem.
+	BlockedByBacking,
 }
 
 impl AdvertisementError {
@@ -962,7 +964,7 @@ impl AdvertisementError {
 			InvalidAssignment => Some(COST_WRONG_PARA),
 			ProtocolMisuse => Some(COST_PROTOCOL_MISUSE),
 			RelayParentUnknown | UndeclaredCollator | Invalid(_) => Some(COST_UNEXPECTED_MESSAGE),
-			UnknownPeer | SecondedLimitReached => None,
+			UnknownPeer | SecondedLimitReached | BlockedByBacking => None,
 		}
 	}
 }
@@ -1001,57 +1003,55 @@ where
 	})
 }
 
-/// Checks whether any of the advertisements are unblocked and attempts to fetch them.
-async fn request_unblocked_collations<Sender, I>(sender: &mut Sender, state: &mut State, blocked: I)
-where
-	Sender: CollatorProtocolSenderTrait,
-	I: IntoIterator<Item = ((ParaId, Hash), Vec<BlockedAdvertisement>)>,
-{
-	let _timer = state.metrics.time_request_unblocked_collations();
+// Try seconding any collations which were waiting on the validation of their parent.
+#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
+async fn second_unblocked_collations<Context>(
+	ctx: &mut Context,
+	state: &mut State,
+	para_id: ParaId,
+	head_data: HeadData,
+	head_data_hash: Hash,
+) {
+	if let Some(unblocked_collations) = state
+		.blocked_from_seconding
+		.remove(&BlockedCollationId { para_id, parent_head_data_hash: head_data_hash })
+	{
+		if !unblocked_collations.is_empty() {
+			gum::debug!(
+				target: LOG_TARGET,
+				"Candidate outputting head data with hash {} unblocked {} collations for seconding.",
+				head_data_hash,
+				unblocked_collations.len()
+			);
+		}
 
-	for (key, mut value) in blocked {
-		let (para_id, para_head) = key;
-		let blocked = std::mem::take(&mut value);
-		for blocked in blocked {
-			let is_seconding_allowed = can_second(
-				sender,
-				para_id,
-				blocked.candidate_relay_parent,
-				blocked.candidate_hash,
-				para_head,
-			)
-			.await;
+		for mut unblocked_collation in unblocked_collations {
+			unblocked_collation.maybe_parent_head_data = Some(head_data.clone());
+			let peer_id = unblocked_collation.collation_event.pending_collation.peer_id;
+			let relay_parent = unblocked_collation.candidate_receipt.descriptor.relay_parent;
 
-			if is_seconding_allowed {
-				let result = enqueue_collation(
-					sender,
-					state,
-					blocked.candidate_relay_parent,
-					para_id,
-					blocked.peer_id,
-					blocked.collator_id,
-					Some((blocked.candidate_hash, para_head)),
-				)
-				.await;
-				if let Err(fetch_error) = result {
-					gum::debug!(
-						target: LOG_TARGET,
-						relay_parent = ?blocked.candidate_relay_parent,
-						para_id = ?para_id,
-						peer_id = ?blocked.peer_id,
-						error = %fetch_error,
-						"Failed to request unblocked collation",
-					);
+			if let Err(err) = kick_off_seconding(ctx, state, unblocked_collation).await {
+				gum::warn!(
+					target: LOG_TARGET,
+					?relay_parent,
+					?para_id,
+					?peer_id,
+					error = %err,
+					"Seconding aborted due to an error",
+				);
+
+				if err.is_malicious() {
+					// Report malicious peer.
+					modify_reputation(
+						&mut state.reputation,
+						ctx.sender(),
+						peer_id,
+						COST_REPORT_BAD,
+					)
+					.await;
 				}
-			} else {
-				// Keep the advertisement.
-				value.push(blocked);
 			}
 		}
-
-		if !value.is_empty() {
-			state.blocked_advertisements.insert(key, value);
-		}
 	}
 }
 
@@ -1110,10 +1110,10 @@ where
 	}
 
 	if let Some((candidate_hash, parent_head_data_hash)) = prospective_candidate {
-		// We need to queue the advertisement if we are not allowed to second it.
+		// Check if the backing subsystem allows us to second this candidate.
 		//
-		// This is also only important when async backing is enabled.
- let queue_advertisement = relay_parent_mode.is_enabled() && + // This is also only important when async backing or elastic scaling is enabled. + let seconding_not_allowed = relay_parent_mode.is_enabled() && !can_second( sender, collator_para_id, @@ -1123,26 +1123,8 @@ where ) .await; - if queue_advertisement { - gum::debug!( - target: LOG_TARGET, - relay_parent = ?relay_parent, - para_id = ?para_id, - ?candidate_hash, - "Seconding is not allowed by backing, queueing advertisement", - ); - state - .blocked_advertisements - .entry((collator_para_id, parent_head_data_hash)) - .or_default() - .push(BlockedAdvertisement { - peer_id, - collator_id: collator_id.clone(), - candidate_relay_parent: relay_parent, - candidate_hash, - }); - - return Ok(()) + if seconding_not_allowed { + return Err(AdvertisementError::BlockedByBacking) } } @@ -1358,20 +1340,17 @@ where state.span_per_relay_parent.remove(&removed); } } - // Remove blocked advertisements that left the view. - state.blocked_advertisements.retain(|_, ads| { - ads.retain(|ad| state.per_relay_parent.contains_key(&ad.candidate_relay_parent)); - !ads.is_empty() + // Remove blocked seconding requests that left the view. + state.blocked_from_seconding.retain(|_, collations| { + collations.retain(|collation| { + state + .per_relay_parent + .contains_key(&collation.candidate_receipt.descriptor.relay_parent) + }); + + !collations.is_empty() }); - // Re-trigger previously failed requests again. - // - // This makes sense for several reasons, one simple example: if a hypothetical depth - // for an advertisement initially exceeded the limit and the candidate was included - // in a new leaf. - let maybe_unblocked = std::mem::take(&mut state.blocked_advertisements); - // Could be optimized to only sanity check new leaves. - request_unblocked_collations(sender, state, maybe_unblocked).await; for (peer_id, peer_data) in state.peer_data.iter_mut() { peer_data.prune_old_advertisements( @@ -1508,6 +1487,8 @@ async fn process_msg( return }, }; + let output_head_data = receipt.commitments.head_data.clone(); + let output_head_data_hash = receipt.descriptor.para_head; let fetched_collation = FetchedCollation::from(&receipt.to_plain()); if let Some(CollationEvent { collator_id, pending_collation, .. }) = state.fetched_candidates.remove(&fetched_collation) @@ -1536,6 +1517,17 @@ async fn process_msg( rp_state.collations.status = CollationStatus::Seconded; rp_state.collations.note_seconded(); } + + // See if we've unblocked other collations for seconding. + second_unblocked_collations( + ctx, + state, + fetched_collation.para_id, + output_head_data, + output_head_data_hash, + ) + .await; + // If async backing is enabled, make an attempt to fetch next collation. let maybe_candidate_hash = prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); @@ -1554,11 +1546,13 @@ async fn process_msg( ); } }, - Backed { para_id, para_head } => { - let maybe_unblocked = state.blocked_advertisements.remove_entry(&(para_id, para_head)); - request_unblocked_collations(ctx.sender(), state, maybe_unblocked).await; - }, Invalid(parent, candidate_receipt) => { + // Remove collations which were blocked from seconding and had this candidate as parent. 
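+			// (The key is the invalid candidate's para id plus its output head data hash,
+			// `descriptor.para_head`, i.e. exactly the `BlockedCollationId` under which its
+			// children would have been filed while waiting to be seconded.)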
+ state.blocked_from_seconding.remove(&BlockedCollationId { + para_id: candidate_receipt.descriptor.para_id, + parent_head_data_hash: candidate_receipt.descriptor.para_head, + }); + let fetched_collation = FetchedCollation::from(&candidate_receipt); let candidate_hash = fetched_collation.candidate_hash; let id = match state.fetched_candidates.entry(fetched_collation) { @@ -1668,29 +1662,45 @@ async fn run_inner( }; let CollationEvent {collator_id, pending_collation, .. } = res.collation_event.clone(); - if let Err(err) = kick_off_seconding(&mut ctx, &mut state, res).await { - gum::warn!( - target: LOG_TARGET, - relay_parent = ?pending_collation.relay_parent, - para_id = ?pending_collation.para_id, - peer_id = ?pending_collation.peer_id, - error = %err, - "Seconding aborted due to an error", - ); - if err.is_malicious() { - // Report malicious peer. - modify_reputation(&mut state.reputation, ctx.sender(), pending_collation.peer_id, COST_REPORT_BAD).await; + match kick_off_seconding(&mut ctx, &mut state, res).await { + Err(err) => { + gum::warn!( + target: LOG_TARGET, + relay_parent = ?pending_collation.relay_parent, + para_id = ?pending_collation.para_id, + peer_id = ?pending_collation.peer_id, + error = %err, + "Seconding aborted due to an error", + ); + + if err.is_malicious() { + // Report malicious peer. + modify_reputation(&mut state.reputation, ctx.sender(), pending_collation.peer_id, COST_REPORT_BAD).await; + } + let maybe_candidate_hash = + pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); + dequeue_next_collation_and_fetch( + &mut ctx, + &mut state, + pending_collation.relay_parent, + (collator_id, maybe_candidate_hash), + ) + .await; + }, + Ok(false) => { + // No hard error occurred, but we can try fetching another collation. + let maybe_candidate_hash = + pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); + dequeue_next_collation_and_fetch( + &mut ctx, + &mut state, + pending_collation.relay_parent, + (collator_id, maybe_candidate_hash), + ) + .await; } - let maybe_candidate_hash = - pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); - dequeue_next_collation_and_fetch( - &mut ctx, - &mut state, - pending_collation.relay_parent, - (collator_id, maybe_candidate_hash), - ) - .await; + Ok(true) => {} } } res = state.collation_fetch_timeouts.select_next_some() => { @@ -1800,12 +1810,13 @@ where } /// Handle a fetched collation result. +/// Returns whether or not seconding has begun. 
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn kick_off_seconding<Context>(
	ctx: &mut Context,
	state: &mut State,
	PendingCollationFetch { mut collation_event, candidate_receipt, pov, maybe_parent_head_data }: PendingCollationFetch,
-) -> std::result::Result<(), SecondingError> {
+) -> std::result::Result<bool, SecondingError> {
	let pending_collation = collation_event.pending_collation;
	let relay_parent = pending_collation.relay_parent;
@@ -1818,7 +1829,7 @@ async fn kick_off_seconding(
				relay_parent = ?relay_parent,
				"Fetched collation for a parent out of view",
			);
-			return Ok(())
+			return Ok(false)
		},
	};
	let collations = &mut per_relay_parent.collations;
@@ -1828,7 +1839,7 @@ async fn kick_off_seconding(
		collation_event.pending_collation.commitments_hash =
			Some(candidate_receipt.commitments_hash);
 
-		let (maybe_pvd, maybe_parent_head_and_hash) = match (
+		let (maybe_pvd, maybe_parent_head, maybe_parent_head_hash) = match (
			collation_event.collator_protocol_version,
			collation_event.pending_collation.prospective_candidate,
		) {
@@ -1844,7 +1855,7 @@ async fn kick_off_seconding(
				)
				.await?;
 
-				(pvd, maybe_parent_head_data.map(|head_data| (head_data, parent_head_data_hash)))
+				(pvd, maybe_parent_head_data, Some(parent_head_data_hash))
			},
			// Support V2 collators without async backing enabled.
			(CollationVersion::V2, Some(_)) | (CollationVersion::V1, _) => {
@@ -1854,20 +1865,60 @@ async fn kick_off_seconding(
					candidate_receipt.descriptor().para_id,
				)
				.await?;
-				(pvd, None)
+				(
+					Some(pvd.ok_or(SecondingError::PersistedValidationDataNotFound)?),
+					maybe_parent_head_data,
+					None,
+				)
			},
			_ => {
				// `handle_advertisement` checks for protocol mismatch.
-				return Ok(())
+				return Ok(false)
+			},
+		};
+
+		let pvd = match (maybe_pvd, maybe_parent_head.clone(), maybe_parent_head_hash) {
+			(Some(pvd), _, _) => pvd,
+			(None, None, Some(parent_head_data_hash)) => {
+				// In this case, the collator did not supply the head data and neither could
+				// prospective-parachains. We add this to the blocked_from_seconding collection
+				// until we second its parent.
+				let blocked_collation = PendingCollationFetch {
+					collation_event,
+					candidate_receipt,
+					pov,
+					maybe_parent_head_data: None,
+				};
+				gum::debug!(
+					target: LOG_TARGET,
+					candidate_hash = ?blocked_collation.candidate_receipt.hash(),
+					relay_parent = ?blocked_collation.candidate_receipt.descriptor.relay_parent,
+					"Collation having parent head data hash {} is blocked from seconding. Waiting on its parent to be validated.",
+					parent_head_data_hash
+				);
+				state
+					.blocked_from_seconding
+					.entry(BlockedCollationId {
+						para_id: blocked_collation.candidate_receipt.descriptor.para_id,
+						parent_head_data_hash,
+					})
+					.or_insert_with(Vec::new)
+					.push(blocked_collation);
+
+				return Ok(false)
+			},
+			(None, _, _) => {
+				// Even though we already have the parent head data, the PVD fetching failed.
+				// There's no need to wait for another collation outputting this head data to
+				// be seconded.
+				return Err(SecondingError::PersistedValidationDataNotFound)
			},
		};
-		let pvd = maybe_pvd.ok_or(SecondingError::PersistedValidationDataNotFound)?;
 
		fetched_collation_sanity_check(
			&collation_event.pending_collation,
			&candidate_receipt,
			&pvd,
-			maybe_parent_head_and_hash,
+			maybe_parent_head.and_then(|head| maybe_parent_head_hash.map(|hash| (head, hash))),
		)?;
 
		ctx.send_message(CandidateBackingMessage::Second(
@@ -1882,7 +1933,7 @@ async fn kick_off_seconding(
		collations.status = CollationStatus::WaitingOnValidation;
 
		entry.insert(collation_event);
-		Ok(())
+		Ok(true)
	} else {
		Err(SecondingError::Duplicate)
	}
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
index eaa725f2642ed38e3a6f222f9624dd6e2bc4fcce..178dcb85e035f05c4136f7d7d7e2433a3713d00c 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
@@ -23,6 +23,7 @@ use polkadot_primitives::{
 	AsyncBackingParams, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header,
 	SigningContext, ValidatorId,
 };
+use rstest::rstest;
 
 const ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
 	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
@@ -115,15 +116,6 @@ pub(super) async fn update_view(
 	let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len);
 
-	assert_matches!(
-		overseer_recv(virtual_overseer).await,
-		AllMessages::ProspectiveParachains(
-			ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx),
-		) if parent == leaf_hash => {
-			tx.send(test_state.chain_ids.iter().map(|para_id| (*para_id, min_number)).collect()).unwrap();
-		}
-	);
-
 	let ancestry_len = leaf_number + 1 - min_number;
 	let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h)))
 		.take(ancestry_len as usize);
@@ -165,6 +157,17 @@ pub(super) async fn update_view(
 			}
 		);
 
+		if requested_len == 0 {
+			assert_matches!(
+				overseer_recv(virtual_overseer).await,
+				AllMessages::ProspectiveParachains(
+					ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx),
+				) if parent == leaf_hash => {
+					tx.send(test_state.chain_ids.iter().map(|para_id| (*para_id, min_number)).collect()).unwrap();
+				}
+			);
+		}
+
 		requested_len += 1;
 	}
 }
@@ -262,6 +265,48 @@ async fn assert_collation_seconded(
 	}
 }
 
+/// Assert that the next message is a persisted validation data request and respond with the
+/// supplied PVD.
+async fn assert_persisted_validation_data(
+	virtual_overseer: &mut VirtualOverseer,
+	version: CollationVersion,
+	expected_relay_parent: Hash,
+	expected_para_id: ParaId,
+	expected_parent_head_data_hash: Option<Hash>,
+	pvd: Option<PersistedValidationData>,
+) {
+	// Depending on the relay parent mode, the PVD will be requested either
+	// from the Runtime API or from Prospective Parachains.
+ let msg = overseer_recv(virtual_overseer).await; + match version { + CollationVersion::V1 => assert_matches!( + msg, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), + )) => { + assert_eq!(expected_relay_parent, hash); + assert_eq!(expected_para_id, para_id); + assert_eq!(OccupiedCoreAssumption::Free, assumption); + tx.send(Ok(pvd)).unwrap(); + } + ), + CollationVersion::V2 => assert_matches!( + msg, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx), + ) => { + assert_eq!(expected_relay_parent, request.candidate_relay_parent); + assert_eq!(expected_para_id, request.para_id); + if let Some(expected_parent_head_data_hash) = expected_parent_head_data_hash { + assert_eq!(expected_parent_head_data_hash, request.parent_head_data.hash()); + } + tx.send(pvd).unwrap(); + } + ), + } +} + #[test] fn v1_advertisement_accepted_and_seconded() { let test_state = TestState::default(); @@ -946,56 +991,73 @@ fn advertisement_spam_protection() { }); } -#[test] -fn backed_candidate_unblocks_advertisements() { +#[rstest] +#[case(true)] +#[case(false)] +fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. } = test_harness; + let TestHarness { mut virtual_overseer, keystore } = test_harness; - let pair_a = CollatorPair::generate().0; - let pair_b = CollatorPair::generate().0; + let pair = CollatorPair::generate().0; + // Grandparent of head `a`. let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 2; - let head_c = get_parent_hash(head_b); // Grandparent of head `b`. - // Group rotation frequency is 1 by default, at `d` we're assigned + // Group rotation frequency is 1 by default, at `c` we're assigned // to the first para. - let head_d = get_parent_hash(head_c); + let head_c = Hash::from_low_u64_be(130); // Activated leaf is `b`, but the collation will be based on `c`. update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - // Accept both collators from the implicit view. connect_and_declare_collator( &mut virtual_overseer, peer_a, - pair_a.clone(), + pair.clone(), test_state.chain_ids[0], CollationVersion::V2, ) .await; - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - pair_b.clone(), - test_state.chain_ids[1], - CollationVersion::V2, - ) - .await; - let candidate_hash = CandidateHash::default(); - let parent_head_data_hash = Hash::zero(); + // Candidate A transitions from head data 0 to 1. + // Candidate B transitions from head data 1 to 2. + + // Candidate B is advertised and fetched before candidate A. 
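+		// (What this ordering exercises: B's PVD lookup below will return `None`, so the
+		// subsystem files B under `BlockedCollationId { para_id, parent_head_data_hash:
+		// HeadData(vec![1]).hash() }`; only seconding A, whose output head data hashes to
+		// that value, unblocks it.)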
+
+		let mut candidate_b = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default()));
+		candidate_b.descriptor.para_id = test_state.chain_ids[0];
+		candidate_b.descriptor.para_head = HeadData(vec![2]).hash();
+		candidate_b.descriptor.persisted_validation_data_hash =
+			PersistedValidationData::<Hash, BlockNumber> {
+				parent_head: HeadData(vec![1]),
+				relay_parent_number: 5,
+				max_pov_size: 1024,
+				relay_parent_storage_root: Default::default(),
+			}
+			.hash();
+		let candidate_b_commitments = CandidateCommitments {
+			head_data: HeadData(vec![2]),
+			horizontal_messages: Default::default(),
+			upward_messages: Default::default(),
+			new_validation_code: None,
+			processed_downward_messages: 0,
+			hrmp_watermark: 0,
+		};
+		candidate_b.commitments_hash = candidate_b_commitments.hash();
+
+		let candidate_b_hash = candidate_b.hash();
+
 		advertise_collation(
 			&mut virtual_overseer,
-			peer_b,
+			peer_a,
 			head_c,
-			Some((candidate_hash, parent_head_data_hash)),
+			Some((candidate_b_hash, HeadData(vec![1]).hash())),
 		)
 		.await;
 		assert_matches!(
@@ -1003,40 +1065,73 @@
 			overseer_recv(&mut virtual_overseer).await,
 			AllMessages::CandidateBacking(
 				CandidateBackingMessage::CanSecond(request, tx),
 			) => {
-				assert_eq!(request.candidate_hash, candidate_hash);
-				assert_eq!(request.candidate_para_id, test_state.chain_ids[1]);
-				assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
-				// Reject it.
-				tx.send(false).expect("receiving side should be alive");
+				assert_eq!(request.candidate_hash, candidate_b_hash);
+				assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
+				assert_eq!(request.parent_head_data_hash, HeadData(vec![1]).hash());
+				tx.send(true).expect("receiving side should be alive");
 			}
 		);
 
-		// Advertise with different para.
-		advertise_collation(
+		let response_channel = assert_fetch_collation_request(
 			&mut virtual_overseer,
-			peer_a,
-			head_d, // Note different relay parent.
-			Some((candidate_hash, parent_head_data_hash)),
+			head_c,
+			test_state.chain_ids[0],
+			Some(candidate_b_hash),
 		)
 		.await;
-		assert_matches!(
-			overseer_recv(&mut virtual_overseer).await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::CanSecond(request, tx),
-			) => {
-				assert_eq!(request.candidate_hash, candidate_hash);
-				assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
-				assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
-				tx.send(false).expect("receiving side should be alive");
+
+		response_channel
+			.send(Ok((
+				request_v2::CollationFetchingResponse::Collation(
+					candidate_b.clone(),
+					PoV { block_data: BlockData(vec![1]) },
+				)
+				.encode(),
+				ProtocolName::from(""),
+			)))
+			.expect("Sending response should succeed");
+
+		// Persisted validation data of candidate B is not found.
+		assert_persisted_validation_data(
+			&mut virtual_overseer,
+			CollationVersion::V2,
+			head_c,
+			test_state.chain_ids[0],
+			Some(HeadData(vec![1]).hash()),
+			None,
+		)
+		.await;
+
+		// Now advertise, fetch and validate candidate A, which is the parent of B.
+
+		let mut candidate_a = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default()));
+		candidate_a.descriptor.para_id = test_state.chain_ids[0];
+		candidate_a.descriptor.para_head = HeadData(vec![1]).hash();
+		candidate_a.descriptor.persisted_validation_data_hash =
+			PersistedValidationData::<Hash, BlockNumber> {
+				parent_head: HeadData(vec![0]),
+				relay_parent_number: 5,
+				max_pov_size: 1024,
+				relay_parent_storage_root: Default::default(),
 			}
-		);
+			.hash();
+		let candidate_a_commitments = CandidateCommitments {
+			head_data: HeadData(vec![1]),
+			horizontal_messages: Default::default(),
+			upward_messages: Default::default(),
+			new_validation_code: None,
+			processed_downward_messages: 0,
+			hrmp_watermark: 0,
+		};
+		candidate_a.commitments_hash = candidate_a_commitments.hash();
+
+		let candidate_a_hash = candidate_a.hash();
 
-		overseer_send(
+		advertise_collation(
 			&mut virtual_overseer,
-			CollatorProtocolMessage::Backed {
-				para_id: test_state.chain_ids[0],
-				para_head: parent_head_data_hash,
-			},
+			peer_a,
+			head_c,
+			Some((candidate_a_hash, HeadData(vec![0]).hash())),
 		)
 		.await;
 		assert_matches!(
@@ -1044,174 +1139,155 @@
 			overseer_recv(&mut virtual_overseer).await,
 			AllMessages::CandidateBacking(
 				CandidateBackingMessage::CanSecond(request, tx),
 			) => {
-				assert_eq!(request.candidate_hash, candidate_hash);
+				assert_eq!(request.candidate_hash, candidate_a_hash);
 				assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
-				assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
+				assert_eq!(request.parent_head_data_hash, HeadData(vec![0]).hash());
 				tx.send(true).expect("receiving side should be alive");
 			}
 		);
-		assert_fetch_collation_request(
+
+		let response_channel = assert_fetch_collation_request(
 			&mut virtual_overseer,
-			head_d,
+			head_c,
 			test_state.chain_ids[0],
-			Some(candidate_hash),
+			Some(candidate_a_hash),
 		)
 		.await;
-		virtual_overseer
-	});
-}
 
-#[test]
-fn active_leave_unblocks_advertisements() {
-	let mut test_state = TestState::default();
-	test_state.group_rotation_info.group_rotation_frequency = 100;
+		response_channel
+			.send(Ok((
+				request_v2::CollationFetchingResponse::Collation(
+					candidate_a.clone(),
+					PoV { block_data: BlockData(vec![2]) },
+				)
+				.encode(),
+				ProtocolName::from(""),
+			)))
+			.expect("Sending response should succeed");
 
-	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
-		let TestHarness { mut virtual_overseer, .. } = test_harness;
+		assert_persisted_validation_data(
+			&mut virtual_overseer,
+			CollationVersion::V2,
+			head_c,
+			test_state.chain_ids[0],
+			Some(HeadData(vec![0]).hash()),
+			Some(PersistedValidationData::<Hash, BlockNumber> {
+				parent_head: HeadData(vec![0]),
+				relay_parent_number: 5,
+				max_pov_size: 1024,
+				relay_parent_storage_root: Default::default(),
+			}),
+		)
+		.await;
 
-		let head_b = Hash::from_low_u64_be(128);
-		let head_b_num: u32 = 0;
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::CandidateBacking(CandidateBackingMessage::Second(
+				relay_parent,
+				candidate_receipt,
+				received_pvd,
+				incoming_pov,
+			)) => {
+				assert_eq!(head_c, relay_parent);
+				assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id);
+				assert_eq!(PoV { block_data: BlockData(vec![2]) }, incoming_pov);
+				assert_eq!(PersistedValidationData::<Hash, BlockNumber> {
+					parent_head: HeadData(vec![0]),
+					relay_parent_number: 5,
+					max_pov_size: 1024,
+					relay_parent_storage_root: Default::default(),
+				}, received_pvd);
+				candidate_receipt
+			}
+		);
 
-		update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await;
+		// If candidate A is valid, proceed with seconding B.
+		if valid_parent {
+			send_seconded_statement(
+				&mut virtual_overseer,
+				keystore.clone(),
+				&CommittedCandidateReceipt {
+					descriptor: candidate_a.descriptor,
+					commitments: candidate_a_commitments,
+				},
+			)
+			.await;
 
-		let peers: Vec<CollatorPair> = (0..3).map(|_| CollatorPair::generate().0).collect();
-		let peer_ids: Vec<PeerId> = (0..3).map(|_| PeerId::random()).collect();
-		let candidates: Vec<CandidateHash> =
-			(0u8..3).map(|i| CandidateHash(Hash::repeat_byte(i))).collect();
+			assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2)
+				.await;
 
-		for (peer, peer_id) in peers.iter().zip(&peer_ids) {
-			connect_and_declare_collator(
+			// Now that candidate A has been seconded, candidate B can be seconded as well.
+
+			assert_persisted_validation_data(
 				&mut virtual_overseer,
-				*peer_id,
-				peer.clone(),
-				test_state.chain_ids[0],
 				CollationVersion::V2,
+				head_c,
+				test_state.chain_ids[0],
+				Some(HeadData(vec![1]).hash()),
+				Some(PersistedValidationData::<Hash, BlockNumber> {
+					parent_head: HeadData(vec![1]),
+					relay_parent_number: 5,
+					max_pov_size: 1024,
+					relay_parent_storage_root: Default::default(),
+				}),
			)
			.await;
-		}
 
-		let parent_head_data_hash = Hash::zero();
-		for (peer, candidate) in peer_ids.iter().zip(&candidates).take(2) {
-			advertise_collation(
+			assert_matches!(
+				overseer_recv(&mut virtual_overseer).await,
+				AllMessages::CandidateBacking(CandidateBackingMessage::Second(
+					relay_parent,
+					candidate_receipt,
+					received_pvd,
+					incoming_pov,
+				)) => {
+					assert_eq!(head_c, relay_parent);
+					assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id);
+					assert_eq!(PoV { block_data: BlockData(vec![1]) }, incoming_pov);
+					assert_eq!(PersistedValidationData::<Hash, BlockNumber> {
+						parent_head: HeadData(vec![1]),
+						relay_parent_number: 5,
+						max_pov_size: 1024,
+						relay_parent_storage_root: Default::default(),
+					}, received_pvd);
+					candidate_receipt
+				}
+			);
+
+			send_seconded_statement(
+				&mut virtual_overseer,
+				keystore.clone(),
+				&CommittedCandidateReceipt {
+					descriptor: candidate_b.descriptor,
+					commitments: candidate_b_commitments,
+				},
+			)
+			.await;
+
+			assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2)
+				.await;
+		} else {
+			// If candidate A is invalid, B won't be seconded.
+ overseer_send( &mut virtual_overseer, - *peer, - head_b, - Some((*candidate, parent_head_data_hash)), + CollatorProtocolMessage::Invalid(head_c, candidate_a), ) .await; assert_matches!( overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer, rep)), ) => { - assert_eq!(request.candidate_hash, *candidate); - assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); - // Send false. - tx.send(false).expect("receiving side should be alive"); + assert_eq!(peer, peer_a); + assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit()); } ); } - let head_c = Hash::from_low_u64_be(127); - let head_c_num: u32 = 1; - - let next_overseer_message = - update_view(&mut virtual_overseer, &test_state, vec![(head_c, head_c_num)], 1) - .await - .expect("should've sent request to backing"); - - // Unblock first request. - assert_matches!( - next_overseer_message, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidates[0]); - assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); - tx.send(true).expect("receiving side should be alive"); - } - ); - - assert_fetch_collation_request( - &mut virtual_overseer, - head_b, - test_state.chain_ids[0], - Some(candidates[0]), - ) - .await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidates[1]); - assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); - tx.send(false).expect("receiving side should be alive"); - } - ); - - // Collation request was discarded. test_helpers::Yield::new().await; assert_matches!(virtual_overseer.recv().now_or_never(), None); - advertise_collation( - &mut virtual_overseer, - peer_ids[2], - head_c, - Some((candidates[2], parent_head_data_hash)), - ) - .await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidates[2]); - tx.send(false).expect("receiving side should be alive"); - } - ); - - let head_d = Hash::from_low_u64_be(126); - let head_d_num: u32 = 2; - - let next_overseer_message = - update_view(&mut virtual_overseer, &test_state, vec![(head_d, head_d_num)], 1) - .await - .expect("should've sent request to backing"); - - // Reject 2, accept 3. 
-		assert_matches!(
-			next_overseer_message,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::CanSecond(request, tx),
-			) => {
-				assert_eq!(request.candidate_hash, candidates[1]);
-				tx.send(false).expect("receiving side should be alive");
-			}
-		);
-		assert_matches!(
-			overseer_recv(&mut virtual_overseer).await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::CanSecond(request, tx),
-			) => {
-				assert_eq!(request.candidate_hash, candidates[2]);
-				tx.send(true).expect("receiving side should be alive");
-			}
-		);
-		assert_fetch_collation_request(
-			&mut virtual_overseer,
-			head_c,
-			test_state.chain_ids[0],
-			Some(candidates[2]),
-		)
-		.await;
-
 		virtual_overseer
 	});
 }
diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml
index ff9c302c73146d501114728c4a483e4465d199a9..dff285590d97cd4f47a00a6141b5c8a07315e35e 100644
--- a/polkadot/node/network/dispute-distribution/Cargo.toml
+++ b/polkadot/node/network/dispute-distribution/Cargo.toml
@@ -14,7 +14,7 @@ futures = "0.3.30"
 futures-timer = "3.0.2"
 gum = { package = "tracing-gum", path = "../../gum" }
 derive_more = "0.99.17"
-parity-scale-codec = { version = "3.6.1", features = ["std"] }
+parity-scale-codec = { version = "3.6.12", features = ["std"] }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-erasure-coding = { path = "../../../erasure-coding" }
 polkadot-node-subsystem = { path = "../../subsystem" }
@@ -25,7 +25,7 @@ sc-network = { path = "../../../../substrate/client/network" }
 sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" }
 sp-keystore = { path = "../../../../substrate/primitives/keystore" }
 thiserror = { workspace = true }
-fatality = "0.0.6"
+fatality = "0.1.1"
 schnellru = "0.2.1"
 indexmap = "2.0.0"
diff --git a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
index 2b3fc45983a98f67954a16fd342d76ac8068ae07..2409e6994f604ecfa905c5300fe7d59c6d9974fa 100644
--- a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
+++ b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
@@ -132,7 +132,7 @@ enum MuxedMessage {
 	/// A new request has arrived and should be handled.
 	NewRequest(IncomingRequest<DisputeRequest>),
-	/// Rate limit timer hit - is is time to process one row of messages.
+	/// Rate limit timer hit - it is time to process one row of messages.
 	///
 	/// This is the result of calling `self.peer_queues.pop_reqs()`.
	WakePeerQueuesPopReqs(Vec>),
diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml
index 0408e673791114793da1b231baa7448f185b0db6..c5015b8c64504b2712c9713463ab78af2f265948 100644
--- a/polkadot/node/network/protocol/Cargo.toml
+++ b/polkadot/node/network/protocol/Cargo.toml
@@ -16,7 +16,7 @@ hex = "0.4.3"
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-jaeger = { path = "../../jaeger" }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }
 sc-network = { path = "../../../../substrate/client/network" }
 sc-network-types = { path = "../../../../substrate/client/network/types" }
 sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" }
@@ -24,7 +24,7 @@ sp-runtime = { path = "../../../../substrate/primitives/runtime" }
 strum = { version = "0.26.2", features = ["derive"] }
 futures = "0.3.30"
 thiserror = { workspace = true }
-fatality = "0.0.6"
+fatality = "0.1.1"
 rand = "0.8"
 derive_more = "0.99"
 gum = { package = "tracing-gum", path = "../../gum" }
diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs
index cab02bb88a00b429a80ce25e342fd38e25b3f3d5..fe06593bd7a0f58202471483cc06980ee35a2926 100644
--- a/polkadot/node/network/protocol/src/request_response/mod.rs
+++ b/polkadot/node/network/protocol/src/request_response/mod.rs
@@ -98,6 +98,10 @@ pub enum Protocol {
 	/// Protocol for requesting candidates with attestations in statement distribution
 	/// when async backing is enabled.
 	AttestedCandidateV2,
+
+	/// Protocol for chunk fetching version 2, used by availability distribution and availability
+	/// recovery.
+	ChunkFetchingV2,
 }
 
 /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately
@@ -209,7 +213,7 @@ impl Protocol {
 		let name = req_protocol_names.get_name(self);
 		let legacy_names = self.get_legacy_name().into_iter().map(Into::into).collect();
 		match self {
-			Protocol::ChunkFetchingV1 => N::request_response_config(
+			Protocol::ChunkFetchingV1 | Protocol::ChunkFetchingV2 => N::request_response_config(
 				name,
 				legacy_names,
 				1_000,
@@ -292,7 +296,7 @@ impl Protocol {
 			// times (due to network delays), 100 seems big enough to accommodate for "bursts",
 			// assuming we can service requests relatively quickly, which would need to be measured
 			// as well.
-			Protocol::ChunkFetchingV1 => 100,
+			Protocol::ChunkFetchingV1 | Protocol::ChunkFetchingV2 => 100,
 			// 10 seems reasonable, considering group sizes of max 10 validators.
 			Protocol::CollationFetchingV1 | Protocol::CollationFetchingV2 => 10,
 			// 10 seems reasonable, considering group sizes of max 10 validators.
@@ -362,6 +366,7 @@ impl Protocol {
 			// Introduced after legacy names became legacy.
 			Protocol::AttestedCandidateV2 => None,
 			Protocol::CollationFetchingV2 => None,
+			Protocol::ChunkFetchingV2 => None,
 		}
 	}
 }
@@ -412,6 +417,7 @@ impl ReqProtocolNames {
 		};
 
 		let short_name = match protocol {
+			// V1:
 			Protocol::ChunkFetchingV1 => "/req_chunk/1",
 			Protocol::CollationFetchingV1 => "/req_collation/1",
 			Protocol::PoVFetchingV1 => "/req_pov/1",
@@ -419,8 +425,10 @@ impl ReqProtocolNames {
 			Protocol::StatementFetchingV1 => "/req_statement/1",
 			Protocol::DisputeSendingV1 => "/send_dispute/1",
 
+			// V2:
 			Protocol::CollationFetchingV2 => "/req_collation/2",
 			Protocol::AttestedCandidateV2 => "/req_attested_candidate/2",
+			Protocol::ChunkFetchingV2 => "/req_chunk/2",
 		};
 
 		format!("{}{}", prefix, short_name).into()
diff --git a/polkadot/node/network/protocol/src/request_response/outgoing.rs b/polkadot/node/network/protocol/src/request_response/outgoing.rs
index 96ef4a6ab25dcc13949e6b3dfbc0e8c6856393f5..f578c4ffded34b7ee20a1398925e34b0c7a02bc3 100644
--- a/polkadot/node/network/protocol/src/request_response/outgoing.rs
+++ b/polkadot/node/network/protocol/src/request_response/outgoing.rs
@@ -30,7 +30,7 @@ use super::{v1, v2, IsRequest, Protocol};
 #[derive(Debug)]
 pub enum Requests {
 	/// Request an availability chunk from a node.
-	ChunkFetchingV1(OutgoingRequest<v1::ChunkFetchingRequest>),
+	ChunkFetching(OutgoingRequest<v2::ChunkFetchingRequest>),
 	/// Fetch a collation from a collator which previously announced it.
 	CollationFetchingV1(OutgoingRequest<v1::CollationFetchingRequest>),
 	/// Fetch a PoV from a validator which previously sent out a seconded statement.
@@ -59,7 +59,7 @@ impl Requests {
 	/// contained in the `enum`.
 	pub fn encode_request(self) -> (Protocol, OutgoingRequest<Vec<u8>>) {
 		match self {
-			Self::ChunkFetchingV1(r) => r.encode_request(),
+			Self::ChunkFetching(r) => r.encode_request(),
 			Self::CollationFetchingV1(r) => r.encode_request(),
 			Self::CollationFetchingV2(r) => r.encode_request(),
 			Self::PoVFetchingV1(r) => r.encode_request(),
@@ -164,24 +164,20 @@ where
 	///
 	/// Returns a raw `Vec<u8>` response over the channel. Use the associated `ProtocolName` to know
 	/// which request was the successful one and appropriately decode the response.
-	// WARNING: This is commented for now because it's not used yet.
-	// If you need it, make sure to test it. You may need to enable the V1 substream upgrade
-	// protocol, unless libp2p was in the meantime updated to a version that fixes the problem
-	// described in https://github.com/libp2p/rust-libp2p/issues/5074
-	// pub fn new_with_fallback(
-	// 	peer: Recipient,
-	// 	payload: Req,
-	// 	fallback_request: FallbackReq,
-	// ) -> (Self, impl Future<Output = OutgoingResult<(Vec<u8>, ProtocolName)>>) {
-	// 	let (tx, rx) = oneshot::channel();
-	// 	let r = Self {
-	// 		peer,
-	// 		payload,
-	// 		pending_response: tx,
-	// 		fallback_request: Some((fallback_request, FallbackReq::PROTOCOL)),
-	// 	};
-	// 	(r, async { Ok(rx.await??) })
-	// }
+	pub fn new_with_fallback(
+		peer: Recipient,
+		payload: Req,
+		fallback_request: FallbackReq,
+	) -> (Self, impl Future<Output = OutgoingResult<(Vec<u8>, ProtocolName)>>) {
+		let (tx, rx) = oneshot::channel();
+		let r = Self {
+			peer,
+			payload,
+			pending_response: tx,
+			fallback_request: Some((fallback_request, FallbackReq::PROTOCOL)),
+		};
+		(r, async { Ok(rx.await??) })
+	}
 
 	/// Encode a request into a `Vec<u8>`.
	///
diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs
index 60eecb69f738912ddb0240c890e2283db7f91a72..c503c6e4df03bd679242083bb4db082489bf8653 100644
--- a/polkadot/node/network/protocol/src/request_response/v1.rs
+++ b/polkadot/node/network/protocol/src/request_response/v1.rs
@@ -33,7 +33,8 @@ use super::{IsRequest, Protocol};
 pub struct ChunkFetchingRequest {
 	/// Hash of candidate we want a chunk for.
 	pub candidate_hash: CandidateHash,
-	/// The index of the chunk to fetch.
+	/// The validator index we are requesting from. This must be identical to the index of the
+	/// chunk we'll receive. For v2, this may not be the case.
 	pub index: ValidatorIndex,
 }
 
@@ -57,6 +58,15 @@ impl From<Option<ChunkResponse>> for ChunkFetchingResponse {
 	}
 }
 
+impl From<ChunkFetchingResponse> for Option<ChunkResponse> {
+	fn from(x: ChunkFetchingResponse) -> Self {
+		match x {
+			ChunkFetchingResponse::Chunk(c) => Some(c),
+			ChunkFetchingResponse::NoSuchChunk => None,
+		}
+	}
+}
+
 /// Skimmed down variant of `ErasureChunk`.
 ///
 /// Instead of transmitting a full `ErasureChunk` we transmit `ChunkResponse` in
@@ -80,7 +90,7 @@ impl From<ErasureChunk> for ChunkResponse {
 impl ChunkResponse {
 	/// Re-build an `ErasureChunk` from response and request.
 	pub fn recombine_into_chunk(self, req: &ChunkFetchingRequest) -> ErasureChunk {
-		ErasureChunk { chunk: self.chunk, proof: self.proof, index: req.index }
+		ErasureChunk { chunk: self.chunk, proof: self.proof, index: req.index.into() }
 	}
 }
diff --git a/polkadot/node/network/protocol/src/request_response/v2.rs b/polkadot/node/network/protocol/src/request_response/v2.rs
index 6b90c579237fbff95496035f461c5cc4c5202984..7e1a2d989168c1c8ed11d10d8871f3f7c514a64d 100644
--- a/polkadot/node/network/protocol/src/request_response/v2.rs
+++ b/polkadot/node/network/protocol/src/request_response/v2.rs
@@ -18,12 +18,13 @@
 use parity_scale_codec::{Decode, Encode};
 
+use polkadot_node_primitives::ErasureChunk;
 use polkadot_primitives::{
 	CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData,
-	UncheckedSignedStatement,
+	UncheckedSignedStatement, ValidatorIndex,
 };
 
-use super::{IsRequest, Protocol};
+use super::{v1, IsRequest, Protocol};
 use crate::v2::StatementFilter;
 
 /// Request a candidate with statements.
@@ -78,3 +79,60 @@ impl IsRequest for CollationFetchingRequest {
 	type Response = CollationFetchingResponse;
 	const PROTOCOL: Protocol = Protocol::CollationFetchingV2;
 }
+
+/// Request an availability chunk.
+#[derive(Debug, Copy, Clone, Encode, Decode)]
+pub struct ChunkFetchingRequest {
+	/// Hash of candidate we want a chunk for.
+	pub candidate_hash: CandidateHash,
+	/// The validator index we are requesting from. This may not be identical to the index of the
+	/// chunk we'll receive. It's up to the caller to decide whether they need to validate they got
+	/// the chunk they were expecting.
+	pub index: ValidatorIndex,
+}
+
+/// Receive a requested erasure chunk.
+#[derive(Debug, Clone, Encode, Decode)]
+pub enum ChunkFetchingResponse {
+	/// The requested chunk data.
+	#[codec(index = 0)]
+	Chunk(ErasureChunk),
+	/// Node was not in possession of the requested chunk.
+	#[codec(index = 1)]
+	NoSuchChunk,
+}
+
+impl From<Option<ErasureChunk>> for ChunkFetchingResponse {
+	fn from(x: Option<ErasureChunk>) -> Self {
+		match x {
+			Some(c) => ChunkFetchingResponse::Chunk(c),
+			None => ChunkFetchingResponse::NoSuchChunk,
+		}
+	}
+}
+
+impl From<ChunkFetchingResponse> for Option<ErasureChunk> {
+	fn from(x: ChunkFetchingResponse) -> Self {
+		match x {
+			ChunkFetchingResponse::Chunk(c) => Some(c),
+			ChunkFetchingResponse::NoSuchChunk => None,
+		}
+	}
+}
+
+impl From<v1::ChunkFetchingRequest> for ChunkFetchingRequest {
+	fn from(v1::ChunkFetchingRequest { candidate_hash, index }: v1::ChunkFetchingRequest) -> Self {
+		Self { candidate_hash, index }
+	}
+}
+
+impl From<ChunkFetchingRequest> for v1::ChunkFetchingRequest {
+	fn from(ChunkFetchingRequest { candidate_hash, index }: ChunkFetchingRequest) -> Self {
+		Self { candidate_hash, index }
+	}
+}
+
+impl IsRequest for ChunkFetchingRequest {
+	type Response = ChunkFetchingResponse;
+	const PROTOCOL: Protocol = Protocol::ChunkFetchingV2;
+}
diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml
index d8ae031cbf36dabf2899e61574958ffdbde62921..65224f9e2be620cdf862a5da3b7486b17cfb6534 100644
--- a/polkadot/node/network/statement-distribution/Cargo.toml
+++ b/polkadot/node/network/statement-distribution/Cargo.toml
@@ -22,9 +22,9 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 polkadot-node-network-protocol = { path = "../protocol" }
 arrayvec = "0.7.4"
 indexmap = "2.0.0"
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }
 thiserror = { workspace = true }
-fatality = "0.0.6"
+fatality = "0.1.1"
 bitvec = "1"
 
 [dev-dependencies]
@@ -42,3 +42,13 @@ sc-network = { path = "../../../../substrate/client/network" }
 futures-timer = "3.0.2"
 polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" }
 rand_chacha = "0.3"
+polkadot-subsystem-bench = { path = "../../subsystem-bench" }
+
+[[bench]]
+name = "statement-distribution-regression-bench"
+path = "benches/statement-distribution-regression-bench.rs"
+harness = false
+required-features = ["subsystem-benchmarks"]
+
+[features]
+subsystem-benchmarks = []
diff --git a/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs b/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9cbe385e3f42ee5be7dc0e5cda44bdbc92930828
--- /dev/null
+++ b/polkadot/node/network/statement-distribution/benches/statement-distribution-regression-bench.rs
@@ -0,0 +1,74 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! statement-distribution regression tests
+//!
+//! Statement distribution benchmark based on Kusama parameters and scale.
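The regression bench that follows gates merges on the averaged benchmark usage staying within a tolerance of fixed expectations. A minimal sketch of such a check, assuming a relative tolerance (`check_metric` is a hypothetical stand-in for the `BenchmarkUsage::check_network_usage`/`check_cpu_usage` helpers used below):

    // Hypothetical sketch of a regression gate: a measurement passes when it
    // deviates from the expectation by at most `tolerance`, relatively.
    fn check_metric(name: &str, expected: f64, measured: f64, tolerance: f64) -> Option<String> {
        let deviation = (measured - expected).abs() / expected;
        (deviation > tolerance).then(|| format!("{name}: expected {expected:.4}, got {measured:.4}"))
    }

Collected messages stay empty on success; any entry fails the run, mirroring the `Err("Regressions found")` exit in the bench below.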
+
+use polkadot_subsystem_bench::{
+	configuration::TestConfiguration,
+	statement::{benchmark_statement_distribution, prepare_test, TestState},
+	usage::BenchmarkUsage,
+	utils::save_to_file,
+};
+use std::io::Write;
+
+const BENCH_COUNT: usize = 50;
+
+fn main() -> Result<(), String> {
+	let mut messages = vec![];
+	let mut config = TestConfiguration::default();
+	config.n_cores = 100;
+	config.n_validators = 500;
+	config.num_blocks = 10;
+	config.connectivity = 100;
+	config.generate_pov_sizes();
+	let state = TestState::new(&config);
+
+	println!("Benchmarking...");
+	let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT)
+		.map(|n| {
+			print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n));
+			std::io::stdout().flush().unwrap();
+			let (mut env, _cfgs) = prepare_test(&state, false);
+			env.runtime().block_on(benchmark_statement_distribution(&mut env, &state))
+		})
+		.collect();
+	println!("\rDone!{}", " ".repeat(BENCH_COUNT));
+
+	let average_usage = BenchmarkUsage::average(&usages);
+	save_to_file(
+		"charts/statement-distribution-regression-bench.json",
+		average_usage.to_chart_json().map_err(|e| e.to_string())?,
+	)
+	.map_err(|e| e.to_string())?;
+	println!("{}", average_usage);
+
+	// We expect no variance for received and sent,
+	// but use 0.001 because we operate with floats.
+	messages.extend(average_usage.check_network_usage(&[
+		("Received from peers", 106.4000, 0.001),
+		("Sent to peers", 127.9100, 0.001),
+	]));
+	messages.extend(average_usage.check_cpu_usage(&[("statement-distribution", 0.0390, 0.1)]));
+
+	if messages.is_empty() {
+		Ok(())
+	} else {
+		eprintln!("{}", messages.join("\n"));
+		Err("Regressions found".to_string())
+	}
+}
diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs
index 4ca199c3378bfc80eeee9c5f2985b1ad570e5736..4d56c795f13b29949a7a3b3e7f9bfc85e89ab274 100644
--- a/polkadot/node/network/statement-distribution/src/lib.rs
+++ b/polkadot/node/network/statement-distribution/src/lib.rs
@@ -19,7 +19,6 @@
 //! This is responsible for distributing signed statements about candidate
 //! validity among validators.
 
-#![deny(unused_crate_dependencies)]
 #![warn(missing_docs)]
 
 use error::{log_error, FatalResult};
diff --git a/polkadot/node/network/statement-distribution/src/v2/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs
index ad56ad4a2365b94e6ced0e43ce0747d99fb6a80d..a4f2455c28401f536d449268591e2f9746a9db81 100644
--- a/polkadot/node/network/statement-distribution/src/v2/candidates.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs
@@ -243,12 +243,12 @@ impl Candidates {
 	/// Whether statements from a candidate are importable.
 	///
 	/// This is only true when the candidate is known, confirmed,
-	/// and is importable in a fragment tree.
+	/// and is importable in a fragment chain.
 	pub fn is_importable(&self, candidate_hash: &CandidateHash) -> bool {
 		self.get_confirmed(candidate_hash).map_or(false, |c| c.is_importable(None))
 	}
 
-	/// Note that a candidate is importable in a fragment tree indicated by the given
+	/// Note that a candidate is importable in a fragment chain indicated by the given
 	/// leaf hash.
 	pub fn note_importable_under(&mut self, candidate: &HypotheticalCandidate, leaf_hash: Hash) {
 		match candidate {
diff --git a/polkadot/node/network/statement-distribution/src/v2/grid.rs b/polkadot/node/network/statement-distribution/src/v2/grid.rs
index 24d846c840e00c49446a0525b3768353f56ae524..b6e4163090c4d5fe00a22c61b50d99b74e69c8bd 100644
--- a/polkadot/node/network/statement-distribution/src/v2/grid.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/grid.rs
@@ -46,10 +46,8 @@
 //!   - Request/response for the candidate + votes.
 //!     - Ignore if they are inconsistent with the manifest.
 //!   - A malicious backing group is capable of producing an unbounded number of backed candidates.
-//!     - We request the candidate only if the candidate has a hypothetical depth in any of our
-//!       fragment trees, and:
-//!       - the seconding validators have not seconded any other candidates at that depth in any of
-//!         those fragment trees
+//!     - We request the candidate only if the candidate is a hypothetical member in any of our
+//!       fragment chains.
 //! - All members of the group attempt to circulate all statements (in compact form) from the rest
 //!   of the group on candidates that have already been backed.
 //!   - They do this via the grid topology.
diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs
index 8579ac15cbc13f5186acea86f4dab492b943c4aa..961ec45bdada037885fb5ea43858e4e1522fac2c 100644
--- a/polkadot/node/network/statement-distribution/src/v2/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs
@@ -37,7 +37,7 @@ use polkadot_node_primitives::{
 use polkadot_node_subsystem::{
 	messages::{
 		network_bridge_event::NewGossipTopology, CandidateBackingMessage, HypotheticalCandidate,
-		HypotheticalFrontierRequest, NetworkBridgeEvent, NetworkBridgeTxMessage,
+		HypotheticalMembershipRequest, NetworkBridgeEvent, NetworkBridgeTxMessage,
 		ProspectiveParachainsMessage,
 	},
 	overseer, ActivatedLeaf,
@@ -753,7 +753,7 @@ pub(crate) async fn handle_active_leaves_update(
 		}
 	}
 
-	new_leaf_fragment_tree_updates(ctx, state, activated.hash).await;
+	new_leaf_fragment_chain_updates(ctx, state, activated.hash).await;
 
 	Ok(())
 }
@@ -2216,7 +2216,7 @@ async fn determine_groups_per_para(
 }
 
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
-async fn fragment_tree_update_inner(
+async fn fragment_chain_update_inner(
 	ctx: &mut Context,
 	state: &mut State,
 	active_leaf_hash: Option<Hash>,
@@ -2230,31 +2230,34 @@ async fn fragment_tree_update_inner(
 	};
 
 	// 2. find out which are in the frontier
-	let frontier = {
+	gum::debug!(
+		target: LOG_TARGET,
+		"Calling getHypotheticalMembership from statement distribution"
+	);
+	let candidate_memberships = {
 		let (tx, rx) = oneshot::channel();
-		ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier(
-			HypotheticalFrontierRequest {
+		ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership(
+			HypotheticalMembershipRequest {
 				candidates: hypotheticals,
-				fragment_tree_relay_parent: active_leaf_hash,
-				backed_in_path_only: false,
+				fragment_chain_relay_parent: active_leaf_hash,
 			},
 			tx,
 		))
 		.await;
 
 		match rx.await {
-			Ok(frontier) => frontier,
+			Ok(candidate_memberships) => candidate_memberships,
 			Err(oneshot::Canceled) => return,
 		}
 	};
 
 	// 3. note that they are importable under a given leaf hash.
- for (hypo, membership) in frontier { - // skip parablocks outside of the frontier + for (hypo, membership) in candidate_memberships { + // skip parablocks which aren't potential candidates if membership.is_empty() { continue } - for (leaf_hash, _) in membership { + for leaf_hash in membership { state.candidates.note_importable_under(&hypo, leaf_hash); } @@ -2298,31 +2301,31 @@ async fn fragment_tree_update_inner( } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn new_leaf_fragment_tree_updates( +async fn new_leaf_fragment_chain_updates( ctx: &mut Context, state: &mut State, leaf_hash: Hash, ) { - fragment_tree_update_inner(ctx, state, Some(leaf_hash), None, None).await + fragment_chain_update_inner(ctx, state, Some(leaf_hash), None, None).await } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn prospective_backed_notification_fragment_tree_updates( +async fn prospective_backed_notification_fragment_chain_updates( ctx: &mut Context, state: &mut State, para_id: ParaId, para_head: Hash, ) { - fragment_tree_update_inner(ctx, state, None, Some((para_head, para_id)), None).await + fragment_chain_update_inner(ctx, state, None, Some((para_head, para_id)), None).await } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn new_confirmed_candidate_fragment_tree_updates( +async fn new_confirmed_candidate_fragment_chain_updates( ctx: &mut Context, state: &mut State, candidate: HypotheticalCandidate, ) { - fragment_tree_update_inner(ctx, state, None, None, Some(vec![candidate])).await + fragment_chain_update_inner(ctx, state, None, None, Some(vec![candidate])).await } struct ManifestImportSuccess<'a> { @@ -2865,7 +2868,7 @@ pub(crate) async fn handle_backed_candidate_message( .await; // Search for children of the backed candidate to request. - prospective_backed_notification_fragment_tree_updates( + prospective_backed_notification_fragment_chain_updates( ctx, state, confirmed.para_id(), @@ -2956,7 +2959,8 @@ async fn apply_post_confirmation( post_confirmation.hypothetical.relay_parent(), ) .await; - new_confirmed_candidate_fragment_tree_updates(ctx, state, post_confirmation.hypothetical).await; + new_confirmed_candidate_fragment_chain_updates(ctx, state, post_confirmation.hypothetical) + .await; } /// Dispatch pending requests for candidate data & statements. @@ -3185,8 +3189,8 @@ pub(crate) async fn handle_response( let confirmed = state.candidates.get_confirmed(&candidate_hash).expect("just confirmed; qed"); - // Although the candidate is confirmed, it isn't yet on the - // hypothetical frontier of the fragment tree. Later, when it is, + // Although the candidate is confirmed, it isn't yet a + // hypothetical member of the fragment chain. Later, when it is, // we will import statements. if !confirmed.is_importable(None) { return diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index 4fb033e08ce3af0724d5a8770bdb0abf942e38a5..fe51f953e244a560d7fe2e0bf414279fd0b8bf89 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -111,8 +111,8 @@ fn share_seconded_circulated_to_cluster() { ); // sharing a `Seconded` message confirms a candidate, which leads to new - // fragment tree updates. - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + // fragment chain updates. 
+ answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); @@ -509,7 +509,7 @@ fn seconded_statement_leads_to_request() { if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); @@ -583,7 +583,7 @@ fn cluster_statements_shared_seconded_first() { .await; // result of new confirmed candidate. - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer .send(FromOrchestra::Communication { @@ -717,8 +717,8 @@ fn cluster_accounts_for_implicit_view() { ); // sharing a `Seconded` message confirms a candidate, which leads to new - // fragment tree updates. - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + // fragment chain updates. + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; // activate new leaf, which has relay-parent in implicit view. let next_relay_parent = Hash::repeat_byte(2); @@ -855,7 +855,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { ); } - answer_expected_hypothetical_depth_request( + answer_expected_hypothetical_membership_request( &mut overseer, vec![( HypotheticalCandidate::Complete { @@ -863,7 +863,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }, - vec![(relay_parent, vec![0])], + vec![relay_parent], )], ) .await; @@ -978,7 +978,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { ); } - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; let next_relay_parent = Hash::repeat_byte(2); let mut next_test_leaf = state.make_dummy_leaf(next_relay_parent); @@ -996,7 +996,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }, - vec![(relay_parent, vec![0])], + vec![relay_parent], )], ) .await; @@ -1113,7 +1113,7 @@ fn ensure_seconding_limit_is_respected() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Candidate 2. @@ -1139,7 +1139,7 @@ fn ensure_seconding_limit_is_respected() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send first statement from peer A. 
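Across these test updates the mocked prospective-parachains answer changes shape along with the tree-to-chain rename: per-leaf depths become a plain list of leaves. A minimal sketch of the before/after types, assuming the aliases match the subsystem message definitions:

    use polkadot_primitives::Hash;

    // Before: for each fragment-tree relay parent, the depths at which the
    // candidate could sit, e.g. vec![(relay_parent, vec![0])].
    type FragmentTreeMembership = Vec<(Hash, Vec<usize>)>;

    // After: only the leaves under which the candidate is a potential member,
    // e.g. vec![relay_parent]; depths disappear together with the trees.
    type HypotheticalMembership = Vec<Hash>;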
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 9d00a92e742bb6a304004ed74eb59ea8b6c5440c..d2bf031368c14a13f5da44dd29ba28376109f9bf 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -129,7 +129,7 @@ fn backed_candidate_leads_to_advertisement() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send enough statements to make candidate backable, make sure announcements are sent. @@ -224,7 +224,7 @@ fn backed_candidate_leads_to_advertisement() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -384,7 +384,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -515,7 +515,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_RESPONSE); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive Backed message. @@ -546,7 +546,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive a manifest about the same candidate from peer D. @@ -720,7 +720,7 @@ fn received_acknowledgements_for_locally_confirmed() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive an unexpected acknowledgement from peer D. @@ -785,7 +785,7 @@ fn received_acknowledgements_for_locally_confirmed() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive an unexpected acknowledgement from peer D. 
@@ -918,7 +918,7 @@ fn received_acknowledgements_for_externally_confirmed() { assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_RESPONSE); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } let ack = BackedCandidateAcknowledgement { @@ -1101,7 +1101,7 @@ fn received_advertisement_after_confirmation_before_backing() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive advertisement from peer D (after confirmation but before backing). @@ -1272,9 +1272,12 @@ fn additional_statements_are_shared_after_manifest_exchange() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }; - let membership = vec![(relay_parent, vec![0])]; - answer_expected_hypothetical_depth_request(&mut overseer, vec![(hypothetical, membership)]) - .await; + let membership = vec![relay_parent]; + answer_expected_hypothetical_membership_request( + &mut overseer, + vec![(hypothetical, membership)], + ) + .await; // Statements are sent to the Backing subsystem. { @@ -1338,7 +1341,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive a manifest about the same candidate from peer D. Contains different statements. @@ -1507,7 +1510,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send enough statements to make candidate backable, make sure announcements are sent. @@ -1574,7 +1577,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { }) .await; - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; // Relay parent enters view of peer C. { @@ -1721,7 +1724,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send enough statements to make candidate backable, make sure announcements are sent. @@ -1816,7 +1819,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Peer leaves view. 
@@ -1982,9 +1985,12 @@ fn inner_grid_statements_imported_to_backing(groups_for_first_para: usize) { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }; - let membership = vec![(relay_parent, vec![0])]; - answer_expected_hypothetical_depth_request(&mut overseer, vec![(hypothetical, membership)]) - .await; + let membership = vec![relay_parent]; + answer_expected_hypothetical_membership_request( + &mut overseer, + vec![(hypothetical, membership)], + ) + .await; // Receive messages from Backing subsystem. { @@ -2616,7 +2622,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive conflicting advertisement from peer C after confirmation. @@ -2763,7 +2769,7 @@ fn inactive_local_participates_in_grid() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 3d987d3fc433fbf62c1b05675da864b82f019536..f9a484f47a94c37fb909cdb640b5886ec12e2b2b 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -26,8 +26,8 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::Statement; use polkadot_node_subsystem::messages::{ - network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, FragmentTreeMembership, - HypotheticalCandidate, NetworkBridgeEvent, ProspectiveParachainsMessage, ReportPeerMessage, + network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, HypotheticalCandidate, + HypotheticalMembership, NetworkBridgeEvent, ProspectiveParachainsMessage, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers as test_helpers; @@ -539,7 +539,7 @@ async fn activate_leaf( leaf: &TestLeaf, test_state: &TestState, is_new_session: bool, - hypothetical_frontier: Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + hypothetical_memberships: Vec<(HypotheticalCandidate, HypotheticalMembership)>, ) { let activated = new_leaf(leaf.hash, leaf.number); @@ -554,7 +554,7 @@ async fn activate_leaf( leaf, test_state, is_new_session, - hypothetical_frontier, + hypothetical_memberships, ) .await; } @@ -564,7 +564,7 @@ async fn handle_leaf_activation( leaf: &TestLeaf, test_state: &TestState, is_new_session: bool, - hypothetical_frontier: Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + hypothetical_memberships: Vec<(HypotheticalCandidate, HypotheticalMembership)>, ) { let TestLeaf { number, @@ -586,19 +586,6 @@ async fn handle_leaf_activation( } ); - let mrp_response: Vec<(ParaId, BlockNumber)> = para_data - .iter() - .map(|(para_id, data)| (*para_id, data.min_relay_parent)) - .collect(); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) - ) if parent == *hash => { - tx.send(mrp_response).unwrap(); - } - ); - let header = Header { parent_hash: 
*parent_hash, number: *number, @@ -615,6 +602,19 @@ async fn handle_leaf_activation( } ); + let mrp_response: Vec<(ParaId, BlockNumber)> = para_data + .iter() + .map(|(para_id, data)| (*para_id, data.min_relay_parent)) + .collect(); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) + ) if parent == *hash => { + tx.send(mrp_response).unwrap(); + } + ); + loop { match virtual_overseer.recv().await { AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -674,18 +674,17 @@ async fn handle_leaf_activation( tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); }, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetHypotheticalFrontier(req, tx), + ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx), ) => { - assert_eq!(req.fragment_tree_relay_parent, Some(*hash)); - assert!(!req.backed_in_path_only); - for (i, (candidate, _)) in hypothetical_frontier.iter().enumerate() { + assert_eq!(req.fragment_chain_relay_parent, Some(*hash)); + for (i, (candidate, _)) in hypothetical_memberships.iter().enumerate() { assert!( req.candidates.iter().any(|c| &c == &candidate), "did not receive request for hypothetical candidate {}", i, ); } - tx.send(hypothetical_frontier).unwrap(); + tx.send(hypothetical_memberships).unwrap(); // this is the last expected runtime api call break }, @@ -727,17 +726,16 @@ async fn handle_sent_request( ); } -async fn answer_expected_hypothetical_depth_request( +async fn answer_expected_hypothetical_membership_request( virtual_overseer: &mut VirtualOverseer, - responses: Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + responses: Vec<(HypotheticalCandidate, HypotheticalMembership)>, ) { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetHypotheticalFrontier(req, tx) + ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx) ) => { - assert_eq!(req.fragment_tree_relay_parent, None); - assert!(!req.backed_in_path_only); + assert_eq!(req.fragment_chain_relay_parent, None); for (i, (candidate, _)) in responses.iter().enumerate() { assert!( req.candidates.iter().any(|c| &c == &candidate), diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index c9de42d2c4681b0b8cd13677964a85474b183201..38d7a10b86527c153f4a369beb8bbe86da05d582 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -169,7 +169,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { ); } - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); @@ -339,7 +339,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Peer C advertises candidate 2. 
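The `answer_expected_hypothetical_membership_request` helper defined above asserts on the incoming `GetHypotheticalMembership` request and feeds canned memberships back. For the common case where every queried candidate is a potential member under a single leaf, that canned answer can be built as sketched here (a hypothetical free function; the tests construct these vectors inline):

    use polkadot_node_subsystem::messages::{HypotheticalCandidate, HypotheticalMembership};
    use polkadot_primitives::Hash;

    // One `(candidate, membership)` pair per queried candidate; an empty
    // membership means the candidate is not a potential member anywhere, so
    // its statements are held back from the backing subsystem.
    fn memberships_under_leaf(
        candidates: Vec<HypotheticalCandidate>,
        leaf: Hash,
    ) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> {
        candidates.into_iter().map(|c| (c, vec![leaf])).collect()
    }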
@@ -411,7 +411,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Peer C sends an announcement for candidate 3. Should hit seconding limit for validator 1. @@ -634,7 +634,7 @@ fn peer_reported_for_not_enough_statements() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -789,7 +789,7 @@ fn peer_reported_for_duplicate_statements() { ); } - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); @@ -919,7 +919,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -1049,7 +1049,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -1215,7 +1215,7 @@ fn disabled_validators_added_to_unwanted_mask() { assert_eq!(statement, seconded_b); } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -1372,7 +1372,7 @@ fn when_validator_disabled_after_sending_the_request() { assert_eq!(statement, seconded_b); } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -1475,7 +1475,7 @@ fn no_response_for_grid_request_not_meeting_quorum() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send enough statements to make candidate backable, make sure announcements are sent. @@ -1572,7 +1572,7 @@ fn no_response_for_grid_request_not_meeting_quorum() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } let mask = StatementFilter { @@ -1720,7 +1720,7 @@ fn disabling_works_from_the_latest_state_not_relay_parent() { if p == peer_disabled && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } activate_leaf(&mut overseer, &leaf_2, &state, false, vec![]).await; @@ -1862,7 +1862,7 @@ fn local_node_sanity_checks_incoming_requests() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Should drop requests from unknown peers. 
@@ -2036,7 +2036,7 @@ fn local_node_checks_that_peer_can_request_before_responding() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; // Local node should respond to requests from peers in the same group // which appear to not have already seen the candidate @@ -2248,7 +2248,7 @@ fn local_node_respects_statement_mask() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send enough statements to make candidate backable, make sure announcements are sent. @@ -2347,7 +2347,7 @@ fn local_node_respects_statement_mask() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // `1` indicates statements NOT to request. @@ -2600,7 +2600,7 @@ fn should_delay_before_retrying_dropped_requests() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Sleep for the given amount of time. This should reset the delay for the first candidate. @@ -2691,7 +2691,7 @@ fn should_delay_before_retrying_dropped_requests() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 55a6bdb74ba73c04bc66847a1b04ff6175608fa7..87484914ef9756017aa2a5c420f5853fbabd1ed3 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -856,6 +856,7 @@ fn test_availability_recovery_msg() -> AvailabilityRecoveryMessage { dummy_candidate_receipt(dummy_hash()), Default::default(), None, + None, sender, ) } diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index a4bbd824e6712e001f1df1cd9d66f835d02822fb..526d4e480bb05d745aad137f94b611f4cf91c838 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -13,7 +13,7 @@ workspace = true bounded-vec = "0.7" futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sp-core = { path = "../../../substrate/primitives/core" } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 0f97250a934e0865d799fc1ea3923bbceff57a99..5f007bc8d67d9c07c5c202d8115e8a594f9ec721 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -30,13 +30,14 @@ use parity_scale_codec::{Decode, Encode, Error as CodecError, Input}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use polkadot_primitives::{ - BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, CollatorPair, + BlakeTwo256, BlockNumber, 
CandidateCommitments, CandidateHash, CollatorPair,
+	BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, ChunkIndex, CollatorPair,
 	CommittedCandidateReceipt, CompactStatement, CoreIndex, EncodeAs, Hash, HashT, HeadData,
 	Id as ParaId, PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode,
-	ValidationCodeHash, ValidatorIndex, MAX_CODE_SIZE, MAX_POV_SIZE,
+	ValidationCodeHash, MAX_CODE_SIZE, MAX_POV_SIZE,
 };
 pub use sp_consensus_babe::{
 	AllowedSlots as BabeAllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch,
+	Randomness as BabeRandomness,
 };
 
 pub use polkadot_parachain_primitives::primitives::{
@@ -58,7 +59,7 @@ pub use disputes::{
 /// relatively rare.
 ///
 /// The associated worker binaries should use the same version as the node that spawns them.
-pub const NODE_VERSION: &'static str = "1.11.0";
+pub const NODE_VERSION: &'static str = "1.12.0";
 
 // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node
 // plus some overhead:
@@ -639,7 +640,7 @@ pub struct ErasureChunk {
 	/// The erasure-encoded chunk of data belonging to the candidate block.
 	pub chunk: Vec<u8>,
 	/// The index of this erasure-encoded chunk of data.
-	pub index: ValidatorIndex,
+	pub index: ChunkIndex,
 	/// Proof for this chunk's branch in the Merkle tree.
 	pub proof: Proof,
 }
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml
index 7c010778d50d3c1d10884562b6ea2c10fad8a272..0dfdf926b1b098c7a59e26a809eace3aeb877189 100644
--- a/polkadot/node/service/Cargo.toml
+++ b/polkadot/node/service/Cargo.toml
@@ -67,6 +67,7 @@ sp-version = { path = "../../../substrate/primitives/version" }
 pallet-babe = { path = "../../../substrate/frame/babe" }
 pallet-staking = { path = "../../../substrate/frame/staking" }
 pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api" }
+frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", optional = true }
 frame-system = { path = "../../../substrate/frame/system" }
 
 # Substrate Other
@@ -90,7 +91,7 @@ thiserror = { workspace = true }
 kvdb = "0.13.0"
 kvdb-rocksdb = { version = "0.19.0", optional = true }
 parity-db = { version = "0.4.12", optional = true }
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 parking_lot = "0.12.1"
 bitvec = { version = "1.0.1", optional = true }
 
@@ -187,8 +188,18 @@ full-node = [
 ]
 
 # Configure the native runtimes to use.
-westend-native = ["bitvec", "westend-runtime", "westend-runtime-constants"] -rococo-native = ["bitvec", "rococo-runtime", "rococo-runtime-constants"] +westend-native = [ + "bitvec", + "frame-metadata-hash-extension", + "westend-runtime", + "westend-runtime-constants", +] +rococo-native = [ + "bitvec", + "frame-metadata-hash-extension", + "rococo-runtime", + "rococo-runtime-constants", +] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", @@ -227,7 +238,3 @@ runtime-metrics = [ "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] - -elastic-scaling-experimental = [ - "polkadot-collator-protocol?/elastic-scaling-experimental", -] diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index aa5a199cfee64c674e3fbe65323ea82110016e0b..dfe79fd9c5e5d920ad1da8f1cab38a6796316e97 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -14,12 +14,12 @@ "/dns/boot.stake.plus/tcp/31334/wss/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR", "/dns/boot-node.helikon.io/tcp/7060/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", "/dns/boot-node.helikon.io/tcp/7062/wss/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", - "/dns/kusama.bootnode.amforc.com/tcp/30333/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", - "/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", + "/dns/kusama.bootnode.amforc.com/tcp/30001/p2p/12D3KooWKvYf6qKaAF8UUDw3KsTwjHLnvkED23yxHbH3npMe8w4G", + "/dns/kusama.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWKvYf6qKaAF8UUDw3KsTwjHLnvkED23yxHbH3npMe8w4G", "/dns/kusama.bootnodes.polkadotters.com/tcp/30311/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", "/dns/kusama.bootnodes.polkadotters.com/tcp/30313/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", - "/dns/boot-cr.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", - "/dns/boot-cr.gatotech.network/tcp/35200/wss/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", + "/dns/boot.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", + "/dns/boot.gatotech.network/tcp/35200/wss/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot.metaspan.io/tcp/23012/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", "/dns/boot.metaspan.io/tcp/23015/ws/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", "/dns/boot.metaspan.io/tcp/23016/wss/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", @@ -36,7 +36,8 @@ "/dns/ksm14.rotko.net/tcp/33224/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30333/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT", "/dns/boot-kusama.luckyfriday.io/tcp/443/wss/p2p/12D3KooWS1Lu6DmK8YHSvkErpxpcXmk14vG6y4KVEFEkd9g62PP8", - "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT" + "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT", + "/dns4/kusama-0.boot.onfinality.io/tcp/27682/ws/p2p/12D3KooWFrwFo7ry3dEuFwhehGSSN96a5Xdzxot7SWfXeSbhELAe" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/paseo.json b/polkadot/node/service/chain-specs/paseo.json index 19eefd328994ed67c269fafc06acefaf28c751bf..e307d5213a396ee9221701e39a9b4be239f591b2 100644 --- 
a/polkadot/node/service/chain-specs/paseo.json +++ b/polkadot/node/service/chain-specs/paseo.json @@ -3,8 +3,8 @@ "id": "paseo", "chainType": "Live", "bootNodes": [ - "/dns/paseo.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWFD81HC9memUwuGMLvhDDEfmXjn6jC4n7zyNs3vToXapS", - "/dns/paseo.bootnode.amforc.com/tcp/30344/p2p/12D3KooWFD81HC9memUwuGMLvhDDEfmXjn6jC4n7zyNs3vToXapS", + "/dns/paseo.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWSdf63rZjtGdeWXpQwQwPh8K8c22upcB3B1VmqW8rxrjw", + "/dns/paseo.bootnode.amforc.com/tcp/30001/p2p/12D3KooWSdf63rZjtGdeWXpQwQwPh8K8c22upcB3B1VmqW8rxrjw", "/dns/boot.stake.plus/tcp/43334/wss/p2p/12D3KooWNhgAC3hjZHxaT52EpPFZohkCL1AHFAijqcN8xB9Rwud2", "/dns/boot.stake.plus/tcp/43333/p2p/12D3KooWNhgAC3hjZHxaT52EpPFZohkCL1AHFAijqcN8xB9Rwud2", "/dns/boot.metaspan.io/tcp/36017/wss/p2p/12D3KooWSW6nDfM3SS8rUtjMyjdszivK31bu4a1sRngGa2hFETz7", @@ -17,6 +17,8 @@ "/dns/boot.gatotech.network/tcp/35400/wss/p2p/12D3KooWEvz5Ygv3MhCUNTVQbUTVhzhvf4KKcNoe5M5YbVLPBeeW", "/dns/paseo-bootnode.turboflakes.io/tcp/30630/p2p/12D3KooWMjCN2CrnN71hAdehn6M2iYKeGdGbZ1A3SKhf4hxrgG9e", "/dns/paseo-bootnode.turboflakes.io/tcp/30730/wss/p2p/12D3KooWMjCN2CrnN71hAdehn6M2iYKeGdGbZ1A3SKhf4hxrgG9e", + "/dns/pso16.rotko.net/tcp/33246/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", + "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", "/dns/paseo-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBLLFKDGBxCwq3QmU3YwWKXUx953WwprRshJQicYu4Cfr", "/dns/paseo-boot-ng.dwellir.com/tcp/30354/p2p/12D3KooWBLLFKDGBxCwq3QmU3YwWKXUx953WwprRshJQicYu4Cfr" ], diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index bf0599f0bdc5f003ad68b13a60c0c890612cc3e1..f79b6db90fcf0cc3513f644edf468ed0c2fc2cd7 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -15,12 +15,12 @@ "/dns/boot.stake.plus/tcp/30334/wss/p2p/12D3KooWKT4ZHNxXH4icMjdrv7EwWBkfbz5duxE5sdJKKeWFYi5n", "/dns/boot-node.helikon.io/tcp/7070/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", "/dns/boot-node.helikon.io/tcp/7072/wss/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", - "/dns/polkadot.bootnode.amforc.com/tcp/30333/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", - "/dns/polkadot.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", + "/dns/polkadot.bootnode.amforc.com/tcp/30001/p2p/12D3KooWT2HyZx5C6BBeLbCKhYG2SqJYuiu7sLMxGzUcQBko3BMr", + "/dns/polkadot.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWT2HyZx5C6BBeLbCKhYG2SqJYuiu7sLMxGzUcQBko3BMr", "/dns/polkadot.bootnodes.polkadotters.com/tcp/30314/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", "/dns/polkadot.bootnodes.polkadotters.com/tcp/30316/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", - "/dns/boot-cr.gatotech.network/tcp/33100/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", - "/dns/boot-cr.gatotech.network/tcp/35100/wss/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", + "/dns/boot.gatotech.network/tcp/33100/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", + "/dns/boot.gatotech.network/tcp/35100/wss/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot.metaspan.io/tcp/13012/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", "/dns/boot.metaspan.io/tcp/13015/ws/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", 
"/dns/boot.metaspan.io/tcp/13016/wss/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index 9dfc715df46de3d09d6f901f11ca30a2c0b8cc69..1bfb5ba334cef24e70b6335436ec10c77929f9e2 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -12,12 +12,12 @@ "/dns/boot.stake.plus/tcp/32334/wss/p2p/12D3KooWK8fjVoSvMq5copQYMsdYreSGPGgcMbGMgbMDPfpf3sm7", "/dns/boot-node.helikon.io/tcp/7080/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", "/dns/boot-node.helikon.io/tcp/7082/wss/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", - "/dns/westend.bootnode.amforc.com/tcp/30333/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", - "/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", + "/dns/westend.bootnode.amforc.com/tcp/30001/p2p/12D3KooWAPmR7rbm2axPjHzF51yvQNDM5GvWfkF5BTV44Y5vJ3ct", + "/dns/westend.bootnode.amforc.com/tcp/29999/wss/p2p/12D3KooWAPmR7rbm2axPjHzF51yvQNDM5GvWfkF5BTV44Y5vJ3ct", "/dns/westend.bootnodes.polkadotters.com/tcp/30308/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", "/dns/westend.bootnodes.polkadotters.com/tcp/30310/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", - "/dns/boot-cr.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", - "/dns/boot-cr.gatotech.network/tcp/35300/wss/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", + "/dns/boot.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", + "/dns/boot.gatotech.network/tcp/35300/wss/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot.metaspan.io/tcp/33012/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", "/dns/boot.metaspan.io/tcp/33015/ws/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", "/dns/boot.metaspan.io/tcp/33016/wss/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs index a0c4d3b04469b1bcf85990d4f6f62922f09bf88a..4dcff2078419c9161ec752dc269f3f6a919748a2 100644 --- a/polkadot/node/service/src/benchmarking.rs +++ b/polkadot/node/service/src/benchmarking.rs @@ -201,6 +201,7 @@ fn westend_sign_call( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), + frame_metadata_hash_extension::CheckMetadataHash::::new(false), ); let payload = runtime::SignedPayload::from_raw( @@ -215,6 +216,7 @@ fn westend_sign_call( (), (), (), + None, ), ); @@ -253,6 +255,7 @@ fn rococo_sign_call( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), + frame_metadata_hash_extension::CheckMetadataHash::::new(false), ); let payload = runtime::SignedPayload::from_raw( @@ -267,6 +270,7 @@ fn rococo_sign_call( (), (), (), + None, ), ); diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 1b990af2394b6fba31b0c8ddb2c1839cb2c09fb0..c7019e3f0b22b3bf1793a41f25dc447adfa2cf70 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -70,11 +70,11 @@ pub struct Extensions { } // Generic chain spec, in case when we don't have the native runtime. 
-pub type GenericChainSpec = service::GenericChainSpec<(), Extensions>; +pub type GenericChainSpec = service::GenericChainSpec; /// The `ChainSpec` parameterized for the westend runtime. #[cfg(feature = "westend-native")] -pub type WestendChainSpec = service::GenericChainSpec<(), Extensions>; +pub type WestendChainSpec = service::GenericChainSpec; /// The `ChainSpec` parameterized for the westend runtime. // Dummy chain spec, but that is fine when we don't have the native runtime. @@ -83,7 +83,7 @@ pub type WestendChainSpec = GenericChainSpec; /// The `ChainSpec` parameterized for the rococo runtime. #[cfg(feature = "rococo-native")] -pub type RococoChainSpec = service::GenericChainSpec<(), Extensions>; +pub type RococoChainSpec = service::GenericChainSpec; /// The `ChainSpec` parameterized for the rococo runtime. // Dummy chain spec, but that is fine when we don't have the native runtime. diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 5c889552a6ae4b7708c0afb7511447fb518f8a9a..34abc76813ffd9fdced196f53cfda86188eb9cf4 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -272,11 +272,11 @@ sp_api::impl_runtime_apis! { fn generate_proof( _: Vec, _: Option, - ) -> Result<(Vec, sp_mmr_primitives::Proof), sp_mmr_primitives::Error> { + ) -> Result<(Vec, sp_mmr_primitives::LeafProof), sp_mmr_primitives::Error> { unimplemented!() } - fn verify_proof(_: Vec, _: sp_mmr_primitives::Proof) + fn verify_proof(_: Vec, _: sp_mmr_primitives::LeafProof) -> Result<(), sp_mmr_primitives::Error> { unimplemented!() @@ -285,7 +285,7 @@ sp_api::impl_runtime_apis! { fn verify_proof_stateless( _: Hash, _: Vec, - _: sp_mmr_primitives::Proof + _: sp_mmr_primitives::LeafProof ) -> Result<(), sp_mmr_primitives::Error> { unimplemented!() } @@ -416,8 +416,8 @@ sp_api::impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(_: ::Extrinsic) -> Result, xcm_fee_payment_runtime_api::dry_run::Error> { + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(_: (), _: ()) -> Result, xcm_fee_payment_runtime_api::dry_run::Error> { unimplemented!() } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 665533e9bc70a9173a5a9f6ff106657de07f859f..9ee81f80d66a545fbc704068b62cdebf01c06a08 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -88,6 +88,7 @@ use telemetry::TelemetryWorker; #[cfg(feature = "full-node")] use telemetry::{Telemetry, TelemetryWorkerHandle}; +use beefy_primitives::ecdsa_crypto; pub use chain_spec::{GenericChainSpec, RococoChainSpec, WestendChainSpec}; pub use consensus_common::{Proposal, SelectChain}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; @@ -100,8 +101,8 @@ pub use sc_executor::NativeExecutionDispatch; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; pub use service::{ config::{DatabaseSource, PrometheusConfig}, - ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, RuntimeGenesis, - TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions, + ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, TFullBackend, + TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions, }; pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi}; pub use sp_runtime::{ @@ -394,8 +395,8 @@ type FullSelectChain = relay_chain_selection::SelectRelayChain; type FullGrandpaBlockImport = grandpa::GrandpaBlockImport; #[cfg(feature = "full-node")] -type FullBeefyBlockImport = - beefy::import::BeefyBlockImport; +type FullBeefyBlockImport = + beefy::import::BeefyBlockImport; #[cfg(feature = "full-node")] struct Basics { @@ -486,11 +487,14 @@ fn new_partial( babe::BabeBlockImport< Block, FullClient, - FullBeefyBlockImport>, + FullBeefyBlockImport< + FullGrandpaBlockImport, + ecdsa_crypto::AuthorityId, + >, >, grandpa::LinkHalf, babe::BabeLink, - beefy::BeefyVoterLinks, + beefy::BeefyVoterLinks, ), grandpa::SharedVoterState, sp_consensus_babe::SlotDuration, @@ -601,7 +605,7 @@ where subscription_executor: subscription_executor.clone(), finality_provider: finality_proof_provider.clone(), }, - beefy: polkadot_rpc::BeefyDeps { + beefy: polkadot_rpc::BeefyDeps:: { beefy_finality_proof_stream: beefy_rpc_links.from_voter_justif_stream.clone(), beefy_best_block_stream: beefy_rpc_links.from_voter_best_beefy_stream.clone(), subscription_executor, @@ -750,6 +754,7 @@ pub fn new_full< prepare_workers_hard_max_num, }: NewFullParams, ) -> Result { + use polkadot_availability_recovery::FETCH_CHUNKS_THRESHOLD; use polkadot_node_network_protocol::request_response::IncomingRequest; use sc_network_sync::WarpSyncParams; @@ -914,7 +919,10 @@ pub fn new_full< let (pov_req_receiver, cfg) = IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); net_config.add_request_response_protocol(cfg); - let (chunk_req_receiver, cfg) = + let (chunk_req_v1_receiver, cfg) = + IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); + net_config.add_request_response_protocol(cfg); + let (chunk_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver::<_, Network>(&req_protocol_names); net_config.add_request_response_protocol(cfg); @@ -988,19 +996,26 @@ pub fn new_full< 
stagnant_check_interval: Default::default(), stagnant_check_mode: chain_selection_subsystem::StagnantCheckMode::PruneOnly, }; + + // Kusama + testnets get a higher threshold; we are conservative on Polkadot for now. + let fetch_chunks_threshold = + if config.chain_spec.is_polkadot() { None } else { Some(FETCH_CHUNKS_THRESHOLD) }; + Some(ExtendedOverseerGenArgs { keystore, parachains_db, candidate_validation_config, availability_config: AVAILABILITY_CONFIG, pov_req_receiver, - chunk_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, statement_req_receiver, candidate_req_v2_receiver, approval_voting_config, dispute_req_receiver, dispute_coordinator_config, chain_selection_config, + fetch_chunks_threshold, }) }; @@ -1282,7 +1297,9 @@ pub fn new_full< is_authority: role.is_authority(), }; - let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); + let gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _, ecdsa_crypto::AuthorityId>( + beefy_params, + ); // BEEFY is part of consensus, if it fails we'll bring the node down with it to make sure it // is noticed. diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index 26b1446bf515afa342bd53c6237a87e7e750022d..6f35718cd18f25ca6ae4f2c302f2035fa77153bc 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -119,8 +119,10 @@ pub struct ExtendedOverseerGenArgs { pub availability_config: AvailabilityConfig, /// POV request receiver. pub pov_req_receiver: IncomingRequestReceiver, - /// Erasure chunks request receiver. - pub chunk_req_receiver: IncomingRequestReceiver, + /// Erasure chunk request v1 receiver. + pub chunk_req_v1_receiver: IncomingRequestReceiver, + /// Erasure chunk request v2 receiver. + pub chunk_req_v2_receiver: IncomingRequestReceiver, /// Receiver for incoming large statement requests. pub statement_req_receiver: IncomingRequestReceiver, /// Receiver for incoming candidate requests. @@ -133,6 +135,10 @@ pub struct ExtendedOverseerGenArgs { pub dispute_coordinator_config: DisputeCoordinatorConfig, /// Configuration for the chain selection subsystem. pub chain_selection_config: ChainSelectionConfig, + /// Optional availability recovery fetch chunks threshold. If the PoV size is lower + /// than the value put in here, we always try to recover availability from backers. + /// The presence of this parameter here is needed to have different values per chain. + pub fetch_chunks_threshold: Option, } /// Obtain a prepared validator `Overseer`, that is initialized with all default values.
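// A minimal sketch (not the subsystem's actual code) of how a PoV-size threshold like
// `fetch_chunks_threshold` above is consulted during availability recovery. The function
// name and the exact `None` behaviour are illustrative assumptions:
fn prefer_full_pov_from_backers(pov_size: usize, fetch_chunks_threshold: Option<usize>) -> bool {
    // A PoV smaller than the configured threshold is cheaper to fetch whole from a backer
    // than to reconstruct from erasure chunks. With `None` (the Polkadot arm above) we
    // model the conservative default here as plain chunk recovery.
    fetch_chunks_threshold.map_or(false, |threshold| pov_size < threshold)
}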
@@ -159,13 +165,15 @@ pub fn validator_overseer_builder( candidate_validation_config, availability_config, pov_req_receiver, - chunk_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, statement_req_receiver, candidate_req_v2_receiver, approval_voting_config, dispute_req_receiver, dispute_coordinator_config, chain_selection_config, + fetch_chunks_threshold, }: ExtendedOverseerGenArgs, ) -> Result< InitializedOverseerBuilder< @@ -221,7 +229,7 @@ where network_service.clone(), authority_discovery_service.clone(), network_bridge_metrics.clone(), - req_protocol_names, + req_protocol_names.clone(), peerset_protocol_names.clone(), notification_sinks.clone(), )) @@ -236,11 +244,18 @@ where )) .availability_distribution(AvailabilityDistributionSubsystem::new( keystore.clone(), - IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, + IncomingRequestReceivers { + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + }, + req_protocol_names.clone(), Metrics::register(registry)?, )) - .availability_recovery(AvailabilityRecoverySubsystem::with_chunks_if_pov_large( + .availability_recovery(AvailabilityRecoverySubsystem::for_validator( + fetch_chunks_threshold, available_data_req_receiver, + &req_protocol_names, Metrics::register(registry)?, )) .availability_store(AvailabilityStoreSubsystem::new( @@ -385,7 +400,7 @@ pub fn collator_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, - ProspectiveParachainsSubsystem, + DummySubsystem, >, Error, > @@ -406,7 +421,7 @@ where network_service.clone(), authority_discovery_service.clone(), network_bridge_metrics.clone(), - req_protocol_names, + req_protocol_names.clone(), peerset_protocol_names.clone(), notification_sinks.clone(), )) @@ -421,7 +436,9 @@ where )) .availability_distribution(DummySubsystem) .availability_recovery(AvailabilityRecoverySubsystem::for_collator( + None, available_data_req_receiver, + &req_protocol_names, Metrics::register(registry)?, )) .availability_store(DummySubsystem) @@ -462,7 +479,7 @@ where .dispute_coordinator(DummySubsystem) .dispute_distribution(DummySubsystem) .chain_selection(DummySubsystem) - .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) + .prospective_parachains(DummySubsystem) .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml index e56efbf825429e9f7461fd9453289f5df68b9b6c..ebd9322e9f74a7b7f7d44399ac5ea5904629a97b 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -28,6 +28,7 @@ polkadot-primitives = { path = "../../primitives" } polkadot-node-network-protocol = { path = "../network/protocol" } polkadot-availability-recovery = { path = "../network/availability-recovery", features = ["subsystem-benchmarks"] } polkadot-availability-distribution = { path = "../network/availability-distribution" } +polkadot-statement-distribution = { path = "../network/statement-distribution" } polkadot-node-core-av-store = { path = "../core/av-store" } polkadot-node-core-chain-api = { path = "../core/chain-api" } polkadot-availability-bitfield-distribution = { path = "../network/bitfield-distribution" } @@ -55,7 +56,7 @@ rand_distr = "0.4.3" bitvec = "1.0.1" kvdb-memorydb = "0.13.0" -parity-scale-codec = { version = "3.6.1", features = ["derive", "std"] } +parity-scale-codec = { version = "3.6.12", features = ["derive", 
"std"] } tokio = { version = "1.24.2", features = ["parking_lot", "rt-multi-thread"] } clap-num = "1.0.2" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } @@ -88,6 +89,7 @@ paste = "1.0.14" orchestra = { version = "0.3.5", default-features = false, features = ["futures_channel"] } pyroscope = "0.5.7" pyroscope_pprofrs = "0.2.7" +strum = { version = "0.24", features = ["derive"] } [features] default = [] diff --git a/polkadot/node/subsystem-bench/examples/availability_read.yaml b/polkadot/node/subsystem-bench/examples/availability_read.yaml index 82355b0e2973aaff490a5c2d3ed54d37c61430de..263a6988242e22c4c6f947dbd472a34167ef7453 100644 --- a/polkadot/node/subsystem-bench/examples/availability_read.yaml +++ b/polkadot/node/subsystem-bench/examples/availability_read.yaml @@ -1,8 +1,8 @@ TestConfiguration: # Test 1 - objective: !DataAvailabilityRead - fetch_from_backers: true - n_validators: 300 + strategy: FullFromBackers + n_validators: 500 n_cores: 20 min_pov_size: 5120 max_pov_size: 5120 @@ -16,7 +16,7 @@ TestConfiguration: # Test 2 - objective: !DataAvailabilityRead - fetch_from_backers: true + strategy: FullFromBackers n_validators: 500 n_cores: 20 min_pov_size: 5120 @@ -31,7 +31,7 @@ TestConfiguration: # Test 3 - objective: !DataAvailabilityRead - fetch_from_backers: true + strategy: FullFromBackers n_validators: 1000 n_cores: 20 min_pov_size: 5120 diff --git a/polkadot/node/subsystem-bench/examples/statement_distribution.yaml b/polkadot/node/subsystem-bench/examples/statement_distribution.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e86669ffefc356430c6ce4aa0ccd2ed5fca77474 --- /dev/null +++ b/polkadot/node/subsystem-bench/examples/statement_distribution.yaml @@ -0,0 +1,5 @@ +TestConfiguration: +- objective: StatementDistribution + num_blocks: 10 + n_cores: 100 + n_validators: 500 diff --git a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs index 10953b6c7839d8ea2a4e0396a4ee37b9875828e9..346a058b9796e2ccfe0aab2b6fbbac6fb1f600d7 100644 --- a/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs +++ b/polkadot/node/subsystem-bench/src/cli/subsystem-bench.rs @@ -20,7 +20,7 @@ use clap::Parser; use color_eyre::eyre; use colored::Colorize; -use polkadot_subsystem_bench::{approval, availability, configuration}; +use polkadot_subsystem_bench::{approval, availability, configuration, statement}; use pyroscope::PyroscopeAgent; use pyroscope_pprofrs::{pprof_backend, PprofConfig}; use serde::{Deserialize, Serialize}; @@ -40,6 +40,8 @@ pub enum TestObjective { DataAvailabilityWrite, /// Benchmark the approval-voting and approval-distribution subsystems. 
ApprovalVoting(approval::ApprovalsOptions), + // Benchmark the statement-distribution subsystem + StatementDistribution, } impl std::fmt::Display for TestObjective { @@ -51,6 +53,7 @@ impl std::fmt::Display for TestObjective { Self::DataAvailabilityRead(_) => "DataAvailabilityRead", Self::DataAvailabilityWrite => "DataAvailabilityWrite", Self::ApprovalVoting(_) => "ApprovalVoting", + Self::StatementDistribution => "StatementDistribution", } ) } @@ -142,11 +145,8 @@ impl BenchCli { availability::TestDataAvailability::Read(opts), true, ); - env.runtime().block_on(availability::benchmark_availability_read( - &benchmark_name, - &mut env, - &state, - )) + env.runtime() + .block_on(availability::benchmark_availability_read(&mut env, &state)) }, TestObjective::DataAvailabilityWrite => { let state = availability::TestState::new(&test_config); @@ -155,23 +155,22 @@ impl BenchCli { availability::TestDataAvailability::Write, true, ); - env.runtime().block_on(availability::benchmark_availability_write( - &benchmark_name, - &mut env, - &state, - )) + env.runtime() + .block_on(availability::benchmark_availability_write(&mut env, &state)) }, TestObjective::ApprovalVoting(ref options) => { let (mut env, state) = approval::prepare_test(test_config.clone(), options.clone(), true); - env.runtime().block_on(approval::bench_approvals( - &benchmark_name, - &mut env, - state, - )) + env.runtime().block_on(approval::bench_approvals(&mut env, state)) + }, + TestObjective::StatementDistribution => { + let state = statement::TestState::new(&test_config); + let (mut env, _protocol_config) = statement::prepare_test(&state, true); + env.runtime() + .block_on(statement::benchmark_statement_distribution(&mut env, &state)) }, }; - println!("{}", usage); + println!("\n{}\n{}", benchmark_name.purple(), usage); } if let Some(agent_running) = agent_running { diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index 6ac0776d2d35a496be7737987988a68fd854d956..2e5831276ad3f3889e454fc89f378531d15db0f0 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -30,7 +30,7 @@ use crate::{ mock::{ chain_api::{ChainApiState, MockChainApi}, network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api::MockRuntimeApi, + runtime_api::{MockRuntimeApi, MockRuntimeApiCoreState}, AlwaysSupportsParachains, TestSyncOracle, }, network::{ @@ -465,8 +465,9 @@ impl ApprovalTestState { } } +#[async_trait::async_trait] impl HandleNetworkMessage for ApprovalTestState { - fn handle( + async fn handle( &self, _message: crate::network::NetworkMessage, _node_sender: &mut futures::channel::mpsc::UnboundedSender, @@ -807,6 +808,7 @@ fn build_overseer( state.candidate_events_by_block(), Some(state.babe_epoch.clone()), 1, + MockRuntimeApiCoreState::Occupied, ); let mock_tx_bridge = MockNetworkBridgeTx::new( network.clone(), @@ -886,7 +888,6 @@ fn prepare_test_inner( } pub async fn bench_approvals( - benchmark_name: &str, env: &mut TestEnvironment, mut state: ApprovalTestState, ) -> BenchmarkUsage { @@ -898,12 +899,11 @@ pub async fn bench_approvals( env.registry().clone(), ) .await; - bench_approvals_run(benchmark_name, env, state, producer_rx).await + bench_approvals_run(env, state, producer_rx).await } /// Runs the approval benchmark. 
pub async fn bench_approvals_run( - benchmark_name: &str, env: &mut TestEnvironment, state: ApprovalTestState, producer_rx: oneshot::Receiver<()>, @@ -915,7 +915,9 @@ pub async fn bench_approvals_run( // First create the initialization messages that make sure that the node under // test receives notifications about the topology used and the connected peers. - let mut initialization_messages = env.network().generate_peer_connected(); + let mut initialization_messages = env.network().generate_peer_connected(|e| { + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(e)) + }); initialization_messages.extend(generate_new_session_topology( &state.test_authorities, ValidatorIndex(NODE_UNDER_TEST), @@ -1068,5 +1070,5 @@ pub async fn bench_approvals_run( state.total_unique_messages.load(std::sync::atomic::Ordering::SeqCst) ); - env.collect_resource_usage(benchmark_name, &["approval-distribution", "approval-voting"]) + env.collect_resource_usage(&["approval-distribution", "approval-voting"]) } diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index 5b93c3d862de683ea35dde9f0a9e313b1a77ad4d..52944ffb08f3eff41915aa37b8661c68edd78dbf 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -17,12 +17,14 @@ use crate::{ availability::av_store_helpers::new_av_store, dummy_builder, - environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, + environment::{TestEnvironment, TestEnvironmentDependencies}, mock::{ - av_store::{self, MockAvailabilityStore, NetworkAvailabilityState}, + av_store::{MockAvailabilityStore, NetworkAvailabilityState}, chain_api::{ChainApiState, MockChainApi}, network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx}, - runtime_api::{self, MockRuntimeApi}, + runtime_api::{ + node_features_with_chunk_mapping_enabled, MockRuntimeApi, MockRuntimeApiCoreState, + }, AlwaysSupportsParachains, }, network::new_network, @@ -30,16 +32,17 @@ use crate::{ }; use colored::Colorize; use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; + use parity_scale_codec::Encode; use polkadot_availability_bitfield_distribution::BitfieldDistribution; use polkadot_availability_distribution::{ AvailabilityDistributionSubsystem, IncomingRequestReceivers, }; -use polkadot_availability_recovery::AvailabilityRecoverySubsystem; +use polkadot_availability_recovery::{AvailabilityRecoverySubsystem, RecoveryStrategyKind}; use polkadot_node_core_av_store::AvailabilityStoreSubsystem; use polkadot_node_metrics::metrics::Metrics; use polkadot_node_network_protocol::{ - request_response::{IncomingRequest, ReqProtocolNames}, + request_response::{v1, v2, IncomingRequest}, OurView, }; use polkadot_node_subsystem::{ @@ -51,12 +54,13 @@ use polkadot_node_subsystem_types::{ Span, }; use polkadot_overseer::{metrics::Metrics as OverseerMetrics, Handle as OverseerHandle}; -use polkadot_primitives::{Block, GroupIndex, Hash}; +use polkadot_primitives::{Block, CoreIndex, GroupIndex, Hash}; use sc_network::request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}; +use std::{ops::Sub, sync::Arc, time::Instant}; +use strum::Display; use sc_service::SpawnTaskHandle; use serde::{Deserialize, Serialize}; -use std::{ops::Sub, sync::Arc, time::Instant}; pub use test_state::TestState; mod av_store_helpers; @@ -64,15 +68,26 @@ mod test_state; const LOG_TARGET: &str = "subsystem-bench::availability";
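// The `Strategy` enum added below replaces the old `fetch_from_backers` boolean. For
// illustration, a test sequence now picks a recovery strategy like this (taken from the
// updated `availability_read.yaml` example earlier in this diff):
//
//   - objective: !DataAvailabilityRead
//     strategy: FullFromBackers
//     n_validators: 500
//
// On the command line the clap `ValueEnum` variants are kebab-cased, so a hypothetical
// invocation would pass e.g. `--strategy full-from-backers`; `Strategy::Systematic` is
// the declared default below.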
+#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq, Serialize, Deserialize, Display)] +#[value(rename_all = "kebab-case")] +#[strum(serialize_all = "kebab-case")] +pub enum Strategy { + /// Regular random chunk recovery. This is also the fallback for the next strategies. + Chunks, + /// Recovery from systematic chunks. Much faster than regular chunk recovery because it avoids + /// doing the reed-solomon reconstruction. + Systematic, + /// Fetch the full availability data from backers first. Saves CPU as we don't need to + /// re-construct from chunks. Typically this is only faster if nodes have enough bandwidth. + FullFromBackers, +} + #[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)] #[clap(rename_all = "kebab-case")] #[allow(missing_docs)] pub struct DataAvailabilityReadOptions { - #[clap(short, long, default_value_t = false)] - /// Turbo boost AD Read by fetching the full availability datafrom backers first. Saves CPU as - /// we don't need to re-construct from chunks. Typically this is only faster if nodes have - /// enough bandwidth. - pub fetch_from_backers: bool, + #[clap(short, long, default_value_t = Strategy::Systematic)] + pub strategy: Strategy, } pub enum TestDataAvailability { @@ -84,7 +99,7 @@ fn build_overseer_for_availability_read( spawn_task_handle: SpawnTaskHandle, runtime_api: MockRuntimeApi, av_store: MockAvailabilityStore, - network_bridge: (MockNetworkBridgeTx, MockNetworkBridgeRx), + (network_bridge_tx, network_bridge_rx): (MockNetworkBridgeTx, MockNetworkBridgeRx), availability_recovery: AvailabilityRecoverySubsystem, dependencies: &TestEnvironmentDependencies, ) -> (Overseer, AlwaysSupportsParachains>, OverseerHandle) { @@ -95,8 +110,8 @@ fn build_overseer_for_availability_read( let builder = dummy .replace_runtime_api(|_| runtime_api) .replace_availability_store(|_| av_store) - .replace_network_bridge_tx(|_| network_bridge.0) - .replace_network_bridge_rx(|_| network_bridge.1) + .replace_network_bridge_tx(|_| network_bridge_tx) + .replace_network_bridge_rx(|_| network_bridge_rx) .replace_availability_recovery(|_| availability_recovery); let (overseer, raw_handle) = @@ -109,7 +124,7 @@ fn build_overseer_for_availability_read( fn build_overseer_for_availability_write( spawn_task_handle: SpawnTaskHandle, runtime_api: MockRuntimeApi, - network_bridge: (MockNetworkBridgeTx, MockNetworkBridgeRx), + (network_bridge_tx, network_bridge_rx): (MockNetworkBridgeTx, MockNetworkBridgeRx), availability_distribution: AvailabilityDistributionSubsystem, chain_api: MockChainApi, availability_store: AvailabilityStoreSubsystem, @@ -123,8 +138,8 @@ fn build_overseer_for_availability_write( let builder = dummy .replace_runtime_api(|_| runtime_api) .replace_availability_store(|_| availability_store) - .replace_network_bridge_tx(|_| network_bridge.0) - .replace_network_bridge_rx(|_| network_bridge.1) + .replace_network_bridge_tx(|_| network_bridge_tx) + .replace_network_bridge_rx(|_| network_bridge_rx) .replace_chain_api(|_| chain_api) .replace_bitfield_distribution(|_| bitfield_distribution) // This is needed to test own chunk recovery for `n_cores`.
@@ -142,10 +157,14 @@ pub fn prepare_test( with_prometheus_endpoint: bool, ) -> (TestEnvironment, Vec) { let dependencies = TestEnvironmentDependencies::default(); + let availability_state = NetworkAvailabilityState { candidate_hashes: state.candidate_hashes.clone(), + candidate_hash_to_core_index: state.candidate_hash_to_core_index.clone(), available_data: state.available_data.clone(), chunks: state.chunks.clone(), + chunk_indices: state.chunk_indices.clone(), + req_protocol_names: state.req_protocol_names.clone(), }; let mut req_cfgs = Vec::new(); @@ -153,20 +172,31 @@ pub fn prepare_test( let (collation_req_receiver, collation_req_cfg) = IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, None)); + >(&state.req_protocol_names); req_cfgs.push(collation_req_cfg); let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver::< Block, sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, None)); - - let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver::< - Block, - sc_network::NetworkWorker, - >(&ReqProtocolNames::new(GENESIS_HASH, None)); + >(&state.req_protocol_names); req_cfgs.push(pov_req_cfg); + let (chunk_req_v1_receiver, chunk_req_v1_cfg) = + IncomingRequest::::get_config_receiver::< + Block, + sc_network::NetworkWorker, + >(&state.req_protocol_names); + + // We won't use v1 chunk fetching requests, but we need to keep the inbound queue alive. + // Otherwise, av-distribution subsystem will terminate. + std::mem::forget(chunk_req_v1_cfg); + + let (chunk_req_v2_receiver, chunk_req_v2_cfg) = + IncomingRequest::::get_config_receiver::< + Block, + sc_network::NetworkWorker, + >(&state.req_protocol_names); + let (network, network_interface, network_receiver) = new_network( &state.config, &dependencies, @@ -180,37 +210,48 @@ pub fn prepare_test( state.test_authorities.clone(), ); let network_bridge_rx = - network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_cfg)); + network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_v2_cfg)); - let runtime_api = runtime_api::MockRuntimeApi::new( + let runtime_api = MockRuntimeApi::new( state.config.clone(), state.test_authorities.clone(), state.candidate_receipts.clone(), Default::default(), Default::default(), 0, + MockRuntimeApiCoreState::Occupied, ); let (overseer, overseer_handle) = match &mode { TestDataAvailability::Read(options) => { - let use_fast_path = options.fetch_from_backers; - - let subsystem = if use_fast_path { - AvailabilityRecoverySubsystem::with_fast_path( + let subsystem = match options.strategy { + Strategy::FullFromBackers => + AvailabilityRecoverySubsystem::with_recovery_strategy_kind( + collation_req_receiver, + &state.req_protocol_names, + Metrics::try_register(&dependencies.registry).unwrap(), + RecoveryStrategyKind::BackersFirstAlways, + ), + Strategy::Chunks => AvailabilityRecoverySubsystem::with_recovery_strategy_kind( collation_req_receiver, + &state.req_protocol_names, Metrics::try_register(&dependencies.registry).unwrap(), - ) - } else { - AvailabilityRecoverySubsystem::with_chunks_only( + RecoveryStrategyKind::ChunksAlways, + ), + Strategy::Systematic => AvailabilityRecoverySubsystem::with_recovery_strategy_kind( collation_req_receiver, + &state.req_protocol_names, Metrics::try_register(&dependencies.registry).unwrap(), - ) + RecoveryStrategyKind::SystematicChunks, + ), }; // Use a mocked av-store. 
- let av_store = av_store::MockAvailabilityStore::new( + let av_store = MockAvailabilityStore::new( state.chunks.clone(), + state.chunk_indices.clone(), state.candidate_hashes.clone(), + state.candidate_hash_to_core_index.clone(), ); build_overseer_for_availability_read( @@ -225,7 +266,12 @@ pub fn prepare_test( TestDataAvailability::Write => { let availability_distribution = AvailabilityDistributionSubsystem::new( state.test_authorities.keyring.keystore(), - IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, + IncomingRequestReceivers { + pov_req_receiver, + chunk_req_v1_receiver, + chunk_req_v2_receiver, + }, + state.req_protocol_names.clone(), Metrics::try_register(&dependencies.registry).unwrap(), ); @@ -261,7 +307,6 @@ pub fn prepare_test( } pub async fn benchmark_availability_read( - benchmark_name: &str, env: &mut TestEnvironment, state: &TestState, ) -> BenchmarkUsage { @@ -295,6 +340,7 @@ pub async fn benchmark_availability_read( Some(GroupIndex( candidate_num as u32 % (std::cmp::max(5, config.n_cores) / 5) as u32, )), + Some(*state.candidate_hash_to_core_index.get(&candidate.hash()).unwrap()), tx, ), ); @@ -326,11 +372,10 @@ pub async fn benchmark_availability_read( ); env.stop().await; - env.collect_resource_usage(benchmark_name, &["availability-recovery"]) + env.collect_resource_usage(&["availability-recovery"]) } pub async fn benchmark_availability_write( - benchmark_name: &str, env: &mut TestEnvironment, state: &TestState, ) -> BenchmarkUsage { @@ -340,7 +385,7 @@ pub async fn benchmark_availability_write( env.metrics().set_n_cores(config.n_cores); gum::info!(target: LOG_TARGET, "Seeding availability store with candidates ..."); - for backed_candidate in state.backed_candidates.clone() { + for (core_index, backed_candidate) in state.backed_candidates.clone().into_iter().enumerate() { let candidate_index = *state.candidate_hashes.get(&backed_candidate.hash()).unwrap(); let available_data = state.available_data[candidate_index].clone(); let (tx, rx) = oneshot::channel(); @@ -351,6 +396,8 @@ pub async fn benchmark_availability_write( available_data, expected_erasure_root: backed_candidate.descriptor().erasure_root, tx, + core_index: CoreIndex(core_index as u32), + node_features: node_features_with_chunk_mapping_enabled(), }, )) .await; @@ -459,8 +506,9 @@ pub async fn benchmark_availability_write( ); env.stop().await; - env.collect_resource_usage( - benchmark_name, - &["availability-distribution", "bitfield-distribution", "availability-store"], - ) + env.collect_resource_usage(&[ + "availability-distribution", + "bitfield-distribution", + "availability-store", + ]) } diff --git a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs index c328ffedf916e1ae9ce7fcd05e25750e01dc7506..5d443734bb387fcdbdce68e03839b314a10106d0 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/test_state.rs @@ -14,22 +14,28 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use crate::configuration::{TestAuthorities, TestConfiguration}; +use crate::{ + configuration::{TestAuthorities, TestConfiguration}, + environment::GENESIS_HASH, + mock::runtime_api::node_features_with_chunk_mapping_enabled, +}; use bitvec::bitvec; use colored::Colorize; use itertools::Itertools; use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ - request_response::v1::ChunkFetchingRequest, Versioned, VersionedValidationProtocol, + request_response::{v2::ChunkFetchingRequest, ReqProtocolNames}, + Versioned, VersionedValidationProtocol, }; use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV}; use polkadot_node_subsystem_test_helpers::{ derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info, }; +use polkadot_node_subsystem_util::availability_chunks::availability_chunk_indices; use polkadot_overseer::BlockInfo; use polkadot_primitives::{ - AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, Hash, HeadData, Header, - PersistedValidationData, Signed, SigningContext, ValidatorIndex, + AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceipt, ChunkIndex, CoreIndex, + Hash, HeadData, Header, PersistedValidationData, Signed, SigningContext, ValidatorIndex, }; use polkadot_primitives_test_helpers::{dummy_candidate_receipt, dummy_hash}; use sp_core::H256; @@ -49,14 +55,20 @@ pub struct TestState { pub pov_size_to_candidate: HashMap, // Map from generated candidate hashes to candidate index in `available_data` and `chunks`. pub candidate_hashes: HashMap, + // Map from candidate hash to occupied core index. + pub candidate_hash_to_core_index: HashMap, // Per candidate index receipts. pub candidate_receipt_templates: Vec, // Per candidate index `AvailableData` pub available_data: Vec, - // Per candiadte index chunks pub chunks: Vec>, + // Per candidate index chunks pub chunks: Vec>, + // Per-core ValidatorIndex -> ChunkIndex mapping + pub chunk_indices: Vec>, // Per relay chain block - candidate backed by our backing group pub backed_candidates: Vec, + // Request protocol names + pub req_protocol_names: ReqProtocolNames, // Relay chain block infos pub block_infos: Vec, // Chunk fetching requests for backed candidates
for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() { gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue()); @@ -167,6 +193,11 @@ impl TestState { // Store the new candidate in the state test_state.candidate_hashes.insert(candidate_receipt.hash(), candidate_index); + let core_index = (index % config.n_cores) as u32; + test_state + .candidate_hash_to_core_index + .insert(candidate_receipt.hash(), core_index.into()); + gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate"); candidate_receipt diff --git a/polkadot/node/subsystem-bench/src/lib/configuration.rs b/polkadot/node/subsystem-bench/src/lib/configuration.rs index 1e0efb72a7dff163898bcf6974ce9bc048bfa086..f614a5e552a803f0a6e2d2475efc5a824b64e9a2 100644 --- a/polkadot/node/subsystem-bench/src/lib/configuration.rs +++ b/polkadot/node/subsystem-bench/src/lib/configuration.rs @@ -18,12 +18,13 @@ use crate::keyring::Keyring; use itertools::Itertools; -use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId}; +use polkadot_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId, ValidatorPair}; use rand::thread_rng; use rand_distr::{Distribution, Normal, Uniform}; use sc_network_types::PeerId; use serde::{Deserialize, Serialize}; use sp_consensus_babe::AuthorityId; +use sp_core::Pair; use std::collections::HashMap; /// Peer networking latency configuration. @@ -89,6 +90,15 @@ fn default_n_delay_tranches() -> usize { fn default_no_show_slots() -> usize { 3 } +fn default_minimum_backing_votes() -> u32 { + 2 +} +fn default_max_candidate_depth() -> u32 { + 3 +} +fn default_allowed_ancestry_len() -> u32 { + 2 +} /// The test input parameters #[derive(Clone, Debug, Serialize, Deserialize)] @@ -137,6 +147,15 @@ pub struct TestConfiguration { pub connectivity: usize, /// Number of blocks to run the test for pub num_blocks: usize, + /// Number of minimum backing votes + #[serde(default = "default_minimum_backing_votes")] + pub minimum_backing_votes: u32, + /// Async Backing max_candidate_depth + #[serde(default = "default_max_candidate_depth")] + pub max_candidate_depth: u32, + /// Async Backing allowed_ancestry_len + #[serde(default = "default_allowed_ancestry_len")] + pub allowed_ancestry_len: u32, } impl Default for TestConfiguration { @@ -158,6 +177,9 @@ impl Default for TestConfiguration { latency: default_peer_latency(), connectivity: default_connectivity(), num_blocks: Default::default(), + minimum_backing_votes: default_minimum_backing_votes(), + max_candidate_depth: default_max_candidate_depth(), + allowed_ancestry_len: default_allowed_ancestry_len(), } } } @@ -208,6 +230,11 @@ impl TestConfiguration { .map(|(peer_id, authority_id)| (*peer_id, authority_id.clone())) .collect(); + let validator_pairs = key_seeds + .iter() + .map(|seed| ValidatorPair::from_string_with_seed(seed, None).unwrap().0) + .collect(); + TestAuthorities { keyring, validator_public, @@ -217,6 +244,7 @@ impl TestConfiguration { validator_assignment_id, key_seeds, peer_id_to_authority, + validator_pairs, } } } @@ -246,6 +274,7 @@ pub struct TestAuthorities { pub key_seeds: Vec, pub peer_ids: Vec, pub peer_id_to_authority: HashMap, + pub validator_pairs: Vec, } /// Sample latency (in milliseconds) from a normal distribution with parameters diff --git a/polkadot/node/subsystem-bench/src/lib/environment.rs b/polkadot/node/subsystem-bench/src/lib/environment.rs index 
42955d0302232f35e387a044e34a0c7d665512e8..a63f90da50b3aabcacca38874d723e7166173842 100644 --- a/polkadot/node/subsystem-bench/src/lib/environment.rs +++ b/polkadot/node/subsystem-bench/src/lib/environment.rs @@ -351,13 +351,8 @@ impl TestEnvironment { } } - pub fn collect_resource_usage( - &self, - benchmark_name: &str, - subsystems_under_test: &[&str], - ) -> BenchmarkUsage { + pub fn collect_resource_usage(&self, subsystems_under_test: &[&str]) -> BenchmarkUsage { BenchmarkUsage { - benchmark_name: benchmark_name.to_string(), network_usage: self.network_usage(), cpu_usage: self.cpu_usage(subsystems_under_test), } diff --git a/polkadot/node/subsystem-bench/src/lib/lib.rs b/polkadot/node/subsystem-bench/src/lib/lib.rs index ef2724abc98920c79d8dd9d94f97bed32b0ab8e2..e18227af8be349debc38043529bd1649c67e1c56 100644 --- a/polkadot/node/subsystem-bench/src/lib/lib.rs +++ b/polkadot/node/subsystem-bench/src/lib/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -// The validator index that represent the node that is under test. +// The validator index that represents the node that is under test. pub const NODE_UNDER_TEST: u32 = 0; pub mod approval; @@ -25,5 +25,6 @@ pub(crate) mod environment; pub(crate) mod keyring; pub(crate) mod mock; pub(crate) mod network; +pub mod statement; pub mod usage; pub mod utils; diff --git a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs index fba33523be85dbfd241bc1fcf42d8f8069ebf95c..14ec4ccb4c32a1d0ffd3422bc61c5a87ac962dab 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/av_store.rs @@ -20,7 +20,7 @@ use crate::network::{HandleNetworkMessage, NetworkMessage}; use futures::{channel::oneshot, FutureExt}; use parity_scale_codec::Encode; use polkadot_node_network_protocol::request_response::{ - v1::{AvailableDataFetchingResponse, ChunkFetchingResponse, ChunkResponse}, + v1::AvailableDataFetchingResponse, v2::ChunkFetchingResponse, Protocol, ReqProtocolNames, Requests, }; use polkadot_node_primitives::{AvailableData, ErasureChunk}; @@ -28,13 +28,14 @@ use polkadot_node_subsystem::{ messages::AvailabilityStoreMessage, overseer, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_types::OverseerSignal; -use polkadot_primitives::CandidateHash; -use sc_network::ProtocolName; +use polkadot_primitives::{CandidateHash, ChunkIndex, CoreIndex, ValidatorIndex}; use std::collections::HashMap; pub struct AvailabilityStoreState { candidate_hashes: HashMap, chunks: Vec>, + chunk_indices: Vec>, + candidate_hash_to_core_index: HashMap, } const LOG_TARGET: &str = "subsystem-bench::av-store-mock"; @@ -43,21 +44,25 @@ const LOG_TARGET: &str = "subsystem-bench::av-store-mock"; /// used in a test. #[derive(Clone)] pub struct NetworkAvailabilityState { + pub req_protocol_names: ReqProtocolNames, pub candidate_hashes: HashMap, pub available_data: Vec, pub chunks: Vec>, + pub chunk_indices: Vec>, + pub candidate_hash_to_core_index: HashMap, } // Implement access to the state. 
+#[async_trait::async_trait] impl HandleNetworkMessage for NetworkAvailabilityState { - fn handle( + async fn handle( &self, message: NetworkMessage, _node_sender: &mut futures::channel::mpsc::UnboundedSender, ) -> Option { match message { NetworkMessage::RequestFromNode(peer, request) => match request { - Requests::ChunkFetchingV1(outgoing_request) => { + Requests::ChunkFetching(outgoing_request) => { gum::debug!(target: LOG_TARGET, request = ?outgoing_request, "Received `RequestFromNode`"); let validator_index: usize = outgoing_request.payload.index.0 as usize; let candidate_hash = outgoing_request.payload.candidate_hash; @@ -68,11 +73,22 @@ impl HandleNetworkMessage for NetworkAvailabilityState { .expect("candidate was generated previously; qed"); gum::warn!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); - let chunk: ChunkResponse = - self.chunks.get(*candidate_index).unwrap()[validator_index].clone().into(); + let candidate_chunks = self.chunks.get(*candidate_index).unwrap(); + let chunk_indices = self + .chunk_indices + .get( + self.candidate_hash_to_core_index.get(&candidate_hash).unwrap().0 + as usize, + ) + .unwrap(); + + let chunk = candidate_chunks + .get(chunk_indices.get(validator_index).unwrap().0 as usize) + .unwrap(); + let response = Ok(( - ChunkFetchingResponse::from(Some(chunk)).encode(), - ProtocolName::Static("dummy"), + ChunkFetchingResponse::from(Some(chunk.clone())).encode(), + self.req_protocol_names.get_name(Protocol::ChunkFetchingV2), )); if let Err(err) = outgoing_request.pending_response.send(response) { @@ -93,7 +109,7 @@ impl HandleNetworkMessage for NetworkAvailabilityState { let response = Ok(( AvailableDataFetchingResponse::from(Some(available_data)).encode(), - ProtocolName::Static("dummy"), + self.req_protocol_names.get_name(Protocol::AvailableDataFetchingV1), )); outgoing_request .pending_response @@ -118,16 +134,25 @@ pub struct MockAvailabilityStore { impl MockAvailabilityStore { pub fn new( chunks: Vec>, + chunk_indices: Vec>, candidate_hashes: HashMap, + candidate_hash_to_core_index: HashMap, ) -> MockAvailabilityStore { - Self { state: AvailabilityStoreState { chunks, candidate_hashes } } + Self { + state: AvailabilityStoreState { + chunks, + candidate_hashes, + chunk_indices, + candidate_hash_to_core_index, + }, + } } async fn respond_to_query_all_request( &self, candidate_hash: CandidateHash, - send_chunk: impl Fn(usize) -> bool, - tx: oneshot::Sender>, + send_chunk: impl Fn(ValidatorIndex) -> bool, + tx: oneshot::Sender>, ) { let candidate_index = self .state @@ -136,15 +161,27 @@ impl MockAvailabilityStore { .expect("candidate was generated previously; qed"); gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); - let v = self - .state - .chunks - .get(*candidate_index) - .unwrap() - .iter() - .filter(|c| send_chunk(c.index.0 as usize)) - .cloned() - .collect(); + let n_validators = self.state.chunks[0].len(); + let candidate_chunks = self.state.chunks.get(*candidate_index).unwrap(); + let core_index = self.state.candidate_hash_to_core_index.get(&candidate_hash).unwrap(); + // We'll likely only send our chunk, so use capacity 1. 
+ let mut v = Vec::with_capacity(1); + + for validator_index in 0..n_validators { + if !send_chunk(ValidatorIndex(validator_index as u32)) { + continue; + } + let chunk_index = self + .state + .chunk_indices + .get(core_index.0 as usize) + .unwrap() + .get(validator_index) + .unwrap(); + + let chunk = candidate_chunks.get(chunk_index.0 as usize).unwrap().clone(); + v.push((ValidatorIndex(validator_index as u32), chunk.clone())); + } let _ = tx.send(v); } @@ -181,8 +218,12 @@ impl MockAvailabilityStore { AvailabilityStoreMessage::QueryAllChunks(candidate_hash, tx) => { // We always have our own chunk. gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryAllChunks"); - self.respond_to_query_all_request(candidate_hash, |index| index == 0, tx) - .await; + self.respond_to_query_all_request( + candidate_hash, + |index| index == 0.into(), + tx, + ) + .await; }, AvailabilityStoreMessage::QueryChunkSize(candidate_hash, tx) => { gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryChunkSize"); @@ -194,12 +235,29 @@ impl MockAvailabilityStore { .expect("candidate was generated previously; qed"); gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index"); - let chunk_size = - self.state.chunks.get(*candidate_index).unwrap()[0].encoded_size(); + let chunk_size = self + .state + .chunks + .get(*candidate_index) + .unwrap() + .first() + .unwrap() + .encoded_size(); let _ = tx.send(Some(chunk_size)); }, - AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk, tx } => { - gum::debug!(target: LOG_TARGET, chunk_index = ?chunk.index ,candidate_hash = ?candidate_hash, "Responding to StoreChunk"); + AvailabilityStoreMessage::StoreChunk { + candidate_hash, + chunk, + tx, + validator_index, + } => { + gum::debug!( + target: LOG_TARGET, + chunk_index = ?chunk.index, + validator_index = ?validator_index, + candidate_hash = ?candidate_hash, + "Responding to StoreChunk" + ); let _ = tx.send(Ok(())); }, _ => { diff --git a/polkadot/node/subsystem-bench/src/lib/mock/candidate_backing.rs b/polkadot/node/subsystem-bench/src/lib/mock/candidate_backing.rs new file mode 100644 index 0000000000000000000000000000000000000000..51494016e185e5e04f00c264eb341bf25d8991dd --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/mock/candidate_backing.rs @@ -0,0 +1,171 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A generic candidate backing subsystem mockup suitable to be used in benchmarks. 
+ +use crate::{configuration::TestConfiguration, NODE_UNDER_TEST}; +use futures::FutureExt; +use polkadot_node_primitives::{SignedFullStatementWithPVD, Statement, StatementWithPVD}; +use polkadot_node_subsystem::{ + messages::CandidateBackingMessage, overseer, SpawnedSubsystem, SubsystemError, +}; +use polkadot_node_subsystem_types::OverseerSignal; +use polkadot_primitives::{ + CandidateHash, Hash, PersistedValidationData, SigningContext, ValidatorIndex, ValidatorPair, +}; +use sp_core::Pair; +use std::collections::HashMap; + +const LOG_TARGET: &str = "subsystem-bench::candidate-backing-mock"; + +struct MockCandidateBackingState { + pair: ValidatorPair, + pvd: PersistedValidationData, + own_backing_group: Vec, +} + +pub struct MockCandidateBacking { + config: TestConfiguration, + state: MockCandidateBackingState, +} + +impl MockCandidateBacking { + pub fn new( + config: TestConfiguration, + pair: ValidatorPair, + pvd: PersistedValidationData, + own_backing_group: Vec, + ) -> Self { + Self { config, state: MockCandidateBackingState { pair, pvd, own_backing_group } } + } + + fn handle_statement( + &self, + relay_parent: Hash, + statement: SignedFullStatementWithPVD, + statements_tracker: &mut HashMap, + ) -> Vec { + let mut messages = vec![]; + let validator_id = statement.validator_index(); + let is_own_backing_group = self.state.own_backing_group.contains(&validator_id); + + match statement.payload() { + StatementWithPVD::Seconded(receipt, _pvd) => { + let candidate_hash = receipt.hash(); + statements_tracker + .entry(candidate_hash) + .and_modify(|v| { + *v += 1; + }) + .or_insert(1); + + let statements_received_count = *statements_tracker.get(&candidate_hash).unwrap(); + if statements_received_count == (self.config.minimum_backing_votes - 1) && + is_own_backing_group + { + let statement = Statement::Valid(candidate_hash); + let context = SigningContext { parent_hash: relay_parent, session_index: 0 }; + let payload = statement.to_compact().signing_payload(&context); + let message = + polkadot_node_subsystem::messages::StatementDistributionMessage::Share( + relay_parent, + SignedFullStatementWithPVD::new( + statement.supply_pvd(self.state.pvd.clone()), + ValidatorIndex(NODE_UNDER_TEST), + self.state.pair.sign(&payload[..]), + &context, + &self.state.pair.public(), + ) + .unwrap(), + ); + messages.push(message); + } + + if statements_received_count == self.config.minimum_backing_votes { + let message = + polkadot_node_subsystem::messages::StatementDistributionMessage::Backed( + candidate_hash, + ); + messages.push(message); + } + }, + StatementWithPVD::Valid(candidate_hash) => { + statements_tracker + .entry(*candidate_hash) + .and_modify(|v| { + *v += 1; + }) + .or_insert(1); + + let statements_received_count = *statements_tracker.get(candidate_hash).unwrap(); + if statements_received_count == self.config.minimum_backing_votes { + let message = + polkadot_node_subsystem::messages::StatementDistributionMessage::Backed( + *candidate_hash, + ); + messages.push(message); + } + }, + } + + messages + } +} + +#[overseer::subsystem(CandidateBacking, error=SubsystemError, prefix=self::overseer)] +impl MockCandidateBacking { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = self.run(ctx).map(|_| Ok(())).boxed(); + + SpawnedSubsystem { name: "test-environment", future } + } +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +impl MockCandidateBacking { + async fn run(self, mut ctx: Context) { + let mut statements_tracker: HashMap = Default::default(); + + 
loop { + let msg = ctx.recv().await.expect("Overseer never fails us"); + match msg { + orchestra::FromOrchestra::Signal(signal) => + if signal == OverseerSignal::Conclude { + return + }, + orchestra::FromOrchestra::Communication { msg } => { + gum::trace!(target: LOG_TARGET, msg=?msg, "recv message"); + + match msg { + CandidateBackingMessage::Statement(relay_parent, statement) => { + let messages = self.handle_statement( + relay_parent, + statement, + &mut statements_tracker, + ); + for message in messages { + ctx.send_message(message).await; + } + }, + _ => { + unimplemented!("Unexpected candidate-backing message") + }, + } + }, + } + } + } +} diff --git a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs index 6dda9a47d398f3e6e952cd054bce9769e8942e70..12766374bfa9f49ad67699173cdda512623c4b8b 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs @@ -19,9 +19,11 @@ use polkadot_node_subsystem_types::Hash; use sp_consensus::SyncOracle; pub mod av_store; +pub mod candidate_backing; pub mod chain_api; pub mod dummy; pub mod network_bridge; +pub mod prospective_parachains; pub mod runtime_api; pub struct AlwaysSupportsParachains {} diff --git a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs index ec66ad4e279c217f7510d4c09def128f41fbffe5..d70953926d130eabc7be096cab71ff59d6e91bc8 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/network_bridge.rs @@ -27,14 +27,19 @@ use polkadot_node_subsystem::{ messages::NetworkBridgeTxMessage, overseer, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_types::{ - messages::{ApprovalDistributionMessage, BitfieldDistributionMessage, NetworkBridgeEvent}, + messages::{ + ApprovalDistributionMessage, BitfieldDistributionMessage, NetworkBridgeEvent, + StatementDistributionMessage, + }, OverseerSignal, }; use sc_network::{request_responses::ProtocolConfig, RequestFailure}; const LOG_TARGET: &str = "subsystem-bench::network-bridge"; -const CHUNK_REQ_PROTOCOL_NAME_V1: &str = - "/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_chunk/1"; +const ALLOWED_PROTOCOLS: &[&str] = &[ + "/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_chunk/2", + "/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_attested_candidate/2", +]; /// A mock of the network bridge tx subsystem. pub struct MockNetworkBridgeTx { @@ -106,8 +111,15 @@ impl MockNetworkBridgeTx { NetworkBridgeTxMessage::SendRequests(requests, _if_disconnected) => { for request in requests { gum::debug!(target: LOG_TARGET, request = ?request, "Processing request"); - let peer_id = - request.authority_id().expect("all nodes are authorities").clone(); + let peer_id = match request.authority_id() { + Some(v) => v.clone(), + None => self + .test_authorities + .peer_id_to_authority + .get(request.peer_id().expect("Should exist")) + .expect("Should exist") + .clone(), + }; if !self.network.is_peer_connected(&peer_id) { // Attempting to send a request to a disconnected peer. 
@@ -141,7 +153,23 @@ impl MockNetworkBridgeTx { .expect("Should not fail"); } }, - _ => unimplemented!("Unexpected network bridge message"), + NetworkBridgeTxMessage::SendValidationMessages(messages) => { + for (peers, message) in messages { + for peer in peers { + self.to_network_interface + .unbounded_send(NetworkMessage::MessageFromNode( + self.test_authorities + .peer_id_to_authority + .get(&peer) + .unwrap() + .clone(), + message.clone(), + )) + .expect("Should not fail"); + } + } + }, + message => unimplemented!("Unexpected network bridge message {:?}", message), }, } } @@ -175,13 +203,20 @@ impl MockNetworkBridgeRx { ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) ).await; } + Versioned::V3( + polkadot_node_network_protocol::v3::ValidationProtocol::StatementDistribution(msg) + ) => { + ctx.send_message( + StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, polkadot_node_network_protocol::Versioned::V3(msg))) + ).await; + } _ => { unimplemented!("We only talk v2 network protocol") }, }, NetworkMessage::RequestFromPeer(request) => { if let Some(protocol) = self.chunk_request_sender.as_mut() { - assert_eq!(&*protocol.name, CHUNK_REQ_PROTOCOL_NAME_V1); + assert!(ALLOWED_PROTOCOLS.contains(&&*protocol.name)); if let Some(inbound_queue) = protocol.inbound_queue.as_ref() { inbound_queue .send(request) diff --git a/polkadot/node/subsystem-bench/src/lib/mock/prospective_parachains.rs b/polkadot/node/subsystem-bench/src/lib/mock/prospective_parachains.rs new file mode 100644 index 0000000000000000000000000000000000000000..8a865af21a073aefe55b85b37e7ccd091e8a2f0f --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/mock/prospective_parachains.rs @@ -0,0 +1,74 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A generic prospective parachains subsystem mockup suitable to be used in benchmarks. 
+ +use futures::FutureExt; +use polkadot_node_subsystem::{ + messages::ProspectiveParachainsMessage, overseer, SpawnedSubsystem, SubsystemError, +}; +use polkadot_node_subsystem_types::OverseerSignal; +use polkadot_primitives::Hash; + +pub struct MockProspectiveParachains {} + +impl MockProspectiveParachains { + pub fn new() -> Self { + Self {} + } +} + +#[overseer::subsystem(ProspectiveParachains, error=SubsystemError, prefix=self::overseer)] +impl MockProspectiveParachains { + fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = self.run(ctx).map(|_| Ok(())).boxed(); + + SpawnedSubsystem { name: "test-environment", future } + } +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +impl MockProspectiveParachains { + async fn run(self, mut ctx: Context) { + loop { + let msg = ctx.recv().await.expect("Overseer never fails us"); + match msg { + orchestra::FromOrchestra::Signal(signal) => + if signal == OverseerSignal::Conclude { + return + }, + orchestra::FromOrchestra::Communication { msg } => match msg { + ProspectiveParachainsMessage::GetMinimumRelayParents(_relay_parent, tx) => { + tx.send(vec![]).unwrap(); + }, + ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx) => { + tx.send( + req.candidates + .iter() + .cloned() + .map(|candidate| (candidate, vec![Hash::repeat_byte(0)])) + .collect(), + ) + .unwrap(); + }, + _ => { + unimplemented!("Unexpected prospective-parachains message") + }, + }, + } + } + } +} diff --git a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs index b73d61321cd3b274915dda37233c1945240c2b11..be9dbd55cb6f9898a879e7bbaa6c5f5b9c303484 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/runtime_api.rs @@ -26,8 +26,9 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::OverseerSignal; use polkadot_primitives::{ - CandidateEvent, CandidateReceipt, CoreState, GroupIndex, IndexedVec, NodeFeatures, - OccupiedCore, SessionIndex, SessionInfo, ValidatorIndex, + node_features, AsyncBackingParams, CandidateEvent, CandidateReceipt, CoreState, GroupIndex, + GroupRotationInfo, IndexedVec, NodeFeatures, OccupiedCore, ScheduledCore, SessionIndex, + SessionInfo, ValidatorIndex, }; use sp_consensus_babe::Epoch as BabeEpoch; use sp_core::H256; @@ -40,6 +41,8 @@ const LOG_TARGET: &str = "subsystem-bench::runtime-api-mock"; pub struct RuntimeApiState { // All authorities in the test, authorities: TestAuthorities, + // Node features state in the runtime + node_features: NodeFeatures, // Candidate hashes per block candidate_hashes: HashMap>, // Included candidates per block @@ -49,11 +52,20 @@ pub struct RuntimeApiState { session_index: SessionIndex, } +#[derive(Clone)] +pub enum MockRuntimeApiCoreState { + Occupied, + Scheduled, + #[allow(dead_code)] + Free, +} + /// A mocked `runtime-api` subsystem. #[derive(Clone)] pub struct MockRuntimeApi { state: RuntimeApiState, config: TestConfiguration, + core_state: MockRuntimeApiCoreState, } impl MockRuntimeApi { @@ -64,7 +76,11 @@ impl MockRuntimeApi { included_candidates: HashMap>, babe_epoch: Option, session_index: SessionIndex, + core_state: MockRuntimeApiCoreState, ) -> MockRuntimeApi { + // Enable chunk mapping feature to make systematic av-recovery possible.
+ let node_features = node_features_with_chunk_mapping_enabled(); + Self { state: RuntimeApiState { authorities, @@ -72,8 +88,10 @@ impl MockRuntimeApi { included_candidates, babe_epoch, session_index, + node_features, }, config, + core_state, } } @@ -156,15 +174,15 @@ impl MockRuntimeApi { }, RuntimeApiMessage::Request( _block_hash, - RuntimeApiRequest::SessionExecutorParams(_session_index, sender), + RuntimeApiRequest::NodeFeatures(_session_index, sender), ) => { - let _ = sender.send(Ok(Some(Default::default()))); + let _ = sender.send(Ok(self.state.node_features.clone())); }, RuntimeApiMessage::Request( - _request, - RuntimeApiRequest::NodeFeatures(_session_index, sender), + _block_hash, + RuntimeApiRequest::SessionExecutorParams(_session_index, sender), ) => { - let _ = sender.send(Ok(NodeFeatures::EMPTY)); + let _ = sender.send(Ok(Some(Default::default()))); }, RuntimeApiMessage::Request( _block_hash, @@ -198,16 +216,26 @@ impl MockRuntimeApi { // Ensure test breaks if badly configured. assert!(index < validator_group_count); - CoreState::Occupied(OccupiedCore { - next_up_on_available: None, - occupied_since: 0, - time_out_at: 0, - next_up_on_time_out: None, - availability: BitVec::default(), - group_responsible: GroupIndex(index as u32), - candidate_hash: candidate_receipt.hash(), - candidate_descriptor: candidate_receipt.descriptor.clone(), - }) + use MockRuntimeApiCoreState::*; + match self.core_state { + Occupied => CoreState::Occupied(OccupiedCore { + next_up_on_available: None, + occupied_since: 0, + time_out_at: 0, + next_up_on_time_out: None, + availability: BitVec::default(), + group_responsible: GroupIndex(index as u32), + candidate_hash: candidate_receipt.hash(), + candidate_descriptor: candidate_receipt + .descriptor + .clone(), + }), + Scheduled => CoreState::Scheduled(ScheduledCore { + para_id: (index + 1).into(), + collator: None, + }), + Free => todo!(), + } }) .collect::>(); @@ -223,6 +251,43 @@ impl MockRuntimeApi { .clone() .expect("Babe epoch unpopulated"))); }, + RuntimeApiMessage::Request( + _block_hash, + RuntimeApiRequest::AsyncBackingParams(sender), + ) => { + let _ = sender.send(Ok(AsyncBackingParams { + max_candidate_depth: self.config.max_candidate_depth, + allowed_ancestry_len: self.config.allowed_ancestry_len, + })); + }, + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::Version(tx)) => { + tx.send(Ok(RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT)) + .unwrap(); + }, + RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::DisabledValidators(tx), + ) => { + tx.send(Ok(vec![])).unwrap(); + }, + RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::MinimumBackingVotes(_session_index, tx), + ) => { + tx.send(Ok(self.config.minimum_backing_votes)).unwrap(); + }, + RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidatorGroups(tx), + ) => { + let groups = self.session_info().validator_groups.to_vec(); + let group_rotation_info = GroupRotationInfo { + session_start_block: 1, + group_rotation_frequency: 12, + now: 1, + }; + tx.send(Ok((groups, group_rotation_info))).unwrap(); + }, // Long term TODO: implement more as needed. 
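// Note on the `Version` arm above: replying with
// `RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT` makes the mock advertise
// exactly the runtime API version at which the `DisabledValidators` request became
// available, so the subsystem under test exercises its disabled-validators code path.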
message => { unimplemented!("Unexpected runtime-api message: {:?}", message) @@ -233,3 +298,10 @@ impl MockRuntimeApi { } } } + +pub fn node_features_with_chunk_mapping_enabled() -> NodeFeatures { + let mut node_features = NodeFeatures::new(); + node_features.resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false); + node_features.set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true); + node_features +} diff --git a/polkadot/node/subsystem-bench/src/lib/network.rs b/polkadot/node/subsystem-bench/src/lib/network.rs index 9bf2415e5a864a9c4512350b9387b6c172b37114..775f881eaad84bc352e098487f720ddd390decb5 100644 --- a/polkadot/node/subsystem-bench/src/lib/network.rs +++ b/polkadot/node/subsystem-bench/src/lib/network.rs @@ -51,13 +51,14 @@ use futures::{ }; use itertools::Itertools; use net_protocol::{ - peer_set::{ProtocolVersion, ValidationVersion}, + peer_set::ValidationVersion, request_response::{Recipient, Requests, ResponseSender}, - ObservedRole, VersionedValidationProtocol, + ObservedRole, VersionedValidationProtocol, View, }; use parity_scale_codec::Encode; use polkadot_node_network_protocol::{self as net_protocol, Versioned}; -use polkadot_node_subsystem_types::messages::{ApprovalDistributionMessage, NetworkBridgeEvent}; +use polkadot_node_subsystem::messages::StatementDistributionMessage; +use polkadot_node_subsystem_types::messages::NetworkBridgeEvent; use polkadot_node_subsystem_util::metrics::prometheus::{ self, CounterVec, Opts, PrometheusError, Registry, }; @@ -437,6 +438,7 @@ pub struct EmulatedPeerHandle { /// Send actions to be performed by the peer. actions_tx: UnboundedSender, peer_id: PeerId, + authority_id: AuthorityDiscoveryId, } impl EmulatedPeerHandle { @@ -496,29 +498,31 @@ impl EmulatedPeer { } /// Interceptor pattern for handling messages. +#[async_trait::async_trait] pub trait HandleNetworkMessage { /// Returns `None` if the message was handled, or the `message` /// otherwise. /// /// `node_sender` allows sending of messages to the node in response /// to the handled message. - fn handle( + async fn handle( &self, message: NetworkMessage, node_sender: &mut UnboundedSender, ) -> Option; } +#[async_trait::async_trait] impl HandleNetworkMessage for Arc where - T: HandleNetworkMessage, + T: HandleNetworkMessage + Sync + Send, { - fn handle( + async fn handle( &self, message: NetworkMessage, node_sender: &mut UnboundedSender, ) -> Option { - self.as_ref().handle(message, node_sender) + T::handle(self, message, node_sender).await } } @@ -551,7 +555,7 @@ async fn emulated_peer_loop( for handler in handlers.iter() { // The check below guarantees that message is always `Some`: we are still // inside the loop. - message = handler.handle(message.unwrap(), &mut to_network_interface); + message = handler.handle(message.unwrap(), &mut to_network_interface).await; if message.is_none() { break } @@ -613,6 +617,7 @@ async fn emulated_peer_loop( } /// Creates a new peer emulator task and returns a handle to it. 
+#[allow(clippy::too_many_arguments)] pub fn new_peer( bandwidth: usize, spawn_task_handle: SpawnTaskHandle, @@ -621,6 +626,7 @@ pub fn new_peer( to_network_interface: UnboundedSender, latency_ms: usize, peer_id: PeerId, + authority_id: AuthorityDiscoveryId, ) -> EmulatedPeerHandle { let (messages_tx, messages_rx) = mpsc::unbounded::(); let (actions_tx, actions_rx) = mpsc::unbounded::(); @@ -649,7 +655,7 @@ pub fn new_peer( .boxed(), ); - EmulatedPeerHandle { messages_tx, actions_tx, peer_id } + EmulatedPeerHandle { messages_tx, actions_tx, peer_id, authority_id } } /// Book keeping of sent and received bytes. @@ -714,6 +720,18 @@ impl Peer { Peer::Disconnected(ref emulator) => emulator, } } + + pub fn authority_id(&self) -> AuthorityDiscoveryId { + match self { + Peer::Connected(handle) | Peer::Disconnected(handle) => handle.authority_id.clone(), + } + } + + pub fn peer_id(&self) -> PeerId { + match self { + Peer::Connected(handle) | Peer::Disconnected(handle) => handle.peer_id, + } + } } /// A ha emulated network implementation. @@ -728,21 +746,34 @@ pub struct NetworkEmulatorHandle { } impl NetworkEmulatorHandle { + pub fn generate_statement_distribution_peer_view_change(&self, view: View) -> Vec { + self.peers + .iter() + .filter(|peer| peer.is_connected()) + .map(|peer| { + AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerViewChange(peer.peer_id(), view.clone()), + ), + ) + }) + .collect_vec() + } + /// Generates peer_connected messages for all peers in `test_authorities` - pub fn generate_peer_connected(&self) -> Vec { + pub fn generate_peer_connected(&self, mapper: F) -> Vec + where + F: Fn(NetworkBridgeEvent) -> AllMessages, + { self.peers .iter() .filter(|peer| peer.is_connected()) .map(|peer| { - let network = NetworkBridgeEvent::PeerConnected( + mapper(NetworkBridgeEvent::PeerConnected( peer.handle().peer_id, - ObservedRole::Full, - ProtocolVersion::from(ValidationVersion::V3), - None, - ); - - AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate( - network, + ObservedRole::Authority, + ValidationVersion::V3.into(), + Some(vec![peer.authority_id()].into_iter().collect()), )) }) .collect_vec() @@ -772,7 +803,7 @@ pub fn new_network( let (stats, mut peers): (_, Vec<_>) = (0..n_peers) .zip(authorities.validator_authority_id.clone()) .map(|(peer_index, authority_id)| { - validator_authority_id_mapping.insert(authority_id, peer_index); + validator_authority_id_mapping.insert(authority_id.clone(), peer_index); let stats = Arc::new(PeerEmulatorStats::new(peer_index, metrics.clone())); ( stats.clone(), @@ -784,6 +815,7 @@ pub fn new_network( to_network_interface.clone(), random_latency(config.latency.as_ref()), *authorities.peer_ids.get(peer_index).unwrap(), + authority_id, )), ) }) @@ -971,6 +1003,8 @@ impl Metrics { pub trait RequestExt { /// Get the authority id if any from the request. fn authority_id(&self) -> Option<&AuthorityDiscoveryId>; + /// Get the peer id if any from the request. + fn peer_id(&self) -> Option<&PeerId>; /// Consume self and return the response sender. fn into_response_sender(self) -> ResponseSender; /// Allows to change the `ResponseSender` in place. 
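// A minimal sketch of the interception pattern that `swap_response_sender` enables (the
// function below is illustrative, not part of the emulator): the network emulator swaps
// the request's response sender for its own channel, so it can observe or rewrite the
// response before answering the node.
fn intercept_response_channel(mut request: Requests) -> (Requests, ResponseSender) {
    // The new sender is handed to the node-facing side; its receiver stays with us.
    let (interceptor_tx, _interceptor_rx) = futures::channel::oneshot::channel();
    // Take ownership of the sender the node is actually waiting on.
    let original_sender = request.swap_response_sender(interceptor_tx);
    (request, original_sender)
}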
@@ -982,7 +1016,7 @@ pub trait RequestExt { impl RequestExt for Requests { fn authority_id(&self) -> Option<&AuthorityDiscoveryId> { match self { - Requests::ChunkFetchingV1(request) => { + Requests::ChunkFetching(request) => { if let Recipient::Authority(authority_id) = &request.peer { Some(authority_id) } else { @@ -996,15 +1030,29 @@ impl RequestExt for Requests { None } }, + // Requested by PeerId + Requests::AttestedCandidateV2(_) => None, request => { unimplemented!("RequestAuthority not implemented for {:?}", request) }, } } + fn peer_id(&self) -> Option<&PeerId> { + match self { + Requests::AttestedCandidateV2(request) => match &request.peer { + Recipient::Authority(_) => None, + Recipient::Peer(peer_id) => Some(peer_id), + }, + request => { + unimplemented!("peer_id() is not implemented for {:?}", request) + }, + } + } + fn into_response_sender(self) -> ResponseSender { match self { - Requests::ChunkFetchingV1(outgoing_request) => outgoing_request.pending_response, + Requests::ChunkFetching(outgoing_request) => outgoing_request.pending_response, Requests::AvailableDataFetchingV1(outgoing_request) => outgoing_request.pending_response, _ => unimplemented!("unsupported request type"), @@ -1014,10 +1062,12 @@ impl RequestExt for Requests { /// Swaps the `ResponseSender` and returns the previous value. fn swap_response_sender(&mut self, new_sender: ResponseSender) -> ResponseSender { match self { - Requests::ChunkFetchingV1(outgoing_request) => + Requests::ChunkFetching(outgoing_request) => std::mem::replace(&mut outgoing_request.pending_response, new_sender), Requests::AvailableDataFetchingV1(outgoing_request) => std::mem::replace(&mut outgoing_request.pending_response, new_sender), + Requests::AttestedCandidateV2(outgoing_request) => + std::mem::replace(&mut outgoing_request.pending_response, new_sender), _ => unimplemented!("unsupported request type"), } } @@ -1025,9 +1075,11 @@ impl RequestExt for Requests { /// Returns the size in bytes of the request payload. fn size(&self) -> usize { match self { - Requests::ChunkFetchingV1(outgoing_request) => outgoing_request.payload.encoded_size(), + Requests::ChunkFetching(outgoing_request) => outgoing_request.payload.encoded_size(), Requests::AvailableDataFetchingV1(outgoing_request) => outgoing_request.payload.encoded_size(), + Requests::AttestedCandidateV2(outgoing_request) => + outgoing_request.payload.encoded_size(), _ => unimplemented!("received an unexpected request"), } } diff --git a/polkadot/node/subsystem-bench/src/lib/statement/mod.rs b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..bd47505f56aeabc201f0d57bec57fa6a3f3380d4 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/statement/mod.rs @@ -0,0 +1,449 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
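// What follows is the statement-distribution benchmark driver: it builds an overseer in
// which every subsystem except statement-distribution is mocked, connects emulated
// validator peers over an emulated network, and measures how the real subsystem handles
// incoming statements, backed-candidate manifests and acknowledgements.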
+ +use crate::{ + configuration::TestAuthorities, + dummy_builder, + environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH}, + mock::{ + candidate_backing::MockCandidateBacking, + chain_api::{ChainApiState, MockChainApi}, + network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx}, + prospective_parachains::MockProspectiveParachains, + runtime_api::{MockRuntimeApi, MockRuntimeApiCoreState}, + AlwaysSupportsParachains, + }, + network::{new_network, NetworkEmulatorHandle, NetworkInterface, NetworkInterfaceReceiver}, + usage::BenchmarkUsage, + NODE_UNDER_TEST, +}; +use bitvec::vec::BitVec; +use colored::Colorize; +use itertools::Itertools; +use polkadot_node_metrics::metrics::Metrics; +use polkadot_node_network_protocol::{ + grid_topology::{SessionGridTopology, TopologyPeerInfo}, + request_response::{IncomingRequest, ReqProtocolNames}, + v3::{self, BackedCandidateManifest, StatementFilter}, + view, Versioned, View, +}; +use polkadot_node_subsystem::messages::{ + network_bridge_event::NewGossipTopology, AllMessages, NetworkBridgeEvent, + StatementDistributionMessage, +}; +use polkadot_overseer::{ + Handle as OverseerHandle, Overseer, OverseerConnector, OverseerMetrics, SpawnGlue, +}; +use polkadot_primitives::{ + AuthorityDiscoveryId, Block, GroupIndex, Hash, Id, ValidatorId, ValidatorIndex, +}; +use polkadot_statement_distribution::StatementDistributionSubsystem; +use rand::SeedableRng; +use sc_keystore::LocalKeystore; +use sc_network::request_responses::ProtocolConfig; +use sc_network_types::PeerId; +use sc_service::SpawnTaskHandle; +use sp_keystore::{Keystore, KeystorePtr}; +use sp_runtime::RuntimeAppPublic; +use std::{ + sync::{atomic::Ordering, Arc}, + time::{Duration, Instant}, +}; +pub use test_state::TestState; + +mod test_state; + +const LOG_TARGET: &str = "subsystem-bench::statement"; + +pub fn make_keystore() -> KeystorePtr { + let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); + Keystore::sr25519_generate_new(&*keystore, ValidatorId::ID, Some("//Node0")) + .expect("Insert key into keystore"); + Keystore::sr25519_generate_new(&*keystore, AuthorityDiscoveryId::ID, Some("//Node0")) + .expect("Insert key into keystore"); + keystore +} + +fn build_overseer( + state: &TestState, + network: NetworkEmulatorHandle, + network_interface: NetworkInterface, + network_receiver: NetworkInterfaceReceiver, + dependencies: &TestEnvironmentDependencies, +) -> ( + Overseer, AlwaysSupportsParachains>, + OverseerHandle, + Vec, +) { + let overseer_connector = OverseerConnector::with_event_capacity(64000); + let overseer_metrics = OverseerMetrics::try_register(&dependencies.registry).unwrap(); + let spawn_task_handle = dependencies.task_manager.spawn_handle(); + let mock_runtime_api = MockRuntimeApi::new( + state.config.clone(), + state.test_authorities.clone(), + state.candidate_receipts.clone(), + Default::default(), + Default::default(), + 0, + MockRuntimeApiCoreState::Scheduled, + ); + let chain_api_state = ChainApiState { block_headers: state.block_headers.clone() }; + let mock_chain_api = MockChainApi::new(chain_api_state); + let mock_prospective_parachains = MockProspectiveParachains::new(); + let mock_candidate_backing = MockCandidateBacking::new( + state.config.clone(), + state + .test_authorities + .validator_pairs + .get(NODE_UNDER_TEST as usize) + .unwrap() + .clone(), + state.pvd.clone(), + state.own_backing_group.clone(), + ); + let (statement_req_receiver, statement_req_cfg) = IncomingRequest::get_config_receiver::< + Block, + sc_network::NetworkWorker, + 
>(&ReqProtocolNames::new(GENESIS_HASH, None)); + let (candidate_req_receiver, candidate_req_cfg) = IncomingRequest::get_config_receiver::< + Block, + sc_network::NetworkWorker, + >(&ReqProtocolNames::new(GENESIS_HASH, None)); + let keystore = make_keystore(); + let subsystem = StatementDistributionSubsystem::new( + keystore.clone(), + statement_req_receiver, + candidate_req_receiver, + Metrics::try_register(&dependencies.registry).unwrap(), + rand::rngs::StdRng::from_entropy(), + ); + let network_bridge_tx = MockNetworkBridgeTx::new( + network, + network_interface.subsystem_sender(), + state.test_authorities.clone(), + ); + let network_bridge_rx = MockNetworkBridgeRx::new(network_receiver, Some(candidate_req_cfg)); + + let dummy = dummy_builder!(spawn_task_handle, overseer_metrics) + .replace_runtime_api(|_| mock_runtime_api) + .replace_chain_api(|_| mock_chain_api) + .replace_prospective_parachains(|_| mock_prospective_parachains) + .replace_candidate_backing(|_| mock_candidate_backing) + .replace_statement_distribution(|_| subsystem) + .replace_network_bridge_tx(|_| network_bridge_tx) + .replace_network_bridge_rx(|_| network_bridge_rx); + let (overseer, raw_handle) = dummy.build_with_connector(overseer_connector).unwrap(); + let overseer_handle = OverseerHandle::new(raw_handle); + + (overseer, overseer_handle, vec![statement_req_cfg]) +} + +pub fn prepare_test( + state: &TestState, + with_prometheus_endpoint: bool, +) -> (TestEnvironment, Vec) { + let dependencies = TestEnvironmentDependencies::default(); + let (network, network_interface, network_receiver) = new_network( + &state.config, + &dependencies, + &state.test_authorities, + vec![Arc::new(state.clone())], + ); + let (overseer, overseer_handle, cfg) = + build_overseer(state, network.clone(), network_interface, network_receiver, &dependencies); + + ( + TestEnvironment::new( + dependencies, + state.config.clone(), + network, + overseer, + overseer_handle, + state.test_authorities.clone(), + with_prometheus_endpoint, + ), + cfg, + ) +} + +pub fn generate_peer_view_change(block_hash: Hash, peer_id: PeerId) -> AllMessages { + let network = NetworkBridgeEvent::PeerViewChange(peer_id, View::new([block_hash], 0)); + + AllMessages::StatementDistribution(StatementDistributionMessage::NetworkBridgeUpdate(network)) +} + +pub fn generate_new_session_topology( + topology: &SessionGridTopology, + test_node: ValidatorIndex, +) -> Vec { + let event = NetworkBridgeEvent::NewGossipTopology(NewGossipTopology { + session: 0, + topology: topology.clone(), + local_index: Some(test_node), + }); + vec![AllMessages::StatementDistribution(StatementDistributionMessage::NetworkBridgeUpdate( + event, + ))] +} + +/// Generates a topology to be used for this benchmark. 
+pub fn generate_topology(test_authorities: &TestAuthorities) -> SessionGridTopology { + let keyrings = test_authorities + .validator_authority_id + .clone() + .into_iter() + .zip(test_authorities.peer_ids.clone()) + .collect_vec(); + + let topology = keyrings + .clone() + .into_iter() + .enumerate() + .map(|(index, (discovery_id, peer_id))| TopologyPeerInfo { + peer_ids: vec![peer_id], + validator_index: ValidatorIndex(index as u32), + discovery_id, + }) + .collect_vec(); + let shuffled = (0..keyrings.len()).collect_vec(); + + SessionGridTopology::new(shuffled, topology) +} + +pub async fn benchmark_statement_distribution( + env: &mut TestEnvironment, + state: &TestState, +) -> BenchmarkUsage { + state.reset_trackers(); + + let connected_validators = state + .test_authorities + .validator_authority_id + .iter() + .enumerate() + .filter_map(|(i, id)| if env.network().is_peer_connected(id) { Some(i) } else { None }) + .collect_vec(); + let seconding_validator_in_own_backing_group = state + .own_backing_group + .iter() + .find(|v| connected_validators.contains(&(v.0 as usize))) + .unwrap() + .to_owned(); + + let config = env.config().clone(); + let groups = state.session_info.validator_groups.clone(); + let own_backing_group_index = groups + .iter() + .position(|group| group.iter().any(|v| v.0 == NODE_UNDER_TEST)) + .unwrap(); + + env.metrics().set_n_validators(config.n_validators); + env.metrics().set_n_cores(config.n_cores); + + let topology = generate_topology(&state.test_authorities); + let peer_connected_messages = env.network().generate_peer_connected(|e| { + AllMessages::StatementDistribution(StatementDistributionMessage::NetworkBridgeUpdate(e)) + }); + let new_session_topology_messages = + generate_new_session_topology(&topology, ValidatorIndex(NODE_UNDER_TEST)); + for message in peer_connected_messages.into_iter().chain(new_session_topology_messages) { + env.send_message(message).await; + } + + let test_start = Instant::now(); + let mut candidates_advertised = 0; + for block_info in state.block_infos.iter() { + let block_num = block_info.number as usize; + gum::info!(target: LOG_TARGET, "Current block {}/{} {:?}", block_num, config.num_blocks, block_info.hash); + env.metrics().set_current_block(block_num); + env.import_block(block_info.clone()).await; + + for peer_view_change in env + .network() + .generate_statement_distribution_peer_view_change(view![block_info.hash]) + { + env.send_message(peer_view_change).await; + } + + let seconding_peer_id = *state + .test_authorities + .peer_ids + .get(seconding_validator_in_own_backing_group.0 as usize) + .unwrap(); + let candidate = state.candidate_receipts.get(&block_info.hash).unwrap().first().unwrap(); + let candidate_hash = candidate.hash(); + let statement = state + .statements + .get(&candidate_hash) + .unwrap() + .get(seconding_validator_in_own_backing_group.0 as usize) + .unwrap() + .clone(); + let message = AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( + seconding_peer_id, + Versioned::V3(v3::StatementDistributionMessage::Statement( + block_info.hash, + statement, + )), + )), + ); + env.send_message(message).await; + + let max_messages_per_candidate = state.config.max_candidate_depth + 1; + // One was just sent for the own backing group + let mut messages_tracker = (0..groups.len()) + .map(|i| if i == own_backing_group_index { max_messages_per_candidate } else { 0 }) + .collect_vec(); + + let neighbors = + 
topology.compute_grid_neighbors_for(ValidatorIndex(NODE_UNDER_TEST)).unwrap(); + let connected_neighbors_x = neighbors + .validator_indices_x + .iter() + .filter(|&v| connected_validators.contains(&(v.0 as usize))) + .cloned() + .collect_vec(); + let connected_neighbors_y = neighbors + .validator_indices_y + .iter() + .filter(|&v| connected_validators.contains(&(v.0 as usize))) + .cloned() + .collect_vec(); + let one_hop_peers_and_groups = connected_neighbors_x + .iter() + .chain(connected_neighbors_y.iter()) + .map(|validator_index| { + let peer_id = + *state.test_authorities.peer_ids.get(validator_index.0 as usize).unwrap(); + let group_index = + groups.iter().position(|group| group.contains(validator_index)).unwrap(); + (peer_id, group_index) + }) + .collect_vec(); + let two_hop_x_peers_and_groups = connected_neighbors_x + .iter() + .flat_map(|validator_index| { + let peer_id = + *state.test_authorities.peer_ids.get(validator_index.0 as usize).unwrap(); + topology + .compute_grid_neighbors_for(*validator_index) + .unwrap() + .validator_indices_y + .iter() + .map(|validator_neighbor| { + let group_index = groups + .iter() + .position(|group| group.contains(validator_neighbor)) + .unwrap(); + (peer_id, group_index) + }) + .collect_vec() + }) + .collect_vec(); + let two_hop_y_peers_and_groups = connected_neighbors_y + .iter() + .flat_map(|validator_index| { + let peer_id = + *state.test_authorities.peer_ids.get(validator_index.0 as usize).unwrap(); + topology + .compute_grid_neighbors_for(*validator_index) + .unwrap() + .validator_indices_x + .iter() + .map(|validator_neighbor| { + let group_index = groups + .iter() + .position(|group| group.contains(validator_neighbor)) + .unwrap(); + (peer_id, group_index) + }) + .collect_vec() + }) + .collect_vec(); + + for (seconding_peer_id, group_index) in one_hop_peers_and_groups + .into_iter() + .chain(two_hop_x_peers_and_groups) + .chain(two_hop_y_peers_and_groups) + { + let messages_sent_count = messages_tracker.get_mut(group_index).unwrap(); + if *messages_sent_count == max_messages_per_candidate { + continue + } + *messages_sent_count += 1; + + let candidate_hash = state + .candidate_receipts + .get(&block_info.hash) + .unwrap() + .get(group_index) + .unwrap() + .hash(); + let manifest = BackedCandidateManifest { + relay_parent: block_info.hash, + candidate_hash, + group_index: GroupIndex(group_index as u32), + para_id: Id::new(group_index as u32 + 1), + parent_head_data_hash: state.pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: BitVec::from_iter( + groups.get(GroupIndex(group_index as u32)).unwrap().iter().map(|_| true), + ), + validated_in_group: BitVec::from_iter( + groups.get(GroupIndex(group_index as u32)).unwrap().iter().map(|_| false), + ), + }, + }; + let message = AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( + seconding_peer_id, + Versioned::V3(v3::StatementDistributionMessage::BackedCandidateManifest( + manifest, + )), + )), + ); + env.send_message(message).await; + } + + candidates_advertised += messages_tracker.iter().filter(|&&v| v > 0).collect_vec().len(); + + loop { + let manifests_count = state + .manifests_tracker + .values() + .filter(|v| v.load(Ordering::SeqCst)) + .collect::>() + .len(); + gum::debug!(target: LOG_TARGET, "{}/{} manifest exchanges", manifests_count, candidates_advertised); + + if manifests_count == candidates_advertised { + break; + } + tokio::time::sleep(Duration::from_millis(50)).await; + } + } 
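        // The one-hop/two-hop fan-out above follows the 2D grid topology: manifests go
        // to the node's row and column neighbours, which are expected to relay them
        // along their own columns and rows. A toy illustration of row/column neighbours
        // on a square grid (not the real `SessionGridTopology` code):
        fn toy_grid_neighbors(index: usize, n_validators: usize) -> (Vec<usize>, Vec<usize>) {
            let side = (n_validators as f64).sqrt().ceil() as usize;
            let (row, col) = (index / side, index % side);
            // Same row (X neighbours) and same column (Y neighbours), minus the node itself.
            let xs =
                (row * side..(row + 1) * side).filter(|&i| i != index && i < n_validators).collect();
            let ys = (col..n_validators).step_by(side).filter(|&i| i != index).collect();
            (xs, ys)
        }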
+ + let duration: u128 = test_start.elapsed().as_millis(); + gum::info!(target: LOG_TARGET, "All blocks processed in {}", format!("{:?}ms", duration).cyan()); + gum::info!(target: LOG_TARGET, + "Avg block time: {}", + format!("{} ms", test_start.elapsed().as_millis() / env.config().num_blocks as u128).red() + ); + + env.stop().await; + env.collect_resource_usage(&["statement-distribution"]) +} diff --git a/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs b/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs new file mode 100644 index 0000000000000000000000000000000000000000..b8ea64c7e331773fdcc2ee6bbd3da67a1c147980 --- /dev/null +++ b/polkadot/node/subsystem-bench/src/lib/statement/test_state.rs @@ -0,0 +1,436 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{ + configuration::{TestAuthorities, TestConfiguration}, + mock::runtime_api::session_info_for_peers, + network::{HandleNetworkMessage, NetworkMessage}, + NODE_UNDER_TEST, +}; +use bitvec::vec::BitVec; +use futures::channel::oneshot; +use itertools::Itertools; +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_network_protocol::{ + request_response::{ + v2::{AttestedCandidateRequest, AttestedCandidateResponse}, + Requests, + }, + v3::{ + BackedCandidateAcknowledgement, StatementDistributionMessage, StatementFilter, + ValidationProtocol, + }, + Versioned, +}; +use polkadot_node_primitives::{AvailableData, BlockData, PoV}; +use polkadot_node_subsystem_test_helpers::{ + derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info, +}; +use polkadot_overseer::BlockInfo; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CompactStatement, + Hash, Header, Id, PersistedValidationData, SessionInfo, SignedStatement, SigningContext, + UncheckedSigned, ValidatorIndex, ValidatorPair, +}; +use polkadot_primitives_test_helpers::{ + dummy_committed_candidate_receipt, dummy_hash, dummy_head_data, dummy_pvd, +}; +use sc_network::{config::IncomingRequest, ProtocolName}; +use sp_core::{Pair, H256}; +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +#[derive(Clone)] +pub struct TestState { + // Full test config + pub config: TestConfiguration, + // Authority keys for the network emulation. 
+    pub test_authorities: TestAuthorities,
+    // Relay chain block infos
+    pub block_infos: Vec<BlockInfo>,
+    // Map from generated candidate receipts
+    pub candidate_receipts: HashMap<H256, Vec<CandidateReceipt>>,
+    // Map from generated committed candidate receipts
+    pub commited_candidate_receipts: HashMap<H256, Vec<CommittedCandidateReceipt>>,
+    // PersistedValidationData, we use one for all candidates
+    pub pvd: PersistedValidationData,
+    // Relay chain block headers
+    pub block_headers: HashMap<H256, Header>,
+    // Session info
+    pub session_info: SessionInfo,
+    // Pregenerated statements
+    pub statements: HashMap<CandidateHash, Vec<UncheckedSigned<CompactStatement>>>,
+    // Validator indices of the backing group that the node under test belongs to
+    pub own_backing_group: Vec<ValidatorIndex>,
+    // Tracks how many statements we received for a candidate
+    pub statements_tracker: HashMap<CandidateHash, Vec<Arc<AtomicBool>>>,
+    // Tracks if manifest exchange happened
+    pub manifests_tracker: HashMap<CandidateHash, Arc<AtomicBool>>,
+}
+
+impl TestState {
+    pub fn new(config: &TestConfiguration) -> Self {
+        let test_authorities = config.generate_authorities();
+        let session_info = session_info_for_peers(config, &test_authorities);
+        let own_backing_group = session_info
+            .validator_groups
+            .iter()
+            .find(|g| g.contains(&ValidatorIndex(NODE_UNDER_TEST)))
+            .unwrap()
+            .clone();
+        let mut state = Self {
+            config: config.clone(),
+            test_authorities,
+            block_infos: (1..=config.num_blocks).map(generate_block_info).collect(),
+            candidate_receipts: Default::default(),
+            commited_candidate_receipts: Default::default(),
+            pvd: dummy_pvd(dummy_head_data(), 0),
+            block_headers: Default::default(),
+            statements_tracker: Default::default(),
+            manifests_tracker: Default::default(),
+            session_info,
+            own_backing_group,
+            statements: Default::default(),
+        };
+
+        state.block_headers = state.block_infos.iter().map(generate_block_header).collect();
+
+        // For each unique pov we create a candidate receipt.
+ let pov_sizes = Vec::from(config.pov_sizes()); // For n_cores + let pov_size_to_candidate = generate_pov_size_to_candidate(&pov_sizes); + let receipt_templates = + generate_receipt_templates(&pov_size_to_candidate, config.n_validators, &state.pvd); + + for block_info in state.block_infos.iter() { + for core_idx in 0..config.n_cores { + let pov_size = pov_sizes.get(core_idx).expect("This is a cycle; qed"); + let candidate_index = + *pov_size_to_candidate.get(pov_size).expect("pov_size always exists; qed"); + let mut receipt = receipt_templates[candidate_index].clone(); + receipt.descriptor.para_id = Id::new(core_idx as u32 + 1); + receipt.descriptor.relay_parent = block_info.hash; + + state.candidate_receipts.entry(block_info.hash).or_default().push( + CandidateReceipt { + descriptor: receipt.descriptor.clone(), + commitments_hash: receipt.commitments.hash(), + }, + ); + state.statements_tracker.entry(receipt.hash()).or_default().extend( + (0..config.n_validators) + .map(|_| Arc::new(AtomicBool::new(false))) + .collect_vec(), + ); + state.manifests_tracker.insert(receipt.hash(), Arc::new(AtomicBool::new(false))); + state + .commited_candidate_receipts + .entry(block_info.hash) + .or_default() + .push(receipt); + } + } + + let groups = state.session_info.validator_groups.clone(); + + for block_info in state.block_infos.iter() { + for (index, group) in groups.iter().enumerate() { + let candidate = + state.candidate_receipts.get(&block_info.hash).unwrap().get(index).unwrap(); + let statements = group + .iter() + .map(|&v| { + sign_statement( + CompactStatement::Seconded(candidate.hash()), + block_info.hash, + v, + state.test_authorities.validator_pairs.get(v.0 as usize).unwrap(), + ) + }) + .collect_vec(); + state.statements.insert(candidate.hash(), statements); + } + } + + state + } + + pub fn reset_trackers(&self) { + self.statements_tracker.values().for_each(|v| { + v.iter() + .enumerate() + .for_each(|(index, v)| v.as_ref().store(index <= 1, Ordering::SeqCst)) + }); + self.manifests_tracker + .values() + .for_each(|v| v.as_ref().store(false, Ordering::SeqCst)); + } +} + +fn sign_statement( + statement: CompactStatement, + relay_parent: H256, + validator_index: ValidatorIndex, + pair: &ValidatorPair, +) -> UncheckedSigned { + let context = SigningContext { parent_hash: relay_parent, session_index: 0 }; + let payload = statement.signing_payload(&context); + + SignedStatement::new( + statement, + validator_index, + pair.sign(&payload[..]), + &context, + &pair.public(), + ) + .unwrap() + .as_unchecked() + .to_owned() +} + +fn generate_block_info(block_num: usize) -> BlockInfo { + new_block_import_info(Hash::repeat_byte(block_num as u8), block_num as BlockNumber) +} + +fn generate_block_header(info: &BlockInfo) -> (H256, Header) { + ( + info.hash, + Header { + digest: Default::default(), + number: info.number, + parent_hash: info.parent_hash, + extrinsics_root: Default::default(), + state_root: Default::default(), + }, + ) +} + +fn generate_pov_size_to_candidate(pov_sizes: &[usize]) -> HashMap { + pov_sizes + .iter() + .cloned() + .unique() + .enumerate() + .map(|(index, pov_size)| (pov_size, index)) + .collect() +} + +fn generate_receipt_templates( + pov_size_to_candidate: &HashMap, + n_validators: usize, + pvd: &PersistedValidationData, +) -> Vec { + pov_size_to_candidate + .iter() + .map(|(&pov_size, &index)| { + let mut receipt = dummy_committed_candidate_receipt(dummy_hash()); + let (_, erasure_root) = derive_erasure_chunks_with_proofs_and_root( + n_validators, + &AvailableData { + 
validation_data: pvd.clone(), + pov: Arc::new(PoV { block_data: BlockData(vec![index as u8; pov_size]) }), + }, + |_, _| {}, + ); + receipt.descriptor.persisted_validation_data_hash = pvd.hash(); + receipt.descriptor.erasure_root = erasure_root; + receipt + }) + .collect() +} + +#[async_trait::async_trait] +impl HandleNetworkMessage for TestState { + async fn handle( + &self, + message: NetworkMessage, + node_sender: &mut futures::channel::mpsc::UnboundedSender, + ) -> Option { + match message { + NetworkMessage::RequestFromNode(_authority_id, Requests::AttestedCandidateV2(req)) => { + let payload = req.payload; + let candidate_receipt = self + .commited_candidate_receipts + .values() + .flatten() + .find(|v| v.hash() == payload.candidate_hash) + .unwrap() + .clone(); + let persisted_validation_data = self.pvd.clone(); + let statements = self.statements.get(&payload.candidate_hash).unwrap().clone(); + let res = AttestedCandidateResponse { + candidate_receipt, + persisted_validation_data, + statements, + }; + let _ = req.pending_response.send(Ok((res.encode(), ProtocolName::from("")))); + None + }, + NetworkMessage::MessageFromNode( + authority_id, + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::Statement(relay_parent, statement), + )), + ) => { + let index = self + .test_authorities + .validator_authority_id + .iter() + .position(|v| v == &authority_id) + .unwrap(); + let candidate_hash = *statement.unchecked_payload().candidate_hash(); + + let statements_sent_count = self + .statements_tracker + .get(&candidate_hash) + .unwrap() + .get(index) + .unwrap() + .as_ref(); + if statements_sent_count.load(Ordering::SeqCst) { + return None + } else { + statements_sent_count.store(true, Ordering::SeqCst); + } + + let group_statements = self.statements.get(&candidate_hash).unwrap(); + if !group_statements.iter().any(|s| s.unchecked_validator_index().0 == index as u32) + { + return None + } + + let statement = CompactStatement::Valid(candidate_hash); + let context = SigningContext { parent_hash: relay_parent, session_index: 0 }; + let payload = statement.signing_payload(&context); + let pair = self.test_authorities.validator_pairs.get(index).unwrap(); + let signature = pair.sign(&payload[..]); + let statement = SignedStatement::new( + statement, + ValidatorIndex(index as u32), + signature, + &context, + &pair.public(), + ) + .unwrap() + .as_unchecked() + .to_owned(); + + node_sender + .start_send(NetworkMessage::MessageFromPeer( + *self.test_authorities.peer_ids.get(index).unwrap(), + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::Statement(relay_parent, statement), + )), + )) + .unwrap(); + None + }, + NetworkMessage::MessageFromNode( + authority_id, + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::BackedCandidateManifest(manifest), + )), + ) => { + let index = self + .test_authorities + .validator_authority_id + .iter() + .position(|v| v == &authority_id) + .unwrap(); + let backing_group = + self.session_info.validator_groups.get(manifest.group_index).unwrap(); + let group_size = backing_group.len(); + let is_own_backing_group = backing_group.contains(&ValidatorIndex(NODE_UNDER_TEST)); + let mut seconded_in_group = + BitVec::from_iter((0..group_size).map(|_| !is_own_backing_group)); + let mut validated_in_group = BitVec::from_iter((0..group_size).map(|_| false)); + + if is_own_backing_group { + let (pending_response, response_receiver) = oneshot::channel(); + let peer_id = 
self.test_authorities.peer_ids.get(index).unwrap().to_owned(); + node_sender + .start_send(NetworkMessage::RequestFromPeer(IncomingRequest { + peer: peer_id, + payload: AttestedCandidateRequest { + candidate_hash: manifest.candidate_hash, + mask: StatementFilter::blank(self.own_backing_group.len()), + } + .encode(), + pending_response, + })) + .unwrap(); + + let response = response_receiver.await.unwrap(); + let response = + AttestedCandidateResponse::decode(&mut response.result.unwrap().as_ref()) + .unwrap(); + + for statement in response.statements { + let validator_index = statement.unchecked_validator_index(); + let position_in_group = + backing_group.iter().position(|v| *v == validator_index).unwrap(); + match statement.unchecked_payload() { + CompactStatement::Seconded(_) => + seconded_in_group.set(position_in_group, true), + CompactStatement::Valid(_) => + validated_in_group.set(position_in_group, true), + } + } + } + + let ack = BackedCandidateAcknowledgement { + candidate_hash: manifest.candidate_hash, + statement_knowledge: StatementFilter { seconded_in_group, validated_in_group }, + }; + node_sender + .start_send(NetworkMessage::MessageFromPeer( + *self.test_authorities.peer_ids.get(index).unwrap(), + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::BackedCandidateKnown(ack), + )), + )) + .unwrap(); + + self.manifests_tracker + .get(&manifest.candidate_hash) + .unwrap() + .as_ref() + .store(true, Ordering::SeqCst); + + None + }, + NetworkMessage::MessageFromNode( + _authority_id, + Versioned::V3(ValidationProtocol::StatementDistribution( + StatementDistributionMessage::BackedCandidateKnown(ack), + )), + ) => { + self.manifests_tracker + .get(&ack.candidate_hash) + .unwrap() + .as_ref() + .store(true, Ordering::SeqCst); + + None + }, + _ => Some(message), + } + } +} diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index bfaac3265a2e3741aaa2fa4811785bd90e8ea016..883e9aa7ad0a0cec210ef49ccd7542f17d7256ee 100644 --- a/polkadot/node/subsystem-bench/src/lib/usage.rs +++ b/polkadot/node/subsystem-bench/src/lib/usage.rs @@ -23,7 +23,6 @@ use std::collections::HashMap; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct BenchmarkUsage { - pub benchmark_name: String, pub network_usage: Vec, pub cpu_usage: Vec, } @@ -32,8 +31,7 @@ impl std::fmt::Display for BenchmarkUsage { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, - "\n{}\n\n{}\n{}\n\n{}\n{}\n", - self.benchmark_name.purple(), + "\n{}\n{}\n\n{}\n{}\n", format!("{:<32}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(), self.network_usage .iter() @@ -59,18 +57,17 @@ impl BenchmarkUsage { let all_cpu_usage: Vec<&ResourceUsage> = usages.iter().flat_map(|v| &v.cpu_usage).collect(); Self { - benchmark_name: usages.first().map(|v| v.benchmark_name.clone()).unwrap_or_default(), network_usage: ResourceUsage::average_by_resource_name(&all_network_usages), cpu_usage: ResourceUsage::average_by_resource_name(&all_cpu_usage), } } pub fn check_network_usage(&self, checks: &[ResourceUsageCheck]) -> Vec { - check_usage(&self.benchmark_name, &self.network_usage, checks) + check_usage(&self.network_usage, checks) } pub fn check_cpu_usage(&self, checks: &[ResourceUsageCheck]) -> Vec { - check_usage(&self.benchmark_name, &self.cpu_usage, checks) + check_usage(&self.cpu_usage, checks) } pub fn cpu_usage_diff(&self, other: &Self, resource_name: &str) -> Option { @@ -105,18 +102,8 @@ impl BenchmarkUsage { } } 
-fn check_usage( - benchmark_name: &str, - usage: &[ResourceUsage], - checks: &[ResourceUsageCheck], -) -> Vec { - checks - .iter() - .filter_map(|check| { - check_resource_usage(usage, check) - .map(|message| format!("{}: {}", benchmark_name, message)) - }) - .collect() +fn check_usage(usage: &[ResourceUsage], checks: &[ResourceUsageCheck]) -> Vec { + checks.iter().filter_map(|check| check_resource_usage(usage, check)).collect() } fn check_resource_usage( diff --git a/polkadot/node/subsystem-test-helpers/src/lib.rs b/polkadot/node/subsystem-test-helpers/src/lib.rs index 6c1ac86c4507b798e4270b7c52e97a4dad74b64c..375121c3746377157e852de382c9d2cb64bce496 100644 --- a/polkadot/node/subsystem-test-helpers/src/lib.rs +++ b/polkadot/node/subsystem-test-helpers/src/lib.rs @@ -25,7 +25,7 @@ use polkadot_node_subsystem::{ SubsystemError, SubsystemResult, TrySendError, }; use polkadot_node_subsystem_util::TimeoutExt; -use polkadot_primitives::{Hash, ValidatorIndex}; +use polkadot_primitives::{ChunkIndex, Hash}; use futures::{channel::mpsc, poll, prelude::*}; use parking_lot::Mutex; @@ -487,7 +487,7 @@ pub fn derive_erasure_chunks_with_proofs_and_root( .enumerate() .map(|(index, (proof, chunk))| ErasureChunk { chunk: chunk.to_vec(), - index: ValidatorIndex(index as _), + index: ChunkIndex(index as _), proof: Proof::try_from(proof).unwrap(), }) .collect::>(); diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 93dd43c5dbfc495ff92fa76f28e3ad80992b8a6a..e03fc60a1fd73c32e0ebfaa6c0d63e76b3d4aebc 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -11,6 +11,7 @@ workspace = true [dependencies] derive_more = "0.99.17" +fatality = "0.1.1" futures = "0.3.30" polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } diff --git a/polkadot/node/subsystem-types/src/errors.rs b/polkadot/node/subsystem-types/src/errors.rs index 44136362a69efa0ba316413cc9f78d834b5c3e9f..b8e70641243eab6b6958649c416ab1b533f9abc5 100644 --- a/polkadot/node/subsystem-types/src/errors.rs +++ b/polkadot/node/subsystem-types/src/errors.rs @@ -18,6 +18,7 @@ use crate::JaegerError; use ::orchestra::OrchestraError as OverseerError; +use fatality::fatality; /// A description of an error causing the runtime API request to be unservable. #[derive(thiserror::Error, Debug, Clone)] @@ -68,32 +69,21 @@ impl core::fmt::Display for ChainApiError { impl std::error::Error for ChainApiError {} /// An error that may happen during Availability Recovery process. -#[derive(PartialEq, Debug, Clone)] +#[derive(PartialEq, Clone)] +#[fatality(splitable)] +#[allow(missing_docs)] pub enum RecoveryError { - /// A chunk is recovered but is invalid. + #[error("Invalid data")] Invalid, - /// A requested chunk is unavailable. + #[error("Data is unavailable")] Unavailable, - /// Erasure task channel closed, usually means node is shutting down. 
+    #[fatal]
+    #[error("Erasure task channel closed")]
     ChannelClosed,
 }
 
-impl std::fmt::Display for RecoveryError {
-    fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
-        let msg = match self {
-            RecoveryError::Invalid => "Invalid",
-            RecoveryError::Unavailable => "Unavailable",
-            RecoveryError::ChannelClosed => "ChannelClosed",
-        };
-
-        write!(f, "{}", msg)
-    }
-}
-
-impl std::error::Error for RecoveryError {}
-
 /// An error type that describes faults that may happen
 ///
 /// These are:
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index e75d80395c4ba0371b58ce1383db2a1f364eaad8..722a97989bce0b8587c8992827f6438fd78ff21e 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -62,7 +62,7 @@ pub mod network_bridge_event;
 pub use network_bridge_event::NetworkBridgeEvent;
 
 /// A request to the candidate backing subsystem to check whether
-/// there exists vacant membership in some fragment tree.
+/// we can second this candidate.
 #[derive(Debug, Copy, Clone)]
 pub struct CanSecondRequest {
     /// Para id of the candidate.
@@ -90,10 +90,12 @@ pub enum CandidateBackingMessage {
         oneshot::Sender<HashMap<ParaId, Vec<BackedCandidate>>>,
     ),
     /// Request the subsystem to check whether it's allowed to second given candidate.
-    /// The rule is to only fetch collations that are either built on top of the root
-    /// of some fragment tree or have a parent node which represents backed candidate.
+    /// The rule is to only fetch collations that either can be directly chained to any
+    /// FragmentChain in the view, or for which there is at least one FragmentChain where this
+    /// candidate is a potentially unconnected candidate (we predict that it may become
+    /// connected to a FragmentChain in the future).
     ///
-    /// Always responses with `false` if async backing is disabled for candidate's relay
+    /// Always responds with `false` if async backing is disabled for the candidate's relay
     /// parent.
     CanSecond(CanSecondRequest, oneshot::Sender<bool>),
     /// Note that the Candidate Backing subsystem should second the given candidate in the context
@@ -244,13 +246,6 @@ pub enum CollatorProtocolMessage {
     ///
     /// The hash is the relay parent.
     Seconded(Hash, SignedFullStatement),
-    /// The candidate received enough validity votes from the backing group.
-    Backed {
-        /// Candidate's para id.
-        para_id: ParaId,
-        /// Hash of the para head generated by candidate.
-        para_head: Hash,
-    },
 }
 
 impl Default for CollatorProtocolMessage {
@@ -485,6 +480,8 @@ pub enum AvailabilityRecoveryMessage {
         CandidateReceipt,
         SessionIndex,
         Option<GroupIndex>, // Optional backing group to request from first.
+        Option<CoreIndex>,  /* A `CoreIndex` needs to be specified for the recovery process to
+                             * prefer systematic chunk recovery. */
         oneshot::Sender<Result<AvailableData, RecoveryError>>,
     ),
 }
@@ -520,7 +517,7 @@ pub enum AvailabilityStoreMessage {
     QueryChunkSize(CandidateHash, oneshot::Sender<Option<usize>>),
 
     /// Query all chunks that we have for the given candidate hash.
-    QueryAllChunks(CandidateHash, oneshot::Sender<Vec<ErasureChunk>>),
+    QueryAllChunks(CandidateHash, oneshot::Sender<Vec<(ValidatorIndex, ErasureChunk)>>),
 
     /// Query whether an `ErasureChunk` exists within the AV Store.
     ///
@@ -535,6 +532,8 @@ pub enum AvailabilityStoreMessage {
     StoreChunk {
         /// A hash of the candidate this chunk belongs to.
         candidate_hash: CandidateHash,
+        /// Validator index. May not be equal to the chunk index.
+        validator_index: ValidatorIndex,
         /// The chunk itself.
         chunk: ErasureChunk,
         /// Sending side of the channel to send result to.
@@ -554,6 +553,11 @@ pub enum AvailabilityStoreMessage {
         available_data: AvailableData,
         /// Erasure root we expect to get after chunking.
         expected_erasure_root: Hash,
+        /// Core index where the candidate was backed.
+        core_index: CoreIndex,
+        /// Node features at the candidate relay parent. Used for computing the validator->chunk
+        /// mapping.
+        node_features: NodeFeatures,
         /// Sending side of the channel to send result to.
         tx: oneshot::Sender<Result<(), StoreAvailableDataError>>,
     },
@@ -1023,9 +1027,9 @@ pub enum GossipSupportMessage {
     NetworkBridgeUpdate(NetworkBridgeEvent<net_protocol::GossipSupportNetworkMessage>),
 }
 
-/// Request introduction of a candidate into the prospective parachains subsystem.
+/// Request introduction of a seconded candidate into the prospective parachains subsystem.
 #[derive(Debug, PartialEq, Eq, Clone)]
-pub struct IntroduceCandidateRequest {
+pub struct IntroduceSecondedCandidateRequest {
     /// The para-id of the candidate.
     pub candidate_para: ParaId,
     /// The candidate receipt itself.
@@ -1034,7 +1038,7 @@ pub struct IntroduceCandidateRequest {
     pub persisted_validation_data: PersistedValidationData,
 }
 
-/// A hypothetical candidate to be evaluated for frontier membership
+/// A hypothetical candidate to be evaluated for potential/actual membership
 /// in the prospective parachains subsystem.
 ///
 /// Hypothetical candidates are either complete or incomplete.
@@ -1103,21 +1107,27 @@ impl HypotheticalCandidate {
             candidate_relay_parent,
         }
     }
+
+    /// Get the output head data hash, if the candidate is complete.
+    pub fn output_head_data_hash(&self) -> Option<Hash> {
+        match *self {
+            HypotheticalCandidate::Complete { ref receipt, .. } =>
+                Some(receipt.descriptor.para_head),
+            HypotheticalCandidate::Incomplete { .. } => None,
+        }
+    }
 }
 
 /// Request specifying which candidates are either already included
-/// or might be included in the hypothetical frontier of fragment trees
-/// under a given active leaf.
+/// or might become included in a fragment chain under a given active leaf (or any active leaf,
+/// if `fragment_chain_relay_parent` is `None`).
 #[derive(Debug, PartialEq, Eq, Clone)]
-pub struct HypotheticalFrontierRequest {
+pub struct HypotheticalMembershipRequest {
     /// Candidates, in arbitrary order, which should be checked for
-    /// possible membership in fragment trees.
+    /// hypothetical/actual membership in fragment chains.
     pub candidates: Vec<HypotheticalCandidate>,
-    /// Either a specific fragment tree to check, otherwise all.
-    pub fragment_tree_relay_parent: Option<Hash>,
-    /// Only return membership if all candidates in the path from the
-    /// root are backed.
-    pub backed_in_path_only: bool,
+    /// Either a specific fragment chain to check, otherwise all.
+    pub fragment_chain_relay_parent: Option<Hash>,
 }
 
 /// A request for the persisted validation data stored in the prospective
@@ -1156,9 +1166,9 @@ impl ParentHeadData {
     }
 }
 
-/// Indicates the relay-parents whose fragment tree a candidate
-/// is present in and the depths of that tree the candidate is present in.
-pub type FragmentTreeMembership = Vec<(Hash, Vec<usize>)>;
+/// Indicates the relay-parents whose fragment chain a candidate
+/// is present in or can be added to (right now or in the future).
+pub type HypotheticalMembership = Vec<Hash>;
 
 /// A collection of ancestor candidates of a parachain.
 pub type Ancestors = HashSet<CandidateHash>;
@@ -1166,15 +1176,11 @@ pub type Ancestors = HashSet<CandidateHash>;
 /// Messages sent to the Prospective Parachains subsystem.
 #[derive(Debug)]
 pub enum ProspectiveParachainsMessage {
-    /// Inform the Prospective Parachains Subsystem of a new candidate.
+    /// Inform the Prospective Parachains Subsystem of a new seconded candidate.
     ///
-    /// The response sender accepts the candidate membership, which is the existing
-    /// membership of the candidate if it was already known.
-    IntroduceCandidate(IntroduceCandidateRequest, oneshot::Sender<FragmentTreeMembership>),
-    /// Inform the Prospective Parachains Subsystem that a previously introduced candidate
-    /// has been seconded. This requires that the candidate was successfully introduced in
-    /// the past.
-    CandidateSeconded(ParaId, CandidateHash),
+    /// The response sender returns `false` if the candidate was rejected by prospective
+    /// parachains, `true` otherwise (if it was accepted or already present).
+    IntroduceSecondedCandidate(IntroduceSecondedCandidateRequest, oneshot::Sender<bool>),
     /// Inform the Prospective Parachains Subsystem that a previously introduced candidate
     /// has been backed. This requires that the candidate was successfully introduced in
     /// the past.
@@ -1193,23 +1199,29 @@ pub enum ProspectiveParachainsMessage {
         Ancestors,
         oneshot::Sender<Vec<(CandidateHash, Hash)>>,
     ),
-    /// Get the hypothetical frontier membership of candidates with the given properties
-    /// under the specified active leaves' fragment trees.
+    /// Get the hypothetical or actual membership of candidates with the given properties
+    /// under the specified active leaf's fragment chain.
+    ///
+    /// For each candidate, we return a vector of leaves where the candidate is present or could
+    /// be added. "Could be added" either means that the candidate can be added to the chain
+    /// right now or could be added in the future (we may not have its ancestors yet).
+    /// Note that even if we think it could be added in the future, we may find out that it was
+    /// invalid, as time passes.
+    /// If an active leaf is not in the vector, it means that there's no
+    /// chance this candidate will become valid under that leaf in the future.
     ///
-    /// For any candidate which is already known, this returns the depths the candidate
-    /// occupies.
-    GetHypotheticalFrontier(
-        HypotheticalFrontierRequest,
-        oneshot::Sender<Vec<(HypotheticalCandidate, FragmentTreeMembership)>>,
+    /// If `fragment_chain_relay_parent` in the request is `Some()`, the return vector can only
+    /// contain this relay parent (or none).
+    GetHypotheticalMembership(
+        HypotheticalMembershipRequest,
+        oneshot::Sender<Vec<(HypotheticalCandidate, HypotheticalMembership)>>,
     ),
-    /// Get the membership of the candidate in all fragment trees.
-    GetTreeMembership(ParaId, CandidateHash, oneshot::Sender<FragmentTreeMembership>),
-    /// Get the minimum accepted relay-parent number for each para in the fragment tree
+    /// Get the minimum accepted relay-parent number for each para in the fragment chain
     /// for the given relay-chain block hash.
     ///
     /// That is, if the block hash is known and is an active leaf, this returns the
     /// minimum relay-parent block number in the same branch of the relay chain which
-    /// is accepted in the fragment tree for each para-id.
+    /// is accepted in the fragment chain for each para-id.
     ///
     /// If the block hash is not an active leaf, this will return an empty vector.
     ///
@@ -1219,8 +1231,10 @@ pub enum ProspectiveParachainsMessage {
     /// Para-IDs are returned in no particular order.
     GetMinimumRelayParents(Hash, oneshot::Sender<Vec<(ParaId, BlockNumber)>>),
     /// Get the validation data of some prospective candidate. The candidate doesn't need
-    /// to be part of any fragment tree, but this only succeeds if the parent head-data and
-    /// relay-parent are part of some fragment tree.
+    /// to be part of any fragment chain, but this only succeeds if the parent head-data and
+    /// relay-parent are part of the `CandidateStorage` (meaning that it's a candidate which is
+    /// part of some fragment chain or which prospective-parachains predicted will become part of
+    /// some fragment chain).
     GetProspectiveValidationData(
         ProspectiveValidationDataRequest,
         oneshot::Sender<Option<PersistedValidationData>>,
     ),
diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml
index cb93ad75d20b1487bae29759dd92b4d2a5ff8502..9259ca94f07359dcc1ca7eac29647e34f1f18428 100644
--- a/polkadot/node/subsystem-util/Cargo.toml
+++ b/polkadot/node/subsystem-util/Cargo.toml
@@ -14,16 +14,17 @@ async-trait = "0.1.79"
 futures = "0.3.30"
 futures-channel = "0.3.23"
 itertools = "0.11"
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }
 parking_lot = "0.12.1"
 pin-project = "1.0.9"
 rand = "0.8.5"
 thiserror = { workspace = true }
-fatality = "0.0.6"
+fatality = "0.1.1"
 gum = { package = "tracing-gum", path = "../gum" }
 derive_more = "0.99.17"
 schnellru = "0.2.1"
+erasure-coding = { package = "polkadot-erasure-coding", path = "../../erasure-coding" }
 polkadot-node-subsystem = { path = "../subsystem" }
 polkadot-node-subsystem-types = { path = "../subsystem-types" }
 polkadot-node-jaeger = { path = "../jaeger" }
diff --git a/polkadot/node/subsystem-util/src/availability_chunks.rs b/polkadot/node/subsystem-util/src/availability_chunks.rs
new file mode 100644
index 0000000000000000000000000000000000000000..45168e4512e156fcc9151f28246c95f32f8e184e
--- /dev/null
+++ b/polkadot/node/subsystem-util/src/availability_chunks.rs
@@ -0,0 +1,227 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use erasure_coding::systematic_recovery_threshold;
+use polkadot_primitives::{node_features, ChunkIndex, CoreIndex, NodeFeatures, ValidatorIndex};
+
+/// Compute the per-validator availability chunk index.
+/// WARNING: THIS FUNCTION IS CRITICAL TO PARACHAIN CONSENSUS.
+/// Any modification to the output of the function needs to be coordinated via the runtime.
+/// It's best to use minimal/no external dependencies.
+pub fn availability_chunk_index(
+    maybe_node_features: Option<&NodeFeatures>,
+    n_validators: usize,
+    core_index: CoreIndex,
+    validator_index: ValidatorIndex,
+) -> Result<ChunkIndex, erasure_coding::Error> {
+    if let Some(features) = maybe_node_features {
+        if let Some(&true) = features
+            .get(usize::from(node_features::FeatureIndex::AvailabilityChunkMapping as u8))
+            .as_deref()
+        {
+            let systematic_threshold = systematic_recovery_threshold(n_validators)? as u32;
as u32; + let core_start_pos = core_index.0 * systematic_threshold; + + return Ok(ChunkIndex((core_start_pos + validator_index.0) % n_validators as u32)) + } + } + + Ok(validator_index.into()) +} + +/// Compute the per-core availability chunk indices. Returns a Vec which maps ValidatorIndex to +/// ChunkIndex for a given availability core index. +/// WARNING: THIS FUNCTION IS CRITICAL TO PARACHAIN CONSENSUS. +/// Any modification to the output of the function needs to be coordinated via the +/// runtime. It's best to use minimal/no external dependencies. +pub fn availability_chunk_indices( + maybe_node_features: Option<&NodeFeatures>, + n_validators: usize, + core_index: CoreIndex, +) -> Result<Vec<ChunkIndex>, erasure_coding::Error> { + let identity = (0..n_validators).map(|index| ChunkIndex(index as u32)); + if let Some(features) = maybe_node_features { + if let Some(&true) = features + .get(usize::from(node_features::FeatureIndex::AvailabilityChunkMapping as u8)) + .as_deref() + { + let systematic_threshold = systematic_recovery_threshold(n_validators)? as u32; + let core_start_pos = core_index.0 * systematic_threshold; + + return Ok(identity + .into_iter() + .cycle() + .skip(core_start_pos as usize) + .take(n_validators) + .collect()) + } + } + + Ok(identity.collect()) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::collections::HashSet; + + pub fn node_features_with_mapping_enabled() -> NodeFeatures { + let mut node_features = NodeFeatures::new(); + node_features + .resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false); + node_features + .set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true); + node_features + } + + pub fn node_features_with_other_bits_enabled() -> NodeFeatures { + let mut node_features = NodeFeatures::new(); + node_features.resize(node_features::FeatureIndex::FirstUnassigned as usize + 1, true); + node_features + .set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, false); + node_features + } + + #[test] + fn test_availability_chunk_indices() { + let n_validators = 20u32; + let n_cores = 15u32; + + // If the mapping feature is not enabled, it should always be the identity vector. + { + for node_features in + [None, Some(NodeFeatures::EMPTY), Some(node_features_with_other_bits_enabled())] + { + for core_index in 0..n_cores { + let indices = availability_chunk_indices( + node_features.as_ref(), + n_validators as usize, + CoreIndex(core_index), + ) + .unwrap(); + + for validator_index in 0..n_validators { + assert_eq!( + indices[validator_index as usize], + availability_chunk_index( + node_features.as_ref(), + n_validators as usize, + CoreIndex(core_index), + ValidatorIndex(validator_index) + ) + .unwrap() + ) + } + + assert_eq!( + indices, + (0..n_validators).map(|i| ChunkIndex(i)).collect::<Vec<_>>() + ); + } + } + } + + // Test when mapping feature is enabled. + { + let node_features = node_features_with_mapping_enabled(); + let mut previous_indices = None; + + for core_index in 0..n_cores { + let indices = availability_chunk_indices( + Some(&node_features), + n_validators as usize, + CoreIndex(core_index), + ) + .unwrap(); + + for validator_index in 0..n_validators { + assert_eq!( + indices[validator_index as usize], + availability_chunk_index( + Some(&node_features), + n_validators as usize, + CoreIndex(core_index), + ValidatorIndex(validator_index) + ) + .unwrap() + ) + } + + // Check that it's not equal to the previous core's indices.
+ if let Some(previous_indices) = previous_indices { + assert_ne!(previous_indices, indices); + } + + previous_indices = Some(indices.clone()); + + // Check that it's indeed a permutation. + assert_eq!( + (0..n_validators).map(|i| ChunkIndex(i)).collect::<HashSet<_>>(), + indices.into_iter().collect::<HashSet<_>>() + ); + } + } + } + + #[test] + // This is just a dummy test that checks the mapping against some hardcoded outputs, to prevent + // accidental changes to the algorithms. + fn prevent_changes_to_mapping() { + let n_validators = 7; + let node_features = node_features_with_mapping_enabled(); + + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(0)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::<Vec<_>>(), + vec![0, 1, 2, 3, 4, 5, 6] + ); + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(1)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::<Vec<_>>(), + vec![2, 3, 4, 5, 6, 0, 1] + ); + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(2)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::<Vec<_>>(), + vec![4, 5, 6, 0, 1, 2, 3] + ); + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(3)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::<Vec<_>>(), + vec![6, 0, 1, 2, 3, 4, 5] + ); + assert_eq!( + availability_chunk_indices(Some(&node_features), n_validators, CoreIndex(4)) + .unwrap() + .into_iter() + .map(|i| i.0) + .collect::<Vec<_>>(), + vec![1, 2, 3, 4, 5, 6, 0] + ); + } +} diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs index a14536a17666c8e2d9183755e3579990f6185113..23a758d25715bbd33251b5edf8835ca3625f3ffa 100644 --- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs +++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs @@ -17,23 +17,45 @@ use futures::channel::oneshot; use polkadot_node_subsystem::{ errors::ChainApiError, - messages::{ChainApiMessage, ProspectiveParachainsMessage}, + messages::{ChainApiMessage, ProspectiveParachainsMessage, RuntimeApiMessage}, SubsystemSender, }; use polkadot_primitives::{BlockNumber, Hash, Id as ParaId}; use std::collections::HashMap; +use crate::{ + request_session_index_for_child, + runtime::{self, prospective_parachains_mode, recv_runtime, ProspectiveParachainsMode}, +}; + // Always aim to retain 1 block before the active leaves. const MINIMUM_RETAIN_LENGTH: BlockNumber = 2; /// Handles the implicit view of the relay chain derived from the immediate view, which /// is composed of active leaves, and the minimum relay-parents allowed for /// candidates of various parachains at those leaves. -#[derive(Default, Clone)] +#[derive(Clone)] pub struct View { leaves: HashMap<Hash, ActiveLeafPruningInfo>, block_info_storage: HashMap<Hash, BlockInfo>, + collating_for: Option<ParaId>, +} + +impl View { + /// Create a new empty view. + /// If `collating_for` is `Some`, the node is a collator and is only interested in the allowed + /// relay parents of a single `ParaId`. In that case, prospective-parachains is no longer + /// queried. + pub fn new(collating_for: Option<ParaId>) -> Self { + Self { leaves: Default::default(), block_info_storage: Default::default(), collating_for } + } +} + +impl Default for View { + fn default() -> Self { + Self::new(None) + } } // Minimum relay parents implicitly relative to a particular block. @@ -106,15 +128,13 @@ impl View { } /// Activate a leaf in the view.
- /// This will request the minimum relay parents from the - /// Prospective Parachains subsystem for each leaf and will load headers in the ancestry of each - /// leaf in the view as needed. These are the 'implicit ancestors' of the leaf. + /// This will request the minimum relay parents for the leaf and will load headers in the + /// ancestry of the leaf as needed. These are the 'implicit ancestors' of the leaf. /// /// To maximize reuse of outdated leaves, it's best to activate new leaves before /// deactivating old ones. /// - /// This returns a list of para-ids which are relevant to the leaf, - /// and the allowed relay parents for these paras under this leaf can be + /// The allowed relay parents for the relevant paras under this leaf can be /// queried with [`View::known_allowed_relay_parents_under`]. /// /// No-op for known leaves. @@ -122,10 +142,11 @@ impl View { &mut self, sender: &mut Sender, leaf_hash: Hash, - ) -> Result<Vec<ParaId>, FetchError> + ) -> Result<(), FetchError> where - Sender: SubsystemSender<ProspectiveParachainsMessage>, - Sender: SubsystemSender<ChainApiMessage>, + Sender: SubsystemSender<ProspectiveParachainsMessage> + + SubsystemSender<ChainApiMessage> + + SubsystemSender<RuntimeApiMessage>, { if self.leaves.contains_key(&leaf_hash) { return Err(FetchError::AlreadyKnown) } @@ -135,6 +156,7 @@ impl View { leaf_hash, &mut self.block_info_storage, &mut *sender, + self.collating_for, ) .await; @@ -150,7 +172,7 @@ impl View { self.leaves.insert(leaf_hash, ActiveLeafPruningInfo { retain_minimum }); - Ok(fetched.relevant_paras) + Ok(()) }, Err(e) => Err(e), } @@ -249,6 +271,10 @@ pub enum FetchError { /// Request to the Chain API subsystem failed. #[error("The chain API subsystem was unavailable")] ChainApiUnavailable, + + /// Request to the runtime API failed. + #[error("Runtime API error: {0}")] + RuntimeApi(#[from] runtime::Error), } /// Reasons a block header might have been unavailable. @@ -265,30 +291,92 @@ pub enum BlockHeaderUnavailableReason { struct FetchSummary { minimum_ancestor_number: BlockNumber, leaf_number: BlockNumber, - relevant_paras: Vec<ParaId>, } -async fn fetch_fresh_leaf_and_insert_ancestry( +// Request the min relay parents from prospective-parachains. +async fn fetch_min_relay_parents_from_prospective_parachains< + Sender: SubsystemSender<ProspectiveParachainsMessage>, +>( leaf_hash: Hash, - block_info_storage: &mut HashMap, sender: &mut Sender, -) -> Result +) -> Result<Vec<(ParaId, BlockNumber)>, FetchError> { + let (tx, rx) = oneshot::channel(); + sender + .send_message(ProspectiveParachainsMessage::GetMinimumRelayParents(leaf_hash, tx)) + .await; + + rx.await.map_err(|_| FetchError::ProspectiveParachainsUnavailable) +} + +// Request the min relay parent for the purposes of a collator, directly using ChainApi (where +// prospective-parachains is not available). +async fn fetch_min_relay_parents_for_collator( + leaf_hash: Hash, + leaf_number: BlockNumber, + sender: &mut Sender, +) -> Result<Option<BlockNumber>, FetchError> where - Sender: SubsystemSender<ProspectiveParachainsMessage>, - Sender: SubsystemSender<ChainApiMessage>, + Sender: SubsystemSender<ProspectiveParachainsMessage> + + SubsystemSender<ChainApiMessage> + + SubsystemSender<RuntimeApiMessage>, { - let min_relay_parents_raw = { - let (tx, rx) = oneshot::channel(); - sender - .send_message(ProspectiveParachainsMessage::GetMinimumRelayParents(leaf_hash, tx)) - .await; + let Ok(ProspectiveParachainsMode::Enabled { allowed_ancestry_len, .. }) = + prospective_parachains_mode(sender, leaf_hash).await + else { + // This should never happen: leaves that don't have prospective parachains mode enabled + // should not use implicit view. + return Ok(None) + }; - match rx.await { - Ok(m) => m, - Err(_) => return Err(FetchError::ProspectiveParachainsUnavailable), + // Fetch the session of the leaf.
We must make sure that we stop at the ancestor which has a + // different session index. + let required_session = + recv_runtime(request_session_index_for_child(leaf_hash, sender).await).await?; + + let mut min = leaf_number; + + // Fetch the ancestors, up to allowed_ancestry_len. + let (tx, rx) = oneshot::channel(); + sender + .send_message(ChainApiMessage::Ancestors { + hash: leaf_hash, + k: allowed_ancestry_len, + response_channel: tx, + }) + .await; + let hashes = rx + .await + .map_err(|_| FetchError::ChainApiUnavailable)? + .map_err(|err| FetchError::ChainApiError(leaf_hash, err))?; + + for hash in hashes { + // The relay chain cannot accept blocks backed from previous sessions, with + // potentially previous validators. This is a technical limitation we need to + // respect here. + let session = recv_runtime(request_session_index_for_child(hash, sender).await).await?; + + if session == required_session { + // We should never underflow here, the ChainAPI stops at genesis block. + min = min.saturating_sub(1); + } else { + break } - }; + } + Ok(Some(min)) +} + +async fn fetch_fresh_leaf_and_insert_ancestry( + leaf_hash: Hash, + block_info_storage: &mut HashMap, + sender: &mut Sender, + collating_for: Option, +) -> Result +where + Sender: SubsystemSender + + SubsystemSender + + SubsystemSender, +{ let leaf_header = { let (tx, rx) = oneshot::channel(); sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await; @@ -313,8 +401,18 @@ where } }; - let min_min = min_relay_parents_raw.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); - let relevant_paras = min_relay_parents_raw.iter().map(|x| x.0).collect(); + // If the node is a collator, bypass prospective-parachains. We're only interested in the one + // paraid and the subsystem is not present. + let min_relay_parents = if let Some(para_id) = collating_for { + fetch_min_relay_parents_for_collator(leaf_hash, leaf_header.number, sender) + .await? + .map(|x| vec![(para_id, x)]) + .unwrap_or_default() + } else { + fetch_min_relay_parents_from_prospective_parachains(leaf_hash, sender).await? 
+ }; + + let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; let ancestry = if leaf_header.number > 0 { @@ -380,14 +478,11 @@ where vec![leaf_hash] }; - let fetched_ancestry = FetchSummary { - minimum_ancestor_number: min_min, - leaf_number: leaf_header.number, - relevant_paras, - }; + let fetched_ancestry = + FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; let allowed_relay_parents = AllowedRelayParents { - minimum_relay_parents: min_relay_parents_raw.iter().cloned().collect(), + minimum_relay_parents: min_relay_parents.into_iter().collect(), allowed_relay_parents_contiguous: ancestry, }; @@ -408,12 +503,12 @@ mod tests { use crate::TimeoutExt; use assert_matches::assert_matches; use futures::future::{join, FutureExt}; - use polkadot_node_subsystem::AllMessages; + use polkadot_node_subsystem::{messages::RuntimeApiRequest, AllMessages}; use polkadot_node_subsystem_test_helpers::{ make_subsystem_context, TestSubsystemContextHandle, }; use polkadot_overseer::SubsystemContext; - use polkadot_primitives::Header; + use polkadot_primitives::{AsyncBackingParams, Header}; use sp_core::testing::TaskExecutor; use std::time::Duration; @@ -514,6 +609,71 @@ mod tests { ); } + async fn assert_async_backing_params_request( + virtual_overseer: &mut VirtualOverseer, + leaf: Hash, + params: AsyncBackingParams, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + leaf_hash, + RuntimeApiRequest::AsyncBackingParams( + tx + ) + ) + ) => { + assert_eq!(leaf, leaf_hash, "received unexpected leaf hash"); + tx.send(Ok(params)).unwrap(); + } + ); + } + + async fn assert_session_index_request( + virtual_overseer: &mut VirtualOverseer, + leaf: Hash, + session: u32, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request( + leaf_hash, + RuntimeApiRequest::SessionIndexForChild( + tx + ) + ) + ) => { + assert_eq!(leaf, leaf_hash, "received unexpected leaf hash"); + tx.send(Ok(session)).unwrap(); + } + ); + } + + async fn assert_ancestors_request( + virtual_overseer: &mut VirtualOverseer, + leaf: Hash, + expected_ancestor_len: u32, + response: Vec, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi( + ChainApiMessage::Ancestors { + hash: leaf_hash, + k, + response_channel: tx + } + ) => { + assert_eq!(leaf, leaf_hash, "received unexpected leaf hash"); + assert_eq!(k, expected_ancestor_len as usize); + + tx.send(Ok(response)).unwrap(); + } + ); + } + #[test] fn construct_fresh_view() { let pool = TaskExecutor::new(); @@ -521,6 +681,8 @@ mod tests { let mut view = View::default(); + assert_eq!(view.collating_for, None); + // Chain B. 
const PARA_A_MIN_PARENT: u32 = 4; const PARA_B_MIN_PARENT: u32 = 3; @@ -528,15 +690,17 @@ mod tests { let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT), (PARA_B, PARA_B_MIN_PARENT)]; let leaf = CHAIN_B.last().unwrap(); + let leaf_idx = CHAIN_B.len() - 1; let min_min_idx = (PARA_B_MIN_PARENT - GENESIS_NUMBER - 1) as usize; let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { - let paras = res.expect("`activate_leaf` timed out").unwrap(); - assert_eq!(paras, vec![PARA_A, PARA_B]); + res.expect("`activate_leaf` timed out").unwrap(); }); let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[leaf_idx..]).await; assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; - assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_min_idx..]).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_min_idx..leaf_idx]) + .await; }; futures::executor::block_on(join(fut, overseer_fut)); @@ -558,6 +722,11 @@ mod tests { allowed_relay_parents.allowed_relay_parents_contiguous, expected_ancestry ); + + assert_eq!(view.known_allowed_relay_parents_under(&leaf, None), Some(&expected_ancestry[..])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)), Some(&expected_ancestry[..(PARA_A_MIN_PARENT - 1) as usize])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)), Some(&expected_ancestry[..])); + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); } ); @@ -566,18 +735,188 @@ mod tests { let prospective_response = vec![(PARA_C, PARA_C_MIN_PARENT)]; let leaf = CHAIN_A.last().unwrap(); let blocks = [&[GENESIS_HASH], CHAIN_A].concat(); + let leaf_idx = blocks.len() - 1; let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { - let paras = res.expect("`activate_leaf` timed out").unwrap(); - assert_eq!(paras, vec![PARA_C]); + res.expect("`activate_leaf` timed out").unwrap(); }); let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[leaf_idx..]).await; assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; - assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[..leaf_idx]).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + assert_eq!(view.leaves.len(), 2); + + let leaf_info = + view.block_info_storage.get(leaf).expect("block must be present in storage"); + assert_matches!( + leaf_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_C], GENESIS_NUMBER); + let expected_ancestry: Vec = + blocks[..].iter().rev().copied().collect(); + assert_eq!( + allowed_relay_parents.allowed_relay_parents_contiguous, + expected_ancestry + ); + + assert_eq!(view.known_allowed_relay_parents_under(&leaf, None), Some(&expected_ancestry[..])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)), Some(&expected_ancestry[..])); + + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)).unwrap().is_empty()); + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); + } + ); + } + + #[test] + fn construct_fresh_view_single_para() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::new(Some(PARA_A)); + + 
assert_eq!(view.collating_for, Some(PARA_A)); + + // Chain B. + const PARA_A_MIN_PARENT: u32 = 4; + + let current_session = 2; + + let leaf = CHAIN_B.last().unwrap(); + let leaf_idx = CHAIN_B.len() - 1; + let min_min_idx = (PARA_A_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + res.expect("`activate_leaf` timed out").unwrap(); + }); + let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[leaf_idx..]).await; + + assert_async_backing_params_request( + &mut ctx_handle, + *leaf, + AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: PARA_A_MIN_PARENT, + }, + ) + .await; + + assert_session_index_request(&mut ctx_handle, *leaf, current_session).await; + + assert_ancestors_request( + &mut ctx_handle, + *leaf, + PARA_A_MIN_PARENT, + CHAIN_B[min_min_idx..leaf_idx].iter().copied().rev().collect(), + ) + .await; + + for hash in CHAIN_B[min_min_idx..leaf_idx].into_iter().rev() { + assert_session_index_request(&mut ctx_handle, *hash, current_session).await; + } + + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_min_idx..leaf_idx]) + .await; }; futures::executor::block_on(join(fut, overseer_fut)); + for i in min_min_idx..(CHAIN_B.len() - 1) { + // No allowed relay parents constructed for ancestry. + assert!(view.known_allowed_relay_parents_under(&CHAIN_B[i], None).is_none()); + } + + let leaf_info = + view.block_info_storage.get(leaf).expect("block must be present in storage"); + assert_matches!( + leaf_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], PARA_A_MIN_PARENT); + let expected_ancestry: Vec = + CHAIN_B[min_min_idx..].iter().rev().copied().collect(); + assert_eq!( + allowed_relay_parents.allowed_relay_parents_contiguous, + expected_ancestry + ); + + assert_eq!(view.known_allowed_relay_parents_under(&leaf, None), Some(&expected_ancestry[..])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)), Some(&expected_ancestry[..])); + + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + } + ); + + // Suppose the whole test chain A is allowed up to genesis for para A, but the genesis block + // is in a different session. 
+ let leaf = CHAIN_A.last().unwrap(); + let blocks = [&[GENESIS_HASH], CHAIN_A].concat(); + let leaf_idx = blocks.len() - 1; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + res.expect("`activate_leaf` timed out").unwrap(); + }); + + let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[leaf_idx..]).await; + + assert_async_backing_params_request( + &mut ctx_handle, + *leaf, + AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: blocks.len() as u32, + }, + ) + .await; + + assert_session_index_request(&mut ctx_handle, *leaf, current_session).await; + + assert_ancestors_request( + &mut ctx_handle, + *leaf, + blocks.len() as u32, + blocks[..leaf_idx].iter().rev().copied().collect(), + ) + .await; + + for hash in blocks[1..leaf_idx].into_iter().rev() { + assert_session_index_request(&mut ctx_handle, *hash, current_session).await; + } + + assert_session_index_request(&mut ctx_handle, GENESIS_HASH, 0).await; + + // We won't request for the genesis block + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[1..leaf_idx]).await; + }; + + futures::executor::block_on(join(fut, overseer_fut)); + assert_eq!(view.leaves.len(), 2); + + let leaf_info = + view.block_info_storage.get(leaf).expect("block must be present in storage"); + assert_matches!( + leaf_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], 1); + let expected_ancestry: Vec = + CHAIN_A[..].iter().rev().copied().collect(); + assert_eq!( + allowed_relay_parents.allowed_relay_parents_contiguous, + expected_ancestry + ); + + assert_eq!(view.known_allowed_relay_parents_under(&leaf, None), Some(&expected_ancestry[..])); + assert_eq!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_A)), Some(&expected_ancestry[..])); + + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_B)).unwrap().is_empty()); + assert!(view.known_allowed_relay_parents_under(&leaf, Some(PARA_C)).unwrap().is_empty()); + } + ); } #[test] @@ -595,15 +934,20 @@ mod tests { let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)]; let fut = view.activate_leaf(ctx.sender(), leaf_a).timeout(TIMEOUT).map(|res| { - let paras = res.expect("`activate_leaf` timed out").unwrap(); - assert_eq!(paras, vec![PARA_A]); + res.expect("`activate_leaf` timed out").unwrap(); }); let overseer_fut = async { + assert_block_header_requests( + &mut ctx_handle, + CHAIN_B, + &CHAIN_B[(leaf_a_number - 1)..leaf_a_number], + ) + .await; assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await; assert_block_header_requests( &mut ctx_handle, CHAIN_B, - &CHAIN_B[min_min_idx..leaf_a_number], + &CHAIN_B[min_min_idx..(leaf_a_number - 1)], ) .await; }; @@ -617,15 +961,20 @@ mod tests { let prospective_response = vec![(PARA_B, PARA_B_MIN_PARENT)]; let fut = view.activate_leaf(ctx.sender(), leaf_b).timeout(TIMEOUT).map(|res| { - let paras = res.expect("`activate_leaf` timed out").unwrap(); - assert_eq!(paras, vec![PARA_B]); + res.expect("`activate_leaf` timed out").unwrap(); }); let overseer_fut = async { + assert_block_header_requests( + &mut ctx_handle, + CHAIN_B, + &CHAIN_B[(leaf_b_number - 1)..leaf_b_number], + ) + .await; assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await; assert_block_header_requests( &mut ctx_handle, CHAIN_B, - &CHAIN_B[leaf_a_number..leaf_b_number], // Note the expected range. 
+ &CHAIN_B[leaf_a_number..(leaf_b_number - 1)], // Note the expected range. ) .await; }; @@ -665,13 +1014,15 @@ mod tests { .timeout(TIMEOUT) .map(|res| res.unwrap().unwrap()); let overseer_fut = async { - assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await; assert_block_header_requests( &mut ctx_handle, CHAIN_B, - &CHAIN_B[min_a_idx..=leaf_a_idx], + &CHAIN_B[leaf_a_idx..(leaf_a_idx + 1)], ) .await; + assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_a_idx..leaf_a_idx]) + .await; }; futures::executor::block_on(join(fut, overseer_fut)); @@ -689,8 +1040,11 @@ mod tests { .timeout(TIMEOUT) .map(|res| res.expect("`activate_leaf` timed out").unwrap()); let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &blocks[(blocks.len() - 1)..]) + .await; assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await; - assert_block_header_requests(&mut ctx_handle, CHAIN_B, blocks).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &blocks[..(blocks.len() - 1)]) + .await; }; futures::executor::block_on(join(fut, overseer_fut)); @@ -721,19 +1075,18 @@ mod tests { let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)]; let fut = view.activate_leaf(ctx.sender(), GENESIS_HASH).timeout(TIMEOUT).map(|res| { - let paras = res.expect("`activate_leaf` timed out").unwrap(); - assert_eq!(paras, vec![PARA_A]); + res.expect("`activate_leaf` timed out").unwrap(); }); let overseer_fut = async { + assert_block_header_requests(&mut ctx_handle, &[GENESIS_HASH], &[GENESIS_HASH]).await; assert_min_relay_parents_request(&mut ctx_handle, &GENESIS_HASH, prospective_response) .await; - assert_block_header_requests(&mut ctx_handle, &[GENESIS_HASH], &[GENESIS_HASH]).await; }; futures::executor::block_on(join(fut, overseer_fut)); assert_matches!( view.known_allowed_relay_parents_under(&GENESIS_HASH, None), - Some(hashes) if !hashes.is_empty() + Some(hashes) if hashes == &[GENESIS_HASH] ); } } diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index d38d838fedefac643253528ab99d0035eeae55da..b5aef325c8b437ec43c2872f130651de65c28a52 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -39,8 +39,8 @@ /// /// # Usage /// -/// It's expected that the users of this module will be building up trees of -/// [`Fragment`]s and consistently pruning and adding to the tree. +/// It's expected that the users of this module will be building up chains of +/// [`Fragment`]s and consistently pruning and adding to the chains. /// /// ## Operating Constraints /// @@ -54,60 +54,65 @@ /// make an intelligent prediction about what might be accepted in the future based on /// prior fragments that also exist off-chain. /// -/// ## Fragment Trees +/// ## Fragment Chains +/// +/// For simplicity and practicality, we expect that collators of the same parachain are +/// cooperating and don't create parachain forks or cycles on the same relay chain active leaf. +/// Therefore, higher-level code should maintain one fragment chain for each active leaf (not a +/// fragment tree). If parachains do create forks, their performance in regards to async +/// backing and elastic scaling will suffer, because different validators will have different +/// predictions of the future. 
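The no-fork expectation above is what lets higher-level code keep a single chain per active leaf instead of a tree. The following is only a toy sketch of that shape (the `ToyView`/`ToyFragment` names and the `u64` stand-in for head-data are invented for illustration and are not this module's real API): extension is refused when it would fork, and included fragments are pruned away.

```rust
use std::collections::HashMap;

type LeafHash = [u8; 32];

// Stand-in for a prediction about the parachain's next block.
#[derive(Clone, Debug, PartialEq)]
struct ToyFragment {
    para_head: u64, // pretend head-data: each fragment builds on the previous one
}

// One chain per active leaf, not a tree.
#[derive(Default)]
struct ToyView {
    chains: HashMap<LeafHash, Vec<ToyFragment>>,
}

impl ToyView {
    // Extend the chain for a leaf; a new fragment must build on the chain head.
    fn try_extend(&mut self, leaf: LeafHash, frag: ToyFragment) -> bool {
        let chain = self.chains.entry(leaf).or_default();
        let extends_head = match chain.last() {
            Some(head) => frag.para_head == head.para_head + 1,
            None => true,
        };
        if extends_head {
            chain.push(frag);
        }
        extends_head
    }

    // "Prediction came true": drop fragments the relay chain has included.
    fn prune_included(&mut self, leaf: LeafHash, included_up_to: u64) {
        if let Some(chain) = self.chains.get_mut(&leaf) {
            chain.retain(|f| f.para_head > included_up_to);
        }
    }
}

fn main() {
    let mut view = ToyView::default();
    let leaf = [0u8; 32];
    assert!(view.try_extend(leaf, ToyFragment { para_head: 1 }));
    assert!(view.try_extend(leaf, ToyFragment { para_head: 2 }));
    // A competing candidate for the same position is a fork and is rejected.
    assert!(!view.try_extend(leaf, ToyFragment { para_head: 2 }));
    // Block 1 got included on-chain, so only block 2 remains predicted.
    view.prune_included(leaf, 1);
    assert_eq!(view.chains[&leaf].len(), 1);
    assert_eq!(view.chains[&leaf][0].para_head, 2);
}
```

The real code tracks far richer state (constraints, pending availability, candidate storage), but the invariant is the same: at most one next fragment per chain head.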
/// /// As the relay-chain grows, some predictions come true and others come false. /// And new predictions get made. These three changes correspond distinctly to the -/// 3 primary operations on fragment trees. -/// -/// A fragment tree is a mental model for thinking about a forking series of predictions -/// about a single parachain. There may be one or more fragment trees per parachain. -/// -/// In expectation, most parachains will have a plausibly-unique authorship method which means -/// that they should really be much closer to fragment-chains, maybe with an occasional fork. +/// 3 primary operations on fragment chains. /// -/// Avoiding fragment-tree blowup is beyond the scope of this module. +/// Avoiding fragment-chain blowup is beyond the scope of this module. Higher-level code must +/// ensure proper spam protection. /// -/// ### Pruning Fragment Trees +/// ### Pruning Fragment Chains /// /// When the relay-chain advances, we want to compare the new constraints of that relay-parent -/// to the roots of the fragment trees we have. There are 3 cases: +/// to the root of the fragment chain we have. There are 3 cases: /// /// 1. The root fragment is still valid under the new constraints. In this case, we do nothing. -/// This is the "prediction still uncertain" case. +/// This is the "prediction still uncertain" case. (Corresponds to some candidates still +/// being pending availability). /// -/// 2. The root fragment is invalid under the new constraints because it has been subsumed by -/// the relay-chain. In this case, we can discard the root and split & re-root the fragment -/// tree under its descendants and compare to the new constraints again. This is the -/// "prediction came true" case. +/// 2. The root fragment (potentially along with a number of descendants) is invalid under the +/// new constraints because it has been included by the relay-chain. In this case, we can +/// discard the included chain and split & re-root the chain under its descendants and +/// compare to the new constraints again. This is the "prediction came true" case. /// -/// 3. The root fragment is invalid under the new constraints because a competing parachain -/// block has been included or it would never be accepted for some other reason. In this -/// case we can discard the entire fragment tree. This is the "prediction came false" case. +/// 3. The root fragment becomes invalid under the new constraints for any reason (if for +/// example the parachain produced a fork and the block producer picked a different +/// candidate to back). In this case we can discard the entire fragment chain. This is the +/// "prediction came false" case. /// /// This is all a bit of a simplification because it assumes that the relay-chain advances -/// without forks and is finalized instantly. In practice, the set of fragment-trees needs to +/// without forks and is finalized instantly. In practice, the set of fragment-chains needs to /// be observable from the perspective of a few different possible forks of the relay-chain and /// not pruned too eagerly. /// /// Note that the fragments themselves don't need to change and the only thing we care about /// is whether the predictions they represent are still valid. /// -/// ### Extending Fragment Trees +/// ### Extending Fragment Chains /// /// As predictions fade into the past, new ones should be stacked on top. /// /// Every new relay-chain block is an opportunity to make a new prediction about the future.
-/// Higher-level logic should select the leaves of the fragment-trees to build upon or whether -/// to create a new fragment-tree. +/// Higher-level logic should decide whether to build upon an existing chain or whether +/// to create a new fragment-chain. /// /// ### Code Upgrades /// /// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade /// scheduling logic is very path-dependent and intricate so we just assume that code upgrades -/// can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep, -/// in practice and code upgrades are fairly rare. So what's likely to happen around code -/// upgrades is that the entire fragment-tree has to get discarded at some point. +/// can't be initiated and applied within a single fragment-chain. Fragment-chains aren't deep +/// in practice (bounded by a linear function of the number of cores assigned to a +/// parachain) and code upgrades are fairly rare. So what's likely to happen around code +/// upgrades is that the entire fragment-chain has to get discarded at some point. /// /// That means a few blocks of execution time lost, which is not a big deal for code upgrades /// that in practice happen at most once every few weeks. @@ -116,10 +121,7 @@ use polkadot_primitives::{ CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; -use std::{ - borrow::{Borrow, Cow}, - collections::HashMap, -}; +use std::{collections::HashMap, sync::Arc}; /// Constraints on inbound HRMP channels. #[derive(Debug, Clone, PartialEq)] @@ -524,9 +526,9 @@ impl ConstraintModifications { /// here. But the erasure-root is not. This means that prospective candidates /// are not correlated to any session in particular. #[derive(Debug, Clone, PartialEq)] -pub struct ProspectiveCandidate<'a> { +pub struct ProspectiveCandidate { /// The commitments to the output of the execution. - pub commitments: Cow<'a, CandidateCommitments>, + pub commitments: CandidateCommitments, /// The collator that created the candidate. pub collator: CollatorId, /// The signature of the collator on the payload. @@ -539,32 +541,6 @@ pub struct ProspectiveCandidate<'a> { pub validation_code_hash: ValidationCodeHash, } -impl<'a> ProspectiveCandidate<'a> { - fn into_owned(self) -> ProspectiveCandidate<'static> { - ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self } - } - - /// Partially clone the prospective candidate, but borrow the - /// parts which are potentially heavy. - pub fn partial_clone(&self) -> ProspectiveCandidate { - ProspectiveCandidate { - commitments: Cow::Borrowed(self.commitments.borrow()), - collator: self.collator.clone(), - collator_signature: self.collator_signature.clone(), - persisted_validation_data: self.persisted_validation_data.clone(), - pov_hash: self.pov_hash, - validation_code_hash: self.validation_code_hash, - } - } -} - -#[cfg(test)] -impl ProspectiveCandidate<'static> { - fn commitments_mut(&mut self) -> &mut CandidateCommitments { - self.commitments.to_mut() - } -} - /// Kinds of errors with the validity of a fragment. #[derive(Debug, Clone, PartialEq)] pub enum FragmentValidityError { @@ -618,19 +594,19 @@ pub enum FragmentValidityError { /// This is a type which guarantees that the candidate is valid under the /// operating constraints. #[derive(Debug, Clone, PartialEq)] -pub struct Fragment<'a> { +pub struct Fragment { /// The new relay-parent.
relay_parent: RelayChainBlockInfo, /// The constraints this fragment is operating under. operating_constraints: Constraints, /// The core information about the prospective candidate. - candidate: ProspectiveCandidate<'a>, + candidate: Arc<ProspectiveCandidate>, /// Modifications to the constraints based on the outputs of /// the candidate. modifications: ConstraintModifications, } -impl<'a> Fragment<'a> { +impl Fragment { /// Create a new fragment. /// /// This fails if the fragment isn't in line with the operating /// constraints. @@ -642,10 +618,29 @@ impl<'a> Fragment<'a> { pub fn new( relay_parent: RelayChainBlockInfo, operating_constraints: Constraints, - candidate: ProspectiveCandidate<'a>, + candidate: Arc<ProspectiveCandidate>, ) -> Result { + let modifications = Self::check_against_constraints( + &relay_parent, + &operating_constraints, + &candidate.commitments, + &candidate.validation_code_hash, + &candidate.persisted_validation_data, + )?; + + Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) + } + + /// Check the candidate against the operating constraints and return the constraint modifications + /// made by this candidate. + pub fn check_against_constraints( + relay_parent: &RelayChainBlockInfo, + operating_constraints: &Constraints, + commitments: &CandidateCommitments, + validation_code_hash: &ValidationCodeHash, + persisted_validation_data: &PersistedValidationData, + ) -> Result { let modifications = { - let commitments = &candidate.commitments; ConstraintModifications { required_parent: Some(commitments.head_data.clone()), hrmp_watermark: Some({ @@ -689,11 +684,13 @@ impl<'a> Fragment<'a> { validate_against_constraints( &operating_constraints, &relay_parent, - &candidate, + commitments, + persisted_validation_data, + validation_code_hash, &modifications, )?; - Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) + Ok(modifications) } /// Access the relay parent information. @@ -707,7 +704,7 @@ impl<'a> Fragment<'a> { } /// Access the underlying prospective candidate. - pub fn candidate(&self) -> &ProspectiveCandidate<'a> { + pub fn candidate(&self) -> &ProspectiveCandidate { &self.candidate } @@ -715,31 +712,14 @@ impl<'a> Fragment<'a> { pub fn constraint_modifications(&self) -> &ConstraintModifications { &self.modifications } - - /// Convert the fragment into an owned variant. - pub fn into_owned(self) -> Fragment<'static> { - Fragment { candidate: self.candidate.into_owned(), ..self } - } - - /// Validate this fragment against some set of constraints - /// instead of the operating constraints.
- pub fn validate_against_constraints( - &self, - constraints: &Constraints, - ) -> Result<(), FragmentValidityError> { - validate_against_constraints( - constraints, - &self.relay_parent, - &self.candidate, - &self.modifications, - ) - } } fn validate_against_constraints( constraints: &Constraints, relay_parent: &RelayChainBlockInfo, - candidate: &ProspectiveCandidate, + commitments: &CandidateCommitments, + persisted_validation_data: &PersistedValidationData, + validation_code_hash: &ValidationCodeHash, modifications: &ConstraintModifications, ) -> Result<(), FragmentValidityError> { let expected_pvd = PersistedValidationData { @@ -749,17 +729,17 @@ fn validate_against_constraints( max_pov_size: constraints.max_pov_size as u32, }; - if expected_pvd != candidate.persisted_validation_data { + if expected_pvd != *persisted_validation_data { return Err(FragmentValidityError::PersistedValidationDataMismatch( expected_pvd, - candidate.persisted_validation_data.clone(), + persisted_validation_data.clone(), )) } - if constraints.validation_code_hash != candidate.validation_code_hash { + if constraints.validation_code_hash != *validation_code_hash { return Err(FragmentValidityError::ValidationCodeMismatch( constraints.validation_code_hash, - candidate.validation_code_hash, + *validation_code_hash, )) } @@ -770,7 +750,7 @@ fn validate_against_constraints( )) } - if candidate.commitments.new_validation_code.is_some() { + if commitments.new_validation_code.is_some() { match constraints.upgrade_restriction { None => {}, Some(UpgradeRestriction::Present) => @@ -778,11 +758,8 @@ fn validate_against_constraints( } } - let announced_code_size = candidate - .commitments - .new_validation_code - .as_ref() - .map_or(0, |code| code.0.len()); + let announced_code_size = + commitments.new_validation_code.as_ref().map_or(0, |code| code.0.len()); if announced_code_size > constraints.max_code_size { return Err(FragmentValidityError::CodeSizeTooLarge( @@ -801,17 +778,17 @@ fn validate_against_constraints( } } - if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { + if commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { messages_allowed: constraints.max_hrmp_num_per_candidate, - messages_submitted: candidate.commitments.horizontal_messages.len(), + messages_submitted: commitments.horizontal_messages.len(), }) } - if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { + if commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { messages_allowed: constraints.max_ump_num_per_candidate, - messages_submitted: candidate.commitments.upward_messages.len(), + messages_submitted: commitments.upward_messages.len(), }) } @@ -1184,21 +1161,21 @@ mod tests { fn make_candidate( constraints: &Constraints, relay_parent: &RelayChainBlockInfo, - ) -> ProspectiveCandidate<'static> { + ) -> ProspectiveCandidate { let collator_pair = CollatorPair::generate().0; let collator = collator_pair.public(); let sig = collator_pair.sign(b"blabla".as_slice()); ProspectiveCandidate { - commitments: Cow::Owned(CandidateCommitments { + commitments: CandidateCommitments { upward_messages: Default::default(), horizontal_messages: Default::default(), new_validation_code: None, head_data: HeadData::from(vec![1, 2, 3, 4, 5]), processed_downward_messages: 0, hrmp_watermark: relay_parent.number, - 
}), + }, collator, collator_signature: sig, persisted_validation_data: PersistedValidationData { @@ -1229,7 +1206,7 @@ mod tests { candidate.validation_code_hash = got_code; assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), ) } @@ -1261,7 +1238,7 @@ mod tests { let got_pvd = candidate.persisted_validation_data.clone(); assert_eq!( - Fragment::new(relay_parent_b, constraints, candidate), + Fragment::new(relay_parent_b, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), ); } @@ -1278,10 +1255,10 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); let max_code_size = constraints.max_code_size; - candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); + candidate.commitments.new_validation_code = Some(vec![0; max_code_size + 1].into()); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), ); } @@ -1298,7 +1275,7 @@ mod tests { let candidate = make_candidate(&constraints, &relay_parent); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::RelayParentTooOld(5, 3,)), ); } @@ -1317,7 +1294,7 @@ mod tests { let max_hrmp = constraints.max_hrmp_num_per_candidate; candidate - .commitments_mut() + .commitments .horizontal_messages .try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { recipient: ParaId::from(i as u32), @@ -1326,7 +1303,7 @@ mod tests { .unwrap(); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { messages_allowed: max_hrmp, messages_submitted: max_hrmp + 1, @@ -1346,22 +1323,36 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); // Empty dmp queue is ok. - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + assert!(Fragment::new( + relay_parent.clone(), + constraints.clone(), + Arc::new(candidate.clone()) + ) + .is_ok()); // Unprocessed message that was sent later is ok. 
constraints.dmp_remaining_messages = vec![relay_parent.number + 1]; - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + assert!(Fragment::new( + relay_parent.clone(), + constraints.clone(), + Arc::new(candidate.clone()) + ) + .is_ok()); for block_number in 0..=relay_parent.number { constraints.dmp_remaining_messages = vec![block_number]; assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Fragment::new( + relay_parent.clone(), + constraints.clone(), + Arc::new(candidate.clone()) + ), Err(FragmentValidityError::DmpAdvancementRule), ); } - candidate.commitments.to_mut().processed_downward_messages = 1; - assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); + candidate.commitments.processed_downward_messages = 1; + assert!(Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())).is_ok()); } #[test] @@ -1379,13 +1370,12 @@ mod tests { candidate .commitments - .to_mut() .upward_messages .try_extend((0..max_ump + 1).map(|i| vec![i as u8])) .unwrap(); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { messages_allowed: max_ump, messages_submitted: max_ump + 1, @@ -1405,10 +1395,10 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); constraints.upgrade_restriction = Some(UpgradeRestriction::Present); - candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); + candidate.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3])); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::CodeUpgradeRestricted), ); } @@ -1424,23 +1414,23 @@ mod tests { let constraints = make_constraints(); let mut candidate = make_candidate(&constraints, &relay_parent); - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + candidate.commitments.horizontal_messages = HorizontalMessages::truncate_from(vec![ OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]); assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Fragment::new(relay_parent.clone(), constraints.clone(), Arc::new(candidate.clone())), Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), ); - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + candidate.commitments.horizontal_messages = HorizontalMessages::truncate_from(vec![ OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), ); } diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs index b93818070a183e43e71f327bcc1770a71fae1cb6..d371b699b9eb9eda4c59b68ad695514fe0ea80d3 100644 --- a/polkadot/node/subsystem-util/src/lib.rs +++ b/polkadot/node/subsystem-util/src/lib.rs @@ -25,17 +25,15 @@ #![warn(missing_docs)] +pub use overseer::{ + gen::{OrchestraError as OverseerError, 
Timeout}, + Subsystem, TimeoutExt, +}; use polkadot_node_subsystem::{ errors::{RuntimeApiError, SubsystemError}, messages::{RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender}, overseer, SubsystemSender, }; -use polkadot_primitives::{async_backing::BackingState, slashing, CoreIndex, ExecutorParams}; - -pub use overseer::{ - gen::{OrchestraError as OverseerError, Timeout}, - Subsystem, TimeoutExt, -}; pub use polkadot_node_metrics::{metrics, Metronome}; @@ -43,11 +41,12 @@ use futures::channel::{mpsc, oneshot}; use parity_scale_codec::Encode; use polkadot_primitives::{ - AsyncBackingParams, AuthorityDiscoveryId, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, - Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Signed, SigningContext, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing::BackingState, slashing, AsyncBackingParams, AuthorityDiscoveryId, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, EncodeAs, + ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, + PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, + SigningContext, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidatorSignature, }; pub use rand; use sp_application_crypto::AppCrypto; @@ -60,17 +59,18 @@ use std::{ use thiserror::Error; use vstaging::get_disabled_validators_with_fallback; +pub use determine_new_blocks::determine_new_blocks; pub use metered; pub use polkadot_node_network_protocol::MIN_GOSSIP_PEERS; -pub use determine_new_blocks::determine_new_blocks; - /// These reexports are required so that external crates can use the `delegated_subsystem` macro /// properly. pub mod reexports { pub use polkadot_overseer::gen::{SpawnedSubsystem, Spawner, Subsystem, SubsystemContext}; } +/// Helpers for the validator->chunk index mapping. +pub mod availability_chunks; /// A utility for managing the implicit view of the relay-chain derived from active /// leaves and the minimum allowed relay-parents that parachain candidates can have /// and be backed in those leaves' children. diff --git a/polkadot/node/subsystem-util/src/runtime/error.rs b/polkadot/node/subsystem-util/src/runtime/error.rs index 8751693b078a6797584ed59a2ebd3c320d87cd78..1111b119e95f5926321f7a09de6af02bcbc4acba 100644 --- a/polkadot/node/subsystem-util/src/runtime/error.rs +++ b/polkadot/node/subsystem-util/src/runtime/error.rs @@ -28,7 +28,7 @@ pub enum Error { /// Runtime API subsystem is down, which means we're shutting down. #[fatal] #[error("Runtime request got canceled")] - RuntimeRequestCanceled(oneshot::Canceled), + RuntimeRequestCanceled(#[from] oneshot::Canceled), /// Some request to the runtime failed. /// For example if we prune a block we're requesting info about. 
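The `#[from]` attribute added to `RuntimeRequestCanceled` is what lets callers propagate `oneshot::Canceled` with the `?` operator instead of an explicit `map_err`. A minimal sketch of the mechanism using plain `thiserror` (which `fatality` builds on), with a local `Canceled` type standing in for `futures::channel::oneshot::Canceled`:

```rust
use thiserror::Error;

// Stand-in for `futures::channel::oneshot::Canceled`, which also
// implements `Display` and `std::error::Error`.
#[derive(Debug)]
struct Canceled;

impl std::fmt::Display for Canceled {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "oneshot canceled")
    }
}
impl std::error::Error for Canceled {}

#[derive(Debug, Error)]
enum Error {
    // `#[from]` makes thiserror generate `impl From<Canceled> for Error`.
    #[error("Runtime request got canceled")]
    RuntimeRequestCanceled(#[from] Canceled),
}

// Stand-in for awaiting a oneshot receiver whose sender was dropped.
fn recv() -> Result<u32, Canceled> {
    Err(Canceled)
}

fn do_request() -> Result<u32, Error> {
    // `?` converts `Canceled` into `Error` via the generated `From` impl.
    Ok(recv()?)
}

fn main() {
    assert!(matches!(do_request(), Err(Error::RuntimeRequestCanceled(_))));
}
```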
diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 714384b32e37bcbd6ba7f077252fa23f50eac490..214c58a8e88f76b66d1d6477a3593bd2e32f3e1f 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -31,8 +31,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ node_features::FeatureIndex, slashing, AsyncBackingParams, CandidateEvent, CandidateHash, - CoreState, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, - NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, + CoreIndex, CoreState, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, + IndexedVec, NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; @@ -348,7 +348,7 @@ where pub async fn get_occupied_cores( sender: &mut Sender, relay_parent: Hash, -) -> Result<Vec<OccupiedCore>> +) -> Result<Vec<(CoreIndex, OccupiedCore)>> where Sender: overseer::SubsystemSender, { @@ -356,9 +356,10 @@ where Ok(cores .into_iter() - .filter_map(|core_state| { + .enumerate() + .filter_map(|(core_index, core_state)| { if let CoreState::Occupied(occupied) = core_state { - Some(occupied) + Some((CoreIndex(core_index as u32), occupied)) } else { None } diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index 7db00404eb8eca049a6194d19e5e92e66325628c..55d4d81d1c21bbb5007962ac71c8d38fe898d49b 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true workspace = true [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } # Polkadot dependencies polkadot-test-runtime = { path = "../../../runtime/test-runtime" } diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index f14fa9fde58b4426690cd1bd0e16129f095c34ec..e6a1229caf866d7572b32e04139e42a0e83918a4 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -33,7 +33,7 @@ use test_runtime_constants::currency::DOTS; const DEFAULT_PROTOCOL_ID: &str = "dot"; /// The `ChainSpec` parameterized for polkadot test runtime. -pub type PolkadotChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type PolkadotChainSpec = sc_service::GenericChainSpec<Extensions>; /// Returns the properties for the [`PolkadotChainSpec`].
pub fn polkadot_chain_spec_properties() -> serde_json::map::Map { diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml index 9139c6a4e5e7e90ea9cc529ce375056b475d0e38..a0233bb46e5128b1b036a6fa7dd0181e587f1e9a 100644 --- a/polkadot/node/zombienet-backchannel/Cargo.toml +++ b/polkadot/node/zombienet-backchannel/Cargo.toml @@ -17,7 +17,7 @@ url = "2.3.1" tokio-tungstenite = "0.20.1" futures-util = "0.3.30" lazy_static = "1.4.0" -parity-scale-codec = { version = "3.6.1", features = ["derive"] } +parity-scale-codec = { version = "3.6.12", features = ["derive"] } reqwest = { version = "0.11", features = ["rustls-tls"], default-features = false } thiserror = { workspace = true } gum = { package = "tracing-gum", path = "../gum" } diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 15eea2addc893ced551edaf2557ff703a455a879..1344baac64b65246945b3bd09569cf21990fde5e 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -13,7 +13,7 @@ workspace = true # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 5a1efdf898217a18972cab8c9ea49a41e5a2280f..2764384363727a8b7b350b02441903204eae717f 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -333,7 +333,19 @@ impl DmpMessageHandler for () { } /// The aggregate XCMP message format. -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, TypeInfo, RuntimeDebug)] +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + TypeInfo, + RuntimeDebug, + MaxEncodedLen, +)] pub enum XcmpMessageFormat { /// Encoded `VersionedXcm` messages, all concatenated. 
ConcatenatedVersionedXcm, diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index 6acdedf67ff2e4be34da2caa70c572a41f861eca..22f3d2942e0c2076cae4e9e37a6d960b42969860 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } adder = { package = "test-parachain-adder", path = "adder" } halt = { package = "test-parachain-halt", path = "halt" } diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index eec19ef788aad510d7ea9ef6d2ab61d7c6aeb8f9..273fa93a50f412ffc282aeeafb07c0e173fd5e72 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } tiny-keccak = { version = "2.0.2", features = ["keccak"] } dlmalloc = { version = "0.2.4", features = ["global"] } diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index 5a2b5405741446f52e5f1212de2665ab04d350d2..f9aaab74debd8190e426ed49f3c8a966d4980f49 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -15,7 +15,7 @@ name = "adder-collator" path = "src/main.rs" [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.2" @@ -24,7 +24,7 @@ log = { workspace = true, default-features = true } test-parachain-adder = { path = ".." 
} polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 82ceebcf4eee99f36140908580b41c13b04ea87e..f2067a2c3b9bdc3c7e585fbb03fc79ee435f7d1d 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } tiny-keccak = { version = "2.0.2", features = ["keccak"] } dlmalloc = { version = "0.2.4", features = ["global"] } diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index cacf7304f90a29d39848b80a58159afbbb6cb3e5..08d1e74d87983bbcc223eaeb71a64feda450f154 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -15,7 +15,7 @@ name = "undying-collator" path = "src/main.rs" [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } clap = { version = "4.5.3", features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.2" @@ -24,7 +24,7 @@ log = { workspace = true, default-features = true } test-parachain-undying = { path = ".." 
} polkadot-primitives = { path = "../../../../primitives" } polkadot-cli = { path = "../../../../cli" } -polkadot-service = { path = "../../../../node/service", features = ["elastic-scaling-experimental", "rococo-native"] } +polkadot-service = { path = "../../../../node/service", features = ["rococo-native"] } polkadot-node-primitives = { path = "../../../../node/primitives" } polkadot-node-subsystem = { path = "../../../../node/subsystem" } diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 99800afc37fe0133b78dd6f1b7432014b923c0ca..603d08b8fee524f803b32c751cdd818249687132 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } hex-literal = "0.4.1" -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["bit-vec", "derive", "serde"] } log = { workspace = true, default-features = false } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 01f393086a668f43d2e18abc10b4491f54aae2cf..061794ca06d1be1883fd3605eb5bc691e5b7b23b 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -41,26 +41,26 @@ pub use v7::{ ApprovalVotingParams, AssignmentId, AsyncBackingParams, AuthorityDiscoveryId, AvailabilityBitfield, BackedCandidate, Balance, BlakeTwo256, Block, BlockId, BlockNumber, CandidateCommitments, CandidateDescriptor, CandidateEvent, CandidateHash, CandidateIndex, - CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, - CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, - CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, - ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExecutorParamsPrepHash, - ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, - HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, - InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures, - Nonce, OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, - ParathreadEntry, PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, - RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, - RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, - SignedStatement, SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, - UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, - UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, - ValidityError, ASSIGNMENT_KEY_TYPE_ID, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, - MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, - PARACHAIN_KEY_TYPE_ID, + 
CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, ChunkIndex,
+	CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog,
+	CoreIndex, CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage,
+	EncodeAs, ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash,
+	ExecutorParamsPrepHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT,
+	HeadData, Header, HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage,
+	InboundHrmpMessage, IndexedVec, InherentData, InvalidDisputeStatementKind, Moment,
+	MultiDisputeStatementSet, NodeFeatures, Nonce, OccupiedCore, OccupiedCoreAssumption,
+	OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, PersistedValidationData,
+	PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel, RuntimeMetricLabelValue,
+	RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, RuntimeMetricUpdate,
+	ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signature, Signed,
+	SignedAvailabilityBitfield, SignedAvailabilityBitfields, SignedStatement, SigningContext, Slot,
+	UncheckedSigned, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields,
+	UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage,
+	ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex,
+	ValidatorSignature, ValidityAttestation, ValidityError, ASSIGNMENT_KEY_TYPE_ID,
+	LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE,
+	MIN_CODE_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE,
+	PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID,
};

#[cfg(feature = "std")]
diff --git a/polkadot/primitives/src/v7/mod.rs b/polkadot/primitives/src/v7/mod.rs
index 8a059408496c0f87e2d1394beb84263f48c4fbda..fb8406aece690f25626ed2dd3be5c26aeaa62b7e 100644
--- a/polkadot/primitives/src/v7/mod.rs
+++ b/polkadot/primitives/src/v7/mod.rs
@@ -117,6 +117,34 @@ pub trait TypeIndex {
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))]
pub struct ValidatorIndex(pub u32);

+/// Index of an availability chunk.
+///
+/// The underlying type is identical to `ValidatorIndex`, because
+/// the number of chunks will always be equal to the number of validators.
+/// However, the chunk index held by a validator may not always be equal to its `ValidatorIndex`, so
+/// we use a separate type to make code easier to read.
+#[derive(Eq, Ord, PartialEq, PartialOrd, Copy, Clone, Encode, Decode, TypeInfo, RuntimeDebug)]
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))]
+pub struct ChunkIndex(pub u32);
+
+impl From<ChunkIndex> for ValidatorIndex {
+	fn from(c_index: ChunkIndex) -> Self {
+		ValidatorIndex(c_index.0)
+	}
+}
+
+impl From<ValidatorIndex> for ChunkIndex {
+	fn from(v_index: ValidatorIndex) -> Self {
+		ChunkIndex(v_index.0)
+	}
+}
+
+impl From<u32> for ChunkIndex {
+	fn from(n: u32) -> Self {
+		ChunkIndex(n)
+	}
+}
+
// We should really get https://github.com/paritytech/polkadot/issues/2403 going ..
impl From<u32> for ValidatorIndex {
fn from(n: u32) -> Self {
@@ -1787,6 +1815,14 @@ where
self.0.get(index.type_index())
}

+	/// Returns a mutable reference to an element indexed using `K`.
+	pub fn get_mut(&mut self, index: K) -> Option<&mut V>
+	where
+		K: TypeIndex,
+	{
+		self.0.get_mut(index.type_index())
+	}
+
/// Returns number of elements in vector.
pub fn len(&self) -> usize {
self.0.len()
@@ -1989,6 +2025,7 @@ pub mod node_features {
/// A feature index used to identify a bit into the node_features array stored
/// in the HostConfiguration.
#[repr(u8)]
+	#[derive(Clone, Copy)]
pub enum FeatureIndex {
/// Tells if tranche0 assignments could be sent in a single certificate.
/// Reserved for: ``
@@ -1997,10 +2034,16 @@
/// The value stored there represents the assumed core index where the candidates
/// are backed. This is needed for the elastic scaling MVP.
ElasticScalingMVP = 1,
+		/// Tells if the chunk mapping feature is enabled.
+		/// Enables the implementation of
+		/// [RFC-47](https://github.com/polkadot-fellows/RFCs/blob/main/text/0047-assignment-of-availability-chunks.md).
+		/// Must not be enabled unless all validators and collators have stopped using `req_chunk`
+		/// protocol version 1. If it is enabled, validators can start systematic chunk recovery.
+		AvailabilityChunkMapping = 2,
/// First unassigned feature bit.
/// Every time a new feature flag is assigned it should take this value.
/// and this should be incremented.
-		FirstUnassigned = 2,
+		FirstUnassigned = 3,
}
}
diff --git a/polkadot/roadmap/implementers-guide/src/SUMMARY.md b/polkadot/roadmap/implementers-guide/src/SUMMARY.md
index bb19390c7af4d422dfe62252a7153392c8ef534d..41485e5df8ec1f75bc04a54c5c21ef87a5ef4bb6 100644
--- a/polkadot/roadmap/implementers-guide/src/SUMMARY.md
+++ b/polkadot/roadmap/implementers-guide/src/SUMMARY.md
@@ -8,6 +8,7 @@
  - [Disputes Process](protocol-disputes.md)
    - [Dispute Flow](disputes-flow.md)
  - [Chain Selection and Finalization](protocol-chain-selection.md)
+  - [Validator Disabling](protocol-validator-disabling.md)
- [Architecture Overview](architecture.md)
- [Messaging Overview](messaging.md)
- [PVF Pre-checking](pvf-prechecking.md)
diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
index 345b3d2e6970403f3096272cc51f903e0566a22e..9b4082c49e2f003f0b7d0fd317bc2e8ce06580e4 100644
--- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
+++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
@@ -396,7 +396,7 @@ On receiving an `ApprovedAncestor(Hash, BlockNumber, response_channel)`:
  * Requires `(SessionIndex, SessionInfo, CandidateReceipt, ValidatorIndex, backing_group, block_hash, candidate_index)`
  * Extract the public key of the `ValidatorIndex` from the `SessionInfo` for the session.
  * Issue an `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session_index, Some(backing_group),
-    response_sender)`
+Some(core_index), response_sender)`
  * Load the historical validation code of the parachain by dispatching a
    `RuntimeApiRequest::ValidationCodeByHash(descriptor.validation_code_hash)` against the state of `block_hash`.
* Spawn a background task with a clone of `background_tx`
diff --git a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md
index c57c4589244e79f6cd54a87f823abc466ed8eb43..5b756080becc05b4804138e64baeac3bfdaf97b8 100644
--- a/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md
+++ b/polkadot/roadmap/implementers-guide/src/node/availability/availability-recovery.md
@@ -1,84 +1,108 @@
# Availability Recovery

-This subsystem is the inverse of the [Availability Distribution](availability-distribution.md) subsystem: validators
-will serve the availability chunks kept in the availability store to nodes who connect to them. And the subsystem will
-also implement the other side: the logic for nodes to connect to validators, request availability pieces, and
-reconstruct the `AvailableData`.
+This subsystem is responsible for recovering the data made available via the
+[Availability Distribution](availability-distribution.md) subsystem, necessary for candidate validation during the
+approval/disputes processes. Additionally, it is also being used by collators to recover PoVs in adversarial scenarios
+where the other collators of the para are censoring blocks.

-This version of the availability recovery subsystem is based off of direct connections to validators. In order to
-recover any given `AvailableData`, we must recover at least `f + 1` pieces from validators of the session. Thus, we will
-connect to and query randomly chosen validators until we have received `f + 1` pieces.
+According to the Polkadot protocol, in order to recover any given `AvailableData`, we generally must recover at least
+`f + 1` pieces from validators of the session. Thus, we should connect to and query randomly chosen validators until we
+have received `f + 1` pieces.
+
+In practice, there are various optimisations implemented in this subsystem which avoid querying all chunks from
+different validators and/or avoid doing the chunk reconstruction altogether.

## Protocol

-`PeerSet`: `Validation`
+This version of the availability recovery subsystem is based only on request-response network protocols.

Input:

-* `NetworkBridgeUpdate(update)`
-* `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session, backing_group, response)`
+* `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session, backing_group, core_index, response)`

Output:

-* `NetworkBridge::SendValidationMessage`
-* `NetworkBridge::ReportPeer`
-* `AvailabilityStore::QueryChunk`
+* `NetworkBridgeMessage::SendRequests`
+* `AvailabilityStoreMessage::QueryAllChunks`
+* `AvailabilityStoreMessage::QueryAvailableData`
+* `AvailabilityStoreMessage::QueryChunkSize`
+

## Functionality

-We hold a state which tracks the currently ongoing recovery tasks, as well as which request IDs correspond to which
-task. A recovery task is a structure encapsulating all recovery tasks with the network necessary to recover the
-available data in respect to one candidate.
+We hold a state which tracks the currently ongoing recovery tasks. A `RecoveryTask` is a structure encapsulating all
+network tasks needed in order to recover the available data in respect to a candidate.
+
+Each `RecoveryTask` has a collection of ordered recovery strategies to try.

```rust
+/// Subsystem state.
struct State {
-	/// Each recovery is implemented as an independent async task, and the handles only supply information about the result.
-	ongoing_recoveries: FuturesUnordered<RecoveryHandle>,
-	/// A recent block hash for which state should be available.
-	live_block_hash: Hash,
-	// An LRU cache of recently recovered data.
-	availability_lru: LruMap<CandidateHash, Result<AvailableData, RecoveryError>>,
+	/// Each recovery task is implemented as its own async task,
+	/// and these handles are for communicating with them.
+	ongoing_recoveries: FuturesUnordered<RecoveryHandle>,
+	/// A recent block hash for which state should be available.
+	live_block: (BlockNumber, Hash),
+	/// An LRU cache of recently recovered data.
+	availability_lru: LruMap<CandidateHash, CachedRecovery>,
+	/// Cached runtime info.
+	runtime_info: RuntimeInfo,
}

-/// This is a future, which concludes either when a response is received from the recovery tasks,
-/// or all the `awaiting` channels have closed.
-struct RecoveryHandle {
-	candidate_hash: CandidateHash,
-	interaction_response: RemoteHandle<Concluded>,
-	awaiting: Vec<oneshot::Sender<Result<AvailableData, RecoveryError>>>,
-}
-
-struct Unavailable;
-struct Concluded(CandidateHash, Result<AvailableData, RecoveryError>);
-
-struct RecoveryTaskParams {
-	validator_authority_keys: Vec<AuthorityDiscoveryId>,
-	validators: Vec<ValidatorId>,
-	// The number of pieces needed.
-	threshold: usize,
-	candidate_hash: Hash,
-	erasure_root: Hash,
+struct RecoveryParams {
+	/// Discovery ids of `validators`.
+	pub validator_authority_keys: Vec<AuthorityDiscoveryId>,
+	/// Number of validators.
+	pub n_validators: usize,
+	/// The number of regular chunks needed.
+	pub threshold: usize,
+	/// The number of systematic chunks needed.
+	pub systematic_threshold: usize,
+	/// A hash of the relevant candidate.
+	pub candidate_hash: CandidateHash,
+	/// The root of the erasure encoding of the candidate.
+	pub erasure_root: Hash,
+	/// Metrics to report.
+	pub metrics: Metrics,
+	/// Do not request data from availability-store. Useful for collators.
+	pub bypass_availability_store: bool,
+	/// The type of check to perform after available data was recovered.
+	pub post_recovery_check: PostRecoveryCheck,
+	/// The blake2-256 hash of the PoV.
+	pub pov_hash: Hash,
+	/// Protocol name for ChunkFetchingV1.
+	pub req_v1_protocol_name: ProtocolName,
+	/// Protocol name for ChunkFetchingV2.
+	pub req_v2_protocol_name: ProtocolName,
+	/// Whether or not chunk mapping is enabled.
+	pub chunk_mapping_enabled: bool,
+	/// Channel to the erasure task handler.
+	pub erasure_task_tx: mpsc::Sender<ErasureTask>,
}

-enum RecoveryTask {
-	RequestFromBackers {
-		// a random shuffling of the validators from the backing group which indicates the order
-		// in which we connect to them and request the chunk.
-		shuffled_backers: Vec<ValidatorIndex>,
-	}
-	RequestChunksFromValidators {
-		// a random shuffling of the validators which indicates the order in which we connect to the validators and
-		// request the chunk from them.
-		shuffling: Vec<ValidatorIndex>,
-		received_chunks: Map<ValidatorIndex, ErasureChunk>,
-		requesting_chunks: FuturesUnordered<Receiver<ErasureChunkRequestResponse>>,
-	}
+pub struct RecoveryTask<Sender: overseer::AvailabilityRecoverySenderTrait> {
+	sender: Sender,
+	params: RecoveryParams,
+	strategies: VecDeque<Box<dyn RecoveryStrategy<Sender>>>,
+	state: task::State,
}

-struct RecoveryTask<Source> {
-	to_subsystems: SubsystemSender,
-	params: RecoveryTaskParams,
-	source: Source,
+#[async_trait::async_trait]
+/// Common trait for runnable recovery strategies.
+pub trait RecoveryStrategy<Sender: overseer::AvailabilityRecoverySenderTrait>: Send {
+	/// Main entry point of the strategy.
+	async fn run(
+		mut self: Box<Self>,
+		state: &mut task::State,
+		sender: &mut Sender,
+		common_params: &RecoveryParams,
+	) -> Result<AvailableData, RecoveryError>;
+
+	/// Return the name of the strategy for logging purposes.
+	fn display_name(&self) -> &'static str;
+
+	/// Return the strategy type for use as a metric label.
+	fn strategy_type(&self) -> &'static str;
}
```

@@ -90,68 +114,71 @@
Ignore `BlockFinalized` signals.

On `Conclude`, shut down the subsystem.
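+For intuition, the `f + 1` rule described above can be made concrete. The following is a minimal
+sketch (not the subsystem's actual code) of the recovery threshold implied by `n = 3f + k`,
+`k in {1, 2, 3}`:
+
+```rust
+/// Number of chunks needed to reconstruct the data, i.e. `f + 1`,
+/// where `f` is the maximum tolerated number of byzantine validators.
+fn recovery_threshold(n_validators: usize) -> usize {
+    // With n = 3f + k and k in {1, 2, 3}, f is the integer division (n - 1) / 3.
+    let f = n_validators.saturating_sub(1) / 3;
+    f + 1
+}
+
+// e.g. recovery_threshold(300) == 100 and recovery_threshold(10) == 4.
+```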
-#### `AvailabilityRecoveryMessage::RecoverAvailableData(receipt, session, Option<GroupIndex>, response)`
+#### `AvailabilityRecoveryMessage::RecoverAvailableData(...)`

-1. Check the `availability_lru` for the candidate and return the data if so.
-1. Check if there is already an recovery handle for the request. If so, add the response handle to it.
+1. Check the `availability_lru` for the candidate and return the data if present.
+1. Check if there is already a recovery handle for the request. If so, add the response handle to it.
1. Otherwise, load the session info for the given session under the state of `live_block_hash`, and initiate a recovery
-   task with *`launch_recovery_task`*. Add a recovery handle to the state and add the response channel to it.
+   task with `launch_recovery_task`. Add a recovery handle to the state and add the response channel to it.
1. If the session info is not available, return `RecoveryError::Unavailable` on the response channel.

### Recovery logic

-#### `launch_recovery_task(session_index, session_info, candidate_receipt, candidate_hash, Option<GroupIndex>)`
+#### `handle_recover(...) -> Result<()>`

-1. Compute the threshold from the session info. It should be `f + 1`, where `n = 3f + k`, where `k in {1, 2, 3}`, and
-   `n` is the number of validators.
-1. Set the various fields of `RecoveryParams` based on the validator lists in `session_info` and information about the
-   candidate.
-1. If the `backing_group_index` is `Some`, start in the `RequestFromBackers` phase with a shuffling of the backing group
-   validator indices and a `None` requesting value.
-1. Otherwise, start in the `RequestChunksFromValidators` source with `received_chunks`,`requesting_chunks`, and
-   `next_shuffling` all empty.
-1. Set the `to_subsystems` sender to be equal to a clone of the `SubsystemContext`'s sender.
-1. Initialize `received_chunks` to an empty set, as well as `requesting_chunks`.
+Instantiate the appropriate `RecoveryStrategy`s, based on the subsystem configuration, params and session info.
+Call `launch_recovery_task()`.

-Launch the source as a background task running `run(recovery_task)`.
+#### `launch_recovery_task(state, ctx, response_sender, recovery_strategies, params) -> Result<()>`

-#### `run(recovery_task) -> Result<AvailableData, RecoveryError>`
+Create the `RecoveryTask` and launch it as a background task running `recovery_task.run()`.

-```rust
-// How many parallel requests to have going at once.
-const N_PARALLEL: usize = 50;
-```
+#### `recovery_task.run(mut self) -> Result<AvailableData, RecoveryError>`
+
+* Loop:
+  * Pop a strategy from the queue. If none are left, return `RecoveryError::Unavailable`.
+  * Run the strategy.
+  * If the strategy returned successfully or returned `RecoveryError::Invalid`, break the loop.
+
+### Recovery strategies
+
+#### `FetchFull`
+
+This strategy tries requesting the full available data from the validators in the backing group to
+which the node is already connected. They are tried one by one in a random order.
+It is very performant if there's enough network bandwidth and the backing group is not overloaded.
+The costly reed-solomon reconstruction is not needed.
+
+#### `FetchSystematicChunks`
+
+Very similar to `FetchChunks` below but requests from the validators that hold the systematic chunks, so that we avoid
+reed-solomon reconstruction. Only possible if `node_features::FeatureIndex::AvailabilityChunkMapping` is enabled and
+the `core_index` is supplied (currently only for recoveries triggered by approval voting).
+
+More info in
+[RFC-47](https://github.com/polkadot-fellows/RFCs/blob/main/text/0047-assignment-of-availability-chunks.md).
+
+#### `FetchChunks`
+
+The least performant strategy but also the most comprehensive one. It's the only one that cannot fail under the
+byzantine threshold assumption, so it's always added as the last one in the `recovery_strategies` queue.
+
+Performs parallel chunk requests to validators. When enough chunks have been received, do the reconstruction.
+In the worst case, all validators will be tried.
+
+### Default recovery strategy configuration
+
+#### For validators
+
+If the estimated available data size is smaller than a configured constant (currently 1 MiB for Polkadot or 4 MiB for
+other networks), try doing `FetchFull` first.
+Next, if the preconditions described in `FetchSystematicChunks` above are met, try systematic recovery.
+As a last resort, do `FetchChunks`.
+
+#### For collators
+
+Collators currently only use `FetchChunks`, as they only attempt recoveries in rare scenarios.

-* Request `AvailabilityStoreMessage::QueryAvailableData`. If it exists, return that.
-* If the task contains `RequestFromBackers`
-  * Loop:
-    * If the `requesting_pov` is `Some`, poll for updates on it. If it concludes, set `requesting_pov` to `None`.
-    * If the `requesting_pov` is `None`, take the next backer off the `shuffled_backers`.
-      * If the backer is `Some`, issue a `NetworkBridgeMessage::Requests` with a network request for the
-        `AvailableData` and wait for the response.
-        * If it concludes with a `None` result, return to beginning.
-        * If it concludes with available data, attempt a re-encoding.
-          * If it has the correct erasure-root, break and issue a `Ok(available_data)`.
-          * If it has an incorrect erasure-root, return to beginning.
-        * Send the result to each member of `awaiting`.
-      * If the backer is `None`, set the source to `RequestChunksFromValidators` with a random shuffling of validators
-        and empty `received_chunks`, and `requesting_chunks` and break the loop.
-
-* If the task contains `RequestChunksFromValidators`:
-  * Request `AvailabilityStoreMessage::QueryAllChunks`. For each chunk that exists, add it to `received_chunks` and
-    remote the validator from `shuffling`.
-  * Loop:
-    * If `received_chunks + requesting_chunks + shuffling` lengths are less than the threshold, break and return
-      `Err(Unavailable)`.
-    * Poll for new updates from `requesting_chunks`. Check merkle proofs of any received chunks. If the request simply
-      fails due to network issues, insert into the front of `shuffling` to be retried.
-    * If `received_chunks` has more than `threshold` entries, attempt to recover the data.
-      * If that fails, return `Err(RecoveryError::Invalid)`
-      * If correct:
-        * If re-encoding produces an incorrect erasure-root, break and issue a `Err(RecoveryError::Invalid)`.
-        * break and issue `Ok(available_data)`
-      * Send the result to each member of `awaiting`.
-    * While there are fewer than `N_PARALLEL` entries in `requesting_chunks`,
-      * Pop the next item from `shuffling`. If it's empty and `requesting_chunks` is empty, return
-        `Err(RecoveryError::Unavailable)`.
-      * Issue a `NetworkBridgeMessage::Requests` and wait for the response in `requesting_chunks`.
+Moreover, the recovery task is specially configured to not attempt requesting data from the local availability-store
+(because it doesn't exist) and to not reencode the data after a successful recovery (because it's an expensive check
+that is not needed; checking the pov_hash is enough for collators).
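+The validator-side configuration above can also be summarised in code. A hedged sketch, where the
+`Strategy` enum and the 1 MiB constant stand in for the real subsystem types and configured limits:
+
+```rust
+use std::collections::VecDeque;
+
+/// Illustrative stand-ins for the strategies described above.
+enum Strategy { FetchFull, FetchSystematicChunks, FetchChunks }
+
+/// Order in which a validator would try the strategies.
+fn validator_strategies(estimated_pov_size: usize, systematic_possible: bool) -> VecDeque<Strategy> {
+    const SMALL_POV_LIMIT: usize = 1024 * 1024; // assumed 1 MiB limit for Polkadot
+    let mut strategies = VecDeque::new();
+    if estimated_pov_size <= SMALL_POV_LIMIT {
+        // Cheap happy path: ask a backer for the full data, no reconstruction needed.
+        strategies.push_back(Strategy::FetchFull);
+    }
+    if systematic_possible {
+        // Requires the `AvailabilityChunkMapping` node feature and a known `core_index`.
+        strategies.push_back(Strategy::FetchSystematicChunks);
+    }
+    // Always last: the only strategy that cannot fail under the byzantine assumption.
+    strategies.push_back(Strategy::FetchChunks);
+    strategies
+}
+```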
diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md index 8f00ff084941cc260dea6a9e76c0ff30d3770caf..701f6c87caff0341c36e4b2799d2444c015c411c 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md @@ -98,15 +98,11 @@ prospective validation data. This is unlikely to change. hashes. - Sent by the Provisioner when requesting backable candidates, when selecting candidates for a given relay-parent. -- `ProspectiveParachainsMessage::GetHypotheticalFrontier` +- `ProspectiveParachainsMessage::GetHypotheticalMembership` - Gets the hypothetical frontier membership of candidates with the given properties under the specified active leaves' fragment trees. - Sent by the Backing Subsystem when sanity-checking whether a candidate can be seconded based on its hypothetical frontiers. -- `ProspectiveParachainsMessage::GetTreeMembership` - - Gets the membership of the candidate in all fragment trees. - - Sent by the Backing Subsystem when it needs to update the candidates - seconded at various depths under new active leaves. - `ProspectiveParachainsMessage::GetMinimumRelayParents` - Gets the minimum accepted relay-parent number for each para in the fragment tree for the given relay-chain block hash. diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md index e6e597c531787f46ced0a6f9e38e05817f2323d7..e5eb9bd7642c1108c45e73134a00ee22b2f6475c 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md @@ -194,7 +194,7 @@ request). This doesn't fully avoid race conditions, but tries to minimize them. - Reports a peer (either good or bad). - `CandidateBackingMessage::Statement` - Note a validator's statement about a particular candidate. -- `ProspectiveParachainsMessage::GetHypotheticalFrontier` +- `ProspectiveParachainsMessage::GetHypotheticalMembership` - Gets the hypothetical frontier membership of candidates under active leaves' fragment trees. - `NetworkBridgeTxMessage::SendRequests` - Sends requests, initiating the request/response protocol. diff --git a/polkadot/roadmap/implementers-guide/src/protocol-disputes.md b/polkadot/roadmap/implementers-guide/src/protocol-disputes.md index 2a4082cc07f92aa99425e9e708ec2e90117c02c5..922cc3c3e2b56836d804f9decd8c2be44e7a7fd4 100644 --- a/polkadot/roadmap/implementers-guide/src/protocol-disputes.md +++ b/polkadot/roadmap/implementers-guide/src/protocol-disputes.md @@ -8,9 +8,9 @@ All parachain blocks that end up in the finalized relay chain should be valid. T only backed, but not included. We have two primary components for ensuring that nothing invalid ends up in the finalized relay chain: - * Approval Checking, as described [here](./protocol-approval.md) and implemented according to the [Approval - Voting](node/approval/approval-voting.md) subsystem. This protocol can be shown to prevent invalid parachain blocks - from making their way into the finalized relay chain as long as the amount of attempts are limited. + * Approval Checking, as described [here](./protocol-approval.md) and implemented accordingly in the [Approval +Voting](node/approval/approval-voting.md) subsystem. 
This protocol can be shown to prevent invalid parachain blocks
+from making their way into the finalized relay chain as long as the number of attempts is limited.
  * Disputes, this protocol, which ensures that each attempt to include something bad is caught, and the offending
    validators are punished. Disputes differ from backing and approval process (and can not be part of those) in that a
    dispute is independent of a particular fork, while both backing and approval operate on particular forks. This
diff --git a/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md b/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fd44c00fa0a1c9ba7f5bb0d5abd54bdc55b8e4e
--- /dev/null
+++ b/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md
@@ -0,0 +1,437 @@
+# Validator Disabling
+
+## Background
+
+As established in the [approval process](protocol-approval.md), dealing with bad parablocks is a three-step process:
+
+1. Detection
+1. Escalation
+1. Consequences
+
+The main system responsible for dispensing **consequences** for malicious actors is the [dispute
+system](protocol-disputes.md) which eventually dispenses slash events. The slashes themselves can be dispensed quickly (a
+matter of blocks) but for an extra layer of auditing all slashes are deferred for 27 days (in Polkadot/Kusama), which
+gives time for Governance to investigate and potentially alter the punishment. A dispute concluding does not by itself
+immediately remove the validator from the active validator set.
+
+> **Note:** \
+> There was an additional mechanism of automatically chilling the validator which removed their intent to participate in
+> the next election, but the removed validator could simply re-register their intent to validate.
+
+There is a need for a more immediate way to deal with malicious validators. This is where validator disabling
+comes in. It is focused on dispensing **low latency** consequences for malicious actors. It is important to note that
+validator disabling is not a replacement for the dispute or slashing systems. It is a complementary system that is
+focused on lighter but immediate consequences, usually in the form of restricted validator privileges.
+
+The primary goals are:
+- Eliminate or minimize cases where attackers can get free attempts at attacking the network
+- Eliminate or minimize the risks of honest nodes being pushed out of consensus when getting unjustly slashed (defense
+  in depth)
+
+The above two goals are generally at odds so a careful balance has to be struck between them. We will achieve them by
+sacrificing some **liveness** in favor of **soundness** when the network is under stress. Maintaining some liveness,
+but above all absolute soundness, is paramount.
+
+> **Note:** \
+> Liveness = Valid candidates can go through (at a decent pace) \
+> Soundness = Invalid candidates cannot go through (or are statistically very improbable)
+
+Side goals are:
+- Reduce the damages to honest nodes that had a fault which might cause repeated slashes
+- Reduce liveness impact of individual malicious attackers
+
+## System Overview
+
+High level assumptions and goals of the validator disabling system that will be further discussed in the following
+sections:
+
+1. If a validator gets slashed (even 0%) we mark them as disabled in the runtime and on the node side.
+1. We only disable up to the byzantine threshold of validators.
+1. 
If there are more offenders than the byzantine threshold, disable only the highest offenders. (Some might get re-enabled.)
+1. Disablement lasts for 1 era.
+1. Disabled validators remain in the active validator set but have some limited permissions.
+1. Disabled validators can get re-elected.
+1. Disabled validators can participate in approval checking.
+1. Disabled validators can participate in GRANDPA/BEEFY, but equivocations cause disablement.
+1. Disabled validators cannot author blocks.
+1. Disabled validators cannot back candidates.
+1. Disabled validators cannot initiate disputes, but their votes are still counted if a dispute occurs.
+1. Disabled validators making dispute statements no-show in approval checking.
+


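+The per-capability effect of disabling (points 7.-12. of the overview above) can be read as a
+simple predicate. A sketch with hypothetical names, not actual runtime code:
+
+```rust
+/// Validator capabilities affected by disabling (illustrative names).
+enum Capability {
+    ApprovalChecking,
+    GrandpaBeefyVoting,
+    BlockAuthoring,
+    Backing,
+    DisputeInitiation,
+}
+
+/// What a *disabled* validator may still do.
+fn allowed_while_disabled(cap: Capability) -> bool {
+    match cap {
+        // Points 7. and 8.: approval checking and GRANDPA/BEEFY participation remain.
+        Capability::ApprovalChecking | Capability::GrandpaBeefyVoting => true,
+        // Points 9.-11.: authoring, backing and dispute initiation are withheld.
+        Capability::BlockAuthoring | Capability::Backing | Capability::DisputeInitiation => false,
+    }
+}
+```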
+
+# Risks
+
+## Risks of NOT having validator disabling
+
+Assume that if an offense is committed a slash is deposited but the perpetrator can still act normally. They will be
+slashed 100% with a long delay (slash deferral duration, which is 27 days). This is akin to the current design.
+
+A simple argument for disabling is that if someone is already slashed 100% and they have nothing to lose they could
+cause harm to the network and should be silenced.
+
+What harm could they cause?
+
+**1. Liveness attacks:**
+
+- 1.1. Break sharding (with mass no-shows or mass disputes): It forces everyone to do all the work which affects
+  liveness but doesn't kill it completely. The chain can progress at a slow rate.
+
+- 1.2. Mass invalid candidate backing: Spawns a lot of worthless work that needs to be done but it is bounded by backing
+  numbers. Honest backers will still back valid candidates and that cannot be stopped. Honest block authors will
+  eventually select valid candidates and even if disputed they will win and progress the chain.
+
+**2. Soundness attacks:**
+
+- 2.1. The best and possibly only way to affect soundness is by getting lucky in the approval process. If by chance all
+  approval voters would be malicious, the attackers could get a single invalid candidate through. Their chances would be
+  relatively low but in general this risk has to be taken seriously as it significantly reduces the safety buffer around
+  approval checking.
+
+> **Note:** With 30 approvals needed, the chance of a malicious candidate going through is around 4\*10^-15. Assuming
+> attackers can back invalid candidates on 50 cores for 48 hours straight and only those candidates get included, it
+> still gives a 7\*10^-9 chance of success, which is still relatively small considering the cost (all malicious stake
+> slashed).
+
+Attacks 1.2 and 2.1 should generally be pretty futile for a solo attacker, while 1.1 could be possible with mass disputes
+even from a single attacker. Nevertheless, whatever the attack vector, within the old system the attackers would
+*eventually* get slashed and pushed out of the active validator set, but they had plenty of time to wreak havoc.
+
+## Risks of having validator disabling
+
+Assume we fully push out validators when they commit offenses.
+
+The primary risk behind having any sort of disabling is that it is a double-edged sword that, in case of dispute bugs
+or sources of PVF non-determinism, could disable honest nodes or be abused by attackers to specifically silence honest
+nodes.
+
+Validators being pushed out of the validator set is an issue because that can greatly skew the numbers game in approval
+checking (the chance of drawing ~30 malicious approval checkers in a row).
+
+There are also censorship or liveness issues if backing is suddenly dominated by malicious nodes, but in general, as
+long as some honest blocks get backed, liveness should be preserved.
+
+> **Note:** It is worth noting that this is fundamentally a defense in depth strategy because if we assume disputes are
+> perfect it should not be a real concern. In reality disputes and determinism are difficult to get right, and
+> non-determinism can happen, so defense in depth is crucial when handling those subsystems.


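+The back-of-the-envelope numbers in the note earlier in this section can be reproduced directly,
+assuming one included candidate per core per 6-second relay-chain block:
+
+```rust
+fn attack_success_probability() -> f64 {
+    // Chance that all ~30 randomly assigned approval checkers of one candidate
+    // are malicious, with the attacker controlling up to 1/3 of the validator set.
+    let per_candidate = (1.0_f64 / 3.0).powi(30); // ~4.9 * 10^-15
+    // 50 cores for 48 hours, one included candidate per core per 6-second block.
+    let attempts = 50.0 * 48.0 * 3600.0 / 6.0; // 1.44 million candidates
+    per_candidate * attempts // ~7 * 10^-9, matching the note above
+}
+```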
+
+# Risks Mitigation
+
+## Addressing the risks of having validator disabling
+
+One safety measure is bounding the disabled number to 1/3 ([**Point 2.**](#system-overview)) or, to be exact, the
+byzantine threshold. If for any reason more than 1/3 of validators are getting disabled, it means that some part of the
+protocol failed or that there are more than 1/3 malicious nodes, which breaks the base assumptions.
+
+Even in such a dire situation, where more than 1/3 got disabled, the most likely scenario is a non-determinism bug or a
+sacrifice attack bug. Those attacks generally cause minor slashes to multiple honest nodes. In such a case the situation
+could be salvaged by prioritizing the highest offenders for disabling ([**Point 3.**](#system-overview)).
+
+> **Note:** \
+> The system can be launched without re-enabling and will still provide some security improvements. Re-enabling will be
+> launched in an upgrade after the initial deployment.
+
+Fully pushing an offending validator out of the validator set is too risky in case of a dispute bug, non-determinism or
+sacrifice attacks. The main issue lies in skewing the numbers in approval checking, so instead of fully blocking disabled
+nodes a different approach can be taken - one where only some functionalities are disabled ([**Point
+5.**](#system-overview)). One of those functionalities is approval voting, which, as pointed out above, is so crucial that
+even in a disabled state nodes should be able to participate in it ([**Point 7.**](#system-overview)).
+
+> **Note:** \
+> Approval checking statements are implicitly valid. Sending a statement for an invalid candidate is a part of the
+> dispute logic which we have not yet discussed. For now we only allow nodes to state that a candidate is valid or remain
+> silent. But this solves the main risk of disabling.
+
+Because we capped the number of disabled nodes at 1/3, there will always be at least 1/3 honest nodes to participate in
+backing, so liveness should be preserved. That means that backing **COULD** be safely disabled for disabled nodes
+([**Point 10.**](#system-overview)).
+
+
+## Addressing the risks of NOT having validator disabling
+
+To determine if backing **SHOULD** be disabled, the attack vectors of 1.2 (Mass invalid candidate backing) and 2.1
+(Getting lucky in approval voting) need to be considered. In both of those cases having extra backed malicious
+candidates gives attackers extra chances to get lucky in approval checking. The solution is to not allow for backing in
+disablement. ([**Point 10.**](#system-overview))
+
+The attack vector 1.1 (Break sharding) requires a bit more nuance. If we assume that the attacker is a single entity and
+that they can get a lot of disputes through, they could quite easily break sharding. This generally points
+in the direction of disallowing that during disablement ([**Point 11.**](#system-overview)).
+
+This might seem like an issue because it takes away the escalation privileges of disabled approval checkers, but this is
+NOT true. By issuing a dispute statement those nodes remain silent in approval checking because they skip their approval
+statement and thus will count as a no-show. This will create a mini escalation for that particular candidate.
This means
+that disabled nodes maintain just enough escalation that they can protect soundness (same argument as soundness
+protection during a DoS attack on approval checking) but they lose their extreme escalation privileges, which are only
+given to flawlessly performing nodes ([**Point 12.**](#system-overview)).
+
+As a defense in depth measure, dispute statements from disabled validators count toward confirming disputes (byzantine
+threshold needed to confirm). If a dispute is confirmed, everyone participates in it. This protects us from situations
+where, due to a bug, more than the byzantine threshold of validators would be disabled.
+
+> **Note:** \
+> The way this behavior is achieved easily in implementation is that honest nodes note down dispute statements from
+> disabled validators just like they would for normal nodes, but they do not release their own dispute statements unless
+> the dispute is confirmed already. This simply stops the escalation process of disputes.

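+The note above boils down to a small decision rule for an honest node. A sketch with illustrative
+parameter names (the real state would come from the dispute coordinator):
+
+```rust
+/// Whether an honest node should release its own dispute statement.
+/// `all_initiators_disabled` and `confirmed` are illustrative inputs that would
+/// be derived from the node's dispute-coordinator state.
+fn release_own_statement(all_initiators_disabled: bool, confirmed: bool) -> bool {
+    // Statements from disabled validators are recorded and count toward
+    // confirmation, but honest nodes only join in once the dispute is
+    // confirmed or a non-disabled validator raised it.
+    !all_initiators_disabled || confirmed
+}
+```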
+
+# Disabling Duration
+
+## Context
+
+A crucial point to understand is that as of the time of writing all slashing events, as alluded to at the beginning, are
+delayed for 27 days before being executed. This is primarily because it gives governance enough time to investigate and
+potentially intervene. For that duration, while the slash is pending, the stake is locked and cannot be moved. The time
+to unbond your stake is 28 days, which ensures that the stake will be slashed before it can be withdrawn.
+
+## Design
+
+A few options for the duration of disablement were considered:
+- 1 epoch (4h in Polkadot)
+- 1 era (24h in Polkadot)
+- 2-26 eras
+- 27 eras
+
+1 epoch is a short period, and between a few epochs the validator set will most likely be exactly the same. It is also very
+difficult to fix any local node issues for an honest validator in such a short time, so the chance of a repeated offense is
+high.
+
+1 era gives a bit more time to fix any minor issues. Additionally, it guarantees a validator set change, so many of
+the currently disabled validators might no longer be present anyway. It also gives time for the validator to chill
+themselves if they have identified a cause and want to spend more time fixing it. ([**Point 4.**](#system-overview))
+
+Higher values could be considered, and the main arguments for those are based around the fact that they reduce the number
+of repeated attacks that will be allowed before the slash execution. Generally, 1 attack per era for 27 eras, resulting in
+27 attacks at most, should not compromise our safety assumptions. Although this direction could be further explored and
+might be parametrized for governance to decide.


+
+# Economic consequences of Disablement
+
+Disablement is generally a form of punishment and that will be reflected in the rewards at the end of an era. A disabled
+validator will not receive any rewards for backing or block authoring, which will reduce its profits.
+
+That means that the opportunity cost of being disabled is a punishment by itself, and thus it can be used for some cases
+where a minor punishment is needed. The current implementation was using 0% slashes to mark nodes for chilling, and a
+similar approach of 0% slashes can be used to mark validators for disablement. ([**Point 1.**](#system-overview)) 0% slashes
+could for instance be used to punish approval checkers voting invalid on valid candidates.
+
+Anything higher than 0% will of course also lead to a disablement.
+
+> **Notes:** \
+> Alternative designs incorporating disabling proportional to offenses were explored but they were deemed too complex
+> and not worth the effort. The main issue with those is that proportional disabling would cause a back and forth between
+> disabled and enabled, which complicates tracking the state of disabled validators and messes with optimistic node
+> optimizations. The main benefit was that minor slashes would result in barely any disabling, which has nice properties
+> against sacrifice attacks.


+
+# Redundancy
+
+Some systems can be greatly simplified or outright removed thanks to the above changes. This leads to reduced complexity
+around the systems that were hard to reason about and were sources of potential bugs or new attack vectors.
+
+## Automatic Chilling
+
+Chilling is the process of a validator dropping their intent to validate. This removes them from the upcoming NPoS
+elections and effectively pushes them out of the validator set as quickly as the next era (or 2 eras in the case of late
+offenses). All nominators of that validator were also unsubscribed from it. A validator could
+re-register their intent to validate at any time. The intent behind this logic was to protect honest stakes from
+repeated slashes caused by unnoticed bugs. It would give time for validators to fix their issue before continuing as a
+validator.
+
+Chilling had a myriad of problems. It assumed that validators and nominators remain very active and monitor everything.
+If a validator got slashed, they were automatically chilled and their nominators were unsubscribed. This was
+an issue because of minor non-malicious slashes due to node operator mistakes or small bugs. Validators got those bugs
+fixed quickly and were reimbursed, but nominators had to manually re-subscribe to the validator, which they often
+postponed for lengthy amounts of time, most likely simply because they were not checking their stake. **This forced
+unsubscribing of nominators was later disabled.**
+
+Automatic chilling was achieving its goals in ideal scenarios (no attackers, no lazy nominators) but it opened new
+vulnerabilities for attackers. The biggest issue was that chilling, in the case of honest-node slashes, could lead to honest
+validators being quickly pushed out of the validator set within the next era. This retains the validator set size
+but gives an edge to attackers as they can more easily win slots in the NPoS election.
+
+Disabling allows for punishment that limits the damages malicious actors can cause without having to resort to kicking
+them out of the validator set. This protects us from the edge case of honest validators getting quickly pushed out of
+the set by slashes. ([**Point 6.**](#system-overview))
+
+> **Notes:** \
+> As long as honest slashes absolutely cannot occur, automatic chilling is sensible and desirable. This means it could
+> be re-enabled once PolkaVM introduces deterministic gas metering. Then the best of both worlds could be achieved.
+
+## Forcing New Era
+
+The previous implementation of disabling had some limited mechanisms allowing for validator disablement and, if too many
+were disabled, for forcing a new era (a new election). The FRAME staking pallet offered the ability to force a new era,
+but it was also deemed unsafe as it could be abused and compromise the security of the network, for instance by weakening
+the randomness used throughout the protocol.


+
+# Other types of slashing
+
+The above slashes were specifically referring to slashing events coming from disputes against candidates, but in Polkadot
+other types of offenses exist, for example GRANDPA equivocations or block authoring offenses. The question is whether the
+above design can handle those offenses.
+
+## GRANDPA/BEEFY Offenses
+
+The main offences for GRANDPA/BEEFY are equivocations. They are not very serious offenses; a few nodes committing them do
+not endanger the system, and performance is barely affected. If more than the byzantine threshold of nodes equivocate, it
+is a catastrophic failure, potentially resulting in two finalized blocks at the same height in the case of GRANDPA.
+
+Honest nodes generally should not commit those offenses, so the goal of protecting them does not apply here.
+
+> **Note:** \
+> A validator running multiple nodes with the same identity might equivocate. Doing that is highly inadvisable, but it
+> has happened before.
+
+Equivocation is not a game of chance, so giving attackers extra chances does not compromise soundness. Also, finalizing
+blocks requires a supermajority of honest nodes, so any disabling of honest nodes in GRANDPA might
+compromise liveness.
+
+The best approach is to allow disabled nodes to participate in GRANDPA/BEEFY as normal; as mentioned before,
+GRANDPA/BABE/BEEFY equivocations should not happen to honest nodes, so we can safely disable the offenders. Additionally,
+the slashes for singular equivocations will be very low, so those offenders would easily get re-enabled if
+more serious offenders show up. ([**Point 8.**](#system-overview))
+
+## Block Authoring Offenses (BABE Equivocations)
+
+Even if all honest nodes are disabled in Block Authoring (BA), liveness is generally preserved. At least 50% of blocks
+produced should still be honest. Soundness-wise, disabled nodes can create a decent amount of wasted work by creating bad
+blocks, but only in bounded amounts.
+
+Disabling in BA is not a requirement as both liveness and soundness are preserved, but it is the current default behavior,
+and it results in a bit less wasted work.
+
+Offenses in BA, just like in backing, can be caused by faulty PVFs or bugs. They might happen to honest nodes, and
+disabling here, while not a requirement, can also ensure that such a node does not repeat the offense, as it might not be
+trusted with its PVF anymore.
+
+Neither point above presents significant risks when disabling, so the default behavior is to disable in BA, and because
+of offenses in BA. ([**Point 9.**](#system-overview)) This filters out honest faulty nodes as well as protects from some
+attackers.


+
+# Extra Design Considerations
+
+## Disabling vs Accumulating Slashes
+
+Instant disabling generally allows us to remove the need for accumulating slashes. It is a more immediate punishment,
+and a more lenient one for honest nodes.
+
+The current architecture of using max slashing can be kept, and it works around the problems of delaying the slash for a
+long period.
+
+An alternative design with immediate and accumulating slashes could be relevant to other systems, but it goes
+against the governance auditing mechanisms, so it is not suitable for Polkadot.
+
+## Disabling vs Getting Pushed Out of NPoS Elections
+
+Validator disabling and getting forced out of NPoS elections (1 era) due to slashes are actually very similar processes
+in terms of outcomes, but there are some differences:
+
+- **latency** (next few blocks for validator disabling and 27 days for getting pushed out organically)
+- **pool restriction** (validator disabling could effectively lower the number of active validators during an era if we
+  fully disable)
+- **granularity** (validator disabling could remove only a portion of validator privileges instead of all)
+
+Granularity is particularly crucial in the final design as only a few select functions are disabled while others remain.
+
+## Enabling Approval Voter Slashes
+
+The original Polkadot 1.0 design describes that all validators on the losing side of the dispute are slashed. In the
+current system only the backers are slashed and any approval voters on the wrong side will not be slashed. This creates
+some undesirable incentives:
+
+- Lazy approval checkers (approval voters yay-ing everything)
+- Spammy approval checkers (approval voters nay-ing everything)
+
+Initially those slashes were disabled to reduce the complexity and to minimize the risk surface in case the system
+malfunctioned. This is especially risky in case any nondeterministic bugs are present in the system. Once validator
+re-enabling is launched, approval voter slashes can be reinstated. Numbers need to be further explored, but slashes
+between 0% and 2% are reasonable. 0% would still disable, which, considering the opportunity cost, should be enough.
+
+> **Note:** \
+> Spammy approval checkers are in fact not a big issue, thanks to a side effect of the offchain disabling introduced by the
+> Defense Against Past-Era Dispute Spam (**Node**) [#2225](https://github.com/paritytech/polkadot-sdk/issues/2225). It
+> makes it so all validators losing a dispute are locally disabled and ignored for dispute initiation, so it effectively
+> silences spammers. They can still no-show, but the damage is minimized.
+
+
+## Interaction with all types of misbehaviors
+
+With re-enabling in place and potentially approval voter slashes enabled, the overall misbehaviour-punishment system can
+be as highlighted in the table below:
+
+|Misbehaviour |Slash % |Onchain Disabling |Offchain Disabling |Chilling |Reputation Costs |
+|------------ |------- |----------------- |------------------ |-------- |----------------- |
+|Backing Invalid |100% |Yes (High Prio) |Yes (High Prio) |No |No |
+|ForInvalid Vote |2% |Yes (Mid Prio) |Yes (Mid Prio) |No |No |
+|AgainstValid Vote |0% |Yes (Low Prio) |Yes (Low Prio) |No |No |
+|GRANDPA / BABE / BEEFY Equivocations |0.01-100% |Yes (Varying Prio) |No |No |No |
+|Seconded + Valid Equivocation |- |No |No |No |No |
+|Double Seconded Equivocation |- |No |No |No |Yes |
+
+
+*Ignoring AURA offences.
+ +**There are some other misbehaviour types handled in rep only (DoS prevention etc) but they are not relevant to this strategy. + +*** BEEFY will soon introduce new slash types so this strategy table will need to be revised but no major changes are expected. + +


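+For reference, the table above can be encoded as a simple lookup. A sketch; the percentages are
+the ones quoted in the table, not final protocol constants:
+
+```rust
+enum Misbehaviour {
+    BackingInvalid,
+    ForInvalidVote,
+    AgainstValidVote,
+    GrandpaBabeBeefyEquivocation,
+    SecondedValidEquivocation,
+    DoubleSecondedEquivocation,
+}
+
+/// Returns (slash percent, onchain disabling?, offchain disabling?).
+fn consequences(m: Misbehaviour) -> (Option<f64>, bool, bool) {
+    match m {
+        Misbehaviour::BackingInvalid => (Some(100.0), true, true), // high prio
+        Misbehaviour::ForInvalidVote => (Some(2.0), true, true),   // mid prio
+        Misbehaviour::AgainstValidVote => (Some(0.0), true, true), // low prio
+        // 0.01-100% depending on severity; disabling priority varies.
+        Misbehaviour::GrandpaBabeBeefyEquivocation => (Some(0.01), true, false),
+        Misbehaviour::SecondedValidEquivocation => (None, false, false),
+        // Punished via reputation costs only.
+        Misbehaviour::DoubleSecondedEquivocation => (None, false, false),
+    }
+}
+```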
+
+# Implementation
+
+Implementation of the above design covers a few additional areas that allow for node-side optimizations.
+
+## Core Features
+
+1. Disabled Validators Tracking (**Runtime**) [#2950](https://github.com/paritytech/polkadot-sdk/issues/2950)
+    - Expose a ``disabled_validators`` map through a Runtime API
+1. Enforce Backing Disabling (**Runtime**) [#1592](https://github.com/paritytech/polkadot-sdk/issues/1592)
+    - Filter out votes from ``disabled_validators`` in ``BackedCandidates`` in ``process_inherent_data``
+1. Substrate Byzantine Threshold (BZT) as Limit for Disabling
+   [#1963](https://github.com/paritytech/polkadot-sdk/issues/1963)
+    - Can be parametrized but default to BZT
+    - Disable only up to 1/3 of validators
+1. Respect Disabling in Backing Statement Distribution (**Node**)
+   [#1591](https://github.com/paritytech/polkadot-sdk/issues/1951)
+    - This is an optimization as in the end it would get filtered in the runtime anyway
+    - Filter out backing statements coming from ``disabled_validators``
+1. Respect Disablement in Backing (**Node**) [#2951](https://github.com/paritytech/polkadot-sdk/issues/2951)
+    - This is an optimization as in the end it would get filtered in the runtime anyway
+    - Don't start backing new candidates when disabled
+    - Don't react to backing requests when disabled
+1. Stop Automatic Chilling of Offenders [#1962](https://github.com/paritytech/polkadot-sdk/issues/1962)
+    - Chilling still persists as a state but is no longer automatically applied on offenses
+1. Respect Disabling in Dispute Participation (**Node**) [#2225](https://github.com/paritytech/polkadot-sdk/issues/2225)
+    - Receive dispute statements from ``disabled_validators`` but do not release own statements
+    - Ensure dispute confirmation when there are BZT statements from disabled validators
+1. Remove Liveness Slashes [#1964](https://github.com/paritytech/polkadot-sdk/issues/1964)
+    - Remove liveness slashes from the system
+    - There are other incentives to be online, and liveness slashes could be abused to attack the system
+1. Defense Against Past-Era Dispute Spam (**Node**) [#2225](https://github.com/paritytech/polkadot-sdk/issues/2225)
+    - This is needed because the runtime cannot disable validators which it no longer knows about
+    - Add a node-side parallel store of ``disabled_validators``
+    - Add new disabled validators to the node-side store when they lose a dispute in any leaf in scope
+    - Runtime ``disabled_validators`` always take priority over node-side ``disabled_validators``
+    - Respect the BZT threshold
+    > **Note:** \
+    > An alternative design here was considered where instead of tracking new incoming leaves a relay parent is used.
+    > This would guarantee determinism as different nodes can see different leaves, but this approach left too
+    > wide a window because of Async Backing. The relay parent could have been significantly in the past, which would
+    > give a lot of time for past-session disputes to be spammed.
+1. Do not block finality for "disabled" disputes [#3358](https://github.com/paritytech/polkadot-sdk/pull/3358)
+    - Emergency fix to not block finality for disputes initiated only by disabled validators
+1. 
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md
index 083ed2b6feac9ed6d27c44bc2267744dd49fffd1..be1e71666ad20626711e23f828820155746cbb68 100644
--- a/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md
+++ b/polkadot/roadmap/implementers-guide/src/runtime/scheduler.md
@@ -1,7 +1,7 @@
 # Scheduler Pallet
 
 > TODO: this section is still heavily under construction. key questions about availability cores and validator
-> assignment are still open and the flow of the the section may be contradictory or inconsistent
+> assignment are still open and the flow of the section may be contradictory or inconsistent
 
 The Scheduler module is responsible for two main tasks:
 
diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
index e011afb97089aaf59d685b8d4bf9998d21390146..c82d89d2d8799eff6a68b49bdb01ee15bbe43d84 100644
--- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
+++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
@@ -238,6 +238,9 @@ enum AvailabilityRecoveryMessage {
         CandidateReceipt,
         SessionIndex,
         Option, // Backing validator group to request the data directly from.
+        Option, /* A `CoreIndex` needs to be specified for the recovery process to
+                 * prefer systematic chunk recovery. This is the core that the candidate
+                 * was occupying while pending availability. */
         ResponseChannel>,
     ),
 }
diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml
index 5af5e63b175380f7e695deb973ea48df52c326e4..1900b595d671540ee3518bd1a7bd0fc061c073f2 100644
--- a/polkadot/rpc/Cargo.toml
+++ b/polkadot/rpc/Cargo.toml
@@ -17,8 +17,10 @@ sp-blockchain = { path = "../../substrate/primitives/blockchain" }
 sp-keystore = { path = "../../substrate/primitives/keystore" }
 sp-runtime = { path = "../../substrate/primitives/runtime" }
 sp-api = { path = "../../substrate/primitives/api" }
+sp-application-crypto = { path = "../../substrate/primitives/application-crypto" }
 sp-consensus = { path = "../../substrate/primitives/consensus/common" }
 sp-consensus-babe = { path = "../../substrate/primitives/consensus/babe" }
+sp-consensus-beefy = { path = "../../substrate/primitives/consensus/beefy" }
 sc-chain-spec = { path = "../../substrate/client/chain-spec" }
 sc-rpc = { path = "../../substrate/client/rpc" }
 sc-rpc-spec-v2 = { path = "../../substrate/client/rpc-spec-v2" }
diff --git a/polkadot/rpc/src/lib.rs b/polkadot/rpc/src/lib.rs
index 4455efd3b5337be85fb975f368af9475b20b0b89..2daa246102fc24f08d0e04c9d5640aa69aced086 100644
--- a/polkadot/rpc/src/lib.rs
+++ b/polkadot/rpc/src/lib.rs
@@ -29,10 +29,12 @@ use sc_consensus_beefy::communication::notification::{
 use sc_consensus_grandpa::FinalityProofProvider;
 pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor};
 use sp_api::ProvideRuntimeApi;
+use sp_application_crypto::RuntimeAppPublic;
 use sp_block_builder::BlockBuilder;
 use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
 use sp_consensus::SelectChain;
 use sp_consensus_babe::BabeApi;
+use sp_consensus_beefy::AuthorityIdBound;
 use sp_keystore::KeystorePtr;
 use txpool_api::TransactionPool;
 
@@ -62,9 +64,9 @@ pub struct GrandpaDeps {
 }
 
 /// Dependencies for BEEFY
-pub struct
BeefyDeps { +pub struct BeefyDeps { /// Receives notifications about finality proof events from BEEFY. - pub beefy_finality_proof_stream: BeefyVersionedFinalityProofStream, + pub beefy_finality_proof_stream: BeefyVersionedFinalityProofStream, /// Receives notifications about best block events from BEEFY. pub beefy_best_block_stream: BeefyBestBlockStream, /// Executor to drive the subscription manager in the BEEFY RPC handler. @@ -72,7 +74,7 @@ pub struct BeefyDeps { } /// Full client dependencies -pub struct FullDeps { +pub struct FullDeps { /// The client instance to use. pub client: Arc, /// Transaction pool instance. @@ -88,14 +90,14 @@ pub struct FullDeps { /// GRANDPA specific dependencies. pub grandpa: GrandpaDeps, /// BEEFY specific dependencies. - pub beefy: BeefyDeps, + pub beefy: BeefyDeps, /// Backend used by the node. pub backend: Arc, } /// Instantiate all RPC extensions. -pub fn create_full( - FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa, beefy, backend } : FullDeps, +pub fn create_full( + FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa, beefy, backend } : FullDeps, ) -> Result> where C: ProvideRuntimeApi @@ -114,6 +116,8 @@ where SC: SelectChain + 'static, B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::StateBackend>, + AuthorityId: AuthorityIdBound, + ::Signature: Send + Sync, { use frame_rpc_system::{System, SystemApiServer}; use mmr_rpc::{Mmr, MmrApiServer}; @@ -171,7 +175,7 @@ where )?; io.merge( - Beefy::::new( + Beefy::::new( beefy.beefy_finality_proof_stream, beefy.beefy_best_block_stream, beefy.subscription_executor, diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 4219a7e7b0dcb57c50a5e709d0695dfa723b582a..3a64148817682096d409e9ea1c4e38a39b44d7b7 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index cacafd8ed3b746d35b5b064ca3d6662b313f8b86..314e101ad221e9e842eb609a0ca6d80d8c8c52ad 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] paste = "1.0" enumn = "0.1.12" -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { package = "sp-std", path = "../../../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 9b24b99cfbe2a2f1d06fc842f798a6232077e2fb..92a8e46f5f9cc55888c7e8d9b4a7d743e6e6ad70 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ 
b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -671,10 +671,6 @@ mod tests { type OverarchingCall = RuntimeCall; } - parameter_types! { - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -689,7 +685,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index aa4caac96f154b43f60fb4aa0dc45b9017aff1d9..e7b7c081ae4e96fd51f030fb482d9b9145a0d1d1 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -699,10 +699,6 @@ mod tests { } ); - parameter_types! { - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -718,7 +714,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 477530467fa105d93b41569d851b5699aadc09b0..0aecbcd531c49bba6d4ea3ca2c4f367ed3530d08 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -890,10 +890,6 @@ mod tests { } ); - parameter_types! { - pub const BlockHashCount: u32 = 250; - } - type BlockNumber = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] @@ -911,7 +907,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index 85531e9c04fc47f83fb007802e30e87799c2150c..a92a05219cf8762d4903c3fbd1853e517b77f67e 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -276,7 +276,6 @@ mod tests { ); parameter_types! { - pub const BlockHashCount: u64 = 250; pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() .base_block(Weight::from_parts(10, 0)) .for_class(DispatchClass::all(), |weight| { @@ -302,7 +301,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type BlockLength = BlockLength; type BlockWeights = BlockWeights; type DbWeight = (); diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 3e9ac1fc1b152574ea86d42c7d48f51da47ddc39..2122e75f3e2d2ac641df5429982ba3d1de679e2a 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -109,7 +109,6 @@ where use crate::{auctions::Error as AuctionsError, crowdloan::Error as CrowdloanError}; parameter_types! 
{ - pub const BlockHashCount: u32 = 250; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( Weight::from_parts(4 * 1024 * 1024, u64::MAX), @@ -131,7 +130,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index a49ebab3e26a8df5ecfdf7615849b75d9e3c4671..c90802a40129bceb4a74f8f8fe4f7d4f54426482 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -761,7 +761,6 @@ mod tests { const NORMAL_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { - pub const BlockHashCount: u32 = 250; pub BlockWeights: limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); pub BlockLength: limits::BlockLength = @@ -780,7 +779,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type DbWeight = (); type BlockWeights = BlockWeights; type BlockLength = BlockLength; diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index b90bbb3a7cfb87c8753114de74d12119ad8b6c56..3920a2c68c5532dcdca6b9363bd5a193a380c9f2 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -508,10 +508,6 @@ mod tests { type AccountId = AccountId32; - parameter_types! { - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -527,7 +523,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index 738569ff4416c0049ca134e5ea94cf5e9245f81d..9da345beea3991057e65ca5d4019c0064e3c2699 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -525,10 +525,6 @@ mod tests { } ); - parameter_types! 
{ - pub const BlockHashCount: u32 = 250; - } - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -543,7 +539,6 @@ mod tests { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index 481627542865e1e79e4be790c3ccf46cc5878c07..76c1d134fa18669768aff9e860bbaee35a201e67 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } -parity-scale-codec = { version = "3.6.1", default-features = false } +parity-scale-codec = { version = "3.6.12", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 402c6e487a1f8b2b2ad9f535ca60bdc160c69895..d00a19c6ddb8e43881473d7b22daa31b4cc6990c 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 37788a67ea0c46d6c75b45039d994b8e40641e1c..795759b3b39e18880da9e314697ee4a0832d3939 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -173,7 +173,7 @@ impl QueueStatusType { fn consume_index(&mut self, removed_index: QueueIndex) { if removed_index != self.smallest_index { self.freed_indices.push(removed_index.reverse()); - return + return; } let mut index = self.smallest_index.0.overflowing_add(1).0; // Even more to advance? @@ -368,10 +368,10 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// An order was placed at some spot price amount. - OnDemandOrderPlaced { para_id: ParaId, spot_price: BalanceOf }, - /// The value of the spot traffic multiplier changed. 
- SpotTrafficSet { traffic: FixedU128 }, + /// An order was placed at some spot price amount by orderer ordered_by + OnDemandOrderPlaced { para_id: ParaId, spot_price: BalanceOf, ordered_by: T::AccountId }, + /// The value of the spot price has likely changed + SpotPriceSet { spot_price: BalanceOf }, } #[pallet::error] @@ -410,12 +410,11 @@ pub mod pallet { /// /// Errors: /// - `InsufficientBalance`: from the Currency implementation - /// - `InvalidParaId` /// - `QueueFull` /// - `SpotPriceHigherThanMaxAmount` /// /// Events: - /// - `SpotOrderPlaced` + /// - `OnDemandOrderPlaced` #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::place_order_allow_death(QueueStatus::::get().size()))] pub fn place_order_allow_death( @@ -437,12 +436,11 @@ pub mod pallet { /// /// Errors: /// - `InsufficientBalance`: from the Currency implementation - /// - `InvalidParaId` /// - `QueueFull` /// - `SpotPriceHigherThanMaxAmount` /// /// Events: - /// - `SpotOrderPlaced` + /// - `OnDemandOrderPlaced` #[pallet::call_index(1)] #[pallet::weight(::WeightInfo::place_order_keep_alive(QueueStatus::::get().size()))] pub fn place_order_keep_alive( @@ -539,12 +537,11 @@ where /// /// Errors: /// - `InsufficientBalance`: from the Currency implementation - /// - `InvalidParaId` /// - `QueueFull` /// - `SpotPriceHigherThanMaxAmount` /// /// Events: - /// - `SpotOrderPlaced` + /// - `OnDemandOrderPlaced` fn do_place_order( sender: ::AccountId, max_amount: BalanceOf, @@ -578,6 +575,12 @@ where Error::::QueueFull ); Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back); + Pallet::::deposit_event(Event::::OnDemandOrderPlaced { + para_id, + spot_price, + ordered_by: sender, + }); + Ok(()) }) } @@ -599,7 +602,14 @@ where // Only update storage on change if new_traffic != old_traffic { queue_status.traffic = new_traffic; - Pallet::::deposit_event(Event::::SpotTrafficSet { traffic: new_traffic }); + + // calculate the new spot price + let spot_price: BalanceOf = new_traffic.saturating_mul_int( + config.scheduler_params.on_demand_base_fee.saturated_into::>(), + ); + + // emit the event for updated new price + Pallet::::deposit_event(Event::::SpotPriceSet { spot_price }); } }, Err(err) => { @@ -721,7 +731,7 @@ where "Decreased affinity for a para that has not been served on a core?" ); if affinity != Some(0) { - return + return; } // No affinity more for entries on this core, free any entries: // @@ -754,7 +764,7 @@ where } else { *maybe_affinity = None; } - return Some(new_count) + return Some(new_count); } else { None } diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 94bce4c83e6ff917c9e04d2036e276e7cf85627d..33cbcb98fb29992a4eac77db9eb7def4088af16f 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -106,7 +106,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The runtime's definition of a Currency. type Currency: Currency; - /// The ParaId of the broker system parachain. + /// The ParaId of the coretime chain. #[pallet::constant] type BrokerId: Get; /// Something that provides the weight of this pallet. @@ -139,10 +139,16 @@ pub mod pallet { #[pallet::call] impl Pallet { + /// Request the configuration to be updated with the specified number of cores. Warning: + /// Since this only schedules a configuration update, it takes two sessions to come into + /// effect. 
+ /// + /// - `origin`: Root or the Coretime Chain + /// - `count`: total number of cores #[pallet::weight(::WeightInfo::request_core_count())] #[pallet::call_index(1)] pub fn request_core_count(origin: OriginFor, count: u16) -> DispatchResult { - // Ignore requests not coming from the broker parachain or root. + // Ignore requests not coming from the coretime chain or root. Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; configuration::Pallet::::set_coretime_cores_unchecked(u32::from(count)) @@ -155,7 +161,7 @@ pub mod pallet { // origin: OriginFor, // _when: BlockNumberFor, //) -> DispatchResult { - // // Ignore requests not coming from the broker parachain or root. + // // Ignore requests not coming from the coretime chain or root. // Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; // Ok(()) //} @@ -168,7 +174,7 @@ pub mod pallet { // _who: T::AccountId, // _amount: BalanceOf, //) -> DispatchResult { - // // Ignore requests not coming from the broker parachain or root. + // // Ignore requests not coming from the coretime chain or root. // Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; // Ok(()) //} @@ -177,7 +183,7 @@ pub mod pallet { /// to be used. /// /// Parameters: - /// -`origin`: The `ExternalBrokerOrigin`, assumed to be the Broker system parachain. + /// -`origin`: The `ExternalBrokerOrigin`, assumed to be the coretime chain. /// -`core`: The core that should be scheduled. /// -`begin`: The starting blockheight of the instruction. /// -`assignment`: How the blockspace should be utilised. @@ -193,7 +199,7 @@ pub mod pallet { assignment: Vec<(CoreAssignment, PartsOf57600)>, end_hint: Option>, ) -> DispatchResult { - // Ignore requests not coming from the broker parachain or root. + // Ignore requests not coming from the coretime chain or root. Self::ensure_root_or_para(origin, T::BrokerId::get().into())?; let core = u32::from(core).into(); @@ -243,7 +249,7 @@ impl Pallet { } } - // Handle legacy swaps in coretime. Notifies broker parachain that a lease swap has occurred via + // Handle legacy swaps in coretime. Notifies coretime chain that a lease swap has occurred via // XCM message. This function is meant to be used in an implementation of `OnSwap` trait. pub fn on_legacy_lease_swap(one: ParaId, other: ParaId) { let message = Xcm(vec![ diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 31befefa32201b4bf343301c2956abcfbfb6a896..0c7274984085835e69e2b06b49e44f0ee7cc2e85 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -421,6 +421,7 @@ impl From for AcceptanceCheckErr { /// An error returned by [`Pallet::check_upward_messages`] that indicates a violation of one of /// acceptance criteria rules. #[cfg_attr(test, derive(PartialEq))] +#[allow(dead_code)] pub(crate) enum UmpAcceptanceCheckErr { /// The maximal number of messages that can be submitted in one batch was exceeded. MoreMessagesThanPermitted { sent: u32, permitted: u32 }, diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index a32c9d11b36e737e436d998ebddea1e5d38143b2..75b835b175414fc933d2915ea0ab1bf6bb8e30a6 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -100,7 +100,6 @@ where } parameter_types! 
{ - pub const BlockHashCount: u32 = 250; pub static BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( Weight::from_parts(4 * 1024 * 1024, u64::MAX), @@ -125,7 +124,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index f4d8fb51b3fa2bfa95bd409cd8bd7a2aec17c1f8..c78f3e668b9c901fe2e9a71913b3b295307bf6ef 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -11,7 +11,7 @@ license.workspace = true workspace = true [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } serde = { workspace = true } @@ -95,6 +95,7 @@ pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-feat pallet-root-testing = { path = "../../../substrate/frame/root-testing", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } hex-literal = { version = "0.4.1" } @@ -134,6 +135,7 @@ std = [ "block-builder-api/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -324,6 +326,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + # Set timing constants (e.g. session period) to faster versions to speed up testing. fast-runtime = ["rococo-runtime-constants/fast-runtime"] @@ -332,4 +337,4 @@ runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller, like logging for example. -on-chain-release-build = ["sp-api/disable-logging"] +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/polkadot/runtime/rococo/build.rs b/polkadot/runtime/rococo/build.rs index 403c31ff21c70f679059fa5b7e65478d309ba6a3..7aae84cd5e0fedcb29265f98d40bcd9ccd83ae43 100644 --- a/polkadot/runtime/rococo/build.rs +++ b/polkadot/runtime/rococo/build.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-#[cfg(feature = "std")] +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] fn main() { substrate_wasm_builder::WasmBuilder::build_using_defaults(); @@ -24,5 +24,18 @@ fn main() { .build(); } +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("ROC", 12) + .build(); + + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .enable_metadata_hash("ROC", 12) + .build(); +} + #[cfg(not(feature = "std"))] fn main() {} diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 9209045364c28bc585c548d6d2b30176bd52bb20..89d5deb86f1a0ff78a05255f9f5f3bdaca8333a6 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -57,7 +57,7 @@ pub mod time { // 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. // The choice of is done in accordance to the slot duration and expected target // block time, for safely resisting network delays of maximum two seconds. - // + // pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 22e6183e59460899e6eceb938d93222798ee3a34..a77c0188a1da67821be6843480329d33e1427360 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -106,10 +106,7 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{ - latest::prelude::*, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, - VersionedXcm, -}; +use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; use xcm_builder::PayOverXcm; pub use frame_system::Call as SystemCall; @@ -135,7 +132,7 @@ use governance::{ TreasurySpender, }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -165,10 +162,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 25, + transaction_version: 26, state_version: 1, }; @@ -643,7 +640,9 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + frame_metadata_hash_extension::CheckMetadataHash::new(true), ); + let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); @@ -1528,6 +1527,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. @@ -1769,15 +1769,8 @@ sp_api::impl_runtime_apis! 
{ impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable = vec![ - // native token - VersionedAssetId::from(AssetId(xcm_config::TokenLocation::get())) - ]; - - Ok(acceptable - .into_iter() - .filter_map(|asset| asset.into_version(xcm_version).ok()) - .collect()) + let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + XcmPallet::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { @@ -1806,63 +1799,13 @@ sp_api::impl_runtime_apis! { } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm_executor::RecordXcm; - pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; - let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + XcmPallet::dry_run_call::(origin, call) } fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - let origin_location: Location = origin_location.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Location version conversion failed with error: {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let xcm: Xcm = xcm.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Xcm version conversion failed with error {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256); - let result = xcm_executor::XcmExecutor::::prepare_and_execute( - origin_location, - xcm, - &mut hash, - Weight::MAX, // Max limit available for execution. - Weight::zero(), - ); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(XcmDryRunEffects { - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + XcmPallet::dry_run_xcm::(origin_location, xcm) } } @@ -2134,7 +2077,7 @@ sp_api::impl_runtime_apis! { fn generate_proof( block_numbers: Vec, best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Mmr::generate_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( @@ -2148,7 +2091,7 @@ sp_api::impl_runtime_apis! 
{ ) } - fn verify_proof(leaves: Vec, proof: mmr::Proof) + fn verify_proof(leaves: Vec, proof: mmr::LeafProof) -> Result<(), mmr::Error> { let leaves = leaves.into_iter().map(|leaf| @@ -2161,7 +2104,7 @@ sp_api::impl_runtime_apis! { fn verify_proof_stateless( root: mmr::Hash, leaves: Vec, - proof: mmr::Proof + proof: mmr::LeafProof ) -> Result<(), mmr::Error> { let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); pallet_mmr::verify_leaves_proof::(root, nodes, proof) diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 6552ed4ef8aeaa2e36aa1f410879cc4cf0eeaf07..596cc974c82599289eb9203704d37af81811b004 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -11,7 +11,7 @@ license.workspace = true workspace = true [dependencies] -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { workspace = true } diff --git a/polkadot/runtime/test-runtime/constants/src/lib.rs b/polkadot/runtime/test-runtime/constants/src/lib.rs index 77c83b063cf0f8fb7771545faa074b315fa75a03..2422762ca38e925cf9666f1c42e2aa10cf70dd90 100644 --- a/polkadot/runtime/test-runtime/constants/src/lib.rs +++ b/polkadot/runtime/test-runtime/constants/src/lib.rs @@ -45,7 +45,7 @@ pub mod time { // 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. // The choice of is done in accordance to the slot duration and expected target // block time, for safely resisting network delays of maximum two seconds. - // + // pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 0509ba382b2e8b262a391af97dae42cb24291c23..9eb0fcca6678b234ab2b744e3fbf2f6f837cbdbe 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -1044,11 +1044,11 @@ sp_api::impl_runtime_apis! { fn generate_proof( _block_numbers: Vec, _best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_proof(_leaves: Vec, _proof: mmr::Proof) + fn verify_proof(_leaves: Vec, _proof: mmr::LeafProof) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) @@ -1057,7 +1057,7 @@ sp_api::impl_runtime_apis! 
{ fn verify_proof_stateless( _root: Hash, _leaves: Vec, - _proof: mmr::Proof + _proof: mmr::LeafProof ) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 2cb719dbb907d5a3b592aa68a0f2e4e8f106ba11..ae1145a28bdfe2709a00b81894550bb64bfbb03c 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } @@ -45,6 +45,7 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def frame-election-provider-support = { path = "../../../substrate/frame/election-provider-support", default-features = false } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false, features = ["experimental", "tuples-96"] } frame-system = { path = "../../../substrate/frame/system", default-features = false } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } @@ -83,6 +84,7 @@ pallet-staking = { path = "../../../substrate/frame/staking", default-features = pallet-stake-tracker = { path = "../../../substrate/frame/staking/stake-tracker", default-features = false } pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", path = "../../../substrate/frame/staking/reward-curve" } pallet-staking-runtime-api = { path = "../../../substrate/frame/staking/runtime-api", default-features = false } +pallet-delegated-staking = { path = "../../../substrate/frame/delegated-staking", default-features = false } pallet-state-trie-migration = { path = "../../../substrate/frame/state-trie-migration", default-features = false } pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } @@ -142,6 +144,7 @@ std = [ "frame-benchmarking?/std", "frame-election-provider-support/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -160,6 +163,7 @@ std = [ "pallet-beefy/std", "pallet-collective/std", "pallet-conviction-voting/std", + "pallet-delegated-staking/std", "pallet-democracy/std", "pallet-election-provider-multi-phase/std", "pallet-election-provider-support-benchmarking?/std", @@ -244,6 +248,7 @@ runtime-benchmarks = [ "pallet-balances/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-conviction-voting/runtime-benchmarks", + "pallet-delegated-staking/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-election-provider-support-benchmarking/runtime-benchmarks", @@ -305,6 +310,7 @@ try-runtime = [ "pallet-beefy/try-runtime", 
"pallet-collective/try-runtime", "pallet-conviction-voting/try-runtime", + "pallet-delegated-staking/try-runtime", "pallet-democracy/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-elections-phragmen/try-runtime", @@ -342,6 +348,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + # Set timing constants (e.g. session period) to faster versions to speed up testing. fast-runtime = [] @@ -350,4 +359,4 @@ runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller, like logging for example. -on-chain-release-build = ["sp-api/disable-logging"] +on-chain-release-build = ["metadata-hash", "sp-api/disable-logging"] diff --git a/polkadot/runtime/westend/build.rs b/polkadot/runtime/westend/build.rs index 0b3e12c78c746517a32538c8c1e5f9da63747fc5..8ff3a4fb9112c670882cd9794d6291d74cee194f 100644 --- a/polkadot/runtime/westend/build.rs +++ b/polkadot/runtime/westend/build.rs @@ -14,8 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use substrate_wasm_builder::WasmBuilder; +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::build_using_defaults(); +} +#[cfg(all(feature = "metadata-hash", feature = "std"))] fn main() { - WasmBuilder::build_using_defaults(); + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("WND", 12) + .build(); } + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index c98f4b114fd88241dea1e2e5ffcf694848448007..1a4c1f3110614508cf3d934fe82cd5884f49d365 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -52,7 +52,7 @@ pub mod time { // 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. // The choice of is done in accordance to the slot duration and expected target // block time, for safely resisting network delays of maximum two seconds. 
- // + // pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 578f822352d532b03bbd44c8c92c060df820adc2..30242ef61d4618cc1f1bdf7d5778f91eb8fd82e4 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -102,14 +102,11 @@ use sp_std::{ #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{ - latest::prelude::*, IntoVersion, VersionedAssetId, VersionedAssets, VersionedLocation, - VersionedXcm, -}; +use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; use xcm_builder::PayOverXcm; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, }; @@ -156,10 +153,10 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 1_011_000, + spec_version: 1_012_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, - transaction_version: 25, + transaction_version: 26, state_version: 1, }; @@ -666,7 +663,7 @@ impl pallet_staking::Config for Runtime { type HistoryDepth = frame_support::traits::ConstU32<84>; type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; - type EventListeners = (StakeTracker, NominationPools); + type EventListeners = (StakeTracker, NominationPools, DelegatedStaking); type WeightInfo = weights::pallet_staking::WeightInfo; type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } @@ -816,6 +813,7 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + frame_metadata_hash_extension::CheckMetadataHash::::new(true), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -1380,7 +1378,8 @@ impl pallet_nomination_pools::Config for Runtime { type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakeAdapter = + pallet_nomination_pools::adapter::DelegateStake; type PostUnbondingPoolsWindow = ConstU32<4>; type MaxMetadataLen = ConstU32<256>; // we use the same number of allowed unlocking chunks as with staking. @@ -1390,6 +1389,21 @@ impl pallet_nomination_pools::Config for Runtime { type AdminOrigin = EitherOf, StakingAdmin>; } +parameter_types! { + pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk"); + pub const SlashRewardFraction: Perbill = Perbill::from_percent(1); +} + +impl pallet_delegated_staking::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = DelegatedStakingPalletId; + type Currency = Balances; + type OnSlash = (); + type SlashRewardFraction = SlashRewardFraction; + type RuntimeHoldReason = RuntimeHoldReason; + type CoreStaking = Staking; +} + impl pallet_root_testing::Config for Runtime { type RuntimeEvent = RuntimeEvent; } @@ -1538,6 +1552,10 @@ mod runtime { #[runtime::pallet_index(37)] pub type Treasury = pallet_treasury; + // Staking extension for delegation + #[runtime::pallet_index(38)] + pub type DelegatedStaking = pallet_delegated_staking; + // Parachains pallets. Start indices at 40 to leave room. 
#[runtime::pallet_index(41)] pub type ParachainsOrigin = parachains_origin; @@ -1646,13 +1664,15 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + frame_metadata_hash_extension::CheckMetadataHash, ); -pub struct NominationPoolsMigrationV4OldPallet; -impl Get for NominationPoolsMigrationV4OldPallet { - fn get() -> Perbill { - Perbill::from_percent(100) - } +parameter_types! { + // This is the max pools that will be migrated in the runtime upgrade. Westend has more pools + // than this, but we want to emulate some non migrated pools. In prod runtimes, if weight is not + // a concern, it is recommended to set to (existing pools + 10) to also account for any new + // pools getting created before the migration is actually executed. + pub const MaxPoolsToMigrate: u32 = 250; } /// All migrations that will run on the next runtime upgrade. @@ -1685,7 +1705,15 @@ pub mod migrations { } /// Unreleased migrations. Add new ones here: - pub type Unreleased = (); + pub type Unreleased = ( + // Migrate NominationPools to `DelegateStake` adapter. This is unversioned upgrade and + // should not be applied yet in Kusama/Polkadot. + pallet_nomination_pools::migration::unversioned::DelegationStakeMigration< + Runtime, + MaxPoolsToMigrate, + >, + pallet_staking::migrations::single_block::v15::MigrateV14ToV15, + ); } /// Unchecked extrinsic type as expected by this runtime. @@ -2043,7 +2071,7 @@ sp_api::impl_runtime_apis! { fn generate_proof( block_numbers: Vec, best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Mmr::generate_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( @@ -2057,7 +2085,7 @@ sp_api::impl_runtime_apis! { ) } - fn verify_proof(leaves: Vec, proof: mmr::Proof) + fn verify_proof(leaves: Vec, proof: mmr::LeafProof) -> Result<(), mmr::Error> { let leaves = leaves.into_iter().map(|leaf| @@ -2070,7 +2098,7 @@ sp_api::impl_runtime_apis! { fn verify_proof_stateless( root: mmr::Hash, leaves: Vec, - proof: mmr::Proof + proof: mmr::LeafProof ) -> Result<(), mmr::Error> { let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); pallet_mmr::verify_leaves_proof::(root, nodes, proof) @@ -2233,15 +2261,8 @@ sp_api::impl_runtime_apis! { impl xcm_fee_payment_runtime_api::fees::XcmPaymentApi for Runtime { fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { - let acceptable = vec![ - // native token - VersionedAssetId::from(AssetId(xcm_config::TokenLocation::get())) - ]; - - Ok(acceptable - .into_iter() - .filter_map(|asset| asset.into_version(xcm_version).ok()) - .collect()) + let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + XcmPallet::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { @@ -2270,63 +2291,13 @@ sp_api::impl_runtime_apis! 
{ } } - impl xcm_fee_payment_runtime_api::dry_run::XcmDryRunApi for Runtime { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - use xcm_executor::RecordXcm; - pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; - let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + impl xcm_fee_payment_runtime_api::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { + XcmPallet::dry_run_call::(origin, call) } fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { - use xcm_builder::InspectMessageQueues; - let origin_location: Location = origin_location.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Location version conversion failed with error: {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let xcm: Xcm = xcm.try_into().map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", - "Xcm version conversion failed with error {:?}", - error, - ); - XcmDryRunApiError::VersionedConversionFailed - })?; - let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256); - let result = xcm_executor::XcmExecutor::::prepare_and_execute( - origin_location, - xcm, - &mut hash, - Weight::MAX, // Max limit available for execution. - Weight::zero(), - ); - let forwarded_xcms = xcm_config::XcmRouter::get_messages(); - let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); - Ok(XcmDryRunEffects { - forwarded_xcms, - emitted_events: events, - execution_result: result, - }) + XcmPallet::dry_run_xcm::(origin_location, xcm) } } @@ -2346,6 +2317,22 @@ sp_api::impl_runtime_apis! { fn balance_to_points(pool_id: pallet_nomination_pools::PoolId, new_funds: Balance) -> Balance { NominationPools::api_balance_to_points(pool_id, new_funds) } + + fn pool_pending_slash(pool_id: pallet_nomination_pools::PoolId) -> Balance { + NominationPools::api_pool_pending_slash(pool_id) + } + + fn member_pending_slash(member: AccountId) -> Balance { + NominationPools::api_member_pending_slash(member) + } + + fn pool_needs_delegate_migration(pool_id: pallet_nomination_pools::PoolId) -> bool { + NominationPools::api_pool_needs_delegate_migration(pool_id) + } + + fn member_needs_delegate_migration(member: AccountId) -> bool { + NominationPools::api_member_needs_delegate_migration(member) + } } impl pallet_staking_runtime_api::StakingApi for Runtime { diff --git a/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs b/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs index 6aa5ddd1ec8fb5251ac6bbd5059938e4bad93b22..35eef199fb7a974f7665a83c09318ee06b37fea2 100644 --- a/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs +++ b/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs @@ -16,10 +16,10 @@ //! 
Autogenerated weights for `pallet_nomination_pools` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-04-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-dcu62vjg-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,7 +54,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) + /// Storage: `Staking::Bonded` (r:2 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) @@ -62,7 +62,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `NominationPools::MaxPoolMembersPerPool` (r:1 w:0) /// Proof: `NominationPools::MaxPoolMembersPerPool` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -70,10 +70,16 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` 
(`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:3 w:3) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) @@ -82,13 +88,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3355` + // Measured: `3606` // Estimated: `8877` - // Minimum execution time: 173_707_000 picoseconds. - Weight::from_parts(179_920_000, 0) + // Minimum execution time: 204_877_000 picoseconds. + Weight::from_parts(210_389_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(20)) - .saturating_add(T::DbWeight::get().writes(13)) + .saturating_add(T::DbWeight::get().reads(24)) + .saturating_add(T::DbWeight::get().writes(15)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) @@ -98,16 +104,20 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:3 w:2) + /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) + /// Storage: `Staking::Bonded` (r:2 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:3 w:3) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) @@ -116,13 +126,13 @@ impl 
pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3365` + // Measured: `3762` // Estimated: `8877` - // Minimum execution time: 174_414_000 picoseconds. - Weight::from_parts(178_068_000, 0) + // Minimum execution time: 203_362_000 picoseconds. + Weight::from_parts(209_899_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(17)) - .saturating_add(T::DbWeight::get().writes(13)) + .saturating_add(T::DbWeight::get().reads(20)) + .saturating_add(T::DbWeight::get().writes(14)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) @@ -134,16 +144,20 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:3 w:3) + /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:0) + /// Storage: `Staking::Bonded` (r:2 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:2 w:2) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) @@ -152,13 +166,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3312` - // Estimated: `8799` - // Minimum execution time: 198_864_000 picoseconds. 
- Weight::from_parts(203_783_000, 0) - .saturating_add(Weight::from_parts(0, 8799)) - .saturating_add(T::DbWeight::get().reads(17)) - .saturating_add(T::DbWeight::get().writes(13)) + // Measured: `3709` + // Estimated: `6248` + // Minimum execution time: 230_686_000 picoseconds. + Weight::from_parts(237_502_000, 0) + .saturating_add(Weight::from_parts(0, 6248)) + .saturating_add(T::DbWeight::get().reads(20)) + .saturating_add(T::DbWeight::get().writes(14)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) @@ -176,8 +190,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `1138` // Estimated: `4182` - // Minimum execution time: 70_250_000 picoseconds. - Weight::from_parts(72_231_000, 0) + // Minimum execution time: 70_821_000 picoseconds. + Weight::from_parts(72_356_000, 0) .saturating_add(Weight::from_parts(0, 4182)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -194,7 +208,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:1) + /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -202,10 +216,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListNodes` (r:3 w:3) /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) @@ -216,13 +228,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3545` + // Measured: `3341` // Estimated: `8877` - // Minimum execution time: 155_853_000 picoseconds. - Weight::from_parts(161_032_000, 0) + // Minimum execution time: 156_714_000 picoseconds. 
+ Weight::from_parts(158_305_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(20)) - .saturating_add(T::DbWeight::get().writes(13)) + .saturating_add(T::DbWeight::get().reads(18)) + .saturating_add(T::DbWeight::get().writes(11)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) @@ -232,23 +244,25 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1744` - // Estimated: `4764` - // Minimum execution time: 62_933_000 picoseconds. - Weight::from_parts(65_847_171, 0) - .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 1_476 - .saturating_add(Weight::from_parts(59_648, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `1767` + // Estimated: `4556` + // Minimum execution time: 56_836_000 picoseconds. 
+ Weight::from_parts(59_738_398, 0) + .saturating_add(Weight::from_parts(0, 4556)) + // Standard Error: 1_478 + .saturating_add(Weight::from_parts(60_085, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -259,18 +273,24 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) @@ -278,15 +298,15 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2134` - // Estimated: `4764` - // Minimum execution time: 123_641_000 picoseconds. 
- Weight::from_parts(127_222_589, 0) - .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_493 - .saturating_add(Weight::from_parts(83_361, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(9)) + // Measured: `2405` + // Estimated: `4556` + // Minimum execution time: 136_737_000 picoseconds. + Weight::from_parts(141_757_658, 0) + .saturating_add(Weight::from_parts(0, 4556)) + // Standard Error: 2_609 + .saturating_add(Weight::from_parts(84_538, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(11)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) @@ -296,28 +316,38 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Balances::Locks` (r:2 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:2 w:1) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForVirtualStakers` (r:1 w:1) + /// Proof: `Staking::CounterForVirtualStakers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:0) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: 
`DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForAgents` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForAgents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) - /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) @@ -326,6 +356,10 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::CounterForRewardPools` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForSubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:1) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `NominationPools::Metadata` (r:1 w:1) /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForBondedPools` (r:1 w:1) @@ -337,13 +371,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2453` - // Estimated: `8538` - // Minimum execution time: 219_469_000 picoseconds. - Weight::from_parts(227_526_000, 0) - .saturating_add(Weight::from_parts(0, 8538)) - .saturating_add(T::DbWeight::get().reads(24)) - .saturating_add(T::DbWeight::get().writes(20)) + // Measured: `2809` + // Estimated: `6274` + // Minimum execution time: 241_043_000 picoseconds. 
+ Weight::from_parts(250_578_253, 0) + .saturating_add(Weight::from_parts(0, 6274)) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(26)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -365,16 +399,30 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:2 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForAgents` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForAgents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:1 w:1) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:2 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:2 w:1) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForVirtualStakers` (r:1 w:1) + /// Proof: `Staking::CounterForVirtualStakers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:1) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) @@ -391,22 +439,28 @@ impl 
pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1102` - // Estimated: `8538` - // Minimum execution time: 166_466_000 picoseconds. - Weight::from_parts(171_425_000, 0) - .saturating_add(Weight::from_parts(0, 8538)) - .saturating_add(T::DbWeight::get().reads(23)) - .saturating_add(T::DbWeight::get().writes(17)) + // Measured: `1168` + // Estimated: `6196` + // Minimum execution time: 180_902_000 picoseconds. + Weight::from_parts(187_769_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(31)) + .saturating_add(T::DbWeight::get().writes(23)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinCreateBond` (r:1 w:0) + /// Proof: `NominationPools::MinCreateBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) @@ -426,14 +480,14 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1738` + // Measured: `1921` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 59_650_000 picoseconds. - Weight::from_parts(60_620_077, 0) + // Minimum execution time: 78_369_000 picoseconds. + Weight::from_parts(79_277_958, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 7_316 - .saturating_add(Weight::from_parts(1_467_406, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(12)) + // Standard Error: 8_343 + .saturating_add(Weight::from_parts(1_493_255, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(15)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) @@ -446,10 +500,10 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1363` + // Measured: `1406` // Estimated: `4556` - // Minimum execution time: 31_170_000 picoseconds. 
- Weight::from_parts(32_217_000, 0) + // Minimum execution time: 32_631_000 picoseconds. + Weight::from_parts(33_356_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -465,11 +519,11 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `498` // Estimated: `3735` - // Minimum execution time: 12_603_000 picoseconds. - Weight::from_parts(13_241_702, 0) + // Minimum execution time: 12_514_000 picoseconds. + Weight::from_parts(13_232_732, 0) .saturating_add(Weight::from_parts(0, 3735)) - // Standard Error: 116 - .saturating_add(Weight::from_parts(1_428, 0).saturating_mul(n.into())) + // Standard Error: 150 + .saturating_add(Weight::from_parts(2_371, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -489,8 +543,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_608_000 picoseconds. - Weight::from_parts(3_801_000, 0) + // Minimum execution time: 3_107_000 picoseconds. + Weight::from_parts(3_255_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -500,18 +554,22 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `498` // Estimated: `3719` - // Minimum execution time: 16_053_000 picoseconds. - Weight::from_parts(16_473_000, 0) + // Minimum execution time: 16_568_000 picoseconds. + Weight::from_parts(17_019_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Staking::Validators` (r:1 w:0) /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -526,12 +584,12 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1901` + // Measured: `2138` // Estimated: `4556` - // Minimum execution time: 57_251_000 picoseconds. - Weight::from_parts(59_390_000, 0) + // Minimum execution time: 73_717_000 picoseconds. 
+ Weight::from_parts(77_030_000, 0) .saturating_add(Weight::from_parts(0, 4556)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -546,8 +604,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `770` // Estimated: `3719` - // Minimum execution time: 29_888_000 picoseconds. - Weight::from_parts(31_056_000, 0) + // Minimum execution time: 30_770_000 picoseconds. + Weight::from_parts(31_556_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -560,8 +618,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `538` // Estimated: `3719` - // Minimum execution time: 15_769_000 picoseconds. - Weight::from_parts(16_579_000, 0) + // Minimum execution time: 16_257_000 picoseconds. + Weight::from_parts(16_891_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -572,8 +630,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `498` // Estimated: `3719` - // Minimum execution time: 15_385_000 picoseconds. - Weight::from_parts(16_402_000, 0) + // Minimum execution time: 16_548_000 picoseconds. + Weight::from_parts(18_252_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -584,8 +642,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `498` // Estimated: `3719` - // Minimum execution time: 14_965_000 picoseconds. - Weight::from_parts(15_548_000, 0) + // Minimum execution time: 16_085_000 picoseconds. + Weight::from_parts(17_218_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -598,8 +656,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `508` // Estimated: `4182` - // Minimum execution time: 13_549_000 picoseconds. - Weight::from_parts(14_307_000, 0) + // Minimum execution time: 13_648_000 picoseconds. + Weight::from_parts(13_990_000, 0) .saturating_add(Weight::from_parts(0, 4182)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -616,8 +674,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `968` // Estimated: `3719` - // Minimum execution time: 60_153_000 picoseconds. - Weight::from_parts(61_369_000, 0) + // Minimum execution time: 60_321_000 picoseconds. + Weight::from_parts(61_512_000, 0) .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -632,12 +690,135 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) fn adjust_pool_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `867` + // Measured: `876` // Estimated: `4764` - // Minimum execution time: 64_985_000 picoseconds. - Weight::from_parts(66_616_000, 0) + // Minimum execution time: 65_609_000 picoseconds. 
+ Weight::from_parts(67_320_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `3328` + // Estimated: `4556` + // Minimum execution time: 99_605_000 picoseconds. + Weight::from_parts(101_986_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:0) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:0) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + fn apply_slash_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `3070` + // Estimated: `4556` + // Minimum execution time: 58_103_000 picoseconds. 
+ Weight::from_parts(59_680_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(7)) + } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:2 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForAgents` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForAgents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForVirtualStakers` (r:1 w:1) + /// Proof: `Staking::CounterForVirtualStakers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn pool_migrate() -> Weight { + // Proof Size summary in bytes: + // Measured: `1359` + // Estimated: `6196` + // Minimum execution time: 144_098_000 picoseconds. 
+ Weight::from_parts(146_590_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(11)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:2 w:2) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:0) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:2 w:2) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn migrate_delegation() -> Weight { + // Proof Size summary in bytes: + // Measured: `2275` + // Estimated: `6180` + // Minimum execution time: 148_594_000 picoseconds. 
+        Weight::from_parts(152_119_000, 0)
+            .saturating_add(Weight::from_parts(0, 6180))
+            .saturating_add(T::DbWeight::get().reads(15))
+            .saturating_add(T::DbWeight::get().writes(6))
+    }
 }
diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml
index 37b8a99d640a2d23e0d7a532adc0c43b04bba1f2..ad4a053fa3f9e434f8cc3f7ca9ef54f4435eba9c 100644
--- a/polkadot/statement-table/Cargo.toml
+++ b/polkadot/statement-table/Cargo.toml
@@ -10,7 +10,7 @@ description = "Stores messages other authorities issue about candidates in Polka"
 workspace = true

 [dependencies]
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] }
 sp-core = { path = "../../substrate/primitives/core" }
 primitives = { package = "polkadot-primitives", path = "../primitives" }
 gum = { package = "tracing-gum", path = "../node/gum" }
diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml
index f10f45b0b4fafa4ad59eb075aff0a9e94fc9aefc..2cd8e822ae16b39b4d9465b91b45869b1776dbf8 100644
--- a/polkadot/xcm/Cargo.toml
+++ b/polkadot/xcm/Cargo.toml
@@ -15,7 +15,7 @@ bounded-collections = { version = "0.2.0", default-features = false, features =
 derivative = { version = "2.2.0", default-features = false, features = ["use_core"] }
 impl-trait-for-tuples = "0.2.2"
 log = { workspace = true }
-parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
+parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] }
 serde = { features = ["alloc", "derive", "rc"], workspace = true }
diff --git a/polkadot/xcm/docs/Cargo.toml b/polkadot/xcm/docs/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..9820bd36dc0b1fb84fb6c4e43e4b1608308432a2
--- /dev/null
+++ b/polkadot/xcm/docs/Cargo.toml
@@ -0,0 +1,39 @@
+[package]
+name = "xcm-docs"
+description = "Documentation and guides for XCM"
+version = "0.1.0"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+repository.workspace = true
+authors.workspace = true
+edition.workspace = true
+publish = false
+
+[dependencies]
+# For XCM stuff
+xcm = { path = "../../xcm", package = "staging-xcm" }
+xcm-executor = { path = "../../xcm/xcm-executor", package = "staging-xcm-executor" }
+xcm-builder = { path = "../../xcm/xcm-builder", package = "staging-xcm-builder" }
+xcm-simulator = { path = "../../xcm/xcm-simulator" }
+pallet-xcm = { path = "../../xcm/pallet-xcm" }
+
+# For building FRAME runtimes
+frame = { package = "polkadot-sdk-frame", path = "../../../substrate/frame", features = ["experimental", "runtime"] }
+codec = { package = "parity-scale-codec", version = "3.6.9" }
+scale-info = { version = "2.6.0", default-features = false }
+polkadot-parachain-primitives = { path = "../../../polkadot/parachain" }
+polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains" }
+polkadot-primitives = { path = "../../../polkadot/primitives" }
+sp-runtime = { path = "../../../substrate/primitives/runtime" }
+sp-std = { path = "../../../substrate/primitives/std" }
+sp-io = { path = "../../../substrate/primitives/io" }
+
+# Some pallets
+pallet-message-queue = { path = "../../../substrate/frame/message-queue" }
+pallet-balances = { path = "../../../substrate/frame/balances" }
+
+# For building docs
+simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" }
+docify = "0.2.6"
+
+[dev-dependencies]
+test-log = "0.2.14"
diff --git a/polkadot/xcm/docs/mermaid/location_hierarchy.mmd b/polkadot/xcm/docs/mermaid/location_hierarchy.mmd
new file mode 100644
index 0000000000000000000000000000000000000000..54fcfc8072a9aa08032da54f3b4332ef7db5d7c1
--- /dev/null
+++ b/polkadot/xcm/docs/mermaid/location_hierarchy.mmd
@@ -0,0 +1,9 @@
+flowchart
+    relay[Relaychain] --> paraA["Parachain(1000)"]
+    relay --> paraB["Parachain(2000)"]
+
+    paraA --> pallet[Pallet]
+    pallet --> indexA[Index 1]
+    pallet --> indexB[Index 2]
+
+    paraA --> account[Account]
diff --git a/polkadot/xcm/docs/mermaid/structure.mmd b/polkadot/xcm/docs/mermaid/structure.mmd
new file mode 100644
index 0000000000000000000000000000000000000000..17f60467241a351ab3c623b451b8ff4d79df2b9b
--- /dev/null
+++ b/polkadot/xcm/docs/mermaid/structure.mmd
@@ -0,0 +1,4 @@
+flowchart
+    docs[xcm_docs] --> fundamentals
+    docs --> guides
+    docs --> cookbook
diff --git a/polkadot/xcm/docs/mermaid/transport_protocols.mmd b/polkadot/xcm/docs/mermaid/transport_protocols.mmd
new file mode 100644
index 0000000000000000000000000000000000000000..c0340db0651a3f273bbd35dd5fa51afe15a11c24
--- /dev/null
+++ b/polkadot/xcm/docs/mermaid/transport_protocols.mmd
@@ -0,0 +1,6 @@
+flowchart
+    relay[Relaychain] --"DMP"--> paraA["Parachain(2000)"]
+    relay --"DMP"--> paraB["Parachain(2001)"]
+
+    paraA --"UMP"--> relay
+    paraB --"UMP"--> relay
diff --git a/polkadot/xcm/docs/mermaid/universal_location.mmd b/polkadot/xcm/docs/mermaid/universal_location.mmd
new file mode 100644
index 0000000000000000000000000000000000000000..97bfa747319db21dfe07032a3e7fea4d7a54f056
--- /dev/null
+++ b/polkadot/xcm/docs/mermaid/universal_location.mmd
@@ -0,0 +1,3 @@
+flowchart
+    universe[Universal Location] --> polkadot[Polkadot]
+    universe --> ethereum[Ethereum]
diff --git a/polkadot/xcm/docs/mermaid/usdt_location.mmd b/polkadot/xcm/docs/mermaid/usdt_location.mmd
new file mode 100644
index 0000000000000000000000000000000000000000..5e9222f6098ec900ed18b99a16a51c9e2584ee6e
--- /dev/null
+++ b/polkadot/xcm/docs/mermaid/usdt_location.mmd
@@ -0,0 +1,6 @@
+flowchart
+    relay[Polkadot] --> assetHub["Asset Hub"]
+    relay --> anotherPara["Another parachain"]
+
+    assetHub --> assetsPallet["Assets Pallet"]
+    assetsPallet --> usdt[1984]
diff --git a/polkadot/xcm/docs/src/cookbook/mod.rs b/polkadot/xcm/docs/src/cookbook/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..1c69bf0ead6f81e4a11010cbf314f9638dda9ccb
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/mod.rs
@@ -0,0 +1,27 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! # XCM Cookbook
+//!
+//! A collection of XCM recipes.
+//!
+//! Each recipe is tested and explains all the code necessary to run it -- they're not just snippets
+//! to copy and paste.
+
+/// Configuring a parachain that only uses the Relay Chain native token.
+/// In the case of Polkadot, this recipe will show you how to launch a parachain with no native
+/// token -- dealing only in DOT.
+pub mod relay_token_transactor;
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..279dd71a35f7410b6a6899e5c018c4f55ca18d15
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/mod.rs
@@ -0,0 +1,51 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! # Relay Asset Transactor
+//!
+//! This example shows how to configure a parachain to only deal with the Relay Chain token.
+//!
+//! The first step is to use the [`xcm_builder::FungibleAdapter`] to create an `AssetTransactor`
+//! that can handle the relay chain token.
+#![doc = docify::embed!("src/cookbook/relay_token_transactor/parachain/xcm_config.rs", asset_transactor)]
+//!
+//! The second step is to configure `IsReserve` to recognize the relay chain as a reserve for its
+//! own asset.
+//! With this, you'll be able to easily mint a derivative asset, backed one-to-one by the Relay
+//! Chain, via the xcm pallet's `transfer_assets` extrinsic.
+//!
+//! The `IsReserve` type takes a type that implements `ContainsPair<Asset, Location>`.
+//! In this case, we want a type that contains the pair `(relay_chain_native_token, relay_chain)`.
+#![doc = docify::embed!("src/cookbook/relay_token_transactor/parachain/xcm_config.rs", is_reserve)]
+//!
+//! With this setup, we are able to do a reserve asset transfer to and from the parachain and relay
+//! chain.
+#![doc = docify::embed!("src/cookbook/relay_token_transactor/tests.rs", reserve_asset_transfers_work)]
+//!
+//! For the rest of the code, be sure to check the contents of this module.
+
+/// The parachain runtime for this example.
+pub mod parachain;
+
+/// The relay chain runtime for this example.
+pub mod relay_chain;
+
+/// The network for this example.
+pub mod network;
+
+/// Tests for this example.
+#[cfg(test)]
+pub mod tests;
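To make the configuration concrete before diving into the modules, here is a minimal sketch of how such a reserve transfer is driven through the simulator. It follows the shape of this example's `tests` module; the `MockNet`, `Relay`, `ALICE`, `BOB`, and `CENTS` names come from the `network` module below, and it assumes the relay runtime exposes pallet-xcm as `XcmPallet` (as the `relay_chain` module in this example does):

use frame::testing_prelude::assert_ok; // assumed re-export of FRAME's `assert_ok!`
use xcm::v4::prelude::*;
use xcm_simulator::TestExt;

// Reset the mock network, then act from the Relay Chain's externalities.
MockNet::reset();
Relay::execute_with(|| {
    // Send to parachain 2222, crediting BOB's account over there.
    let destination: Location = Parachain(2222).into();
    let beneficiary: Location = AccountId32 { id: BOB.clone().into(), network: None }.into();
    // The relay token is `Here` from the Relay Chain's own point of view.
    // `u128` matters: a `u64` amount would convert into a non-fungible asset.
    let assets: Assets = (Here, 50u128 * CENTS as u128).into();
    assert_ok!(relay_chain::XcmPallet::transfer_assets(
        relay_chain::RuntimeOrigin::signed(ALICE),
        Box::new(VersionedLocation::V4(destination)),
        Box::new(VersionedLocation::V4(beneficiary)),
        Box::new(VersionedAssets::V4(assets)),
        0,                      // the first (and only) asset is also the fee asset
        WeightLimit::Unlimited, // no cap on execution weight at the destination
    ));
});

After this call, `BOB` holds a derivative balance minted by the parachain's `FungibleTransactor`, while the backing DOT sits in the parachain's sovereign account on the Relay Chain.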
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs
new file mode 100644
index 0000000000000000000000000000000000000000..46ac0e5df6372babf84b3494436e554353ba2820
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs
@@ -0,0 +1,90 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Mock network
+
+use frame::deps::{
+    frame_system,
+    sp_io::TestExternalities,
+    sp_runtime::{AccountId32, BuildStorage},
+};
+use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt};
+
+use super::{parachain, relay_chain};
+
+pub const ALICE: AccountId32 = AccountId32::new([0u8; 32]);
+pub const BOB: AccountId32 = AccountId32::new([1u8; 32]);
+pub const UNITS: u64 = 10_000_000_000;
+pub const CENTS: u64 = 100_000_000;
+pub const INITIAL_BALANCE: u64 = UNITS;
+
+decl_test_parachain! {
+    pub struct ParaA {
+        Runtime = parachain::Runtime,
+        XcmpMessageHandler = parachain::MessageQueue,
+        DmpMessageHandler = parachain::MessageQueue,
+        new_ext = para_ext(),
+    }
+}
+
+decl_test_relay_chain! {
+    pub struct Relay {
+        Runtime = relay_chain::Runtime,
+        RuntimeCall = relay_chain::RuntimeCall,
+        RuntimeEvent = relay_chain::RuntimeEvent,
+        XcmConfig = relay_chain::XcmConfig,
+        MessageQueue = relay_chain::MessageQueue,
+        System = relay_chain::System,
+        new_ext = relay_ext(),
+    }
+}
+
+decl_test_network! {
+    pub struct MockNet {
+        relay_chain = Relay,
+        parachains = vec![
+            (2222, ParaA),
+        ],
+    }
+}
+
+pub fn para_ext() -> TestExternalities {
+    use parachain::{MessageQueue, Runtime, System};
+
+    let t = frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
+    let mut ext = frame::deps::sp_io::TestExternalities::new(t);
+    ext.execute_with(|| {
+        System::set_block_number(1);
+        MessageQueue::set_para_id(2222.into());
+    });
+    ext
+}
+
+pub fn relay_ext() -> TestExternalities {
+    use relay_chain::{Runtime, System};
+
+    let mut t = frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
+
+    pallet_balances::GenesisConfig::<Runtime> { balances: vec![(ALICE, INITIAL_BALANCE)] }
+        .assimilate_storage(&mut t)
+        .unwrap();
+
+    let mut ext = TestExternalities::new(t);
+    ext.execute_with(|| {
+        System::set_block_number(1);
+    });
+    ext
+}
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e3fdda2e733376ca3eb780acbc17256c12e6acac
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/mod.rs
@@ -0,0 +1,56 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! # Runtime
+
+use frame::{deps::frame_system, prelude::*, runtime::prelude::*, traits::IdentityLookup};
+use xcm_executor::XcmExecutor;
+use xcm_simulator::mock_message_queue;
+
+mod xcm_config;
+use xcm_config::XcmConfig;
+
+pub type Block = frame_system::mocking::MockBlock<Runtime>;
+pub type AccountId = frame::deps::sp_runtime::AccountId32;
+pub type Balance = u64;
+
+construct_runtime! {
+    pub struct Runtime {
+        System: frame_system,
+        MessageQueue: mock_message_queue,
+        Balances: pallet_balances,
+        XcmPallet: pallet_xcm,
+    }
+}
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+impl frame_system::Config for Runtime {
+    type Block = Block;
+    type AccountId = AccountId;
+    type Lookup = IdentityLookup<AccountId>;
+    type AccountData = pallet_balances::AccountData<Balance>;
+}
+
+impl mock_message_queue::Config for Runtime {
+    type RuntimeEvent = RuntimeEvent;
+    type XcmExecutor = XcmExecutor<XcmConfig>;
+}
+
+#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)]
+impl pallet_balances::Config for Runtime {
+    type Balance = Balance;
+    type AccountStore = System;
+}
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs
new file mode 100644
index 0000000000000000000000000000000000000000..99f17693093e7f0472d78caf54f842847a8a3e84
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs
@@ -0,0 +1,189 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! # XCM Configuration
+
+use frame::{
+    deps::frame_system,
+    runtime::prelude::*,
+    traits::{Everything, Nothing},
+};
+use xcm::v4::prelude::*;
+use xcm_builder::{
+    AccountId32Aliases, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin,
+    FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete,
+    SignedToAccountId32,
+};
+use xcm_executor::XcmExecutor;
+
+use super::{AccountId, Balances, MessageQueue, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin};
+
+parameter_types! {
+    pub RelayLocation: Location = Location::parent();
+    pub ThisNetwork: NetworkId = NetworkId::Polkadot;
+}
+
+pub type LocationToAccountId = (
+    HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>,
+    AccountId32Aliases<ThisNetwork, AccountId>,
+);
+
+/// Configuration related to asset transactors
+#[docify::export]
+mod asset_transactor {
+    use super::*;
+
+    parameter_types! {
+        pub ParentRelayLocation: Location = Location::parent();
+    }
+
+    /// AssetTransactor for handling the relay chain token
+    pub type FungibleTransactor = FungibleAdapter<
+        // Use this implementation of the `fungible::*` traits.
+        // `Balances` is the name given to the balances pallet in this particular recipe.
+        // Any implementation of the traits would suffice.
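+        // (Under the hood, `FungibleAdapter` uses these traits to implement `TransactAsset`:
+        // an incoming XCM deposit turns into a `mint_into` and a withdrawal into a
+        // `burn_from` on this pallet.)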
+		Balances,
+		// This transactor deals with the native token of the Relay Chain.
+		// This token is referenced by the Location of the Relay Chain relative to this chain
+		// -- Location::parent().
+		IsConcrete<ParentRelayLocation>,
+		// How to convert an XCM Location into a local account id.
+		// This is also something that's configured in the XCM executor.
+		LocationToAccountId,
+		// The type for account ids, only needed because `fungible` is generic over it.
+		AccountId,
+		// Not tracking teleports.
+		// This recipe only uses reserve asset transfers to handle the Relay Chain token.
+		(),
+	>;
+
+	/// Actual configuration item that'll be set in the XCM config.
+	/// A tuple could be used here to have multiple transactors, each (potentially) handling
+	/// different assets.
+	/// In this recipe, we only have one.
+	pub type AssetTransactor = FungibleTransactor;
+}
+
+/// Configuration related to token reserves
+#[docify::export]
+mod is_reserve {
+	use super::*;
+
+	parameter_types! {
+		/// Reserves are specified using a pair `(AssetFilter, Location)`.
+		/// Each pair means that the specified Location is a reserve for all the assets in the `AssetFilter`.
+		/// Here, we are specifying that the Relay Chain is the reserve location for its native token.
+		pub RelayTokenForRelay: (AssetFilter, Location) =
+			(Wild(AllOf { id: AssetId(Parent.into()), fun: WildFungible }), Parent.into());
+	}
+
+	/// The wrapper type `xcm_builder::Case` is needed in order to use this in the configuration.
+	pub type IsReserve = xcm_builder::Case<RelayTokenForRelay>;
+}
+
+mod weigher {
+	use super::*;
+	use xcm_builder::FixedWeightBounds;
+
+	parameter_types! {
+		pub const WeightPerInstruction: Weight = Weight::from_parts(1, 1);
+		pub const MaxInstructions: u32 = 100;
+	}
+
+	pub type Weigher = FixedWeightBounds<WeightPerInstruction, RuntimeCall, MaxInstructions>;
+}
+
+parameter_types! {
+	pub UniversalLocation: InteriorLocation = [GlobalConsensus(NetworkId::Polkadot), Parachain(2222)].into();
+}
+
+pub struct XcmConfig;
+impl xcm_executor::Config for XcmConfig {
+	type RuntimeCall = RuntimeCall;
+	type XcmSender = ();
+	type AssetTransactor = asset_transactor::AssetTransactor;
+	type OriginConverter = ();
+	// The declaration of which Locations are reserves for which Assets.
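+	// With `Case<RelayTokenForRelay>` below, a `ReserveAssetDeposited` instruction coming
+	// from the Relay Chain and referring to its native token passes the reserve check;
+	// anything else is rejected by the executor.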
+	type IsReserve = is_reserve::IsReserve;
+	type IsTeleporter = ();
+	type UniversalLocation = UniversalLocation;
+	// This is not safe, you should use `xcm_builder::AllowTopLevelPaidExecutionFrom` in a
+	// production chain
+	type Barrier = xcm_builder::AllowUnpaidExecutionFrom<Everything>;
+	type Weigher = weigher::Weigher;
+	type Trader = ();
+	type ResponseHandler = ();
+	type AssetTrap = ();
+	type AssetLocker = ();
+	type AssetExchanger = ();
+	type AssetClaims = ();
+	type SubscriptionService = ();
+	type PalletInstancesInfo = ();
+	type FeeManager = ();
+	type MaxAssetsIntoHolding = frame::traits::ConstU32<1>;
+	type MessageExporter = ();
+	type UniversalAliases = Nothing;
+	type CallDispatcher = RuntimeCall;
+	type SafeCallFilter = Everything;
+	type Aliasers = Nothing;
+	type TransactionalProcessor = FrameTransactionalProcessor;
+	type HrmpNewChannelOpenRequestHandler = ();
+	type HrmpChannelAcceptedHandler = ();
+	type HrmpChannelClosingHandler = ();
+	type XcmRecorder = ();
+}
+
+pub type LocalOriginToLocation = SignedToAccountId32<RuntimeOrigin, AccountId, ThisNetwork>;
+
+impl pallet_xcm::Config for Runtime {
+	// We turn off sending for these tests
+	type SendXcmOrigin = EnsureXcmOrigin<RuntimeOrigin, ()>;
+	type XcmRouter = super::super::network::ParachainXcmRouter<MessageQueue>; // Provided by xcm-simulator
+	// Anyone can execute XCM programs
+	type ExecuteXcmOrigin = EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
+	// We execute any type of program
+	type XcmExecuteFilter = Everything;
+	// How we execute programs
+	type XcmExecutor = XcmExecutor<XcmConfig>;
+	// We don't allow teleports
+	type XcmTeleportFilter = Nothing;
+	// We allow all reserve transfers
+	type XcmReserveTransferFilter = Everything;
+	// Same weigher the executor uses to weigh XCM programs
+	type Weigher = weigher::Weigher;
+	// Same universal location
+	type UniversalLocation = UniversalLocation;
+	// No version discovery needed
+	const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0;
+	type AdvertisedXcmVersion = frame::traits::ConstU32<3>;
+	type AdminOrigin = frame_system::EnsureRoot<AccountId>;
+	// No locking
+	type TrustedLockers = ();
+	type MaxLockers = frame::traits::ConstU32<0>;
+	type MaxRemoteLockConsumers = frame::traits::ConstU32<0>;
+	type RemoteLockConsumerIdentifier = ();
+	// How to turn locations into accounts
+	type SovereignAccountOf = LocationToAccountId;
+	// A currency to pay for things and its matcher, we are using the relay token
+	type Currency = Balances;
+	type CurrencyMatcher = IsConcrete<RelayLocation>;
+	// Pallet benchmarks, no need for this recipe
+	type WeightInfo = pallet_xcm::TestWeightInfo;
+	// Runtime types
+	type RuntimeOrigin = RuntimeOrigin;
+	type RuntimeCall = RuntimeCall;
+	type RuntimeEvent = RuntimeEvent;
+}
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..25c35dd4aaa83c12b0176ea528d09f7d84bc87ce
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs
@@ -0,0 +1,103 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! Relay chain runtime mock.
+
+use frame::{
+	deps::{frame_support::weights::WeightMeter, sp_runtime::AccountId32},
+	prelude::*,
+	runtime::prelude::*,
+	traits::{IdentityLookup, ProcessMessage, ProcessMessageError},
+};
+use polkadot_runtime_parachains::inclusion::{AggregateMessageOrigin, UmpQueueId};
+use xcm::v4::prelude::*;
+
+mod xcm_config;
+pub use xcm_config::LocationToAccountId;
+use xcm_config::XcmConfig;
+
+pub type AccountId = AccountId32;
+pub type Balance = u64;
+
+parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+}
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
+impl frame_system::Config for Runtime {
+	type AccountId = AccountId;
+	type Lookup = IdentityLookup<AccountId>;
+	type Block = Block;
+	type AccountData = pallet_balances::AccountData<Balance>;
+}
+
+#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)]
+impl pallet_balances::Config for Runtime {
+	type AccountStore = System;
+}
+
+type Block = frame_system::mocking::MockBlock<Runtime>;
+
+parameter_types! {
+	/// Amount of weight that can be spent per block to service messages.
+	pub MessageQueueServiceWeight: Weight = Weight::from_parts(1_000_000_000, 1_000_000);
+	pub const MessageQueueHeapSize: u32 = 65_536;
+	pub const MessageQueueMaxStale: u32 = 16;
+}
+
+/// Message processor to handle any messages that were enqueued into the `MessageQueue` pallet.
+pub struct MessageProcessor;
+impl ProcessMessage for MessageProcessor {
+	type Origin = AggregateMessageOrigin;
+
+	fn process_message(
+		message: &[u8],
+		origin: Self::Origin,
+		meter: &mut WeightMeter,
+		id: &mut [u8; 32],
+	) -> Result<bool, ProcessMessageError> {
+		let para = match origin {
+			AggregateMessageOrigin::Ump(UmpQueueId::Para(para)) => para,
+		};
+		xcm_builder::ProcessXcmMessage::<
+			Junction,
+			xcm_executor::XcmExecutor<XcmConfig>,
+			RuntimeCall,
+		>::process_message(message, Junction::Parachain(para.into()), meter, id)
+	}
+}
+
+impl pallet_message_queue::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type Size = u32;
+	type HeapSize = MessageQueueHeapSize;
+	type MaxStale = MessageQueueMaxStale;
+	type ServiceWeight = MessageQueueServiceWeight;
+	type MessageProcessor = MessageProcessor;
+	type QueueChangeHandler = ();
+	type QueuePausedQuery = ();
+	type WeightInfo = ();
+	type IdleMaxServiceWeight = MessageQueueServiceWeight;
+}
+
+construct_runtime! {
+	pub struct Runtime {
+		System: frame_system,
+		Balances: pallet_balances,
+		MessageQueue: pallet_message_queue,
+		XcmPallet: pallet_xcm,
+	}
+}
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs
new file mode 100644
index 0000000000000000000000000000000000000000..987bb3f9ab6649bc299edafa97dc1d06166db440
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs
@@ -0,0 +1,163 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! Relay chain XCM configuration
+
+use frame::{
+	deps::frame_system,
+	runtime::prelude::*,
+	traits::{Everything, Nothing},
+};
+use xcm::v4::prelude::*;
+use xcm_builder::{
+	AccountId32Aliases, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin,
+	FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete,
+	SignedToAccountId32,
+};
+use xcm_executor::XcmExecutor;
+
+use super::{AccountId, Balances, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin};
+
+parameter_types! {
+	pub HereLocation: Location = Location::here();
+	pub ThisNetwork: NetworkId = NetworkId::Polkadot;
+}
+
+/// Converter from XCM Locations to accounts.
+/// This generates sovereign accounts for Locations and converts
+/// local AccountId32 junctions to local accounts.
+pub type LocationToAccountId = (
+	HashedDescription<AccountId, DescribeFamily<DescribeAllTerminal>>,
+	AccountId32Aliases<ThisNetwork, AccountId>,
+);
+
+mod asset_transactor {
+	use super::*;
+
+	/// AssetTransactor for handling the Relay Chain token.
+	pub type FungibleTransactor = FungibleAdapter<
+		// Use this `fungible` implementation.
+		Balances,
+		// This transactor handles the native token.
+		IsConcrete<HereLocation>,
+		// How to convert an XCM Location into a local account id.
+		// Whenever assets are handled, the location is turned into an account.
+		// This account is the one where balances are withdrawn/deposited.
+		LocationToAccountId,
+		// The account id type, needed because `fungible` is generic over it.
+		AccountId,
+		// Not tracking teleports.
+		(),
+	>;
+
+	/// All asset transactors, in this case only one
+	pub type AssetTransactor = FungibleTransactor;
+}
+
+mod weigher {
+	use super::*;
+	use xcm_builder::FixedWeightBounds;
+
+	parameter_types! {
+		pub const WeightPerInstruction: Weight = Weight::from_parts(1, 1);
+		pub const MaxInstructions: u32 = 100;
+	}
+
+	pub type Weigher = FixedWeightBounds<WeightPerInstruction, RuntimeCall, MaxInstructions>;
+}
+
+parameter_types! {
+	pub UniversalLocation: InteriorLocation = [GlobalConsensus(NetworkId::Polkadot)].into();
+}
+
+pub struct XcmConfig;
+impl xcm_executor::Config for XcmConfig {
+	type RuntimeCall = RuntimeCall;
+	type XcmSender = ();
+	type AssetTransactor = asset_transactor::AssetTransactor;
+	type OriginConverter = ();
+	// We don't need to recognize anyone as a reserve
+	type IsReserve = ();
+	type IsTeleporter = ();
+	type UniversalLocation = UniversalLocation;
+	// This is not safe, you should use `xcm_builder::AllowTopLevelPaidExecutionFrom` in a
+	// production chain
+	type Barrier = xcm_builder::AllowUnpaidExecutionFrom<Everything>;
+	type Weigher = weigher::Weigher;
+	type Trader = ();
+	type ResponseHandler = ();
+	type AssetTrap = ();
+	type AssetLocker = ();
+	type AssetExchanger = ();
+	type AssetClaims = ();
+	type SubscriptionService = ();
+	type PalletInstancesInfo = ();
+	type FeeManager = ();
+	type MaxAssetsIntoHolding = frame::traits::ConstU32<1>;
+	type MessageExporter = ();
+	type UniversalAliases = Nothing;
+	type CallDispatcher = RuntimeCall;
+	type SafeCallFilter = Everything;
+	type Aliasers = Nothing;
+	type TransactionalProcessor = FrameTransactionalProcessor;
+	type HrmpNewChannelOpenRequestHandler = ();
+	type HrmpChannelAcceptedHandler = ();
+	type HrmpChannelClosingHandler = ();
+	type XcmRecorder = ();
+}
+
+pub type LocalOriginToLocation = SignedToAccountId32<RuntimeOrigin, AccountId, ThisNetwork>;
+
+impl pallet_xcm::Config for Runtime {
+	// No one can call `send`
+	type SendXcmOrigin = EnsureXcmOrigin<RuntimeOrigin, ()>;
+	type XcmRouter = super::super::network::RelayChainXcmRouter; // Provided by xcm-simulator
+	// Anyone can execute XCM programs
+	type ExecuteXcmOrigin = EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
+	// We execute any type of program
+	type XcmExecuteFilter = Everything;
+	// How we execute programs
+	type XcmExecutor = XcmExecutor<XcmConfig>;
+	// We don't allow teleports
+	type XcmTeleportFilter = Nothing;
+	// We allow all reserve transfers.
+	// This is so the chain can act as a reserve for its native token.
+	type XcmReserveTransferFilter = Everything;
+	// Same weigher the executor uses to weigh XCM programs
+	type Weigher = weigher::Weigher;
+	// Same universal location
+	type UniversalLocation = UniversalLocation;
+	// No version discovery needed
+	const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0;
+	type AdvertisedXcmVersion = frame::traits::ConstU32<3>;
+	type AdminOrigin = frame_system::EnsureRoot<AccountId>;
+	// No locking
+	type TrustedLockers = ();
+	type MaxLockers = frame::traits::ConstU32<0>;
+	type MaxRemoteLockConsumers = frame::traits::ConstU32<0>;
+	type RemoteLockConsumerIdentifier = ();
+	// How to turn locations into accounts
+	type SovereignAccountOf = LocationToAccountId;
+	// A currency to pay for things and its matcher, we are using the relay token
+	type Currency = Balances;
+	type CurrencyMatcher = IsConcrete<HereLocation>;
+	// Pallet benchmarks, no need for this example
+	type WeightInfo = pallet_xcm::TestWeightInfo;
+	// Runtime types
+	type RuntimeOrigin = RuntimeOrigin;
+	type RuntimeCall = RuntimeCall;
+	type RuntimeEvent = RuntimeEvent;
+}
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..792cf6149e7cb418f5ffa5720f41ae44956ff036
--- /dev/null
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs
@@ -0,0 +1,128 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use frame::testing_prelude::*; +use test_log::test; +use xcm::prelude::*; +use xcm_executor::traits::ConvertLocation; +use xcm_simulator::TestExt; + +use super::{ + network::{MockNet, ParaA, Relay, ALICE, BOB, CENTS, INITIAL_BALANCE}, + parachain, relay_chain, +}; + +#[docify::export] +#[test] +fn reserve_asset_transfers_work() { + // Scenario: + // ALICE on the relay chain holds some of Relay Chain's native tokens. + // She transfers them to BOB's account on the parachain using a reserve transfer. + // BOB receives Relay Chain native token derivatives on the parachain, + // which are backed one-to-one with the real tokens on the Relay Chain. + // + // NOTE: We could've used ALICE on both chains because it's a different account, + // but using ALICE and BOB makes it clearer. + + // We restart the mock network. + MockNet::reset(); + + // ALICE starts with INITIAL_BALANCE on the relay chain + Relay::execute_with(|| { + assert_eq!(relay_chain::Balances::free_balance(&ALICE), INITIAL_BALANCE); + }); + + // BOB starts with 0 on the parachain + ParaA::execute_with(|| { + assert_eq!(parachain::Balances::free_balance(&BOB), 0); + }); + + // ALICE on the Relay Chain sends some Relay Chain native tokens to BOB on the parachain. + // The transfer is done with the `transfer_assets` extrinsic in the XCM pallet. + // The extrinsic figures out it should do a reserve asset transfer + // with the local chain as reserve. + Relay::execute_with(|| { + // The parachain id is specified in the network.rs file in this recipe. + let destination: Location = Parachain(2222).into(); + let beneficiary: Location = + AccountId32 { id: BOB.clone().into(), network: Some(NetworkId::Polkadot) }.into(); + // We need to use `u128` here for the conversion to work properly. + // If we don't specify anything, it will be a `u64`, which the conversion + // will turn into a non-fungible token instead of a fungible one. + let assets: Assets = (Here, 50u128 * CENTS as u128).into(); + assert_ok!(relay_chain::XcmPallet::transfer_assets( + relay_chain::RuntimeOrigin::signed(ALICE), + Box::new(VersionedLocation::V4(destination.clone())), + Box::new(VersionedLocation::V4(beneficiary)), + Box::new(VersionedAssets::V4(assets)), + 0, + WeightLimit::Unlimited, + )); + + // ALICE now has less Relay Chain tokens. + assert_eq!(relay_chain::Balances::free_balance(&ALICE), INITIAL_BALANCE - 50 * CENTS); + + // The funds of the sovereign account of the parachain increase by 50 cents, + // the ones transferred over to BOB. + // The funds in this sovereign account represent how many Relay Chain tokens + // have been sent to this parachain. + // If the parachain wants to send those assets somewhere else they have to go + // via the reserve, and this balance is updated accordingly. + // This is why the derivatives are backed one-to-one. 
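+		// `LocationToAccountId` (a `HashedDescription` converter in this recipe) derives the
+		// sovereign account by hashing a description of the parachain's Location, so the same
+		// Location always maps to the same account: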
+		let parachains_sovereign_account =
+			relay_chain::LocationToAccountId::convert_location(&destination).unwrap();
+		assert_eq!(relay_chain::Balances::free_balance(parachains_sovereign_account), 50 * CENTS);
+	});
+
+	ParaA::execute_with(|| {
+		// On the parachain, BOB has received the derivative tokens
+		assert_eq!(parachain::Balances::free_balance(&BOB), 50 * CENTS);
+
+		// BOB gives back half to ALICE on the Relay Chain
+		let destination: Location = Parent.into();
+		let beneficiary: Location =
+			AccountId32 { id: ALICE.clone().into(), network: Some(NetworkId::Polkadot) }.into();
+		// We specify `Parent` because we are referencing the Relay Chain token.
+		// This chain doesn't have a token of its own, so we always refer to this token,
+		// and we do so by the Location of the Relay Chain.
+		let assets: Assets = (Parent, 25u128 * CENTS as u128).into();
+		assert_ok!(parachain::XcmPallet::transfer_assets(
+			parachain::RuntimeOrigin::signed(BOB),
+			Box::new(VersionedLocation::V4(destination)),
+			Box::new(VersionedLocation::V4(beneficiary)),
+			Box::new(VersionedAssets::V4(assets)),
+			0,
+			WeightLimit::Unlimited,
+		));
+
+		// BOB's balance decreased
+		assert_eq!(parachain::Balances::free_balance(&BOB), 25 * CENTS);
+	});
+
+	Relay::execute_with(|| {
+		// ALICE's balance increases
+		assert_eq!(
+			relay_chain::Balances::free_balance(&ALICE),
+			INITIAL_BALANCE - 50 * CENTS + 25 * CENTS
+		);
+
+		// The funds in the parachain's sovereign account decrease.
+		let parachain: Location = Parachain(2222).into();
+		let parachains_sovereign_account =
+			relay_chain::LocationToAccountId::convert_location(&parachain).unwrap();
+		assert_eq!(relay_chain::Balances::free_balance(parachains_sovereign_account), 25 * CENTS);
+	});
+}
diff --git a/polkadot/xcm/docs/src/fundamentals.rs b/polkadot/xcm/docs/src/fundamentals.rs
new file mode 100644
index 0000000000000000000000000000000000000000..28899df801aa4f533cdfedaea8815804dc8bd7c2
--- /dev/null
+++ b/polkadot/xcm/docs/src/fundamentals.rs
@@ -0,0 +1,177 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! # XCM Fundamentals
+//!
+//! XCM standardizes the usual actions users take in consensus systems, for example dealing
+//! with assets locally or on other chains, and locking them.
+//! XCM programs can be executed locally or sent to a different consensus system.
+//! Examples of consensus systems are blockchains and smart contracts.
+//!
+//! The goal of XCM is to allow multi-chain ecosystems to thrive via specialization.
+//! Very specific functionalities can be abstracted away and standardized in this common language.
+//! Then, every member of the ecosystem can implement the subset of the language that makes sense
+//! for them.
+//!
+//! The language evolves over time to accommodate the needs of the community
+//! via the [RFC process](https://github.com/paritytech/xcm-format/blob/master/proposals/0032-process.md).
+//!
XCM is the language; it deals with interpreting and executing programs.
+//! It does not deal with actually **sending** these programs from one consensus system to another.
+//! That responsibility falls to a transport protocol.
+//! XCM can even be interpreted on the local system, with no need for a transport protocol.
+//! However, automatic and composable workflows can be achieved via the use of one.
+//!
+//! At the core of XCM lies the XCVM, the Cross-Consensus Virtual Machine.
+//! It's the virtual machine that executes XCM programs, and it is a specification that comes
+//! with the language.
+//!
+//! For these docs, we'll use a Rust implementation of XCM and the XCVM, consisting of the following
+//! parts:
+//! - [`XCM`](xcm): Holds the definition of an XCM program, the instructions and main concepts.
+//! - [`Executor`](xcm_executor): Implements the XCVM, capable of executing XCMs. Highly
+//!   configurable.
+//! - [`Builder`](xcm_builder): A collection of types used to configure the executor.
+//! - [`XCM Pallet`](pallet_xcm): A FRAME pallet for interacting with the executor.
+//! - [`Simulator`](xcm_simulator): A playground to tinker with different XCM programs and executor
+//!   configurations.
+//!
+//! XCM programs are composed of Instructions, which reference Locations and Assets.
+//!
+//! ## Locations
+//!
+//! Locations are XCM's vocabulary for the places we want to talk about in our XCM programs.
+//! They are used to reference things like 32-byte accounts, governance bodies, smart contracts,
+//! blockchains and more.
+//!
+//! Locations are hierarchical.
+//! This means some places in consensus are wholly encapsulated in other places.
+//! Say we have two systems A and B.
+//! If any change in A's state implies a change in B's state, then we say A is interior to B.
+#![doc = simple_mermaid::mermaid!("../mermaid/location_hierarchy.mmd")]
+//!
+//! Parachains are interior to their Relay Chain, since a change in their state implies a change in
+//! the Relay Chain's state.
+//!
+//! Because of this hierarchy, we represent a location with two parts: a number of **parents**, the
+//! times we move __up__ the hierarchy, and a sequence of **junctions**, the steps we take __down__
+//! the hierarchy after going up the specified number of parents.
+//!
+//! In Rust, this is specified with the following datatype:
+//! ```ignore
+//! pub struct Location {
+//!     parents: u8,
+//!     interior: Junctions,
+//! }
+//! ```
+//!
+//! Many junctions are available; parachains, pallets, 32 and 20 byte accounts, governance bodies,
+//! and arbitrary indices are the most common.
+//! A full list of available junctions can be found in the [format](https://github.com/paritytech/xcm-format#interior-locations--junctions)
+//! and the [Junction enum](xcm::v4::prelude::Junction).
+//!
+//! We'll use a file system notation to represent locations, and start with relative locations.
+//! In the diagram, the location of parachain 1000 as seen from all other locations is as follows:
+//! - From the Relay Chain: `Parachain(1000)`
+//! - From parachain 1000 itself: `Here`
+//! - From parachain 2000: `../Parachain(1000)`
+//!
+//! Relative locations are interpreted by the system executing the XCM program; when a message is
+//! sent, that system is the receiver.
+//!
+//! Locations can also be absolute.
+//! Keeping in line with our filesystem analogy, we can imagine the root of our filesystem to exist.
+//! This would be a location with no parents, that is also the parent of all systems that derive
their own consensus, say Polkadot or Ethereum or Bitcoin. +//! Such a location does not exist concretely, but we can still use this definition for it. +//! This is the **universal location**. +//! We need the universal location to be able to describe locations in an absolute way. +#![doc = simple_mermaid::mermaid!("../mermaid/universal_location.mmd")] +//! +//! Here, the absolute location of parachain 1000 would be +//! `GlobalConsensus(Polkadot)/Parachain(1000)`. +//! +//! ## Assets +//! +//! We want to be able to reference assets in our XCM programs, if only to be able to pay for fees. +//! Assets are represented using locations. +//! +//! The native asset of a chain is represented by the location of that chain. +//! For example, DOT is represented by the location of the Polkadot relaychain. +//! If the interpreting chain has its own asset, it would be represented by `Here`. +//! +//! How do we represent other assets? +//! The asset hub system parachain in Polkadot, for example, holds a lot of assets. +//! To represent each of them, it uses the indices we mentioned, and it makes them interior to the +//! assets pallet instance it uses. +//! USDT, an example asset that lives on asset hub, is identified by the location +//! `Parachain(1000)/PalletInstance(53)/GeneralIndex(1984)`, when seen from the Polkadot relaychain. +#![doc = simple_mermaid::mermaid!("../mermaid/usdt_location.mmd")] +//! +//! Asset Hub also has another type of assets called `ForeignAssets`. +//! These assets are identified by the XCM Location to their origin. +//! Two such assets are a Parachain asset, like Moonbeam's GLMR, and KSM, from the cousin Kusama +//! network. These are represented as `../Parachain(2004)/PalletInstance(10)` and +//! `../../GlobalConsensus(Kusama)` respectively. +//! +//! The whole type can be seen in the [format](https://github.com/paritytech/xcm-format#6-universal-asset-identifiers) +//! and [rust docs](xcm::v4::prelude::Asset). +//! +//! ## Instructions +//! +//! Given the vocabulary to talk about both locations -- chains and accounts -- and assets, we now +//! need a way to express what we want the consensus system to do when executing our programs. +//! We need a way of writing our programs. +//! +//! XCM programs are composed of a sequence of instructions. +//! +//! All available instructions can be seen in the [format](https://github.com/paritytech/xcm-format#5-the-xcvm-instruction-set) +//! and the [Instruction enum](xcm::v4::prelude::Instruction). +//! +//! A very simple example is the following: +//! +//! ```ignore +//! let message = Xcm(vec![ +//! TransferAsset { assets, beneficiary }, +//! ]); +//! ``` +//! +//! This instruction is enough to transfer `assets` from the account of the **origin** of a message +//! to the `beneficiary` account. However, because of XCM's generality, fees need to be paid +//! explicitly. This next example sheds more light on this: +//! +//! ```ignore +//! let message = Xcm(vec![ +//! WithdrawAsset(assets), +//! BuyExecution { fees: assets, weight_limit }, +//! DepositAsset { assets: AssetFilter(Wild(All)), beneficiary }, +//! ]); +//! ``` +//! +//! Here we see the process of transferring assets was broken down into smaller instructions, and we +//! add the explicit fee payment step in the middle. +//! `WithdrawAsset` withdraws assets from the account of the **origin** of the message for usage +//! inside this message's execution. `BuyExecution` explicitly buys execution for this program using +//! 
the assets specified in `fees`, with a sanity check of `weight_limit`. `DepositAsset` uses a +//! wildcard, specifying all remaining `assets` after subtracting the fees and a `beneficiary` +//! account. +//! +//! ## Next steps +//! +//! Continue with the [guides](crate::guides) for step-by-step tutorials on XCM, +//! or jump to the [cookbook](crate::cookbook) to see examples. +//! +//! The [glossary](crate::glossary) can be useful if some of the terms are confusing. diff --git a/polkadot/xcm/docs/src/glossary.rs b/polkadot/xcm/docs/src/glossary.rs new file mode 100644 index 0000000000000000000000000000000000000000..6035888ab733b2751a4f24d51336132111d6ef57 --- /dev/null +++ b/polkadot/xcm/docs/src/glossary.rs @@ -0,0 +1,123 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! # Glossary +//! +//! ## XCM (Cross-Consensus Messaging) +//! +//! A messaging format meant to communicate intentions between consensus systems. +//! XCM could also refer to a single message. +//! +//! ## Instructions +//! +//! XCMs are composed of a sequence of instructions. +//! Each instruction aims to convey a particular intention. +//! There are instructions for transferring and locking assets, handling fees, calling arbitrary +//! blobs, and more. +//! +//! ## Consensus system +//! +//! A system that can reach any kind of consensus. +//! For example, relay chains, parachains, smart contracts. +//! Most messaging between consensus systems has to be done asynchronously, for this, XCM is used. +//! Between two smart contracts on the same parachain, however, communication can be done +//! synchronously. +//! +//! ## [`Location`](xcm::v4::prelude::Location) +//! +//! A way of addressing consensus systems. +//! These could be relative or absolute. +//! +//! ## [`Junction`](xcm::v4::prelude::Junction) +//! +//! The different ways of descending down a [`Location`](xcm::v4::prelude::Location) hierarchy. +//! A junction can be a Parachain, an Account, or more. +//! +//! ## [`Asset`](xcm::v4::prelude::Asset) +//! +//! A way of identifying assets in the same or another consensus system, by using a +//! [`Location`](xcm::v4::prelude::Location). +//! +//! ## Sovereign account +//! +//! An account in a consensus system that is controlled by an account in another consensus system. +//! +//! Runtimes use a converter between a [`Location`](xcm::v4::prelude::Location) and an account. +//! These converters implement the [`ConvertLocation`](xcm_executor::traits::ConvertLocation) trait. +//! +//! ## Teleport +//! +//! A way of transferring assets between two consensus systems without the need of a third party. +//! It consists of the sender system burning the asset that wants to be sent over and the recipient +//! minting an equivalent amount of that asset. It requires a lot of trust between the two systems, +//! since failure to mint or burn will reduce or increase the total issuance of the token. 
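+//!
+//! As a rough sketch (the instructions are standard XCM v4 instructions, but `assets`, `dest`
+//! and `beneficiary` here are placeholders, not tied to any particular runtime), a teleport can
+//! be expressed as:
+//! ```ignore
+//! let message = Xcm(vec![
+//!     // Take the assets out of the sender's account.
+//!     WithdrawAsset(assets),
+//!     // Burn them locally; `dest` mints the equivalent and runs the inner program.
+//!     InitiateTeleport {
+//!         assets: Wild(All),
+//!         dest,
+//!         xcm: Xcm(vec![DepositAsset { assets: Wild(All), beneficiary }]),
+//!     },
+//! ]);
+//! ```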
+//! +//! ## Reserve asset transfer +//! +//! A way of transferring assets between two consensus systems that don't trust each other, by using +//! a third system they both trust, called the reserve. The real asset only exists on the reserve, +//! both sender and recipient only deal with derivatives. It consists of the sender burning a +//! certain amount of derivatives, telling the reserve to move real assets from its sovereign +//! account to the destination's sovereign account, and then telling the recipient to mint the right +//! amount of derivatives. +//! In practice, the reserve chain can also be one of the source or destination. +//! +//! ## XCVM +//! +//! The virtual machine behind XCM. +//! Every XCM is an XCVM programme. +//! Holds state in registers. +//! +//! An implementation of the virtual machine is the [`xcm-executor`](xcm_executor::XcmExecutor). +//! +//! ## Holding register +//! +//! An XCVM register used to hold arbitrary `Asset`s during the execution of an XCVM programme. +//! +//! ## Barrier +//! +//! An XCM executor configuration item that works as a firewall for incoming XCMs. +//! All XCMs have to pass the barrier to be executed, else they are dropped. +//! It can be used for whitelisting only certain types or messages or messages from certain senders. +//! +//! Lots of barrier definitions exist in [`xcm-builder`](xcm_builder). +//! +//! ## VMP (Vertical Message Passing) +//! +//! Umbrella term for both UMP (Upward Message Passing) and DMP (Downward Message Passing). +//! +//! The following diagram shows the uses of both protocols: +#![doc = simple_mermaid::mermaid!("../mermaid/transport_protocols.mmd")] +//! +//! ## UMP (Upward Message Passing) +//! +//! Transport-layer protocol that allows parachains to send messages upwards to their relay chain. +//! +//! ## DMP (Downward Message Passing) +//! +//! Transport-layer protocol that allows the relay chain to send messages downwards to one of their +//! parachains. +//! +//! ## XCMP (Cross-Consensus Message Passing) +//! +//! Transport-layer protocol that allows parachains to send messages between themselves, without +//! going through the relay chain. +//! +//! ## HRMP (Horizontal Message Passing) +//! +//! Transport-layer protocol that allows a parachain to send messages to a sibling parachain going +//! through the relay chain. It's a precursor to XCMP, also known as XCMP-lite. +//! It uses a mixture of UMP and DMP. diff --git a/polkadot/xcm/docs/src/guides/mod.rs b/polkadot/xcm/docs/src/guides/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..5af89428d9a4c570831c17f48b5974e5ded5fd58 --- /dev/null +++ b/polkadot/xcm/docs/src/guides/mod.rs @@ -0,0 +1,25 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! # XCM Guides +//! +//! These guides aim to get you up and running with XCM. +//! +//! Coming soon. +//! +//! ## Next steps +//! +//! 
Jump to the [cookbook](crate::cookbook) for different examples. diff --git a/polkadot/xcm/docs/src/lib.rs b/polkadot/xcm/docs/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..287c97140c91b371765257475b0d7a3ade2e2f06 --- /dev/null +++ b/polkadot/xcm/docs/src/lib.rs @@ -0,0 +1,63 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! # XCM Docs +//! +//! Documentation and guides for XCM +//! +//! Welcome to the Cross-Consensus Messaging documentation! +//! +//! XCM is a **language** for communicating **intentions** between **consensus systems**. +//! Whether you're a developer, a blockchain enthusiast, or just interested in Polkadot, this guide +//! aims to provide you with an easy-to-understand and comprehensive introduction to XCM. +//! +//! ## Getting started +//! +//! Head over to the [fundamentals](fundamentals) section. +//! Then, go to the [guides](guides), to learn about how to do things with XCM. +//! +//! ## Cookbook +//! +//! There's also the [cookbook](cookbook) for useful recipes for XCM. +//! +//! ## Glossary +//! +//! There's a [glossary](glossary) with common terms used throughout the docs. +//! +//! ## Contribute +//! +//! To contribute to the format, check out the [RFC process](https://github.com/paritytech/xcm-format/blob/master/proposals/0032-process.md). +//! To contribute to these docs, [make a PR](https://github.com/paritytech/polkadot-sdk). +//! +//! ## Why Rust Docs? +//! +//! Rust Docs allow docs to be as close to the source as possible. +//! They're also available offline automatically for anyone who has the `polkadot-sdk` repo locally. +//! +//! ## Docs structure +#![doc = simple_mermaid::mermaid!("../mermaid/structure.mmd")] + +/// Fundamentals of the XCM language. The virtual machine, instructions, locations and assets. +pub mod fundamentals; + +/// Step-by-step guides to set up an XCM environment and start hacking. +pub mod guides; + +/// Useful recipes for programs and configurations. 
+pub mod cookbook; + +/// Glossary +pub mod glossary; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 9691ddd48168e5e8339353a10c475ad263ce5f14..8bf3b9abf66349834b3c2b7d7cd367e4b0b835d8 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -13,7 +13,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index fc4d23426fbcce3f5da855c4bc157b6dd986376d..6f9b389ab6f12db50c27fab0f5f20961c7f171c6 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] bounded-collections = { version = "0.2.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 081a4235b7794b72beda33895c2e5e5c9577d219..da46a6a37c0654f6ef34daf7bbd37cd5b655ac45 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -15,12 +15,11 @@ // along with Polkadot. If not, see . use super::*; -use bounded_collections::{ConstU32, WeakBoundedVec}; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; use frame_support::{assert_ok, weights::Weight}; use frame_system::RawOrigin; use sp_std::prelude::*; -use xcm::{latest::prelude::*, v2}; +use xcm::latest::prelude::*; use xcm_builder::EnsureDelivery; use xcm_executor::traits::FeeReason; @@ -313,15 +312,17 @@ benchmarks! 
{
 	}
 
 	notify_target_migration_fail {
-		let bad_loc: v2::MultiLocation = v2::Junction::Plurality {
-			id: v2::BodyId::Named(WeakBoundedVec::<u8, ConstU32<32>>::try_from(vec![0; 32])
-				.expect("vec has a length of 32 bits; qed")),
-			part: v2::BodyPart::Voice,
-		}
-		.into();
-		let bad_loc = VersionedLocation::from(bad_loc);
+		let newer_xcm_version = xcm::prelude::XCM_VERSION;
+		let older_xcm_version = newer_xcm_version - 1;
+		let bad_location: Location = Plurality {
+			id: BodyId::Unit,
+			part: BodyPart::Voice,
+		}.into();
+		let bad_location = VersionedLocation::from(bad_location)
+			.into_version(older_xcm_version)
+			.expect("Version conversion should work");
 		let current_version = T::AdvertisedXcmVersion::get();
-		VersionNotifyTargets::<T>::insert(current_version, bad_loc, (0, Weight::zero(), current_version));
+		VersionNotifyTargets::<T>::insert(current_version, bad_location, (0, Weight::zero(), current_version));
 	}: {
 		crate::Pallet::<T>::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero());
 	}
diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index 37fc121ba2174c95f3a8ac82131d525ff8279f48..8f67e6e7d949693e25556af272ea73e443b620df 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -29,7 +29,9 @@ pub mod migration;
 use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
 use frame_support::{
-	dispatch::{DispatchErrorWithPostInfo, GetDispatchInfo, WithPostDispatchInfo},
+	dispatch::{
+		DispatchErrorWithPostInfo, GetDispatchInfo, PostDispatchInfo, WithPostDispatchInfo,
+	},
 	pallet_prelude::*,
 	traits::{
 		Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency,
@@ -50,18 +52,22 @@ use sp_runtime::{
 use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec};
 use xcm::{latest::QueryResponseInfo, prelude::*};
 use xcm_builder::{
-	ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo,
-	SendController, SendControllerWeightInfo,
+	ExecuteController, ExecuteControllerWeightInfo, InspectMessageQueues, QueryController,
+	QueryControllerWeightInfo, SendController, SendControllerWeightInfo,
 };
 use xcm_executor::{
 	traits::{
 		AssetTransferError, CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin,
 		DropAssets, MatchesFungible, OnResponse, Properties, QueryHandler, QueryResponseStatus,
-		TransactAsset, TransferType, VersionChangeNotifier, WeightBounds, XcmAssetTransfers,
+		RecordXcm, TransactAsset, TransferType, VersionChangeNotifier, WeightBounds,
+		XcmAssetTransfers,
 	},
 	AssetsInHolding,
 };
-use xcm_fee_payment_runtime_api::fees::Error as XcmPaymentApiError;
+use xcm_fee_payment_runtime_api::{
+	dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects},
+	fees::Error as XcmPaymentApiError,
+};
 
 #[cfg(any(feature = "try-runtime", test))]
 use sp_runtime::TryRuntimeError;
@@ -2432,6 +2438,100 @@ impl<T: Config> Pallet<T> {
 		AccountIdConversion::<T::AccountId>::into_account_truncating(&ID)
 	}
 
+	/// Dry-runs `call` with the given `origin`.
+	///
+	/// Returns not only the call result and events, but also the local XCM, if any,
+	/// and any XCMs forwarded to other locations.
+	/// Meant to be used in the `xcm_fee_payment_runtime_api::dry_run::DryRunApi` runtime API.
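+	///
+	/// A runtime would typically expose this through that API. A rough sketch, assuming a
+	/// runtime called `Runtime` whose `XcmRouter` implements `InspectMessageQueues` (the names
+	/// and the exact trait generics here are illustrative, not prescribed):
+	/// ```ignore
+	/// impl xcm_fee_payment_runtime_api::dry_run::DryRunApi<Block, RuntimeCall, RuntimeEvent, OriginCaller>
+	/// 	for Runtime
+	/// {
+	/// 	fn dry_run_call(
+	/// 		origin: OriginCaller,
+	/// 		call: RuntimeCall,
+	/// 	) -> Result<CallDryRunEffects<RuntimeEvent>, XcmDryRunApiError> {
+	/// 		// `PolkadotXcm` is this pallet's name in the hypothetical runtime.
+	/// 		PolkadotXcm::dry_run_call::<Runtime, XcmRouter, OriginCaller, RuntimeCall>(origin, call)
+	/// 	}
+	/// }
+	/// ```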
+	pub fn dry_run_call<Runtime, Router, OriginCaller, RuntimeCall>(
+		origin: OriginCaller,
+		call: RuntimeCall,
+	) -> Result<CallDryRunEffects<<Runtime as frame_system::Config>::RuntimeEvent>, XcmDryRunApiError>
+	where
+		Runtime: crate::Config,
+		Router: InspectMessageQueues,
+		RuntimeCall: Dispatchable<PostInfo = PostDispatchInfo>,
+		<RuntimeCall as Dispatchable>::RuntimeOrigin: From<OriginCaller>,
+	{
+		crate::Pallet::<Runtime>::set_record_xcm(true);
+		frame_system::Pallet::<Runtime>::reset_events(); // To make sure we only record events from current call.
+		let result = call.dispatch(origin.into());
+		crate::Pallet::<Runtime>::set_record_xcm(false);
+		let local_xcm = crate::Pallet::<Runtime>::recorded_xcm();
+		let forwarded_xcms = Router::get_messages();
+		let events: Vec<<Runtime as frame_system::Config>::RuntimeEvent> =
+			frame_system::Pallet::<Runtime>::read_events_no_consensus()
+				.map(|record| record.event.clone())
+				.collect();
+		Ok(CallDryRunEffects {
+			local_xcm: local_xcm.map(VersionedXcm::<()>::from),
+			forwarded_xcms,
+			emitted_events: events,
+			execution_result: result,
+		})
+	}
+
+	/// Dry-runs `xcm` with the given `origin_location`.
+	///
+	/// Returns execution result, events, and any forwarded XCMs to other locations.
+	/// Meant to be used in the `xcm_fee_payment_runtime_api::dry_run::DryRunApi` runtime API.
+	pub fn dry_run_xcm<Runtime, Router, RuntimeCall, XcmConfig>(
+		origin_location: VersionedLocation,
+		xcm: VersionedXcm<RuntimeCall>,
+	) -> Result<XcmDryRunEffects<<Runtime as frame_system::Config>::RuntimeEvent>, XcmDryRunApiError>
+	where
+		Runtime: frame_system::Config,
+		Router: InspectMessageQueues,
+		XcmConfig: xcm_executor::Config<RuntimeCall = RuntimeCall>,
+	{
+		let origin_location: Location = origin_location.try_into().map_err(|error| {
+			log::error!(
+				target: "xcm::DryRunApi::dry_run_xcm",
+				"Location version conversion failed with error: {:?}",
+				error,
+			);
+			XcmDryRunApiError::VersionedConversionFailed
+		})?;
+		let xcm: Xcm<RuntimeCall> = xcm.try_into().map_err(|error| {
+			log::error!(
+				target: "xcm::DryRunApi::dry_run_xcm",
+				"Xcm version conversion failed with error {:?}",
+				error,
+			);
+			XcmDryRunApiError::VersionedConversionFailed
+		})?;
+		let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256);
+		frame_system::Pallet::<Runtime>::reset_events(); // To make sure we only record events from current call.
+		let result = xcm_executor::XcmExecutor::<XcmConfig>::prepare_and_execute(
+			origin_location,
+			xcm,
+			&mut hash,
+			Weight::MAX, // Max limit available for execution.
+			Weight::zero(),
+		);
+		let forwarded_xcms = Router::get_messages();
+		let events: Vec<<Runtime as frame_system::Config>::RuntimeEvent> =
+			frame_system::Pallet::<Runtime>::read_events_no_consensus()
+				.map(|record| record.event.clone())
+				.collect();
+		Ok(XcmDryRunEffects { forwarded_xcms, emitted_events: events, execution_result: result })
+	}
+
+	/// Given a list of asset ids, returns the correct API response for
+	/// `XcmPaymentApi::query_acceptable_payment_assets`.
+	///
+	/// The assets passed in have to be supported for fee payment.
+	pub fn query_acceptable_payment_assets(
+		version: xcm::Version,
+		asset_ids: Vec<AssetId>,
+	) -> Result<Vec<VersionedAssetId>, XcmPaymentApiError> {
+		Ok(asset_ids
+			.into_iter()
+			.map(|asset_id| VersionedAssetId::from(asset_id))
+			.filter_map(|asset_id| asset_id.into_version(version).ok())
+			.collect())
+	}
+
 	pub fn query_xcm_weight(message: VersionedXcm<()>) -> Result<Weight, XcmPaymentApiError> {
 		let message = Xcm::<()>::try_from(message)
 			.map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?;
@@ -3126,7 +3226,7 @@ impl<T: Config> CheckSuspension for Pallet<T> {
 	}
 }
 
-impl<T: Config> xcm_executor::traits::RecordXcm for Pallet<T> {
+impl<T: Config> RecordXcm for Pallet<T> {
 	fn should_record() -> bool {
 		ShouldRecordXcm::<T>::get()
 	}
diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs
index b3b7529217f5a4929ce7beedcc1ffa423227112a..ead98e1d046005360743b921fe3d70cd93c06a1b 100644
--- a/polkadot/xcm/pallet-xcm/src/mock.rs
+++ b/polkadot/xcm/pallet-xcm/src/mock.rs
@@ -238,10 +238,6 @@ impl SendXcm for TestPaidForPara3000SendXcm {
 	}
 }
 
-parameter_types! {
-	pub const BlockHashCount: u64 = 250;
-}
-
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 impl frame_system::Config for Test {
 	type RuntimeOrigin = RuntimeOrigin;
@@ -253,7 +249,6 @@ impl frame_system::Config for Test {
 	type Lookup = IdentityLookup<AccountId>;
 	type Block = Block;
 	type RuntimeEvent = RuntimeEvent;
-	type BlockHashCount = BlockHashCount;
 	type BlockWeights = ();
 	type BlockLength = ();
 	type Version = ();
diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs
index f42e220d693203f7fa6e1391676f36a84feb9be0..af81ac9cf43a978f08c4cee86adf4d72cee3a64b 100644
--- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs
+++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs
@@ -76,7 +76,7 @@ fn limited_teleport_assets_works() {
 			)]
 		);
 		let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1);
-		let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap();
+		let _check_v3_ok: xcm::v3::Xcm<()> = versioned_sent.try_into().unwrap();
 
 		let mut last_events = last_events(3).into_iter();
 		assert_eq!(
diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs
index 02aeafd68e83dffd01a7630ce357a4e9cd78cdf1..c16c1a1ba986e5c95da3ce63dd58c52393408464 100644
--- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs
+++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs
@@ -602,11 +602,11 @@ fn basic_subscription_works() {
 
 	let weight = BaseXcmWeight::get();
 	let mut message = Xcm::<()>(vec![
-		// Remote supports XCM v2
+		// Remote supports XCM v3
 		QueryResponse {
 			query_id: 0,
 			max_weight: Weight::zero(),
-			response: Response::Version(1),
+			response: Response::Version(3),
 			querier: None,
 		},
 	]);
@@ -764,14 +764,14 @@ fn subscription_side_upgrades_work_with_notify() {
 	new_test_ext_with_balances(vec![]).execute_with(|| {
 		AdvertisedXcmVersion::set(1);
 
-		// An entry from a previous runtime with v2 XCM.
-		let v2_location = VersionedLocation::V2(xcm::v2::Junction::Parachain(1001).into());
-		VersionNotifyTargets::<Test>::insert(1, v2_location, (70, Weight::zero(), 2));
-		let v3_location = Parachain(1003).into_versioned();
-		VersionNotifyTargets::<Test>::insert(3, v3_location, (72, Weight::zero(), 2));
+		// An entry from a previous runtime with v3 XCM.
+ let v3_location = VersionedLocation::V3(xcm::v3::Junction::Parachain(1001).into()); + VersionNotifyTargets::::insert(3, v3_location, (70, Weight::zero(), 3)); + let v4_location = Parachain(1003).into_versioned(); + VersionNotifyTargets::::insert(4, v4_location, (72, Weight::zero(), 3)); // New version. - AdvertisedXcmVersion::set(3); + AdvertisedXcmVersion::set(4); // A runtime upgrade which alters the version does send notifications. CurrentMigration::::put(VersionMigrationStage::default()); @@ -780,13 +780,13 @@ fn subscription_side_upgrades_work_with_notify() { let instr1 = QueryResponse { query_id: 70, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let instr3 = QueryResponse { query_id: 72, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let mut sent = take_sent_xcm(); @@ -807,8 +807,8 @@ fn subscription_side_upgrades_work_with_notify() { assert_eq!( contents, vec![ - (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 3)), - (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 3)), + (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 4)), + (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 4)), ] ); }); @@ -817,11 +817,11 @@ fn subscription_side_upgrades_work_with_notify() { #[test] fn subscription_side_upgrades_work_without_notify() { new_test_ext_with_balances(vec![]).execute_with(|| { - // An entry from a previous runtime with v2 XCM. - let v2_location = VersionedLocation::V2(xcm::v2::Junction::Parachain(1001).into()); - VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 2)); - let v3_location = Parachain(1003).into_versioned(); - VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 2)); + // An entry from a previous runtime with v3 XCM. + let v3_location = VersionedLocation::V3(xcm::v3::Junction::Parachain(1001).into()); + VersionNotifyTargets::::insert(3, v3_location, (70, Weight::zero(), 3)); + let v4_location = Parachain(1003).into_versioned(); + VersionNotifyTargets::::insert(4, v4_location, (72, Weight::zero(), 3)); // A runtime upgrade which alters the version does send notifications. CurrentMigration::::put(VersionMigrationStage::default()); @@ -854,11 +854,11 @@ fn subscriber_side_subscription_works() { let weight = BaseXcmWeight::get(); let message = Xcm(vec![ - // Remote supports XCM v2 + // Remote supports XCM v3 QueryResponse { query_id: 0, max_weight: Weight::zero(), - response: Response::Version(1), + response: Response::Version(3), querier: None, }, ]); @@ -872,18 +872,21 @@ fn subscriber_side_subscription_works() { ); assert_eq!(r, Outcome::Complete { used: weight }); assert_eq!(take_sent_xcm(), vec![]); - assert_eq!(XcmPallet::get_version_for(&remote), Some(1)); + assert_eq!(XcmPallet::get_version_for(&remote), Some(3)); - // This message cannot be sent to a v2 remote. - let v2_msg = xcm::v2::Xcm::<()>(vec![xcm::v2::Instruction::Trap(0)]); - assert_eq!(XcmPallet::wrap_version(&remote, v2_msg.clone()), Err(())); + // This message will be sent as v3. 
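+		// `wrap_version` converts the program to the latest version the destination is known
+		// to support, so the v4 instructions below come out wrapped as v3.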
+ let v4_msg = xcm::v4::Xcm::<()>(vec![xcm::v4::Instruction::Trap(0)]); + assert_eq!( + XcmPallet::wrap_version(&remote, v4_msg.clone()), + Ok(VersionedXcm::V3(xcm::v3::Xcm(vec![xcm::v3::Instruction::Trap(0)]))) + ); let message = Xcm(vec![ - // Remote upgraded to XCM v2 + // Remote upgraded to XCM v4 QueryResponse { query_id: 0, max_weight: Weight::zero(), - response: Response::Version(2), + response: Response::Version(4), querier: None, }, ]); @@ -897,12 +900,12 @@ fn subscriber_side_subscription_works() { ); assert_eq!(r, Outcome::Complete { used: weight }); assert_eq!(take_sent_xcm(), vec![]); - assert_eq!(XcmPallet::get_version_for(&remote), Some(2)); + assert_eq!(XcmPallet::get_version_for(&remote), Some(4)); - // This message can now be sent to remote as it's v2. + // This message is now sent as v4. assert_eq!( - XcmPallet::wrap_version(&remote, v2_msg.clone()), - Ok(VersionedXcm::from(v2_msg)) + XcmPallet::wrap_version(&remote, v4_msg.clone()), + Ok(VersionedXcm::from(v4_msg)) ); }); } @@ -911,30 +914,36 @@ fn subscriber_side_subscription_works() { #[test] fn auto_subscription_works() { new_test_ext_with_balances_and_xcm_version(vec![], None).execute_with(|| { - let remote_v2: Location = Parachain(1000).into(); + let remote_v3: Location = Parachain(1000).into(); let remote_v4: Location = Parachain(1001).into(); - assert_ok!(XcmPallet::force_default_xcm_version(RuntimeOrigin::root(), Some(2))); + assert_ok!(XcmPallet::force_default_xcm_version(RuntimeOrigin::root(), Some(3))); // Wrapping a version for a destination we don't know elicits a subscription. - let msg_v2 = xcm::v2::Xcm::<()>(vec![xcm::v2::Instruction::Trap(0)]); + let msg_v3 = xcm::v3::Xcm::<()>(vec![xcm::v3::Instruction::Trap(0)]); let msg_v4 = xcm::v4::Xcm::<()>(vec![xcm::v4::Instruction::ClearTopic]); assert_eq!( - XcmPallet::wrap_version(&remote_v2, msg_v2.clone()), - Ok(VersionedXcm::from(msg_v2.clone())), + XcmPallet::wrap_version(&remote_v3, msg_v3.clone()), + Ok(VersionedXcm::from(msg_v3.clone())), + ); + assert_eq!( + XcmPallet::wrap_version(&remote_v3, msg_v4.clone()), + Ok(VersionedXcm::V3(xcm::v3::Xcm(vec![xcm::v3::Instruction::ClearTopic]))) ); - assert_eq!(XcmPallet::wrap_version(&remote_v2, msg_v4.clone()), Err(())); - let expected = vec![(remote_v2.clone().into(), 2)]; + let expected = vec![(remote_v3.clone().into(), 2)]; assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); assert_eq!( - XcmPallet::wrap_version(&remote_v4, msg_v2.clone()), - Ok(VersionedXcm::from(msg_v2.clone())), + XcmPallet::wrap_version(&remote_v4, msg_v3.clone()), + Ok(VersionedXcm::from(msg_v3.clone())), + ); + assert_eq!( + XcmPallet::wrap_version(&remote_v4, msg_v4.clone()), + Ok(VersionedXcm::V3(xcm::v3::Xcm(vec![xcm::v3::Instruction::ClearTopic]))) ); - assert_eq!(XcmPallet::wrap_version(&remote_v4, msg_v4.clone()), Err(())); - let expected = vec![(remote_v2.clone().into(), 2), (remote_v4.clone().into(), 2)]; + let expected = vec![(remote_v3.clone().into(), 2), (remote_v4.clone().into(), 2)]; assert_eq!(VersionDiscoveryQueue::::get().into_inner(), expected); XcmPallet::on_initialize(1); @@ -968,10 +977,10 @@ fn auto_subscription_works() { ); assert_eq!(r, Outcome::Complete { used: weight }); - // V2 messages can be sent to remote_v4 under XCM v4. + // V3 messages can be sent to remote_v4 under XCM v4. 
assert_eq!( - XcmPallet::wrap_version(&remote_v4, msg_v2.clone()), - Ok(VersionedXcm::from(msg_v2.clone()).into_version(4).unwrap()), + XcmPallet::wrap_version(&remote_v4, msg_v3.clone()), + Ok(VersionedXcm::from(msg_v3.clone()).into_version(4).unwrap()), ); // This message can now be sent to remote_v4 as it's v4. assert_eq!( @@ -983,26 +992,26 @@ fn auto_subscription_works() { assert_eq!( take_sent_xcm(), vec![( - remote_v2.clone(), + remote_v3.clone(), Xcm(vec![SubscribeVersion { query_id: 1, max_response_weight: Weight::zero() }]), )] ); - // Assume remote_v2 is working ok and XCM version 2. + // Assume remote_v3 is working ok and XCM version 3. let weight = BaseXcmWeight::get(); let message = Xcm(vec![ - // Remote supports XCM v2 + // Remote supports XCM v3 QueryResponse { query_id: 1, max_weight: Weight::zero(), - response: Response::Version(2), + response: Response::Version(3), querier: None, }, ]); let mut hash = fake_message_hash(&message); let r = XcmExecutor::::prepare_and_execute( - remote_v2.clone(), + remote_v3.clone(), message, &mut hash, weight, @@ -1010,12 +1019,15 @@ fn auto_subscription_works() { ); assert_eq!(r, Outcome::Complete { used: weight }); - // v4 messages cannot be sent to remote_v2... + // v4 messages cannot be sent to remote_v3... + assert_eq!( + XcmPallet::wrap_version(&remote_v3, msg_v3.clone()), + Ok(VersionedXcm::V3(msg_v3)) + ); assert_eq!( - XcmPallet::wrap_version(&remote_v2, msg_v2.clone()), - Ok(VersionedXcm::V2(msg_v2)) + XcmPallet::wrap_version(&remote_v3, msg_v4.clone()), + Ok(VersionedXcm::V3(xcm::v3::Xcm(vec![xcm::v3::Instruction::ClearTopic]))) ); - assert_eq!(XcmPallet::wrap_version(&remote_v2, msg_v4.clone()), Err(())); }) } @@ -1025,15 +1037,15 @@ fn subscription_side_upgrades_work_with_multistage_notify() { AdvertisedXcmVersion::set(1); // An entry from a previous runtime with v0 XCM. - let v2_location = VersionedLocation::V2(xcm::v2::Junction::Parachain(1001).into()); - VersionNotifyTargets::::insert(1, v2_location, (70, Weight::zero(), 1)); - let v2_location = VersionedLocation::V2(xcm::v2::Junction::Parachain(1002).into()); - VersionNotifyTargets::::insert(2, v2_location, (71, Weight::zero(), 1)); - let v3_location = Parachain(1003).into_versioned(); - VersionNotifyTargets::::insert(3, v3_location, (72, Weight::zero(), 1)); + let v3_location = VersionedLocation::V3(xcm::v3::Junction::Parachain(1001).into()); + VersionNotifyTargets::::insert(3, v3_location, (70, Weight::zero(), 3)); + let v3_location = VersionedLocation::V3(xcm::v3::Junction::Parachain(1002).into()); + VersionNotifyTargets::::insert(3, v3_location, (71, Weight::zero(), 3)); + let v4_location = Parachain(1003).into_versioned(); + VersionNotifyTargets::::insert(4, v4_location, (72, Weight::zero(), 3)); // New version. - AdvertisedXcmVersion::set(3); + AdvertisedXcmVersion::set(4); // A runtime upgrade which alters the version does send notifications. 
CurrentMigration::::put(VersionMigrationStage::default()); @@ -1049,19 +1061,19 @@ fn subscription_side_upgrades_work_with_multistage_notify() { let instr1 = QueryResponse { query_id: 70, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let instr2 = QueryResponse { query_id: 71, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let instr3 = QueryResponse { query_id: 72, max_weight: Weight::zero(), - response: Response::Version(3), + response: Response::Version(4), querier: None, }; let mut sent = take_sent_xcm(); @@ -1083,9 +1095,9 @@ fn subscription_side_upgrades_work_with_multistage_notify() { assert_eq!( contents, vec![ - (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 3)), - (XCM_VERSION, Parachain(1002).into_versioned(), (71, Weight::zero(), 3)), - (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 3)), + (XCM_VERSION, Parachain(1001).into_versioned(), (70, Weight::zero(), 4)), + (XCM_VERSION, Parachain(1002).into_versioned(), (71, Weight::zero(), 4)), + (XCM_VERSION, Parachain(1003).into_versioned(), (72, Weight::zero(), 4)), ] ); }); diff --git a/polkadot/xcm/procedural/tests/conversion_functions.rs b/polkadot/xcm/procedural/tests/conversion_functions.rs index 5b6965167fcd318d3a7b10b3e56deeebf13ba7cb..7d2698d2cd776fe13f03a2a361edd32b552482ea 100644 --- a/polkadot/xcm/procedural/tests/conversion_functions.rs +++ b/polkadot/xcm/procedural/tests/conversion_functions.rs @@ -14,10 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use xcm::v2::prelude::*; +use xcm::v3::prelude::*; #[test] -fn slice_syntax_in_v2_works() { +fn slice_syntax_in_v3_works() { let old_junctions = Junctions::X2(Parachain(1), PalletInstance(1)); let new_junctions = Junctions::from([Parachain(1), PalletInstance(1)]); assert_eq!(old_junctions, new_junctions); diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 513dfe5501ba6c6ee8c233717c86b7f24f6eeb5f..8b0030e59b5ffbb8c3af94d30368c0a824f8a119 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -21,6 +21,8 @@ // // Hence, `no_std` rather than sp-runtime. #![cfg_attr(not(feature = "std"), no_std)] +// Because of XCMv2. +#![allow(deprecated)] extern crate alloc; @@ -28,6 +30,9 @@ use derivative::Derivative; use parity_scale_codec::{Decode, DecodeLimit, Encode, Error as CodecError, Input, MaxEncodedLen}; use scale_info::TypeInfo; +#[deprecated( + note = "XCMv2 will be removed once XCMv5 is released. Please use XCMv3 or XCMv4 instead." 
+)] pub mod v2; pub mod v3; pub mod v4; @@ -425,6 +430,7 @@ pub type VersionedMultiAssets = VersionedAssets; #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum VersionedXcm { #[codec(index = 2)] + #[deprecated] V2(v2::Xcm), #[codec(index = 3)] V3(v3::Xcm), diff --git a/polkadot/xcm/src/tests.rs b/polkadot/xcm/src/tests.rs index 1aabbcef281d6638b4eabf3c077b5091514d07f0..4c666063f3f4706e77869b7aea5e16000f6dbc1d 100644 --- a/polkadot/xcm/src/tests.rs +++ b/polkadot/xcm/src/tests.rs @@ -158,18 +158,6 @@ fn encode_decode_versioned_multi_assets_v3() { assert_eq!(assets, decoded); } -#[test] -fn encode_decode_versioned_xcm_v2() { - let xcm = VersionedXcm::V2(v2::Xcm::<()>::new()); - let encoded = xcm.encode(); - - assert_eq!(encoded, hex_literal::hex!("0200"), "encode format changed"); - assert_eq!(encoded[0], 2, "bad version number"); - - let decoded = VersionedXcm::decode(&mut &encoded[..]).unwrap(); - assert_eq!(xcm, decoded); -} - #[test] fn encode_decode_versioned_xcm_v3() { let xcm = VersionedXcm::V3(v3::Xcm::<()>::new()); diff --git a/polkadot/xcm/src/v2/mod.rs b/polkadot/xcm/src/v2/mod.rs index 347f3f2c29206222ca546b7d543409da83cbedcf..7b6858e6a5c212cd50326390a1fa995f88f95d2d 100644 --- a/polkadot/xcm/src/v2/mod.rs +++ b/polkadot/xcm/src/v2/mod.rs @@ -15,6 +15,9 @@ // along with Cumulus. If not, see . //! # XCM Version 2 +//! +//! WARNING: DEPRECATED, please use version 3 or 4. +//! //! Version 2 of the Cross-Consensus Message format data structures. The comprehensive list of //! changes can be found in //! [this PR description](https://github.com/paritytech/polkadot/pull/3629#issue-968428279). @@ -52,8 +55,8 @@ use super::{ v3::{ BodyId as NewBodyId, BodyPart as NewBodyPart, Instruction as NewInstruction, - NetworkId as NewNetworkId, Response as NewResponse, WeightLimit as NewWeightLimit, - Xcm as NewXcm, + NetworkId as NewNetworkId, OriginKind as NewOriginKind, Response as NewResponse, + WeightLimit as NewWeightLimit, Xcm as NewXcm, }, DoubleEncoded, }; @@ -104,6 +107,18 @@ pub enum OriginKind { Xcm, } +impl From for OriginKind { + fn from(new: NewOriginKind) -> Self { + use NewOriginKind::*; + match new { + Native => Self::Native, + SovereignAccount => Self::SovereignAccount, + Superuser => Self::Superuser, + Xcm => Self::Xcm, + } + } +} + /// A global identifier of an account-bearing consensus system. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] @@ -262,6 +277,7 @@ pub const VERSION: super::Version = 2; /// An identifier for a query. pub type QueryId = u64; +/// DEPRECATED. Please use XCMv3 or XCMv4 instead. #[derive(Derivative, Default, Encode, Decode, TypeInfo)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] @@ -1065,7 +1081,7 @@ impl TryFrom> for Instruction Self::HrmpChannelClosing { initiator, sender, recipient }, Transact { origin_kind, require_weight_at_most, call } => Self::Transact { - origin_type: origin_kind, + origin_type: origin_kind.into(), require_weight_at_most: require_weight_at_most.ref_time(), call: call.into(), }, diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index e7c57f414eb786a50f3689a3d3706632bdc4f902..8ff661a9bbac8b3a8236f783f0dc7b24129aa005 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -16,15 +16,14 @@ //! Version 3 of the Cross-Consensus Message format data structures. 
-use super::{ - v2::{ - Instruction as OldInstruction, Response as OldResponse, WeightLimit as OldWeightLimit, - Xcm as OldXcm, - }, - v4::{ - Instruction as NewInstruction, PalletInfo as NewPalletInfo, - QueryResponseInfo as NewQueryResponseInfo, Response as NewResponse, Xcm as NewXcm, - }, +#[allow(deprecated)] +use super::v2::{ + Instruction as OldInstruction, OriginKind as OldOriginKind, Response as OldResponse, + WeightLimit as OldWeightLimit, Xcm as OldXcm, +}; +use super::v4::{ + Instruction as NewInstruction, PalletInfo as NewPalletInfo, + QueryResponseInfo as NewQueryResponseInfo, Response as NewResponse, Xcm as NewXcm, }; use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; @@ -53,11 +52,46 @@ pub use multilocation::{ Ancestor, AncestorThen, InteriorMultiLocation, Location, MultiLocation, Parent, ParentThen, }; pub use traits::{ - send_xcm, validate_send, Error, ExecuteXcm, Outcome, PreparedMessage, Result, SendError, - SendResult, SendXcm, Weight, XcmHash, + send_xcm, validate_send, Error, ExecuteXcm, GetWeight, Outcome, PreparedMessage, Result, + SendError, SendResult, SendXcm, Weight, XcmHash, }; -// These parts of XCM v2 are unchanged in XCM v3, and are re-imported here. -pub use super::v2::{GetWeight, OriginKind}; + +/// Basically just the XCM (more general) version of `ParachainDispatchOrigin`. +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] +pub enum OriginKind { + /// Origin should just be the native dispatch origin representation for the sender in the + /// local runtime framework. For Cumulus/Frame chains this is the `Parachain` or `Relay` origin + /// if coming from a chain, though there may be others if the `MultiLocation` XCM origin has a + /// primary/native dispatch origin form. + Native, + + /// Origin should just be the standard account-based origin with the sovereign account of + /// the sender. For Cumulus/Frame chains, this is the `Signed` origin. + SovereignAccount, + + /// Origin should be the super-user. For Cumulus/Frame chains, this is the `Root` origin. + /// This will not usually be an available option. + Superuser, + + /// Origin should be interpreted as an XCM native origin and the `MultiLocation` should be + /// encoded directly in the dispatch origin unchanged. For Cumulus/Frame chains, this will be + /// the `pallet_xcm::Origin::Xcm` type. + Xcm, +} + +impl From for OriginKind { + fn from(old: OldOriginKind) -> Self { + use OldOriginKind::*; + match old { + Native => Self::Native, + SovereignAccount => Self::SovereignAccount, + Superuser => Self::Superuser, + Xcm => Self::Xcm, + } + } +} /// This module's XCM version. pub const VERSION: super::Version = 3; @@ -1310,6 +1344,7 @@ impl TryFrom for Response { } // Convert from a v2 XCM to a v3 XCM. 
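Because `OriginKind` now has a distinct definition per version, the paired `From` impls (the one above and its mirror in v2) keep `Transact` instructions convertible in both directions during the whole-message conversion defined next. A small sketch, assuming only the `xcm` crate; the variant sets are identical, so the round trip is lossless:

#[allow(deprecated)] // xcm::v2 is deprecated as of this change
fn origin_kind_roundtrip() {
    let old = xcm::v2::OriginKind::SovereignAccount;
    let new: xcm::v3::OriginKind = old.into();
    assert_eq!(xcm::v2::OriginKind::from(new), old);
}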
+#[allow(deprecated)] impl TryFrom> for Xcm { type Error = (); fn try_from(old_xcm: OldXcm) -> result::Result { @@ -1500,7 +1535,7 @@ impl TryFrom> for Instruction { HrmpChannelClosing { initiator, sender, recipient } => Self::HrmpChannelClosing { initiator, sender, recipient }, Transact { origin_type, require_weight_at_most, call } => Self::Transact { - origin_kind: origin_type, + origin_kind: origin_type.into(), require_weight_at_most: Weight::from_parts( require_weight_at_most, DEFAULT_PROOF_SIZE, @@ -1572,118 +1607,6 @@ impl TryFrom> for Instruction { #[cfg(test)] mod tests { use super::{prelude::*, *}; - use crate::v2::{ - Junctions::Here as OldHere, MultiAssetFilter as OldMultiAssetFilter, - WildMultiAsset as OldWildMultiAsset, - }; - - #[test] - fn basic_roundtrip_works() { - let xcm = Xcm::<()>(vec![TransferAsset { - assets: (Here, 1u128).into(), - beneficiary: Here.into(), - }]); - let old_xcm = OldXcm::<()>(vec![OldInstruction::TransferAsset { - assets: (OldHere, 1).into(), - beneficiary: OldHere.into(), - }]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } - - #[test] - fn teleport_roundtrip_works() { - let xcm = Xcm::<()>(vec![ - ReceiveTeleportedAsset((Here, 1u128).into()), - ClearOrigin, - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, - ]); - let old_xcm: OldXcm<()> = OldXcm::<()>(vec![ - OldInstruction::ReceiveTeleportedAsset((OldHere, 1).into()), - OldInstruction::ClearOrigin, - OldInstruction::DepositAsset { - assets: crate::v2::MultiAssetFilter::Wild(crate::v2::WildMultiAsset::All), - max_assets: 1, - beneficiary: OldHere.into(), - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } - - #[test] - fn reserve_deposit_roundtrip_works() { - let xcm = Xcm::<()>(vec![ - ReserveAssetDeposited((Here, 1u128).into()), - ClearOrigin, - BuyExecution { - fees: (Here, 1u128).into(), - weight_limit: Some(Weight::from_parts(1, DEFAULT_PROOF_SIZE)).into(), - }, - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, - ]); - let old_xcm = OldXcm::<()>(vec![ - OldInstruction::ReserveAssetDeposited((OldHere, 1).into()), - OldInstruction::ClearOrigin, - OldInstruction::BuyExecution { - fees: (OldHere, 1).into(), - weight_limit: Some(1).into(), - }, - OldInstruction::DepositAsset { - assets: crate::v2::MultiAssetFilter::Wild(crate::v2::WildMultiAsset::All), - max_assets: 1, - beneficiary: OldHere.into(), - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } - - #[test] - fn deposit_asset_roundtrip_works() { - let xcm = Xcm::<()>(vec![ - WithdrawAsset((Here, 1u128).into()), - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, - ]); - let old_xcm = OldXcm::<()>(vec![ - OldInstruction::WithdrawAsset((OldHere, 1).into()), - OldInstruction::DepositAsset { - assets: OldMultiAssetFilter::Wild(OldWildMultiAsset::All), - max_assets: 1, - beneficiary: OldHere.into(), - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } - - #[test] - fn deposit_reserve_asset_roundtrip_works() { - let xcm = Xcm::<()>(vec![ - WithdrawAsset((Here, 1u128).into()), - DepositReserveAsset { - assets: Wild(AllCounted(1)), - 
dest: Here.into(), - xcm: Xcm::<()>(vec![]), - }, - ]); - let old_xcm = OldXcm::<()>(vec![ - OldInstruction::WithdrawAsset((OldHere, 1).into()), - OldInstruction::DepositReserveAsset { - assets: OldMultiAssetFilter::Wild(OldWildMultiAsset::All), - max_assets: 1, - dest: OldHere.into(), - xcm: OldXcm::<()>(vec![]), - }, - ]); - assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); - let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); - assert_eq!(new_xcm, xcm); - } #[test] fn decoding_respects_limit() { diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index cfe387df1a86c3aa21da69ce37eadb307df1c51b..680e0bacd0c9b96832b646d2896cdf4ec2c064e0 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -25,6 +25,11 @@ pub use sp_weights::Weight; use super::*; +// A simple trait to get the weight of some object. +pub trait GetWeight { + fn weight(&self) -> sp_weights::Weight; +} + /// Error codes used in XCM. The first errors codes have explicit indices and are part of the XCM /// format. Those trailing are merely part of the XCM implementation; there is no expectation that /// they will retain the same index over time. diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 77b6d915fcb5fa902e5168e46b38cdb677f98cbf..e1ca60087b19024e88a9a1345f1e00a624e77fda 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -16,7 +16,7 @@ //! Version 4 of the Cross-Consensus Message format data structures. -pub use super::v2::GetWeight; +pub use super::v3::GetWeight; use super::v3::{ Instruction as OldInstruction, PalletInfo as OldPalletInfo, QueryResponseInfo as OldQueryResponseInfo, Response as OldResponse, Xcm as OldXcm, diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 997ca99fb12c831ea63d7dbb199f2c029dabb495..707e4aac7968a3032f717e802122903014d669ef 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -11,7 +11,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.1" -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index 7760274f6e2451f1b5f695884086a31cd36f3209..449cda3d2323372ea5fddbb9a08d81dbadeba632 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -124,7 +124,7 @@ mod tests { }; use parity_scale_codec::Encode; use polkadot_test_runtime::*; - use xcm::{v2, v3, VersionedXcm}; + use xcm::{v3, v4, VersionedXcm}; const ORIGIN: Junction = Junction::OnlyChild; /// The processor to use for tests. @@ -134,8 +134,8 @@ mod tests { #[test] fn process_message_trivial_works() { // ClearOrigin works. 
- assert!(process(v2_xcm(true)).unwrap()); assert!(process(v3_xcm(true)).unwrap()); + assert!(process(v4_xcm(true)).unwrap()); } #[test] @@ -194,7 +194,7 @@ mod tests { #[test] fn process_message_overweight_fails() { - for msg in [v3_xcm(true), v3_xcm(false), v3_xcm(false), v2_xcm(false)] { + for msg in [v4_xcm(true), v4_xcm(false), v4_xcm(false), v3_xcm(false)] { let msg = &msg.encode()[..]; // Errors if we stay below a weight limit of 1000. @@ -216,7 +216,7 @@ mod tests { } } - fn v2_xcm(success: bool) -> VersionedXcm { + fn v3_xcm(success: bool) -> VersionedXcm { let instr = if success { v3::Instruction::::ClearOrigin } else { @@ -225,13 +225,13 @@ mod tests { VersionedXcm::V3(v3::Xcm::(vec![instr])) } - fn v3_xcm(success: bool) -> VersionedXcm { + fn v4_xcm(success: bool) -> VersionedXcm { let instr = if success { - v2::Instruction::::ClearOrigin + v4::Instruction::::ClearOrigin } else { - v2::Instruction::::Trap(1) + v4::Instruction::::Trap(1) }; - VersionedXcm::V2(v2::Xcm::(vec![instr])) + VersionedXcm::V4(v4::Xcm::(vec![instr])) } fn process(msg: VersionedXcm) -> Result { diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 34b204b434d6e3e547a8b1f5ec48d787bb1fded5..076ff4184f0cb9c8ba0b7a633534384e9dd7ef7c 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -54,7 +54,6 @@ impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; type AccountId = AccountId; - type BlockHashCount = ConstU32<256>; type Lookup = sp_runtime::traits::IdentityLookup; } diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 45bfba2355630e4e5b40d1bb9e00114c889811a1..7f7ff17e2115a96f0f7f3e53191759b4da6d8f20 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -74,10 +74,6 @@ pub type TestXcmRouter = EnsureDecodableXcm; pub const UNITS: Balance = 1_000_000_000_000; pub const CENTS: Balance = UNITS / 30_000; -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; @@ -89,7 +85,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type BlockWeights = (); type BlockLength = (); type Version = (); diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index aebc768bb906fd38d372d6d735a9e6bc0390ae9b..64b2d405b9068213d88582ec79ca2897d78dcc1f 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] impl-trait-for-tuples = "0.2.2" environmental = { version = "1.1.4", default-features = false } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } xcm = { package = "staging-xcm", path = "..", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index 9c9c53f0ee1ba497f044477ec857afb6774a1f05..37c2117e7b06fc62f33e71c3c57164caf26d4c9b 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system" } futures = "0.3.30" diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml index cec76e7327ec6145e8513a33264c9419414b0991..6fa0236dfb41d5e2a4ec2749a402a8f73be60eb4 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/Cargo.toml @@ -11,7 +11,7 @@ description = "XCM fee payment runtime API" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs index 62a422d6efeb0928e4f279798382dd139d95ee0a..9828acab402300a55abf2ece820cf8871c76a858 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/src/dry_run.rs @@ -19,16 +19,15 @@ //! that need to be paid. use codec::{Decode, Encode}; -use frame_support::pallet_prelude::{DispatchResult, TypeInfo}; -use sp_runtime::traits::Block as BlockT; +use frame_support::pallet_prelude::{DispatchResultWithPostInfo, TypeInfo}; use sp_std::vec::Vec; use xcm::prelude::*; /// Effects of dry-running an extrinsic. #[derive(Encode, Decode, Debug, TypeInfo)] -pub struct ExtrinsicDryRunEffects { +pub struct CallDryRunEffects { /// The result of executing the extrinsic. 
- pub execution_result: DispatchResult, + pub execution_result: DispatchResultWithPostInfo, /// The list of events fired by the extrinsic. pub emitted_events: Vec, /// The local XCM that was attempted to be executed, if any. @@ -55,12 +54,12 @@ sp_api::decl_runtime_apis! { /// If there's local execution, the location will be "Here". /// This vector can be used to calculate both execution and delivery fees. /// - /// Extrinsics or XCMs might fail when executed, this doesn't mean the result of these calls will be an `Err`. + /// Calls or XCMs might fail when executed, this doesn't mean the result of these calls will be an `Err`. /// In those cases, there might still be a valid result, with the execution error inside it. /// The only reasons why these calls might return an error are listed in the [`Error`] enum. - pub trait XcmDryRunApi { - /// Dry run extrinsic. - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, Error>; + pub trait DryRunApi { + /// Dry run call. + fn dry_run_call(origin: OriginCaller, call: Call) -> Result, Error>; /// Dry run XCM program fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, Error>; @@ -76,8 +75,4 @@ pub enum Error { /// Converting a versioned data structure from one version to another failed. #[codec(index = 1)] VersionedConversionFailed, - - /// Extrinsic was invalid. - #[codec(index = 2)] - InvalidExtrinsic, } diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs index 7a9bfa4a7968129a13395f2520b6ca1a7b14ff69..33611c8a471c03a34c125a921b24a51880dfbb3f 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/fee_estimation.rs @@ -16,19 +16,17 @@ //! Tests for using both the XCM fee payment API and the dry-run API. 
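The renamed API is exercised below in this file; the shape of a call site, condensed from those tests (given some `call: RuntimeCall`, plus the mock `TestClient`, `OriginCaller`, `RawOrigin`, and `H256` types they use):

let client = TestClient;
let runtime_api = client.runtime_api();
let origin = OriginCaller::system(frame_system::RawOrigin::Signed(1));
let effects = runtime_api
    .dry_run_call(H256::zero(), origin, call)
    .expect("runtime API call succeeds")
    .expect("dry run itself succeeds");
// execution_result is now a DispatchResultWithPostInfo rather than a plain
// DispatchResult, so the actual consumed weight is reported as well.
assert!(effects.execution_result.is_ok());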
-use frame_support::{ - dispatch::DispatchInfo, - pallet_prelude::{DispatchClass, Pays}, -}; +use frame_system::RawOrigin; use sp_api::ProvideRuntimeApi; use sp_runtime::testing::H256; use xcm::prelude::*; -use xcm_fee_payment_runtime_api::{dry_run::XcmDryRunApi, fees::XcmPaymentApi}; +use xcm_fee_payment_runtime_api::{dry_run::DryRunApi, fees::XcmPaymentApi}; mod mock; use mock::{ - extra, fake_message_hash, new_test_ext_with_balances, new_test_ext_with_balances_and_assets, - DeliveryFees, ExistentialDeposit, HereLocation, RuntimeCall, RuntimeEvent, TestClient, TestXt, + fake_message_hash, new_test_ext_with_balances, new_test_ext_with_balances_and_assets, + DeliveryFees, ExistentialDeposit, HereLocation, OriginCaller, RuntimeCall, RuntimeEvent, + TestClient, }; // Scenario: User `1` in the local chain (id 2000) wants to transfer assets to account `[0u8; 32]` @@ -50,26 +48,26 @@ fn fee_estimation_for_teleport() { new_test_ext_with_balances_and_assets(balances, assets).execute_with(|| { let client = TestClient; let runtime_api = client.runtime_api(); - let extrinsic = TestXt::new( - RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { - dest: Box::new(VersionedLocation::V4((Parent, Parachain(1000)).into())), - beneficiary: Box::new(VersionedLocation::V4( - AccountId32 { id: [0u8; 32], network: None }.into(), - )), - assets: Box::new(VersionedAssets::V4( - vec![(Here, 100u128).into(), (Parent, 20u128).into()].into(), - )), - fee_asset_item: 1, // Fees are paid with the RelayToken - weight_limit: Unlimited, - }), - Some((who, extra())), - ); + let call = RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { + dest: Box::new(VersionedLocation::from((Parent, Parachain(1000)))), + beneficiary: Box::new(VersionedLocation::from(AccountId32 { + id: [0u8; 32], + network: None, + })), + assets: Box::new(VersionedAssets::from(vec![ + (Here, 100u128).into(), + (Parent, 20u128).into(), + ])), + fee_asset_item: 1, // Fees are paid with the RelayToken + weight_limit: Unlimited, + }); + let origin = OriginCaller::system(RawOrigin::Signed(who)); let dry_run_effects = - runtime_api.dry_run_extrinsic(H256::zero(), extrinsic).unwrap().unwrap(); + runtime_api.dry_run_call(H256::zero(), origin, call).unwrap().unwrap(); assert_eq!( dry_run_effects.local_xcm, - Some(VersionedXcm::V4( + Some(VersionedXcm::from( Xcm::builder_unsafe() .withdraw_asset((Parent, 20u128)) .burn_asset((Parent, 20u128)) @@ -89,8 +87,8 @@ fn fee_estimation_for_teleport() { assert_eq!( dry_run_effects.forwarded_xcms, vec![( - VersionedLocation::V4(send_destination.clone()), - vec![VersionedXcm::V4(send_message.clone())], + VersionedLocation::from(send_destination.clone()), + vec![VersionedXcm::from(send_message.clone())], ),], ); @@ -128,14 +126,6 @@ fn fee_estimation_for_teleport() { message: send_message.clone(), message_id: fake_message_hash(&send_message), }), - RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_parts(107074070, 0), /* Will break if weights get - * updated. 
*/ - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - } - }), ] ); @@ -153,7 +143,7 @@ fn fee_estimation_for_teleport() { .query_weight_to_asset_fee( H256::zero(), weight, - VersionedAssetId::V4(HereLocation::get().into()), + VersionedAssetId::from(AssetId(HereLocation::get())), ) .unwrap() .unwrap(); @@ -168,7 +158,7 @@ fn fee_estimation_for_teleport() { .query_delivery_fees(H256::zero(), destination.clone(), remote_message.clone()) .unwrap() .unwrap(); - assert_eq!(delivery_fees, VersionedAssets::V4((Here, 20u128).into())); + assert_eq!(delivery_fees, VersionedAssets::from((Here, 20u128))); // This would have to be the runtime API of the destination, // which we have the location for. @@ -182,7 +172,7 @@ fn fee_estimation_for_teleport() { .query_weight_to_asset_fee( H256::zero(), remote_execution_weight, - VersionedAssetId::V4(HereLocation::get().into()), + VersionedAssetId::from(AssetId(HereLocation::get())), ) .unwrap() .unwrap(); @@ -214,24 +204,23 @@ fn dry_run_reserve_asset_transfer() { new_test_ext_with_balances_and_assets(balances, assets).execute_with(|| { let client = TestClient; let runtime_api = client.runtime_api(); - let extrinsic = TestXt::new( - RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { - dest: Box::new(VersionedLocation::V4((Parent, Parachain(1000)).into())), - beneficiary: Box::new(VersionedLocation::V4( - AccountId32 { id: [0u8; 32], network: None }.into(), - )), - assets: Box::new(VersionedAssets::V4((Parent, 100u128).into())), - fee_asset_item: 0, - weight_limit: Unlimited, - }), - Some((who, extra())), - ); + let call = RuntimeCall::XcmPallet(pallet_xcm::Call::transfer_assets { + dest: Box::new(VersionedLocation::from((Parent, Parachain(1000)))), + beneficiary: Box::new(VersionedLocation::from(AccountId32 { + id: [0u8; 32], + network: None, + })), + assets: Box::new(VersionedAssets::from((Parent, 100u128))), + fee_asset_item: 0, + weight_limit: Unlimited, + }); + let origin = OriginCaller::system(RawOrigin::Signed(who)); let dry_run_effects = - runtime_api.dry_run_extrinsic(H256::zero(), extrinsic).unwrap().unwrap(); + runtime_api.dry_run_call(H256::zero(), origin, call).unwrap().unwrap(); assert_eq!( dry_run_effects.local_xcm, - Some(VersionedXcm::V4( + Some(VersionedXcm::from( Xcm::builder_unsafe() .withdraw_asset((Parent, 100u128)) .burn_asset((Parent, 100u128)) @@ -251,8 +240,8 @@ fn dry_run_reserve_asset_transfer() { assert_eq!( dry_run_effects.forwarded_xcms, vec![( - VersionedLocation::V4(send_destination.clone()), - vec![VersionedXcm::V4(send_message.clone())], + VersionedLocation::from(send_destination.clone()), + vec![VersionedXcm::from(send_message.clone())], ),], ); @@ -278,14 +267,6 @@ fn dry_run_reserve_asset_transfer() { message: send_message.clone(), message_id: fake_message_hash(&send_message), }), - RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_parts(107074066, 0), /* Will break if weights get - * updated. 
*/ - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - } - }), ] ); }); @@ -310,11 +291,15 @@ fn dry_run_xcm() { let client = TestClient; let runtime_api = client.runtime_api(); let xcm_weight = runtime_api - .query_xcm_weight(H256::zero(), VersionedXcm::V4(xcm_to_weigh.clone().into())) + .query_xcm_weight(H256::zero(), VersionedXcm::from(xcm_to_weigh.clone().into())) .unwrap() .unwrap(); let execution_fees = runtime_api - .query_weight_to_asset_fee(H256::zero(), xcm_weight, VersionedAssetId::V4(Here.into())) + .query_weight_to_asset_fee( + H256::zero(), + xcm_weight, + VersionedAssetId::from(AssetId(Here.into())), + ) .unwrap() .unwrap(); let xcm = Xcm::::builder_unsafe() @@ -331,16 +316,16 @@ fn dry_run_xcm() { let dry_run_effects = runtime_api .dry_run_xcm( H256::zero(), - VersionedLocation::V4(AccountIndex64 { index: 1, network: None }.into()), - VersionedXcm::V4(xcm), + VersionedLocation::from([AccountIndex64 { index: 1, network: None }]), + VersionedXcm::from(xcm), ) .unwrap() .unwrap(); assert_eq!( dry_run_effects.forwarded_xcms, vec![( - VersionedLocation::V4((Parent, Parachain(2100)).into()), - vec![VersionedXcm::V4( + VersionedLocation::from((Parent, Parachain(2100))), + vec![VersionedXcm::from( Xcm::<()>::builder_unsafe() .reserve_asset_deposited(( (Parent, Parachain(2000)), diff --git a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs index d7b18d90a501815018fde21e25481872e33753a6..aa6c1422b608c955fc74934811f4f136212507cb 100644 --- a/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs +++ b/polkadot/xcm/xcm-fee-payment-runtime-api/tests/mock.rs @@ -29,7 +29,7 @@ use frame_support::{ use frame_system::{EnsureRoot, RawOrigin as SystemRawOrigin}; use pallet_xcm::TestWeightInfo; use sp_runtime::{ - traits::{Block as BlockT, Get, IdentityLookup, MaybeEquivalence, TryConvert}, + traits::{Dispatchable, Get, IdentityLookup, MaybeEquivalence, TryConvert}, BuildStorage, SaturatedConversion, }; use sp_std::{cell::RefCell, marker::PhantomData}; @@ -45,7 +45,7 @@ use xcm_executor::{ }; use xcm_fee_payment_runtime_api::{ - dry_run::{Error as XcmDryRunApiError, ExtrinsicDryRunEffects, XcmDryRunApi, XcmDryRunEffects}, + dry_run::{CallDryRunEffects, DryRunApi, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::{Error as XcmPaymentApiError, XcmPaymentApi}, }; @@ -58,30 +58,13 @@ construct_runtime! { } } -pub type SignedExtra = ( - // frame_system::CheckEra, - // frame_system::CheckNonce, - frame_system::CheckWeight, -); +pub type SignedExtra = (frame_system::CheckWeight,); pub type TestXt = sp_runtime::testing::TestXt; type Block = sp_runtime::testing::Block; type Balance = u128; type AssetIdForAssetsPallet = u32; type AccountId = u64; -pub fn extra() -> SignedExtra { - (frame_system::CheckWeight::new(),) -} - -type Executive = frame_executive::Executive< - TestRuntime, - Block, - frame_system::ChainContext, - TestRuntime, - AllPalletsWithSystem, - (), ->; - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for TestRuntime { type Block = Block; @@ -429,8 +412,11 @@ impl sp_api::ProvideRuntimeApi for TestClient { sp_api::mock_impl_runtime_apis! 
{ impl XcmPaymentApi for RuntimeApi { fn query_acceptable_payment_assets(xcm_version: XcmVersion) -> Result, XcmPaymentApiError> { - if xcm_version != 4 { return Err(XcmPaymentApiError::UnhandledXcmVersion) }; - Ok(vec![VersionedAssetId::V4(HereLocation::get().into())]) + Ok(vec![ + VersionedAssetId::from(AssetId(HereLocation::get())) + .into_version(xcm_version) + .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)? + ]) } fn query_xcm_weight(message: VersionedXcm<()>) -> Result { @@ -438,14 +424,25 @@ sp_api::mock_impl_runtime_apis! { } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { - let local_asset = VersionedAssetId::V4(HereLocation::get().into()); - let asset = asset - .into_version(4) - .map_err(|_| XcmPaymentApiError::VersionedConversionFailed)?; - - if asset != local_asset { return Err(XcmPaymentApiError::AssetNotFound); } - - Ok(WeightToFee::weight_to_fee(&weight)) + match asset.try_as::() { + Ok(asset_id) if asset_id.0 == HereLocation::get() => { + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!( + target: "xcm::XcmPaymentApi::query_weight_to_asset_fee", + "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!" + ); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!( + target: "xcm::XcmPaymentApi::query_weight_to_asset_fee", + "query_weight_to_asset_fee - failed to convert asset: {asset:?}!" + ); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } } fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { @@ -453,30 +450,22 @@ sp_api::mock_impl_runtime_apis! { } } - impl XcmDryRunApi for RuntimeApi { - fn dry_run_extrinsic(extrinsic: ::Extrinsic) -> Result, XcmDryRunApiError> { + impl DryRunApi for RuntimeApi { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> Result, XcmDryRunApiError> { use xcm_executor::RecordXcm; - // We want to record the XCM that's executed, so we can return it. pallet_xcm::Pallet::::set_record_xcm(true); - let result = Executive::apply_extrinsic(extrinsic).map_err(|error| { - log::error!( - target: "xcm::XcmDryRunApi::dry_run_extrinsic", - "Applying extrinsic failed with error {:?}", - error, - ); - XcmDryRunApiError::InvalidExtrinsic - })?; - // Nothing gets committed to storage in runtime APIs, so there's no harm in leaving the flag as true. + let result = call.dispatch(origin.into()); + pallet_xcm::Pallet::::set_record_xcm(false); let local_xcm = pallet_xcm::Pallet::::recorded_xcm(); let forwarded_xcms = sent_xcm() - .into_iter() - .map(|(location, message)| ( - VersionedLocation::V4(location), - vec![VersionedXcm::V4(message)], - )).collect(); - let events: Vec = System::events().iter().map(|record| record.event.clone()).collect(); - Ok(ExtrinsicDryRunEffects { - local_xcm: local_xcm.map(VersionedXcm::<()>::V4), + .into_iter() + .map(|(location, message)| ( + VersionedLocation::from(location), + vec![VersionedXcm::from(message)], + )).collect(); + let events: Vec = System::read_events_no_consensus().map(|record| record.event.clone()).collect(); + Ok(CallDryRunEffects { + local_xcm: local_xcm.map(VersionedXcm::<()>::from), forwarded_xcms, emitted_events: events, execution_result: result, @@ -486,7 +475,7 @@ sp_api::mock_impl_runtime_apis! 
{ fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { let origin_location: Location = origin_location.try_into().map_err(|error| { log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", + target: "xcm::DryRunApi::dry_run_xcm", "Location version conversion failed with error: {:?}", error, ); @@ -494,7 +483,7 @@ sp_api::mock_impl_runtime_apis! { })?; let xcm: Xcm = xcm.try_into().map_err(|error| { log::error!( - target: "xcm::XcmDryRunApi::dry_run_xcm", + target: "xcm::DryRunApi::dry_run_xcm", "Xcm version conversion failed with error {:?}", error, ); @@ -511,8 +500,8 @@ sp_api::mock_impl_runtime_apis! { let forwarded_xcms = sent_xcm() .into_iter() .map(|(location, message)| ( - VersionedLocation::V4(location), - vec![VersionedXcm::V4(message)], + VersionedLocation::from(location), + vec![VersionedXcm::from(message)], )).collect(); let events: Vec = System::events().iter().map(|record| record.event.clone()).collect(); Ok(XcmDryRunEffects { diff --git a/polkadot/xcm/xcm-simulator/Cargo.toml b/polkadot/xcm/xcm-simulator/Cargo.toml index c1c48b6d4c5eb6195a60152f29bdb61e2ad80a79..fc09b5e31861c0cb6470e7d7c121e8c94e1f60f8 100644 --- a/polkadot/xcm/xcm-simulator/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/Cargo.toml @@ -10,16 +10,20 @@ license.workspace = true workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } +scale-info = { version = "2.6.0", default-features = false } paste = "1.0.7" frame-support = { path = "../../../substrate/frame/support" } +frame-system = { path = "../../../substrate/frame/system" } sp-io = { path = "../../../substrate/primitives/io" } sp-std = { path = "../../../substrate/primitives/std" } +sp-runtime = { path = "../../../substrate/primitives/runtime" } xcm = { package = "staging-xcm", path = ".." } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor" } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder" } +polkadot-primitives = { path = "../../primitives" } polkadot-core-primitives = { path = "../../core-primitives" } polkadot-parachain-primitives = { path = "../../parachain" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } diff --git a/polkadot/xcm/xcm-simulator/example/Cargo.toml b/polkadot/xcm/xcm-simulator/example/Cargo.toml index 0e13a10a14106c92d56d288d210b57f3888068b2..8b04170e3032f90adb6f4c3c5d3954729e009b20 100644 --- a/polkadot/xcm/xcm-simulator/example/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/example/Cargo.toml @@ -10,7 +10,7 @@ version = "7.0.0" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } scale-info = { version = "2.11.1", features = ["derive"] } log = { workspace = true } diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs index 8021f9551658075fa672c02306ade1bd12298e4d..93c8302757cb061bc1f97d003ce8b7de2374448e 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/mod.rs @@ -16,7 +16,6 @@ //! Parachain runtime mock. 
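The hunks that follow swap the example's private `mock_msg_queue` module for the shared `xcm_simulator::mock_message_queue`; wiring the shared pallet into a simulator runtime is two items, sketched here against an assumed test `Runtime`:

use xcm_simulator::mock_message_queue;

impl mock_message_queue::Config for Runtime {
    type RuntimeEvent = RuntimeEvent;
    type XcmExecutor = xcm_executor::XcmExecutor<XcmConfig>;
}

// ...and inside construct_runtime!:
//     MsgQueue: mock_message_queue,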
-mod mock_msg_queue; mod xcm_config; pub use xcm_config::*; @@ -36,6 +35,7 @@ use sp_std::prelude::*; use xcm::latest::prelude::*; use xcm_builder::{EnsureXcmOrigin, SignedToAccountId32}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +use xcm_simulator::mock_message_queue; pub type AccountId = AccountId32; pub type Balance = u128; @@ -121,7 +121,7 @@ parameter_types! { pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); } -impl mock_msg_queue::Config for Runtime { +impl mock_message_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmExecutor = XcmExecutor; } @@ -175,7 +175,7 @@ construct_runtime!( pub struct Runtime { System: frame_system, Balances: pallet_balances, - MsgQueue: mock_msg_queue, + MsgQueue: mock_message_queue, PolkadotXcm: pallet_xcm, ForeignUniques: pallet_uniques, } diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs index f6d0174def8f40162f8f2c75231f9b2bad7df0fd..0769507ec37b70e5f6d60886c4db7db9a866dc0d 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain/xcm_config/constants.rs @@ -14,9 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::parachain::MsgQueue; +use crate::parachain::Runtime; use frame_support::parameter_types; use xcm::latest::prelude::*; +use xcm_simulator::mock_message_queue::ParachainId; parameter_types! { pub KsmPerSecondPerByte: (AssetId, u128, u128) = (AssetId(Parent.into()), 1, 1); @@ -26,5 +27,5 @@ parameter_types! { parameter_types! { pub const KsmLocation: Location = Location::parent(); pub const RelayNetwork: NetworkId = NetworkId::Kusama; - pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into(); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(ParachainId::::get().into())].into(); } diff --git a/polkadot/xcm/xcm-simulator/example/src/tests.rs b/polkadot/xcm/xcm-simulator/example/src/tests.rs index 6486a849af363af6b41bb40ff440efa5424e92eb..34c1feb6e946876bb62a5624915e414df65ed35e 100644 --- a/polkadot/xcm/xcm-simulator/example/src/tests.rs +++ b/polkadot/xcm/xcm-simulator/example/src/tests.rs @@ -19,7 +19,7 @@ use crate::*; use codec::Encode; use frame_support::{assert_ok, weights::Weight}; use xcm::latest::QueryResponseInfo; -use xcm_simulator::TestExt; +use xcm_simulator::{mock_message_queue::ReceivedDmp, TestExt}; // Helper function for forming buy execution message fn buy_execution(fees: impl Into) -> Instruction { @@ -171,7 +171,7 @@ fn remote_locking_and_unlocking() { ParaA::execute_with(|| { assert_eq!( - parachain::MsgQueue::received_dmp(), + ReceivedDmp::::get(), vec![Xcm(vec![NoteUnlockable { owner: (Parent, Parachain(2)).into(), asset: (Parent, locked_amount).into() @@ -501,7 +501,7 @@ fn query_holding() { // Check that QueryResponse message was received ParaA::execute_with(|| { assert_eq!( - parachain::MsgQueue::received_dmp(), + ReceivedDmp::::get(), vec![Xcm(vec![QueryResponse { query_id: query_id_set, response: Response::Assets(Assets::new()), diff --git a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml index ca794a07bfb0c73df251d49c2e08a970ea5cf994..6b3b4018d9fbb30862eee8fa5ea1408c96b2f68e 100644 --- 
a/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml +++ b/polkadot/xcm/xcm-simulator/fuzzer/Cargo.toml @@ -11,7 +11,7 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } honggfuzz = "0.5.55" arbitrary = "1.3.2" scale-info = { version = "2.11.1", features = ["derive"] } diff --git a/polkadot/xcm/xcm-simulator/src/lib.rs b/polkadot/xcm/xcm-simulator/src/lib.rs index 7efbc658bbfb8bc3fadfa037ae966ca935816ba6..a6747a4789edf8065462b831893ebc8b0284dd18 100644 --- a/polkadot/xcm/xcm-simulator/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/src/lib.rs @@ -16,6 +16,10 @@ //! Test kit to simulate cross-chain message passing and XCM execution. +/// Implementation of a simple message queue. +/// Used for sending messages. +pub mod mock_message_queue; + pub use codec::Encode; pub use paste; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs b/polkadot/xcm/xcm-simulator/src/mock_message_queue.rs similarity index 72% rename from polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs rename to polkadot/xcm/xcm-simulator/src/mock_message_queue.rs index 17cde921f3e20cbfe4da23c68009e30afbd6fca6..96b47999fe952145b09faa93aaf2cefd5143acb9 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain/mock_msg_queue.rs +++ b/polkadot/xcm/xcm-simulator/src/mock_message_queue.rs @@ -1,4 +1,4 @@ -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -14,14 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -pub use pallet::*; -use polkadot_core_primitives::BlockNumber as RelayBlockNumber; +//! Simple mock message queue. + +use codec::{Decode, Encode}; + use polkadot_parachain_primitives::primitives::{ DmpMessageHandler, Id as ParaId, XcmpMessageFormat, XcmpMessageHandler, }; +use polkadot_primitives::BlockNumber as RelayBlockNumber; use sp_runtime::traits::{Get, Hash}; + +use sp_std::prelude::*; use xcm::{latest::prelude::*, VersionedXcm}; +pub use pallet::*; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -41,15 +48,15 @@ pub mod pallet { pub struct Pallet(_); #[pallet::storage] - pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; + pub type ParachainId = StorageValue<_, ParaId, ValueQuery>; #[pallet::storage] /// A queue of received DMP messages - pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; + pub type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; impl Get for Pallet { fn get() -> ParaId { - Self::parachain_id() + ParachainId::::get() } } @@ -60,45 +67,34 @@ pub mod pallet { pub enum Event { // XCMP /// Some XCM was executed OK. - Success(Option), + Success { message_id: Option }, /// Some XCM failed. - Fail(Option, XcmError), + Fail { message_id: Option, error: XcmError }, /// Bad XCM version used. - BadVersion(Option), + BadVersion { message_id: Option }, /// Bad XCM format used. - BadFormat(Option), + BadFormat { message_id: Option }, // DMP /// Downward message is invalid XCM. - InvalidFormat(MessageId), + InvalidFormat { message_id: MessageId }, /// Downward message is unsupported version of XCM. - UnsupportedVersion(MessageId), + UnsupportedVersion { message_id: MessageId }, /// Downward message executed with the given outcome. 
- ExecutedDownward(MessageId, Outcome), + ExecutedDownward { message_id: MessageId, outcome: Outcome }, } impl Pallet { - /// Get the Parachain Id. - pub fn parachain_id() -> ParaId { - ParachainId::::get() - } - - /// Set the Parachain Id. pub fn set_para_id(para_id: ParaId) { ParachainId::::put(para_id); } - /// Get the queue of receieved DMP messages. - pub fn received_dmp() -> Vec> { - ReceivedDmp::::get() - } - fn handle_xcmp_message( sender: ParaId, _sent_at: RelayBlockNumber, xcm: VersionedXcm, - max_weight: Weight, - ) -> Result { + max_weight: xcm::latest::Weight, + ) -> Result { let hash = Encode::using_encoded(&xcm, T::Hashing::hash); let mut message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); let (result, event) = match Xcm::::try_from(xcm) { @@ -111,15 +107,20 @@ pub mod pallet { max_weight, Weight::zero(), ) { - Outcome::Error { error } => (Err(error), Event::Fail(Some(hash), error)), - Outcome::Complete { used } => (Ok(used), Event::Success(Some(hash))), + Outcome::Error { error } => + (Err(error), Event::Fail { message_id: Some(hash), error }), + Outcome::Complete { used } => + (Ok(used), Event::Success { message_id: Some(hash) }), // As far as the caller is concerned, this was dispatched without error, so // we just report the weight used. Outcome::Incomplete { used, error } => - (Ok(used), Event::Fail(Some(hash), error)), + (Ok(used), Event::Fail { message_id: Some(hash), error }), } }, - Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), + Err(()) => ( + Err(XcmError::UnhandledXcmVersion), + Event::BadVersion { message_id: Some(hash) }, + ), }; Self::deposit_event(event); result @@ -129,8 +130,8 @@ pub mod pallet { impl XcmpMessageHandler for Pallet { fn handle_xcmp_messages<'a, I: Iterator>( iter: I, - max_weight: Weight, - ) -> Weight { + max_weight: xcm::latest::Weight, + ) -> xcm::latest::Weight { for (sender, sent_at, data) in iter { let mut data_ref = data; let _ = XcmpMessageFormat::decode(&mut data_ref) @@ -156,15 +157,16 @@ pub mod pallet { iter: impl Iterator)>, limit: Weight, ) -> Weight { - for (_i, (_sent_at, data)) in iter.enumerate() { + for (_sent_at, data) in iter { let mut id = sp_io::hashing::blake2_256(&data[..]); let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); match maybe_versioned { Err(_) => { - Self::deposit_event(Event::InvalidFormat(id)); + Self::deposit_event(Event::InvalidFormat { message_id: id }); }, Ok(versioned) => match Xcm::try_from(versioned) { - Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), + Err(()) => + Self::deposit_event(Event::UnsupportedVersion { message_id: id }), Ok(x) => { let outcome = T::XcmExecutor::prepare_and_execute( Parent, @@ -173,8 +175,11 @@ pub mod pallet { limit, Weight::zero(), ); - >::append(x); - Self::deposit_event(Event::ExecutedDownward(id, outcome)); + ReceivedDmp::::append(x); + Self::deposit_event(Event::ExecutedDownward { + message_id: id, + outcome, + }); }, }, } diff --git a/polkadot/zombienet_tests/functional/0013-enable-node-feature.js b/polkadot/zombienet_tests/functional/0013-enable-node-feature.js new file mode 100644 index 0000000000000000000000000000000000000000..5fe2e38dad7d4f0ba340e48617445ae7f907fce5 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0013-enable-node-feature.js @@ -0,0 +1,35 @@ +async function run(nodeName, networkInfo, index) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + await 
zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.configuration.setNodeFeature(Number(index), true)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.toml b/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.toml new file mode 100644 index 0000000000000000000000000000000000000000..67925a3d3a7c64201cede004b20fc52dfbae181c --- /dev/null +++ b/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.toml @@ -0,0 +1,46 @@ +[settings] +timeout = 1000 +bootnode = true + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + needed_approvals = 4 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.nodes]] + name = "alice" + validator = "true" + + [[relaychain.node_groups]] + name = "validator" + count = 3 + args = ["-lparachain=debug,parachain::availability-recovery=trace,parachain::availability-distribution=trace"] + +{% for id in range(2000,2002) %} +[[parachains]] +id = {{id}} +addToGenesis = true +cumulus_based = true +chain = "glutton-westend-local-{{id}}" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] + +{% endfor %} diff --git a/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.zndsl b/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..e9e5a429e2a2c4913a61e30fc45114f65a939300 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0013-systematic-chunk-recovery.zndsl @@ -0,0 +1,43 @@ +Description: Systematic chunk recovery is used if the chunk mapping feature is enabled. +Network: ./0013-systematic-chunk-recovery.toml +Creds: config + +# Check authority status. +alice: reports node_roles is 4 +validator: reports node_roles is 4 + +# Ensure parachains are registered. +validator: parachain 2000 is registered within 60 seconds +validator: parachain 2001 is registered within 60 seconds + +# Ensure parachains made progress and approval checking works. 
+validator: parachain 2000 block height is at least 15 within 600 seconds +validator: parachain 2001 block height is at least 15 within 600 seconds + +validator: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds + +validator: reports polkadot_parachain_approval_checking_finality_lag < 3 + +validator: reports polkadot_parachain_approvals_no_shows_total < 3 within 100 seconds + +# Ensure we used regular chunk recovery and that there are no failed recoveries. +validator: count of log lines containing "Data recovery from chunks complete" is at least 10 within 300 seconds +validator: count of log lines containing "Data recovery from systematic chunks complete" is 0 within 10 seconds +validator: count of log lines containing "Data recovery from systematic chunks is not possible" is 0 within 10 seconds +validator: count of log lines containing "Data recovery from chunks is not possible" is 0 within 10 seconds +validator: reports polkadot_parachain_availability_recovery_recoveries_finished{result="failure"} is 0 within 10 seconds + +# Enable the chunk mapping feature +alice: js-script ./0013-enable-node-feature.js with "2" return is 0 within 600 seconds + +validator: reports substrate_block_height{status="finalized"} is at least 60 within 400 seconds + +validator: reports polkadot_parachain_approval_checking_finality_lag < 3 + +validator: reports polkadot_parachain_approvals_no_shows_total < 3 within 100 seconds + +# Ensure we used systematic chunk recovery and that there are no failed recoveries. +validator: count of log lines containing "Data recovery from systematic chunks complete" is at least 10 within 300 seconds +validator: count of log lines containing "Data recovery from systematic chunks is not possible" is 0 within 10 seconds +validator: count of log lines containing "Data recovery from chunks is not possible" is 0 within 10 seconds +validator: reports polkadot_parachain_availability_recovery_recoveries_finished{result="failure"} is 0 within 10 seconds diff --git a/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml new file mode 100644 index 0000000000000000000000000000000000000000..881abab64fd07b8495189982119b3f985332b8a7 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml @@ -0,0 +1,48 @@ +[settings] +timeout = 1000 +bootnode = true + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 2 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + needed_approvals = 4 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + # Use an image that doesn't speak /req_chunk/2 protocol. 
+ image = "{{POLKADOT_IMAGE}}:master-bde0bbe5" + name = "old" + count = 2 + args = ["-lparachain=debug,parachain::availability-recovery=trace,parachain::availability-distribution=trace"] + + [[relaychain.node_groups]] + name = "new" + count = 2 + args = ["-lparachain=debug,parachain::availability-recovery=trace,parachain::availability-distribution=trace,sub-libp2p=trace"] + +{% for id in range(2000,2002) %} +[[parachains]] +id = {{id}} +addToGenesis = true +cumulus_based = true +chain = "glutton-westend-local-{{id}}" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator" + image = "{{CUMULUS_IMAGE}}" + args = ["-lparachain=debug"] + +{% endfor %} diff --git a/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.zndsl b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..2ac5012db668d102b1078b4bf694e98b6c0ff302 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.zndsl @@ -0,0 +1,53 @@ +Description: Validators preserve backwards compatibility with peers speaking an older version of the /req_chunk protocol +Network: ./0014-chunk-fetching-network-compatibility.toml +Creds: config + +# Check authority status. +new: reports node_roles is 4 +old: reports node_roles is 4 + +# Ensure parachains are registered. +new: parachain 2000 is registered within 60 seconds +old: parachain 2000 is registered within 60 seconds +old: parachain 2001 is registered within 60 seconds +new: parachain 2001 is registered within 60 seconds + +# Ensure parachains made progress and approval checking works. +new: parachain 2000 block height is at least 15 within 600 seconds +old: parachain 2000 block height is at least 15 within 600 seconds +new: parachain 2001 block height is at least 15 within 600 seconds +old: parachain 2001 block height is at least 15 within 600 seconds + +new: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds +old: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds + +new: reports polkadot_parachain_approval_checking_finality_lag < 3 +old: reports polkadot_parachain_approval_checking_finality_lag < 3 + +new: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds +old: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds + +# Ensure that there are no failed recoveries. +new: count of log lines containing "Data recovery from chunks complete" is at least 10 within 300 seconds +old: count of log lines containing "Data recovery from chunks complete" is at least 10 within 300 seconds +new: count of log lines containing "Data recovery from chunks is not possible" is 0 within 10 seconds +old: count of log lines containing "Data recovery from chunks is not possible" is 0 within 10 seconds +new: reports polkadot_parachain_availability_recovery_recoveries_finished{result="failure"} is 0 within 10 seconds +old: reports polkadot_parachain_availability_recovery_recoveries_finished{result="failure"} is 0 within 10 seconds + +# Ensure we used the fallback network request. +new: log line contains "Trying the fallback protocol" within 100 seconds + +# Ensure systematic recovery was not used. 
+old: count of log lines containing "Data recovery from systematic chunks complete" is 0 within 10 seconds
+new: count of log lines containing "Data recovery from systematic chunks complete" is 0 within 10 seconds
+
+# Ensure availability-distribution worked fine.
+new: reports polkadot_parachain_fetched_chunks_total{success="succeeded"} is at least 10 within 400 seconds
+old: reports polkadot_parachain_fetched_chunks_total{success="succeeded"} is at least 10 within 400 seconds
+
+new: reports polkadot_parachain_fetched_chunks_total{success="failed"} is 0 within 10 seconds
+old: reports polkadot_parachain_fetched_chunks_total{success="failed"} is 0 within 10 seconds
+
+new: reports polkadot_parachain_fetched_chunks_total{success="not-found"} is 0 within 10 seconds
+old: reports polkadot_parachain_fetched_chunks_total{success="not-found"} is 0 within 10 seconds
diff --git a/prdoc/pr_2226.prdoc b/prdoc/1.12.0/pr_2226.prdoc
similarity index 100%
rename from prdoc/pr_2226.prdoc
rename to prdoc/1.12.0/pr_2226.prdoc
diff --git a/prdoc/pr_3444.prdoc b/prdoc/1.12.0/pr_3444.prdoc
similarity index 100%
rename from prdoc/pr_3444.prdoc
rename to prdoc/1.12.0/pr_3444.prdoc
diff --git a/prdoc/pr_3701.prdoc b/prdoc/1.12.0/pr_3701.prdoc
similarity index 100%
rename from prdoc/pr_3701.prdoc
rename to prdoc/1.12.0/pr_3701.prdoc
diff --git a/prdoc/pr_3865.prdoc b/prdoc/1.12.0/pr_3865.prdoc
similarity index 100%
rename from prdoc/pr_3865.prdoc
rename to prdoc/1.12.0/pr_3865.prdoc
diff --git a/prdoc/pr_3872.prdoc b/prdoc/1.12.0/pr_3872.prdoc
similarity index 100%
rename from prdoc/pr_3872.prdoc
rename to prdoc/1.12.0/pr_3872.prdoc
diff --git a/prdoc/1.12.0/pr_3904.prdoc b/prdoc/1.12.0/pr_3904.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..694f9b443877329e02f47d12de4d375b42fefb36
--- /dev/null
+++ b/prdoc/1.12.0/pr_3904.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Introduce pallet-delegated-staking
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Adds a new pallet `delegated-staking` that allows delegators to delegate their funds to agents, who can
+      stake these funds on their behalf. This would be used by Nomination Pools to migrate into a
+      delegation-staking based pool.
+
+crates:
+  - name: pallet-delegated-staking
+    bump: patch
+  - name: pallet-staking
+    bump: patch
+  - name: sp-staking
+    bump: minor
diff --git a/prdoc/1.12.0/pr_3962.prdoc b/prdoc/1.12.0/pr_3962.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..7ef59d38ce5cdea9dbf623e71f14c07752d06828
--- /dev/null
+++ b/prdoc/1.12.0/pr_3962.prdoc
@@ -0,0 +1,12 @@
+title: Change fork calculation algorithm.
+
+doc:
+  - audience: Node Dev
+    description: |
+      This PR changes the fork calculation and pruning algorithm to enable future block header pruning.
+      During finalization of a block we prune known stale forks, so forks are pruned faster.
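+
+      For intuition, a toy sketch of the pruning idea described above (u32 block numbers stand in
+      for hashes; this is not the actual `sc-client-db` implementation):
+      ```rust
+      use std::collections::HashMap;
+
+      /// Returns the leaves sitting on stale forks, i.e. leaves whose
+      /// ancestry never reaches the newly finalized block.
+      fn stale_leaves(parent_of: &HashMap<u32, u32>, leaves: &[u32], finalized: u32) -> Vec<u32> {
+          leaves
+              .iter()
+              .copied()
+              .filter(|&leaf| {
+                  let mut current = leaf;
+                  loop {
+                      if current == finalized {
+                          return false; // Descends from the finalized block: keep it.
+                      }
+                      match parent_of.get(&current) {
+                          Some(&parent) => current = parent,
+                          None => return true, // Never met `finalized`: prune this fork.
+                      }
+                  }
+              })
+              .collect()
+      }
+      ```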
+
+crates:
+  - name: sc-client-api
+  - name: sc-client-db
+  - name: sp-blockchain
diff --git a/prdoc/pr_3964.prdoc b/prdoc/1.12.0/pr_3964.prdoc
similarity index 100%
rename from prdoc/pr_3964.prdoc
rename to prdoc/1.12.0/pr_3964.prdoc
diff --git a/prdoc/pr_4034.prdoc b/prdoc/1.12.0/pr_4034.prdoc
similarity index 100%
rename from prdoc/pr_4034.prdoc
rename to prdoc/1.12.0/pr_4034.prdoc
diff --git a/prdoc/1.12.0/pr_4035.prdoc b/prdoc/1.12.0/pr_4035.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..0617a6a261898b743d74dc8fa55224012080391b
--- /dev/null
+++ b/prdoc/1.12.0/pr_4035.prdoc
@@ -0,0 +1,24 @@
+title: "Prospective parachains rework"
+
+doc:
+  - audience: Node Dev
+    description: |
+      Changes prospective-parachains from dealing with trees of unincluded candidates to maintaining only
+      candidate chains and a number of unconnected candidates (for which we don't yet know the parent
+      candidate but which otherwise seem potentially viable).
+      This is needed for elastic scaling, in order to have full throughput even if a candidate is validated
+      by a backing group before the parent candidate is fetched from the other backing group.
+      It also simplifies the subsystem by no longer allowing parachain cycles.
+
+crates:
+  - name: polkadot-node-core-prospective-parachains
+    bump: major
+  - name: polkadot-node-core-backing
+    bump: minor
+  - name: polkadot-collator-protocol
+    bump: minor
+  - name: polkadot-statement-distribution
+    bump: minor
+  - name: polkadot-node-subsystem-types
+    bump: major
+  - name: polkadot-node-subsystem-util
+    bump: major
diff --git a/prdoc/1.12.0/pr_4091.prdoc b/prdoc/1.12.0/pr_4091.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..5c38a344bd8ad1405dd8d1daaf8f16ee7ffaf06b
--- /dev/null
+++ b/prdoc/1.12.0/pr_4091.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removed `pallet::getter` usage from the authority-discovery pallet
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removed `pallet::getter`s from `pallet-authority-discovery`'s storage items.
+      When accessed inside the pallet, use the syntax `StorageItem::<T>::get()`.
+      When accessed outside the pallet, use the getters `current_authorities()` and `next_authorities()` instead.
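+
+      For example, a call-site migration might look like this (`Keys` stands in for one of the
+      pallet's storage items; only the two getters above are taken from this PR, the rest is
+      illustrative):
+      ```rust
+      // Inside the pallet: read the storage item directly.
+      let authorities = Keys::<T>::get();
+
+      // Outside the pallet: use the provided getters instead.
+      let current = pallet_authority_discovery::Pallet::<T>::current_authorities();
+      let next = pallet_authority_discovery::Pallet::<T>::next_authorities();
+      ```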
+
+crates:
+  - name: pallet-authority-discovery
+    bump: major
diff --git a/prdoc/pr_4102.prdoc b/prdoc/1.12.0/pr_4102.prdoc
similarity index 100%
rename from prdoc/pr_4102.prdoc
rename to prdoc/1.12.0/pr_4102.prdoc
diff --git a/prdoc/pr_4157.prdoc b/prdoc/1.12.0/pr_4157.prdoc
similarity index 100%
rename from prdoc/pr_4157.prdoc
rename to prdoc/1.12.0/pr_4157.prdoc
diff --git a/prdoc/pr_4175.prdoc b/prdoc/1.12.0/pr_4175.prdoc
similarity index 100%
rename from prdoc/pr_4175.prdoc
rename to prdoc/1.12.0/pr_4175.prdoc
diff --git a/prdoc/pr_4185.prdoc b/prdoc/1.12.0/pr_4185.prdoc
similarity index 100%
rename from prdoc/pr_4185.prdoc
rename to prdoc/1.12.0/pr_4185.prdoc
diff --git a/prdoc/pr_4202.prdoc b/prdoc/1.12.0/pr_4202.prdoc
similarity index 100%
rename from prdoc/pr_4202.prdoc
rename to prdoc/1.12.0/pr_4202.prdoc
diff --git a/prdoc/pr_4211.prdoc b/prdoc/1.12.0/pr_4211.prdoc
similarity index 100%
rename from prdoc/pr_4211.prdoc
rename to prdoc/1.12.0/pr_4211.prdoc
diff --git a/prdoc/pr_4213.prdoc b/prdoc/1.12.0/pr_4213.prdoc
similarity index 100%
rename from prdoc/pr_4213.prdoc
rename to prdoc/1.12.0/pr_4213.prdoc
diff --git a/prdoc/pr_4220.prdoc b/prdoc/1.12.0/pr_4220.prdoc
similarity index 100%
rename from prdoc/pr_4220.prdoc
rename to prdoc/1.12.0/pr_4220.prdoc
diff --git a/prdoc/pr_4281.prdoc b/prdoc/1.12.0/pr_4281.prdoc
similarity index 100%
rename from prdoc/pr_4281.prdoc
rename to prdoc/1.12.0/pr_4281.prdoc
diff --git a/prdoc/pr_4295.prdoc b/prdoc/1.12.0/pr_4295.prdoc
similarity index 100%
rename from prdoc/pr_4295.prdoc
rename to prdoc/1.12.0/pr_4295.prdoc
diff --git a/prdoc/pr_4301.prdoc b/prdoc/1.12.0/pr_4301.prdoc
similarity index 100%
rename from prdoc/pr_4301.prdoc
rename to prdoc/1.12.0/pr_4301.prdoc
diff --git a/prdoc/1.12.0/pr_4302.prdoc b/prdoc/1.12.0/pr_4302.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..bb4331f280239ffb3a8ee44d912916f8b23b91cb
--- /dev/null
+++ b/prdoc/1.12.0/pr_4302.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "migrations: take() should consume read and write operation weight"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      `take()` consumes only 1 read worth of weight in the `single-block-migrations` example, while `take()` is
+      `get() + kill()`, i.e. it should be 1 read + 1 write. Since this could mislead developers writing migrations
+      following the example, this PR fixes the weight calculation.
+
+crates:
+  - name: pallet-example-single-block-migrations
+    bump: minor
diff --git a/prdoc/pr_4311.prdoc b/prdoc/1.12.0/pr_4311.prdoc
similarity index 100%
rename from prdoc/pr_4311.prdoc
rename to prdoc/1.12.0/pr_4311.prdoc
diff --git a/prdoc/pr_4312.prdoc b/prdoc/1.12.0/pr_4312.prdoc
similarity index 100%
rename from prdoc/pr_4312.prdoc
rename to prdoc/1.12.0/pr_4312.prdoc
diff --git a/prdoc/1.12.0/pr_4326.prdoc b/prdoc/1.12.0/pr_4326.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..b448bd7e52e76be92924761b8fd925e9b5083247
--- /dev/null
+++ b/prdoc/1.12.0/pr_4326.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: CheckWeight checks for combined extrinsic length and proof size
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The `CheckWeight` `SignedExtension` will now perform an additional check.
+      So far, the extension was verifying the extrinsic length and weight limits individually. However,
+      the proof size dimension of the weight and the extrinsic length together are bound by the PoV size
+      limit. The `CheckWeight` extension will now check that the combined size of the proof and the
+      extrinsic length does not exceed the PoV size limit.
+
+crates:
+  - name: frame-system
+    bump: minor
diff --git a/prdoc/pr_4329.prdoc b/prdoc/1.12.0/pr_4329.prdoc
similarity index 100%
rename from prdoc/pr_4329.prdoc
rename to prdoc/1.12.0/pr_4329.prdoc
diff --git a/prdoc/pr_4346.prdoc b/prdoc/1.12.0/pr_4346.prdoc
similarity index 100%
rename from prdoc/pr_4346.prdoc
rename to prdoc/1.12.0/pr_4346.prdoc
diff --git a/prdoc/1.12.0/pr_4349.prdoc b/prdoc/1.12.0/pr_4349.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..fdc9e816e1b9630cb505f7d8270850ad9450c57e
--- /dev/null
+++ b/prdoc/1.12.0/pr_4349.prdoc
@@ -0,0 +1,9 @@
+title: "Store Header in RemoteExt Snapshot"
+
+doc:
+  - audience: Runtime Dev
+    description: Replaces the block hash in the RemoteExt snapshot with the block header.
+
+crates:
+  - name: frame-remote-externalities
+    bump: major
diff --git a/prdoc/pr_4364.prdoc b/prdoc/1.12.0/pr_4364.prdoc
similarity index 100%
rename from prdoc/pr_4364.prdoc
rename to prdoc/1.12.0/pr_4364.prdoc
diff --git a/prdoc/pr_4394.prdoc b/prdoc/1.12.0/pr_4394.prdoc
similarity index 100%
rename from prdoc/pr_4394.prdoc
rename to prdoc/1.12.0/pr_4394.prdoc
diff --git a/prdoc/pr_4406.prdoc b/prdoc/1.12.0/pr_4406.prdoc
similarity index 100%
rename from prdoc/pr_4406.prdoc
rename to prdoc/1.12.0/pr_4406.prdoc
diff --git a/prdoc/1.12.0/pr_4414.prdoc b/prdoc/1.12.0/pr_4414.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..864e816be91a9632859c9b11bf992af5ae7832b4
--- /dev/null
+++ b/prdoc/1.12.0/pr_4414.prdoc
@@ -0,0 +1,10 @@
+title: "Rococo Asset Hub: undeploy state-trie migration"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The state-trie migration on the Rococo Asset Hub is complete and the migration code is now removed.
+
+crates:
+  - name: asset-hub-rococo-runtime
+    bump: major
diff --git a/prdoc/1.12.0/pr_4417.prdoc b/prdoc/1.12.0/pr_4417.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..5aa72edd066a7de2a8e66ca1464446c503802644
--- /dev/null
+++ b/prdoc/1.12.0/pr_4417.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removed `pallet::getter` usage from pallet-contracts-mock-network
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removed the `pallet::getter`s from `pallet-contracts-mock-network`'s storage items.
+
+crates:
+  - name: pallet-contracts-mock-network
+    bump: minor
diff --git a/prdoc/1.12.0/pr_4426.prdoc b/prdoc/1.12.0/pr_4426.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..5beccbd2a57a593b4937b9f9fbd7c3552d80cc2c
--- /dev/null
+++ b/prdoc/1.12.0/pr_4426.prdoc
@@ -0,0 +1,15 @@
+title: "Remove warning about `BadCertificate`"
+
+doc:
+  - audience: Node Operator
+    description: |
+      The node was printing the following warning from time to time:
+      ```
+      Sending fatal alert BadCertificate
+      ```
+
+      This is not a user error and thus the warning is no longer printed.
+
+crates:
+  - name: sc-cli
diff --git a/prdoc/1.12.0/pr_4442.prdoc b/prdoc/1.12.0/pr_4442.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..ee6ac29e1a1045c67679d4014b893f3eafa8a9f8
--- /dev/null
+++ b/prdoc/1.12.0/pr_4442.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Improve mock relay in --dev mode to support async backing
+
+doc:
+  - audience: Node Dev
+    description: |
+      Support async backing in --dev mode. Improve the relay mock `MockValidationDataInherentDataProvider`
+      to match the expectations of async backing runtimes.
+
+crates:
+  - name: cumulus-client-parachain-inherent
+    bump: patch
diff --git a/prdoc/1.12.0/pr_4457.prdoc b/prdoc/1.12.0/pr_4457.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..5c9bd9822769745adcb916b52e98923fc3e555a5
--- /dev/null
+++ b/prdoc/1.12.0/pr_4457.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "sc-service: export all public functions"
+
+doc:
+  - audience: Node Dev
+    description: |
+      PR #3166 made the private functions used in `spawn_tasks()` public, so that a custom
+      implementation of `spawn_tasks()` can be provided. However, not all of these functions were
+      included in the list of exports from the `sc-service` crate. This is now fixed.
+
+crates:
+  - name: sc-service
+    bump: minor
diff --git a/prdoc/1.12.0/pr_4461.prdoc b/prdoc/1.12.0/pr_4461.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..2dafa3812878b4a2767a91d5ad2121bd23652310
--- /dev/null
+++ b/prdoc/1.12.0/pr_4461.prdoc
@@ -0,0 +1,10 @@
+title: Fix extrinsics count logging in frame-system
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Fixes the issue of the number of extrinsics in the block always being 0 in the log of frame-system.
+
+crates:
+  - name: frame-system
+    bump: patch
diff --git a/prdoc/pr_1644.prdoc b/prdoc/pr_1644.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..cc43847fa09b2fee66924b08ea0aa9ff9842b036
--- /dev/null
+++ b/prdoc/pr_1644.prdoc
@@ -0,0 +1,59 @@
+title: Add availability-recovery from systematic chunks
+
+doc:
+  - audience: Node Operator
+    description: |
+      Implements https://github.com/polkadot-fellows/RFCs/pull/47. This optimisation is guarded by a configuration
+      bit in the runtime and will only be enabled once a supermajority of the validators have upgraded to this
+      version. It's strongly advised to upgrade to this version.
+  - audience: Node Dev
+    description: |
+      Implements https://github.com/polkadot-fellows/RFCs/pull/47 and adds the logic for availability recovery
+      from systematic chunks. The /req_chunk/1 req-response protocol is now considered deprecated in favour of
+      /req_chunk/2. Systematic recovery is guarded by a configuration bit in the runtime (bit with index 2 of
+      the node_features field from the HostConfiguration) and must not be enabled until all (or almost all)
+      validators have upgraded to the node version that includes this PR.
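+
+      For illustration, the gating can be thought of as a bitfield lookup (a hedged sketch, not
+      the actual primitives code; `node_features` is assumed to be a little-endian byte slice):
+      ```rust
+      /// Returns whether feature bit `index` is set (systematic recovery uses index 2).
+      fn node_feature_enabled(node_features: &[u8], index: usize) -> bool {
+          node_features
+              .get(index / 8)
+              .map_or(false, |byte| (byte & (1u8 << (index % 8))) != 0)
+      }
+      ```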
+
+crates:
+  - name: sc-network
+    bump: minor
+  - name: polkadot-primitives
+    bump: minor
+  - name: cumulus-client-pov-recovery
+    bump: none
+  - name: polkadot-overseer
+    bump: none
+  - name: polkadot-node-primitives
+    bump: major
+  - name: polkadot-erasure-coding
+    bump: major
+  - name: polkadot-node-jaeger
+    bump: major
+  - name: polkadot-node-subsystem-types
+    bump: major
+  - name: polkadot-node-network-protocol
+    bump: major
+  - name: polkadot-service
+    bump: major
+  - name: polkadot-node-subsystem-util
+    bump: major
+  - name: polkadot-availability-distribution
+    bump: major
+  - name: polkadot-availability-recovery
+    bump: major
+  - name: polkadot-node-core-approval-voting
+    bump: minor
+  - name: polkadot-node-core-av-store
+    bump: major
+  - name: polkadot-network-bridge
+    bump: minor
+  - name: polkadot-node-core-backing
+    bump: none
+  - name: polkadot-node-core-bitfield-signing
+    bump: none
+  - name: polkadot-node-core-dispute-coordinator
+    bump: none
+  - name: cumulus-relay-chain-minimal-node
+    bump: minor
+  - name: polkadot
+    bump: minor
diff --git a/prdoc/pr_3393.prdoc b/prdoc/pr_3393.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..27ebb385930324f0b0821af3d3ac9d3a6d98f531
--- /dev/null
+++ b/prdoc/pr_3393.prdoc
@@ -0,0 +1,12 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Add `MaxRank` Config to `pallet-core-fellowship`
+
+doc:
+  - audience: Runtime User
+    description: |
+      This PR adds a new Config `MaxRank` to the core fellowship pallet. Initially, the maximum rank was set to
+      IX (Grand Master) on the core-fellowship pallet, corresponding to the establishment of the Technical
+      Fellowship and its default member count of nine. However, with the introduction of new collectives, this
+      maximum rank is expected to evolve.
+
+crates:
+  - name: pallet-core-fellowship
diff --git a/prdoc/pr_3905.prdoc b/prdoc/pr_3905.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..d1c03650c9b2365d8f33345fbcef98b4f3885a51
--- /dev/null
+++ b/prdoc/pr_3905.prdoc
@@ -0,0 +1,25 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Allows Nomination Pool to use different staking strategies including a new DelegateStake strategy.
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR introduces a new staking strategy called `DelegateStake`. This strategy allows the nomination pool
+      to delegate its stake to a validator; that is, funds are locked in the user's own account instead of being
+      transferred to the pool account. Includes migration of pools to this strategy for Westend.
+
+crates:
+  - name: pallet-nomination-pools
+    bump: major
+  - name: pallet-nomination-pools-benchmarking
+    bump: major
+  - name: sp-staking
+    bump: patch
+  - name: pallet-staking
+    bump: patch
+  - name: pallet-delegated-staking
+    bump: patch
+  - name: westend-runtime
+    bump: major
diff --git a/prdoc/pr_3935.prdoc b/prdoc/pr_3935.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..93b0fba5d99b47fa918a1100e644ea32f5cc4aa8
--- /dev/null
+++ b/prdoc/pr_3935.prdoc
@@ -0,0 +1,30 @@
+title: "Introduce Polkadot-SDK umbrella crate"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Introduces a new "umbrella" crate that re-exports all published crates of the Polkadot-SDK.
+      This helps developers select a valid set of versions for all underlying dependencies.
+
+      You can now use this crate and remove lots of dependencies from your runtime and node crates.
+      The `staging-node-cli` and `kitchensink-runtime` both adopt this pattern as an example.
+
+      Full docs in `docs/sdk/src/reference_docs/umbrella_crate.rs`.
+
+crates:
+  - name: cumulus-pallet-parachain-system
+    bump: patch
+  - name: sc-chain-spec-derive
+    bump: patch
+  - name: frame-election-provider-solution-type
+    bump: patch
+  - name: pallet-staking-reward-curve
+    bump: patch
+  - name: frame-support-procedural-tools
+    bump: patch
+  - name: sp-api-proc-macro
+    bump: patch
+  - name: polkadot-runtime-parachains
+    bump: patch
+  - name: polkadot-sdk
+    bump: major
diff --git a/prdoc/pr_3952.prdoc b/prdoc/pr_3952.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..2401adbb76c13cbee7c467df1a0284fc9b4fcaa4
--- /dev/null
+++ b/prdoc/pr_3952.prdoc
@@ -0,0 +1,35 @@
+title: Storage bound the XCMP queue pallet
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Enforce upper limits for the number of active XCMP channels, the number of outgoing XCMP
+      messages per channel and the number of signals per channel.
+
+      ## Integration
+
+      If you see this error in your try-runtime-cli:
+      ```pre
+      Max message size for channel is too large. This means that the V5 migration can be front-run and an
+      attacker could place a large message just right before the migration to make other messages un-decodable.
+      Please either increase `MaxPageSize` or decrease the `max_message_size` for this channel. Channel max:
+      102400, MaxPageSize: 65535
+      ```
+
+      Then increase the `MaxPageSize` of the `cumulus_pallet_xcmp_queue` to something like this:
+      ```rust
+      type MaxPageSize = ConstU32<{ 103 * 1024 }>;
+      ```
+
+migrations:
+  db: []
+
+  runtime:
+    - reference: cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5
+      description: A no-op migration is deployed to ensure that all `BoundedVec`s still decode as expected.
+
+crates:
+  - name: cumulus-pallet-xcmp-queue
+    bump: major
+
+host_functions: []
diff --git a/prdoc/pr_4131.prdoc b/prdoc/pr_4131.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..b0619eabe13b575cc6aa10f7215e68a57698b159
--- /dev/null
+++ b/prdoc/pr_4131.prdoc
@@ -0,0 +1,26 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Deprecate XCMv2
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      XCMv2 has been deprecated. It will be removed when XCMv5 is released.
+      Use version 3 or 4 instead.
+  - audience: Runtime User
+    description: |
+      XCMv2 has been deprecated. It will be removed when XCMv5 is released.
+      Use version 3 or 4 instead.
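+
+      For example, when constructing versioned messages (a sketch assuming the `staging-xcm`
+      crate is imported under its usual `xcm` alias and that `Xcm::new()` builds an empty
+      message):
+      ```rust
+      use xcm::{v4::Xcm, VersionedXcm};
+
+      // Prefer the current versions:
+      let message: VersionedXcm<()> = VersionedXcm::V4(Xcm::new());
+      // `VersionedXcm::V2(..)` is deprecated and will be removed with XCMv5.
+      ```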
+
+crates:
+- name: staging-xcm
+  bump: minor
+- name: xcm-procedural
+  bump: minor
+- name: staging-xcm-builder
+  bump: minor
+- name: pallet-xcm
+  bump: minor
+- name: cumulus-pallet-xcmp-queue
+  bump: minor
diff --git a/prdoc/pr_4198.prdoc b/prdoc/pr_4198.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..cff956812606fc18a500a9cf0de0cc7252955283
--- /dev/null
+++ b/prdoc/pr_4198.prdoc
@@ -0,0 +1,31 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Replace `Multiaddr` & related types with substrate-specific types
+
+doc:
+  - audience: Node Dev
+    description: |
+      Introduce custom types / substrate wrappers for `Multiaddr`, `multiaddr::Protocol`,
+      `Multihash`, `ed25519::*` and supplementary types like errors and iterators.
+
+      Common code in substrate uses these custom types, while the `libp2p` & `litep2p` network
+      backends use their corresponding libraries' types.
+
+      This is needed to independently upgrade the `libp2p` & `litep2p` dependencies.
+
+crates:
+  - name: sc-network-types
+    bump: minor
+  - name: sc-network
+    bump: minor
+  - name: sc-network-sync
+    bump: minor
+  - name: sc-authority-discovery
+    bump: minor
+  - name: sc-cli
+    bump: patch
+  - name: sc-mixnet
+    bump: patch
+  - name: sc-telemetry
+    bump: patch
diff --git a/prdoc/pr_4233.prdoc b/prdoc/pr_4233.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..c593fec68a66a133b2ee7db8a08eea38a9a05883
--- /dev/null
+++ b/prdoc/pr_4233.prdoc
@@ -0,0 +1,14 @@
+title: "[pallet_contracts] Update host fn benchmarks"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Update how the host functions are benchmarked.
+      Instead of benchmarking a contract that calls the host functions, we now benchmark the host functions directly.
+
+crates:
+  - name: pallet-contracts
+    bump: minor
+  - name: pallet-contracts-proc-macro
+    bump: minor
+
diff --git a/prdoc/pr_4249.prdoc b/prdoc/pr_4249.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1a267e263924b7f5d0df62d1e1af6800e3ad255f
--- /dev/null
+++ b/prdoc/pr_4249.prdoc
@@ -0,0 +1,17 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Moves runtime macro out of experimental flag
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Now that the runtime macro (Construct Runtime V2) has been successfully deployed on Westend,
+      this PR moves it out of the experimental feature flag and makes it generally available for
+      runtime devs.
+
+crates:
+  - name: frame-support
+    bump: minor
+  - name: frame-support-procedural
+    bump: minor
diff --git a/prdoc/pr_4274.prdoc b/prdoc/pr_4274.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..77f5d1387cf7bfc19cd548dbed1c7d7031e39c73
--- /dev/null
+++ b/prdoc/pr_4274.prdoc
@@ -0,0 +1,39 @@
+title: Introduce `CheckMetadataHash` signed extension
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Introduces the new `CheckMetadataHash` signed extension. This extension can be added to a
+      runtime to support verifying the metadata hash as described in
+      [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html).
+      This removes the requirement for having a metadata portal and, in general, a centralized
+      authentication of the metadata.
+      With this signed extension, the runtime is able to verify that the metadata used by the
+      wallet is correct. This is mainly useful for offline wallets, which users need to trust
+      anyway; it is less useful for online wallets.
+
+      There is a guide, `generate_metadata_hash`, on how to integrate this into a runtime. It
+      should make integrating the signed extension quite easy.
+  - audience: Runtime User
+    description: |
+      This brings support for the new Ledger app and similar hardware wallets. These hardware
+      wallets will be able to decode the transaction using the metadata. The runtime will
+      ensure that the metadata used for this decoding process is correct and that the online
+      wallet did not try to trick you.
+
+crates:
+  - name: substrate-wasm-builder
+    bump: minor
+  - name: sc-executor-wasmtime
+    bump: patch
+  - name: frame-metadata-hash-extension
+    bump: major
+  - name: polkadot-service
+    bump: none
+  - name: rococo-runtime
+    bump: major
+  - name: westend-runtime
+    bump: major
+  - name: asset-hub-rococo-runtime
+    bump: major
+  - name: asset-hub-westend-runtime
+    bump: major
diff --git a/prdoc/pr_4339.prdoc b/prdoc/pr_4339.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..634ccfa1a339df615db5957729c7833c972047ac
--- /dev/null
+++ b/prdoc/pr_4339.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Improving on_demand_assigner emitted events
+
+doc:
+  - audience: Runtime User
+    description: |
+      Registers the `OnDemandOrderPlaced` event, which is useful for indexers to save data related to
+      on-demand orders. Adds `SpotPriceSet` as a new event to monitor on-demand spot prices; it is emitted
+      whenever the price changes due to traffic.
+
+crates:
+  - name: polkadot-runtime-parachains
+    bump: minor
\ No newline at end of file
diff --git a/prdoc/pr_4380.prdoc b/prdoc/pr_4380.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1420409656b320c390ab05eecb15df0eca9e3432
--- /dev/null
+++ b/prdoc/pr_4380.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Remove `parametrized-consensus-hook` feature
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The `parametrized-consensus-hook` feature is obsolete and is removed by this PR. The
+      long-deprecated `CheckInherents` trait is set to be removed by September 2024.
+
+crates:
+  - name: cumulus-pallet-parachain-system
+    bump: major
+
diff --git a/prdoc/pr_4392.prdoc b/prdoc/pr_4392.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..898ce9be069e20a09ccbe1ee4a85bcec36893f02
--- /dev/null
+++ b/prdoc/pr_4392.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Remove `pallet::getter` usage from both bounties and child bounties pallet
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes `pallet::getter`s from `pallet-bounties` and `pallet-child-bounties`.
+      The syntax `StorageItem::<T>::get()` should be used instead.
+
+crates:
+  - name: pallet-bounties
+    bump: major
+  - name: pallet-child-bounties
+    bump: major
diff --git a/prdoc/pr_4410.prdoc b/prdoc/pr_4410.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1dc1d4c1f87a728ef0edb60d7334ba97a59f539d
--- /dev/null
+++ b/prdoc/pr_4410.prdoc
@@ -0,0 +1,37 @@
+title: "[sc-chain-spec] Remove deprecated code"
+
+doc:
+  - audience: Node Dev
+    description: |
+      The `RuntimeGenesisConfig` generic type parameter was removed from the `GenericChainSpec` struct.
+      The `ChainSpec::from_genesis` method was removed, together with related deprecated code in `sc-chain-spec`.
+      This change simplifies the codebase and ensures the use of up-to-date definitions.
+
+crates:
+  - name: sc-service
+    bump: minor
+  - name: minimal-template-node
+    bump: minor
+  - name: sc-cli
+    bump: patch
+  - name: polkadot-test-service
+    bump: major
+  - name: sc-service-test
+    bump: major
+  - name: staging-node-cli
+    bump: major
+  - name: parachain-template-node
+    bump: minor
+  - name: solochain-template-node
+    bump: minor
+  - name: polkadot-parachain-bin
+    bump: major
+  - name: polkadot-service
+    bump: major
+  - name: sc-consensus-grandpa
+    bump: patch
+  - name: cumulus-test-service
+    bump: minor
+  - name: sc-chain-spec
+    bump: major
diff --git a/prdoc/pr_4465.prdoc b/prdoc/pr_4465.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..cbeff09f871f00d5ce3e646fa4adfb11148971a2
--- /dev/null
+++ b/prdoc/pr_4465.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Bridge: added force_set_pallet_state call to pallet-bridge-grandpa"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Added `force_set_pallet_state` to the `pallet-bridge-grandpa`. It is only callable by the
+      root origin (governance or sudo) and may be used to update the current authority set and the
+      best finalized header without any additional checks.
+
+crates:
+  - name: pallet-bridge-grandpa
+    bump: major
+  - name: bridge-hub-rococo-runtime
+    bump: minor
+  - name: bridge-hub-westend-runtime
+    bump: minor
diff --git a/prdoc/pr_4471.prdoc b/prdoc/pr_4471.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..6d589be81fd96904f0c5c76abccb1c9b5acd920d
--- /dev/null
+++ b/prdoc/pr_4471.prdoc
@@ -0,0 +1,16 @@
+title: "Remove prospective-parachains subsystem from collator nodes"
+
+doc:
+  - audience: Node Dev
+    description: |
+      Removes the prospective-parachains subsystem from collators. The `GetMinimumRelayParents` request of
+      the implicit view is replaced by direct ChainAPI and runtime calls. The subsystem was causing
+      performance problems when a collator was connected to an RPC node, due to the high number of runtime
+      API calls, which were unnecessary for a collator.
+
+crates:
+  - name: polkadot-collator-protocol
+    bump: minor
+  - name: polkadot-service
+    bump: minor
+  - name: polkadot-node-subsystem-util
+    bump: minor
diff --git a/prdoc/pr_4472.prdoc b/prdoc/pr_4472.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..cd7527d73d6ba3689d9623c09a591ed61ab5da42
--- /dev/null
+++ b/prdoc/pr_4472.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Remove `pallet::getter` usage from pallet-democracy
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes the `pallet::getter`s from `pallet-democracy`.
+      The syntax `StorageItem::<T>::get()` should be used instead.
+
+crates:
+  - name: pallet-democracy
+    bump: major
diff --git a/prdoc/pr_4475.prdoc b/prdoc/pr_4475.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..30093dcd32b89d8f34c00154ee73fc60ced0b00a
--- /dev/null
+++ b/prdoc/pr_4475.prdoc
@@ -0,0 +1,10 @@
+title: "Deprecate dmp-queue pallet"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Schedule the DMP queue pallet for deletion. It is not needed anymore since https://github.com/paritytech/polkadot-sdk/pull/1246.
+
+crates:
+  - name: cumulus-pallet-dmp-queue
+    bump: minor
diff --git a/prdoc/pr_4478.prdoc b/prdoc/pr_4478.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..22e2e43db4caff9d405ca0ca8c4915416e7df1f5
--- /dev/null
+++ b/prdoc/pr_4478.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Snowbridge - Ethereum Client - Reject finalized updates without a sync committee in next store period
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Fixes a bug in the Ethereum light client that stalled the light client when an update in the next sync
+      committee period was received without first receiving the next sync committee update in the current period.
+
+crates:
+  - name: snowbridge-pallet-ethereum-client
+    bump: patch
diff --git a/prdoc/pr_4503.prdoc b/prdoc/pr_4503.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..d95a24cc7d6b6281761d81b4ab2ccdc8b14e4550
--- /dev/null
+++ b/prdoc/pr_4503.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Patch pool to handle extra consumer ref when destroying.
+
+doc:
+  - audience: Runtime User
+    description: |
+      An erroneous consumer reference on the pool account is preventing pools from being destroyed.
+      This patch removes the extra reference, if it exists, when the pool account is destroyed.
+
+crates:
+  - name: pallet-nomination-pools
+    bump: patch
diff --git a/prdoc/pr_4510.prdoc b/prdoc/pr_4510.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..fbd9bf961fe902b9e38a1cba1a3393764b6f3ccf
--- /dev/null
+++ b/prdoc/pr_4510.prdoc
@@ -0,0 +1,13 @@
+title: "[Contracts] Remove internal topic index"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes topics from internal events emitted by pallet_contracts. It does not touch the
+      `deposit_event` host function used by smart contracts, which can still include topics.
+      Event topics incur significant storage costs and are only used by light clients to index events and
+      avoid downloading the entire block. They are not used by dapps or indexers, which download the whole
+      block anyway.
+
+crates:
+  - name: pallet-contracts
+    bump: patch
diff --git a/prdoc/pr_4514.prdoc b/prdoc/pr_4514.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..dab6cab303476f32bf0c17836e2b770b5260c0f6
--- /dev/null
+++ b/prdoc/pr_4514.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removed `pallet::getter` usage from pallet-fast-unstake
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removed the `pallet::getter`s from `pallet-fast-unstake`.
+      The syntax `StorageItem::<T>::get()` should be used instead.
+
+crates:
+  - name: pallet-fast-unstake
+    bump: major
diff --git a/prdoc/pr_4521.prdoc b/prdoc/pr_4521.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..a8b42a2c7ee3fd5f4beca6c2aede7a4a82a8a1b1
--- /dev/null
+++ b/prdoc/pr_4521.prdoc
@@ -0,0 +1,28 @@
+title: AdaptPrice trait is now price controlled
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The broker pallet price adaptation interface is changed to be less opinionated, and more
+      information is made available to the `AdaptPrice` trait. A new example impl is included which
+      adapts the price based not on the number of cores sold, but rather on the price that was
+      achieved during the sale, to mitigate a potential price manipulation vector. More information
+      here:
+
+      https://github.com/paritytech/polkadot-sdk/issues/4360
+
+  - audience: Runtime User
+    description: |
+      The price controller of the Rococo and Westend Coretime chains will be
+      adjusted with this release. This will very likely be used in the
+      fellowship production runtime to have a much larger lead-in. This fixes a
+      price manipulation issue we discovered with the Kusama launch.
+
+crates:
+  - name: pallet-broker
+    bump: minor
+  - name: coretime-rococo-runtime
+    bump: minor
+  - name: coretime-westend-runtime
+    bump: minor
+
diff --git a/prdoc/pr_4533.prdoc b/prdoc/pr_4533.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..a0835285fc012cb0018e499f87e380f7ac059999
--- /dev/null
+++ b/prdoc/pr_4533.prdoc
@@ -0,0 +1,10 @@
+title: "Fixed RPC subscriptions leak when subscription stream is finished"
+
+doc:
+  - audience: Node Operator
+    description: |
+      The node may leak RPC subscriptions in some cases, e.g. during
+      `author_submitAndWatchExtrinsic` calls. This PR fixes the issue.
+
+crates:
+  - name: sc-rpc
diff --git a/prdoc/pr_4534.prdoc b/prdoc/pr_4534.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..417e4d3dace01e2e840a074bf9aac8615dbb5f1a
--- /dev/null
+++ b/prdoc/pr_4534.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Add Extra Check in Primary Username Setter
+
+doc:
+  - audience: Runtime User
+    description: |
+      Setting primary usernames requires an additional verification.
+
+crates:
+  - name: pallet-identity
+    bump: patch
diff --git a/prdoc/pr_4537.prdoc b/prdoc/pr_4537.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..0148c95fb4e8bcddf530b66f66ffba9c6fdaec1e
--- /dev/null
+++ b/prdoc/pr_4537.prdoc
@@ -0,0 +1,27 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Runtime APIs to help with delegate-stake based Nomination Pools.
+
+doc:
+  - audience: Runtime User
+    description: |
+      Introduces a new set of runtime APIs to help dapps and wallets integrate with the delegate-stake
+      functionality of Nomination Pools. These APIs support pool and member migration, as well as lazy
+      application of pending slashes of pool members.
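+
+      As an illustration, such APIs have roughly this shape (trait and method names here are
+      assumptions for the sketch, not a verbatim copy of the new API surface):
+      ```rust
+      sp_api::decl_runtime_apis! {
+          /// Hypothetical helper APIs for delegate-stake based pools.
+          pub trait PoolsDelegationHelperApi<AccountId: codec::Codec, Balance: codec::Codec> {
+              /// Whether `member` still needs to be migrated to delegate-stake.
+              fn member_needs_migration(member: AccountId) -> bool;
+              /// Pending slash that can be lazily applied to `member`.
+              fn member_pending_slash(member: AccountId) -> Balance;
+          }
+      }
+      ```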
+
+crates:
+  - name: pallet-nomination-pools
+    bump: minor
+  - name: westend-runtime
+    bump: minor
+  - name: kitchensink-runtime
+    bump: minor
+  - name: pallet-delegated-staking
+    bump: minor
+  - name: sp-staking
+    bump: minor
+  - name: pallet-nomination-pools-benchmarking
+    bump: patch
+  - name: pallet-nomination-pools-runtime-api
+    bump: minor
diff --git a/prdoc/pr_4541.prdoc b/prdoc/pr_4541.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..815ea2c800625f78e1f92d153a0f3da664508829
--- /dev/null
+++ b/prdoc/pr_4541.prdoc
@@ -0,0 +1,16 @@
+title: "Remove warning about `BadCertificate` Version 2"
+
+doc:
+  - audience: Node Operator
+    description: |
+      The node was printing the following warning from time to time:
+      ```
+      Sending fatal alert BadCertificate
+      ```
+
+      This is not a user error and thus the warning is no longer printed.
+
+crates:
+  - name: sc-tracing
+    bump: patch
diff --git a/prdoc/pr_4542.prdoc b/prdoc/pr_4542.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..faaf9dc2c288551ec84b7bcd739158dd817341e6
--- /dev/null
+++ b/prdoc/pr_4542.prdoc
@@ -0,0 +1,13 @@
+title: "Adds ability to specify chain type in chain-spec-builder"
+
+doc:
+  - audience: Node Operator
+    description: |
+      Currently, `chain-spec-builder` only creates a spec with the `Live` chain type. This PR adds the
+      ability to specify it while keeping the same default.
+
+crates:
+  - name: staging-chain-spec-builder
+    bump: patch
+  - name: sc-chain-spec
+    bump: patch
diff --git a/prdoc/pr_4555.prdoc b/prdoc/pr_4555.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..257115d236e76756007281a0b64d7a73fa2fdff9
--- /dev/null
+++ b/prdoc/pr_4555.prdoc
@@ -0,0 +1,11 @@
+title: Move `para_id` to `MockValidationDataInherentDataProvider`
+
+doc:
+  - audience: Node Dev
+    description: |
+      This moves the `para_id` from `MockXcmConfig` to `MockValidationDataInherentDataProvider` to make it
+      more prominent. The `para_id` should be set to the id of the parachain that is being mocked, to ensure
+      that the relay chain storage proof is set up correctly, etc.
+
+crates:
+  - name: cumulus-client-parachain-inherent
+    bump: major
diff --git a/prdoc/pr_4571.prdoc b/prdoc/pr_4571.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..b03fee8a5cc8746f9ae569a6ffd4902f1da4a510
--- /dev/null
+++ b/prdoc/pr_4571.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Ignore mandatory extrinsics in total PoV size check
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The `CheckWeight` extension checks that the extrinsic length and the used storage proof
+      weight together do not exceed the PoV size limit. This led to problems when the PoV size
+      limit was already reached before mandatory extrinsics were applied. The `CheckWeight`
+      extension now allows extrinsics of `DispatchClass::Mandatory` to be applied even if
+      the limit is reached.
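+
+      In pseudocode, the adjusted behaviour looks like this (a sketch, not the actual
+      `frame-system` code):
+      ```rust
+      use frame_support::dispatch::DispatchClass;
+
+      /// The combined extrinsic-length + proof-size limit is not enforced for
+      /// mandatory extrinsics, since inherents must always be applied.
+      fn combined_pov_size_ok(class: DispatchClass, combined_size: u64, limit: u64) -> bool {
+          class == DispatchClass::Mandatory || combined_size <= limit
+      }
+      ```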
+
+crates:
+  - name: frame-system
+    bump: minor
+  - name: polkadot-sdk
+    bump: minor
diff --git a/prdoc/pr_4595.prdoc b/prdoc/pr_4595.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..8baa6e8a91f3c54c50a5f20b2e03e812a8857162
--- /dev/null
+++ b/prdoc/pr_4595.prdoc
@@ -0,0 +1,25 @@
+title: "Remove `elastic-scaling-experimental` feature flag"
+
+doc:
+  - audience: Node Dev
+    description: |
+      The feature was masking the ability of collators to respond with `CollationWithParentHeadData`
+      to validator collation fetch requests, a requirement for elastic scaling.
+      Please note that `CollationWithParentHeadData` is only sent by collators of parachains with
+      multiple cores assigned; otherwise collators must respond with `CollationFetchingResponse::Collation`.
+  - audience: Node Operator
+    description: |
+      This change enables elastic scaling support in collators. Please upgrade to the latest version;
+      otherwise validator nodes will not be able to back elastic parachain blocks, leading to
+      missed rewards.
+
+crates:
+  - name: polkadot-collator-protocol
+    bump: major
+    validate: false
+  - name: polkadot-service
+    bump: major
+    validate: false
+  - name: polkadot-parachain-bin
+    bump: minor
+    validate: false
diff --git a/prdoc/pr_4621.prdoc b/prdoc/pr_4621.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..ebc06b92b39c3bf2956f0db71a790acd08f291a9
--- /dev/null
+++ b/prdoc/pr_4621.prdoc
@@ -0,0 +1,43 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Change XcmDryRunApi::dry_run_extrinsic to take a call instead
+
+doc:
+  - audience: Runtime User
+    description: |
+      The `XcmDryRunApi` now dry-runs calls instead of extrinsics.
+      This means it's possible to dry-run an extrinsic before signing it,
+      allowing for seamless dry-running in dapps.
+      Additionally, calls can now be dry-run for different accounts.
+  - audience: Runtime Dev
+    description: |
+      The `XcmDryRunApi::dry_run_extrinsic` function was replaced by
+      `XcmDryRunApi::dry_run_call`.
+      This new function takes an origin (`OriginCaller`, the encodable inner variant)
+      and a call instead of an extrinsic.
+      This was needed to avoid requiring the user to sign twice, once for the dry-run and
+      a second time to actually submit the extrinsic.
+      Additionally, calls can now be dry-run for different accounts.
+      The implementation for this runtime API is now simpler, being `call.dispatch(origin.into())`
+      instead of using the `Executive`.
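+
+      A simplified sketch of that shape (type names as in a typical runtime; the real API also
+      returns the emitted events and any forwarded XCMs alongside the dispatch result):
+      ```rust
+      fn dry_run_call(origin: OriginCaller, call: RuntimeCall) -> DispatchResultWithPostInfo {
+          // Dispatch with the caller-supplied origin instead of pushing a
+          // signed extrinsic through `Executive`.
+          call.dispatch(origin.into())
+      }
+      ```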
+
+crates:
+  - name: xcm-fee-payment-runtime-api
+    bump: major
+  - name: penpal-runtime
+    bump: major
+  - name: xcm-emulator
+    bump: minor
+  - name: polkadot-service
+    bump: major
+  - name: rococo-runtime
+    bump: major
+  - name: westend-runtime
+    bump: major
+  - name: asset-hub-rococo-runtime
+    bump: major
+  - name: asset-hub-westend-runtime
+    bump: major
+  - name: pallet-xcm
+    bump: minor
diff --git a/prdoc/pr_4634.prdoc b/prdoc/pr_4634.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..0c16dedeae16e4cba3b0e69029abaf213771aade
--- /dev/null
+++ b/prdoc/pr_4634.prdoc
@@ -0,0 +1,34 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Implement XcmPaymentApi and DryRunApi on all system parachains
+
+doc:
+  - audience: Runtime User
+    description: |
+      The new `XcmPaymentApi` and `DryRunApi` have been implemented on all Westend and Rococo system parachains.
+      You can test them out.
+  - audience: Runtime Dev
+    description: |
+      The new `XcmPaymentApi` and `DryRunApi` have been implemented on all Westend and Rococo system parachains.
+      These can be used to build UIs that estimate XCM execution and sending, using libraries like PAPI or PJS.
+
+crates:
+  - name: bridge-hub-rococo-runtime
+    bump: minor
+  - name: bridge-hub-westend-runtime
+    bump: minor
+  - name: collectives-westend-runtime
+    bump: minor
+  - name: contracts-rococo-runtime
+    bump: minor
+  - name: coretime-rococo-runtime
+    bump: minor
+  - name: coretime-westend-runtime
+    bump: minor
+  - name: people-rococo-runtime
+    bump: minor
+  - name: people-westend-runtime
+    bump: minor
+  - name: penpal-runtime
+    bump: minor
diff --git a/prdoc/pr_4645.prdoc b/prdoc/pr_4645.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..1bc65f02ea578dace067b21f05cc298d448dffb4
--- /dev/null
+++ b/prdoc/pr_4645.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: make all storage items in parachain-system public
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      All storage items in `cumulus-pallet-parachain-system` are now public. This allows
+      these storage items to be used from within other runtime pallets
+      or from the runtime itself. For instance, it should allow reading the latest
+      relay state proof in order to read a certain well-known key.
+
+crates:
+  - name: cumulus-pallet-parachain-system
+    bump: minor
\ No newline at end of file
diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json
index 294005f209d57d719816a40132ea7f5d49b3d029..e6c0468aaf8517956501324dc4ba087b444bb424 100644
--- a/prdoc/schema_user.json
+++ b/prdoc/schema_user.json
@@ -218,7 +218,7 @@
   },
   "doc": {
     "type": "object",
-    "description": "You have the the option to provide different description of your PR for different audiences.",
+    "description": "You have the option to provide different descriptions of your PR for different audiences.",
    "additionalProperties": false,
    "properties": {
      "audience": {
diff --git a/scripts/generate-umbrella.py b/scripts/generate-umbrella.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bdf160e63b176c6981ec18d8b72ee8782dd0e8b
--- /dev/null
+++ b/scripts/generate-umbrella.py
@@ -0,0 +1,204 @@
+"""
+
+Creates the Polkadot-SDK umbrella crate that re-exports all other crates.
+
+This re-creates the `umbrella/` folder.
+Ensure that it does not contain any changes you want to keep.
+
+Usage:
+    python3 scripts/generate-umbrella.py --sdk <path> --version <version>
+
+Example:
+    python3 scripts/generate-umbrella.py --sdk ../polkadot-sdk --version 1.11.0
+"""
+
+import argparse
+import os
+import re
+import toml
+import shutil
+
+from cargo_workspace import Workspace
+
+"""
+Crate names that should be excluded from the umbrella crate.
+"""
+def exclude(crate):
+    name = crate.name
+    if crate.metadata.get("polkadot-sdk.skip-umbrella", False):
+        return True
+
+    # No fuzzers or examples:
+    if "example" in name or name.endswith("fuzzer"):
+        return True
+    # No runtime crates:
+    if name.endswith("-runtime"):
+        # Note: this is a bit hacky. We should use custom crate metadata instead.
+        return name != "sp-runtime" and name != "bp-runtime" and name != "frame-try-runtime"
+
+    return False
+
+def main(path, version):
+    delete_umbrella(path)
+    workspace = Workspace.from_path(path)
+    print(f'Indexed {workspace}')
+
+    std_crates = [] # List of (crate, path) tuples; a list is used so it can be sorted.
+    nostd_crates = []
+    for crate in workspace.crates:
+        if crate.name == 'polkadot-sdk':
+            continue
+        if not crate.publish:
+            print(f"Skipping {crate.name} as it is not published")
+            continue
+
+        lib_path = os.path.dirname(crate.abs_path)
+        manifest_path = os.path.join(lib_path, "Cargo.toml")
+        lib_path = os.path.join(lib_path, "src", "lib.rs")
+        path = os.path.dirname(crate.rel_path)
+
+        # Guess which crates support no_std. Proc-macro crates are always no_std:
+        with open(manifest_path, "r") as f:
+            manifest = toml.load(f)
+            if 'lib' in manifest and 'proc-macro' in manifest['lib']:
+                if manifest['lib']['proc-macro']:
+                    nostd_crates.append((crate, path))
+                    continue
+
+        # Crates without a lib.rs cannot be no_std:
+        if not os.path.exists(lib_path):
+            print(f"Skipping {crate.name} as it does not have a 'src/lib.rs'")
+            continue
+        if exclude(crate):
+            print(f"Skipping {crate.name} as it is in the exclude list")
+            continue
+
+        # Now search for a no_std attribute:
+        with open(lib_path, "r") as f:
+            content = f.read()
+            if "#![no_std]" in content or '#![cfg_attr(not(feature = "std"), no_std)]' in content:
+                nostd_crates.append((crate, path))
+            elif 'no_std' in content:
+                raise Exception(f"Found 'no_std' in {lib_path} without knowing how to handle it")
+            else:
+                std_crates.append((crate, path))
+
+    # Sort by name:
+    std_crates.sort(key=lambda x: x[0].name)
+    nostd_crates.sort(key=lambda x: x[0].name)
+    all_crates = std_crates + nostd_crates
+    all_crates.sort(key=lambda x: x[0].name)
+    dependencies = {}
+
+    for (crate, path) in nostd_crates:
+        dependencies[crate.name] = {"path": f"../{path}", "default-features": False, "optional": True}
+
+    for (crate, path) in std_crates:
+        dependencies[crate.name] = {"path": f"../{path}", "default-features": False, "optional": True}
+
+    # The empty features are filled by Zepter:
+    features = {
+        "default": [ "std" ],
+        "std": [],
+        "runtime-benchmarks": [],
+        "try-runtime": [],
+        "serde": [],
+        "experimental": [],
+        "with-tracing": [],
+        "runtime": list([f"{d.name}" for d, _ in nostd_crates]),
+        "node": ["std"] + list([f"{d.name}" for d, _ in std_crates]),
+        "tuples-96": [],
+    }
+
+    manifest = {
+        "package": {
+            "name": "polkadot-sdk",
+            "version": version,
+            "edition": { "workspace": True },
+            "authors": { "workspace": True },
+            "description": "Polkadot SDK umbrella crate.",
+            "license": "Apache-2.0",
+            "metadata": { "docs": { "rs": {
+                "features": ["runtime", "node"],
+                "targets": ["x86_64-unknown-linux-gnu"]
+            }}}
+        },
+        "dependencies":
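+            # Every re-exported crate becomes an optional, default-features-off
+            # path dependency (collected into `dependencies` above).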
+            dependencies,
+        "features": features,
+    }
+
+    umbrella_dir = os.path.join(workspace.path, "umbrella")
+    manifest_path = os.path.join(umbrella_dir, "Cargo.toml")
+    lib_path = os.path.join(umbrella_dir, "src", "lib.rs")
+    # Create all directories:
+    os.makedirs(os.path.dirname(lib_path), exist_ok=True)
+    # Write the manifest:
+    with open(manifest_path, "w") as f:
+        toml_manifest = toml.dumps(manifest)
+        f.write(toml_manifest)
+        print(f"Wrote {manifest_path}")
+    # ... and the lib.rs:
+    with open(lib_path, "w") as f:
+        f.write('''// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+//! Polkadot SDK umbrella crate re-exporting all other published crates.
+//!
+//! This helps to set a single version number for all your dependencies. Docs are in the
+//! `polkadot-sdk-docs` crate.
+
+// This file is auto-generated and checked by the CI. You can edit it manually, but it must be
+// exactly the way that the CI expects it.
+''')
+
+        for crate, _ in all_crates:
+            use = crate.name.replace("-", "_")
+            desc = crate.description if crate.description.endswith(".") else crate.description + "."
+            f.write(f'\n/// {desc}')
+            f.write(f'\n#[cfg(feature = "{crate.name}")]\n')
+            f.write(f"pub use {use};\n")
+
+        print(f"Wrote {lib_path}")
+
+    add_to_workspace(workspace.path)
+
+"""
+Delete the umbrella folder and remove the umbrella crate from the workspace.
+"""
+def delete_umbrella(path):
+    umbrella_dir = os.path.join(path, "umbrella")
+    # Remove the umbrella crate from the workspace:
+    manifest = os.path.join(path, "Cargo.toml")
+    manifest = open(manifest, "r").read()
+    manifest = re.sub(r'\s+"umbrella",\n', "", manifest)
+    with open(os.path.join(path, "Cargo.toml"), "w") as f:
+        f.write(manifest)
+    if os.path.exists(umbrella_dir):
+        print(f"Deleting {umbrella_dir}")
+        shutil.rmtree(umbrella_dir)
+
+"""
+Create the umbrella crate and add it to the workspace.
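+
+Besides adding the crate to `members`, this refreshes the lockfile via `cargo metadata`,
+fills in the empty features with `zepter` and formats the manifests with `taplo`
+(see the commands below).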
+""" +def add_to_workspace(path): + manifest = os.path.join(path, "Cargo.toml") + manifest = open(manifest, "r").read() + manifest = re.sub(r'^members = \[', 'members = [\n "umbrella",', manifest, flags=re.M) + with open(os.path.join(path, "Cargo.toml"), "w") as f: + f.write(manifest) + + os.chdir(path) # hack + os.system("cargo metadata --format-version 1 > /dev/null") # update the lockfile + os.system(f"zepter") # enable the features + os.system(f"taplo format --config .config/taplo.toml Cargo.toml umbrella/Cargo.toml") + +def parse_args(): + parser = argparse.ArgumentParser(description="Create a polkadot-sdk crate") + parser.add_argument("--sdk", type=str, default="polkadot-sdk", help="Path to the polkadot-sdk crate") + parser.add_argument("--version", type=str, help="Version of the polkadot-sdk crate") + return parser.parse_args() + +if __name__ == "__main__": + args = parse_args() + main(args.sdk, args.version) diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 050004acc78f185ddca3c7637e7ca944319fdc7e..929cd6a29e3889dbd93d6e31277406cf61176114 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -23,13 +23,10 @@ wasm-opt = false targets = ["x86_64-unknown-linux-gnu"] [badges] -travis-ci = { repository = "paritytech/substrate" } maintenance = { status = "actively-developed" } -is-it-maintained-issue-resolution = { repository = "paritytech/substrate" } -is-it-maintained-open-issues = { repository = "paritytech/substrate" } +is-it-maintained-issue-resolution = { repository = "paritytech/polkadot-sdk" } +is-it-maintained-open-issues = { repository = "paritytech/polkadot-sdk" } -# The same node binary as the `substrate` (defined in the workspace `Cargo.toml`) binary, -# but just exposed by this crate here. 
 [[bin]]
 name = "substrate-node"
 path = "bin/main.rs"
@@ -40,99 +37,30 @@ crate-type = ["cdylib", "rlib"]
 
 [dependencies]
 # third-party dependencies
-array-bytes = "6.2.2"
+array-bytes = "6.1"
 clap = { version = "4.5.3", features = ["derive"], optional = true }
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 serde = { features = ["derive"], workspace = true, default-features = true }
 jsonrpsee = { version = "0.22", features = ["server"] }
 futures = "0.3.30"
 log = { workspace = true, default-features = true }
 rand = "0.8"
+serde_json = { workspace = true, default-features = true }
+
+# The Polkadot-SDK:
+polkadot-sdk = { path = "../../../../umbrella", features = ["node"] }
 
-# primitives
-sp-authority-discovery = { path = "../../../primitives/authority-discovery" }
-sp-consensus-babe = { path = "../../../primitives/consensus/babe" }
-beefy-primitives = { package = "sp-consensus-beefy", path = "../../../primitives/consensus/beefy" }
-grandpa-primitives = { package = "sp-consensus-grandpa", path = "../../../primitives/consensus/grandpa" }
-sp-api = { path = "../../../primitives/api" }
-sp-core = { path = "../../../primitives/core" }
-sp-runtime = { path = "../../../primitives/runtime" }
-sp-timestamp = { path = "../../../primitives/timestamp" }
-sp-genesis-builder = { path = "../../../primitives/genesis-builder" }
-sp-inherents = { path = "../../../primitives/inherents" }
-sp-keyring = { path = "../../../primitives/keyring" }
-sp-keystore = { path = "../../../primitives/keystore" }
-sp-consensus = { path = "../../../primitives/consensus/common" }
-sp-transaction-storage-proof = { path = "../../../primitives/transaction-storage-proof" }
-sp-io = { path = "../../../primitives/io" }
-sp-mixnet = { path = "../../../primitives/mixnet" }
-sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" }
-sp-statement-store = { path = "../../../primitives/statement-store" }
-
-# client dependencies
-sc-client-api = { path = "../../../client/api" }
-sc-chain-spec = { path = "../../../client/chain-spec" }
-sc-consensus = { path = "../../../client/consensus/common" }
-sc-transaction-pool = { path = "../../../client/transaction-pool" }
-sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" }
-sc-statement-store = { path = "../../../client/statement-store" }
-sc-network = { path = "../../../client/network" }
-sc-network-common = { path = "../../../client/network/common" }
-sc-network-sync = { path = "../../../client/network/sync" }
-sc-network-statement = { path = "../../../client/network/statement" }
-sc-consensus-slots = { path = "../../../client/consensus/slots" }
-sc-consensus-babe = { path = "../../../client/consensus/babe" }
-beefy = { package = "sc-consensus-beefy", path = "../../../client/consensus/beefy" }
-grandpa = { package = "sc-consensus-grandpa", path = "../../../client/consensus/grandpa" }
-mmr-gadget = { path = "../../../client/merkle-mountain-range" }
-sc-rpc = { path = "../../../client/rpc" }
-sc-basic-authorship = { path = "../../../client/basic-authorship" }
-sc-service = { path = "../../../client/service", default-features = false }
-sc-telemetry = { path = "../../../client/telemetry" }
-sc-executor = { path = "../../../client/executor" }
-sc-authority-discovery = { path = "../../../client/authority-discovery" }
-sc-mixnet = { path = "../../../client/mixnet" }
-sc-sync-state-rpc = { path = "../../../client/sync-state-rpc" }
-sc-sysinfo = { path = "../../../client/sysinfo" }
-sc-storage-monitor = { path = "../../../client/storage-monitor" } -sc-offchain = { path = "../../../client/offchain" } - -# frame dependencies -frame-benchmarking = { path = "../../../frame/benchmarking" } -frame-system = { path = "../../../frame/system" } -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api" } -pallet-assets = { path = "../../../frame/assets" } -pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } -pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } -pallet-im-online = { path = "../../../frame/im-online", default-features = false } -pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false } - -# node-specific dependencies +# Shared code between the staging node and kitchensink runtime: kitchensink-runtime = { path = "../runtime" } node-rpc = { path = "../rpc" } node-primitives = { path = "../primitives" } - -# CLI-specific dependencies -sc-cli = { path = "../../../client/cli", optional = true } -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } -serde_json = { workspace = true, default-features = true } [dev-dependencies] -sc-keystore = { path = "../../../client/keystore" } -sc-client-db = { path = "../../../client/db" } -sc-consensus = { path = "../../../client/consensus/common" } -sc-consensus-babe = { path = "../../../client/consensus/babe" } -sc-consensus-epochs = { path = "../../../client/consensus/epochs" } -sc-service-test = { path = "../../../client/service/test" } -sc-block-builder = { path = "../../../client/block-builder" } -sp-tracing = { path = "../../../primitives/tracing" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } futures = "0.3.30" tempfile = "3.1.0" assert_cmd = "2.0.2" -nix = { version = "0.27.1", features = ["signal"] } +nix = { version = "0.28.0", features = ["signal"] } regex = "1.6.0" platforms = "3.0" soketto = "0.7.1" @@ -140,94 +68,39 @@ criterion = { version = "0.5.1", features = ["async_tokio"] } tokio = { version = "1.22.0", features = ["macros", "parking_lot", "time"] } tokio-util = { version = "0.7.4", features = ["compat"] } wait-timeout = "0.2" -substrate-rpc-client = { path = "../../../utils/frame/rpc/client" } -pallet-timestamp = { path = "../../../frame/timestamp" } -substrate-cli-test-utils = { path = "../../../test-utils/cli" } - wat = "1.0" -frame-support = { path = "../../../frame/support" } -node-testing = { path = "../testing" } -pallet-balances = { path = "../../../frame/balances" } -pallet-contracts = { path = "../../../frame/contracts" } -pallet-glutton = { path = "../../../frame/glutton" } -pallet-sudo = { path = "../../../frame/sudo" } -pallet-treasury = { path = "../../../frame/treasury" } -pallet-transaction-payment = { path = "../../../frame/transaction-payment" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -pallet-root-testing = { path = "../../../frame/root-testing" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-externalities = { path = "../../../primitives/externalities" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-runtime = { path = "../../../primitives/runtime" } serde_json = { workspace = true, default-features = true } scale-info = { 
version = "2.11.1", features = ["derive", "serde"] } -sp-trie = { path = "../../../primitives/trie" } -sp-state-machine = { path = "../../../primitives/state-machine" } + +# These testing-only dependencies are not exported by the Polkadot-SDK crate: +node-testing = { path = "../testing" } +substrate-cli-test-utils = { path = "../../../test-utils/cli" } +sc-service-test = { path = "../../../client/service/test" } [build-dependencies] clap = { version = "4.5.3", optional = true } clap_complete = { version = "4.0.2", optional = true } + node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } -substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true } -substrate-frame-cli = { path = "../../../utils/frame/frame-utilities-cli", optional = true } -sc-cli = { path = "../../../client/cli", optional = true } -pallet-balances = { path = "../../../frame/balances" } -sc-storage-monitor = { path = "../../../client/storage-monitor" } + +polkadot-sdk = { path = "../../../../umbrella", features = ["frame-benchmarking-cli", "sc-cli", "sc-storage-monitor", "substrate-build-script-utils"], optional = true } [features] default = ["cli"] cli = [ "clap", "clap_complete", - "frame-benchmarking-cli", "node-inspect", - "sc-cli", - "sc-service/rocksdb", - "substrate-build-script-utils", - "substrate-frame-cli", + "polkadot-sdk", ] runtime-benchmarks = [ - "frame-benchmarking-cli/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", "kitchensink-runtime/runtime-benchmarks", "node-inspect?/runtime-benchmarks", - "pallet-asset-tx-payment/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-contracts/runtime-benchmarks", - "pallet-glutton/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", - "pallet-skip-feeless-payment/runtime-benchmarks", - "pallet-sudo/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-treasury/runtime-benchmarks", - "sc-client-db/runtime-benchmarks", - "sc-service/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", + "polkadot-sdk/runtime-benchmarks", ] -# Enable features that allow the runtime to be tried and debugged. Name might be subject to change -# in the near future. 
try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", "kitchensink-runtime/try-runtime", - "pallet-asset-conversion-tx-payment/try-runtime", - "pallet-asset-tx-payment/try-runtime", - "pallet-assets/try-runtime", - "pallet-balances/try-runtime", - "pallet-contracts/try-runtime", - "pallet-glutton/try-runtime", - "pallet-im-online/try-runtime", - "pallet-root-testing/try-runtime", - "pallet-skip-feeless-payment/try-runtime", - "pallet-sudo/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-treasury/try-runtime", - "sp-runtime/try-runtime", + "polkadot-sdk/try-runtime", "substrate-cli-test-utils/try-runtime", ] diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index ef7ae4fdf26308827e2745d4cb54c8fe21fbc14f..c16b25187e5f58a218fa22fc55cfdc58a214c6bf 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -16,6 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use polkadot_sdk::*; + use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; use kitchensink_runtime::{constants::currency::*, BalancesCall}; diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index a326e1a79ea347f169e372581d07dc4f43848e24..fa4da5c13d4344208c6a51067fbf78e380923a82 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use polkadot_sdk::*; + use codec::{Decode, Encode}; use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use frame_support::Hashable; @@ -55,7 +57,7 @@ const HEAP_PAGES: u64 = 20; type TestExternalities = CoreTestExternalities; fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { - node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) + node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH, None) } fn new_test_ext(genesis_config: &RuntimeGenesisConfig) -> TestExternalities { diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index c4488415b98343a15c8c81eb2ff164db4b36fa29..6618f4b1132e031cd7e6c69451e3ff8997e86684 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -16,6 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
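The `use polkadot_sdk::*;` lines added throughout these files are what keep them compiling after their direct `sc-*`/`sp-*`/`frame-*` dependencies were removed: the glob import pulls every crate name re-exported by the umbrella back into scope. A minimal sketch (not from this PR, and assuming an umbrella built with the `sp-core` feature):

```rust
use polkadot_sdk::*;

// With the umbrella as the only dependency, crate names such as `sp_core`
// resolve through its re-exports, so pre-existing paths keep working unchanged.
fn zero_hash() -> sp_core::H256 {
    sp_core::H256::zero()
}
```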
+use polkadot_sdk::*; use std::time::Duration; use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; diff --git a/substrate/bin/node/cli/bin/main.rs b/substrate/bin/node/cli/bin/main.rs index ccc7d7b6b112558832549bf7e5435f68ac0fe944..b18d08880556093319cad2dfd3faa789fae819b1 100644 --- a/substrate/bin/node/cli/bin/main.rs +++ b/substrate/bin/node/cli/bin/main.rs @@ -20,6 +20,7 @@ #![warn(missing_docs)] +use polkadot_sdk::*; use staging_node_cli as node_cli; fn main() -> sc_cli::Result<()> { diff --git a/substrate/bin/node/cli/build.rs b/substrate/bin/node/cli/build.rs index 033f1e3349e6fae1f062f9a075dcb66295aab22d..c25d15de057425458230f48be270ca7ad09d895e 100644 --- a/substrate/bin/node/cli/build.rs +++ b/substrate/bin/node/cli/build.rs @@ -27,8 +27,10 @@ mod cli { use clap::{CommandFactory, ValueEnum}; use clap_complete::{generate_to, Shell}; + use polkadot_sdk::substrate_build_script_utils::{ + generate_cargo_keys, rerun_if_git_head_changed, + }; use std::{env, fs, path::Path}; - use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; pub fn main() { build_shell_completion(); diff --git a/substrate/bin/node/cli/src/benchmarking.rs b/substrate/bin/node/cli/src/benchmarking.rs index 333f855f2d7bb876d7768784b60dfa21e1d8d708..a2b28a0f317de4fcc74b963d3ccb1c026ddfa380 100644 --- a/substrate/bin/node/cli/src/benchmarking.rs +++ b/substrate/bin/node/cli/src/benchmarking.rs @@ -22,6 +22,8 @@ use crate::service::{create_extrinsic, FullClient}; +use polkadot_sdk::*; + use kitchensink_runtime::{BalancesCall, SystemCall}; use node_primitives::{AccountId, Balance}; use sc_cli::Result; diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index d48d4a50f85f2672d5e65c639a6a0b200c06756c..bc7821bfcf304bc7c41d50d0bf1606e33ac114bc 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -18,8 +18,8 @@ //! Substrate chain configurations. -use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; -use grandpa_primitives::AuthorityId as GrandpaId; +use polkadot_sdk::*; + use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, Block, MaxNominations, SessionKeys, StakerStatus, }; @@ -30,6 +30,8 @@ use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; +use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; +use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; use sp_mixnet::types::AuthorityId as MixnetId; use sp_runtime::{ @@ -62,7 +64,7 @@ pub struct Extensions { } /// Specialized `ChainSpec`. -pub type ChainSpec = sc_service::GenericChainSpec; +pub type ChainSpec = sc_service::GenericChainSpec; /// Flaming Fir testnet generator pub fn flaming_fir_config() -> Result { ChainSpec::from_json_bytes(&include_bytes!("../res/flaming-fir.json")[..]) diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index 1d1af6e03e9e6eb57055b416038ee97ba95f3e3a..c0dcacb2e4b451990ea08c0c29fd1f92f89b3eaa 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -16,6 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use polkadot_sdk::*; + /// An overarching CLI command definition. 
#[derive(Debug, clap::Parser)] pub struct Cli { diff --git a/substrate/bin/node/cli/src/command.rs b/substrate/bin/node/cli/src/command.rs index d869b77e9122859550232136d3907e2f33a4eda3..51fbf0904cf8c303a6af3bed0c24587952723cd0 100644 --- a/substrate/bin/node/cli/src/command.rs +++ b/substrate/bin/node/cli/src/command.rs @@ -16,6 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use polkadot_sdk::*; + use super::benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder}; use crate::{ chain_spec, service, @@ -215,7 +217,7 @@ pub fn run() -> Result<()> { new_partial(&config, None)?; let aux_revert = Box::new(|client: Arc, backend, blocks| { sc_consensus_babe::revert(client.clone(), backend, blocks)?; - grandpa::revert(client, blocks)?; + sc_consensus_grandpa::revert(client, blocks)?; Ok(()) }); Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 5dc1193daf8d67f6f9067eb42a161dab9242efed..e57ca04f3b743ff3fab2f492e90a2d92cfdf8fd5 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -20,6 +20,11 @@ //! Service implementation. Specialized wrapper over substrate service. +use polkadot_sdk::{ + sc_consensus_beefy as beefy, sc_consensus_grandpa as grandpa, + sp_consensus_beefy as beefy_primitives, *, +}; + use crate::Cli; use codec::Encode; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; @@ -65,8 +70,13 @@ type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; type FullGrandpaBlockImport = grandpa::GrandpaBlockImport; -type FullBeefyBlockImport = - beefy::import::BeefyBlockImport; +type FullBeefyBlockImport = beefy::import::BeefyBlockImport< + Block, + FullBackend, + FullClient, + InnerBlockImport, + beefy_primitives::ecdsa_crypto::AuthorityId, +>; /// The transaction pool type definition. 
pub type TransactionPool = sc_transaction_pool::FullPool<Block, FullClient>; @@ -126,6 +136,7 @@ pub fn create_extrinsic( kitchensink_runtime::Runtime, >::from(tip, None), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), ); let raw_payload = kitchensink_runtime::SignedPayload::from_raw( @@ -140,6 +151,7 @@ pub fn create_extrinsic( (), (), (), + None, ), ); let signature = raw_payload.using_encoded(|e| sender.sign(e)); @@ -176,7 +188,7 @@ pub fn new_partial( >, grandpa::LinkHalf<Block, FullClient, FullSelectChain>, sc_consensus_babe::BabeLink<Block>, - beefy::BeefyVoterLinks<Block>, + beefy::BeefyVoterLinks<Block, beefy_primitives::ecdsa_crypto::AuthorityId>, ), grandpa::SharedVoterState, Option<Telemetry>, @@ -324,7 +336,7 @@ pub fn new_partial( subscription_executor: subscription_executor.clone(), finality_provider: finality_proof_provider.clone(), }, - beefy: node_rpc::BeefyDeps { + beefy: node_rpc::BeefyDeps::<beefy_primitives::ecdsa_crypto::AuthorityId> { beefy_finality_proof_stream: beefy_rpc_links .from_voter_justif_stream .clone(), @@ -668,7 +680,7 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>( let beefy_params = beefy::BeefyParams { client: client.clone(), backend: backend.clone(), - payload_provider: beefy_primitives::mmr::MmrRootProvider::new(client.clone()), + payload_provider: sp_consensus_beefy::mmr::MmrRootProvider::new(client.clone()), runtime: client.clone(), key_store: keystore.clone(), network_params, @@ -679,7 +691,7 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>( is_authority: role.is_authority(), }; - let beefy_gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); + let beefy_gadget = beefy::start_beefy_gadget::<_, _, _, _, _, _, _, _>(beefy_params); // BEEFY is part of consensus, if it fails we'll bring the node down with it to make sure it // is noticed. task_manager @@ -842,6 +854,7 @@ mod tests { Address, BalancesCall, RuntimeCall, UncheckedExtrinsic, }; use node_primitives::{Block, DigestItem, Signature}; + use polkadot_sdk::*; use sc_client_api::BlockBackend; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_consensus_babe::{BabeIntermediate, CompatibleDigestItem, INTERMEDIATE_KEY}; @@ -1041,6 +1054,7 @@ mod tests { let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), ); + let metadata_hash = frame_metadata_hash_extension::CheckMetadataHash::new(false); let extra = ( check_non_zero_sender, check_spec_version, @@ -1050,11 +1064,22 @@ mod tests { check_nonce, check_weight, tx_payment, + metadata_hash, ); let raw_payload = SignedPayload::from_raw( function, extra, - ((), spec_version, transaction_version, genesis_hash, genesis_hash, (), (), ()), + ( + (), + spec_version, + transaction_version, + genesis_hash, + genesis_hash, + (), + (), + (), + None, + ), ); let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); let (function, extra, _) = raw_payload.deconstruct(); diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index a9eea84d926000fafce766d3aa2889919ef5c376..b1f737ce399b32de652abd8c26bad1811727fcff 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -22,6 +22,7 @@ use frame_support::{ weights::Weight, }; use frame_system::{self, AccountInfo, EventRecord, Phase}; +use polkadot_sdk::*; use sp_core::{storage::well_known_keys, traits::Externalities}; use sp_runtime::{ traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult,
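The `CheckMetadataHash` signed extension threaded through `create_extrinsic` and the tests above is also why every hand-built payload tuple gains a trailing `None`: the extension contributes an optional 32-byte metadata hash to the additionally-signed data, and `new(false)` disables the check. An illustrative sketch of that relationship (not the extension's real internals):

```rust
// When the check is enabled, signers commit to the runtime's metadata hash;
// when disabled (`CheckMetadataHash::new(false)`), the additionally signed
// component is `None`, matching the payload tuples in this diff.
fn implicit_metadata_hash(enabled: bool, hash: [u8; 32]) -> Option<[u8; 32]> {
    if enabled {
        Some(hash)
    } else {
        None
    }
}
```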
diff --git a/substrate/bin/node/cli/tests/common.rs b/substrate/bin/node/cli/tests/common.rs index 2d74cdd5a0418aa7f93d7ebadfe3598036fb38ab..95583395f73403959f20162c543617a84d2741fa 100644 --- a/substrate/bin/node/cli/tests/common.rs +++ b/substrate/bin/node/cli/tests/common.rs @@ -18,6 +18,7 @@ use codec::{Decode, Encode}; use frame_support::Hashable; use frame_system::offchain::AppCrypto; +use polkadot_sdk::*; use sc_executor::error::Result; use sp_consensus_babe::{ digests::{PreDigest, SecondaryPlainPreDigest}, @@ -48,7 +49,7 @@ pub const TEST_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"test"); pub mod sr25519 { mod app_sr25519 { use super::super::TEST_KEY_TYPE_ID; - use sp_application_crypto::{app_crypto, sr25519}; + use polkadot_sdk::sp_application_crypto::{app_crypto, sr25519}; app_crypto!(sr25519, TEST_KEY_TYPE_ID); } @@ -83,7 +84,7 @@ pub const TRANSACTION_VERSION: u32 = kitchensink_runtime::VERSION.transaction_ve pub type TestExternalities = CoreTestExternalities; pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { - node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) + node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH, None) } pub fn default_transfer_call() -> pallet_balances::Call { diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 69c96bf63a6d8d7b7c24746cdf5c3c48b526c12e..9f82338b4fb03bbe36d09894add142027e419f7e 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -28,6 +28,7 @@ use kitchensink_runtime::{ }; use node_primitives::Balance; use node_testing::keyring::*; +use polkadot_sdk::*; use sp_runtime::{traits::One, Perbill}; pub mod common; diff --git a/substrate/bin/node/cli/tests/submit_transaction.rs b/substrate/bin/node/cli/tests/submit_transaction.rs index 5cbb0103d471b96902bb341bcd796e6cf09eac76..18826e7e90a784e2b644b88024a8d83e3e15f3c1 100644 --- a/substrate/bin/node/cli/tests/submit_transaction.rs +++ b/substrate/bin/node/cli/tests/submit_transaction.rs @@ -18,6 +18,7 @@ use codec::Decode; use frame_system::offchain::{SendSignedTransaction, Signer, SubmitTransaction}; use kitchensink_runtime::{Executive, Indices, Runtime, UncheckedExtrinsic}; +use polkadot_sdk::*; use sp_application_crypto::AppCrypto; use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt}; use sp_keyring::sr25519::Keyring::Alice; diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 8453aa3cdeb18715afede5d93fb52f3c6b39be7e..5e4488903bf45fa32dd1f219bf9cf5766522e5bd 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } thiserror = { workspace = true } sc-cli = { path = "../../../client/cli" } sc-client-api = { path = "../../../client/api" } diff --git a/substrate/bin/node/rpc/Cargo.toml b/substrate/bin/node/rpc/Cargo.toml index 894dbf0da85ca56d412087adf01fa12c7983ae7a..6ae80eb578596490753d903d253c01af2660ef4f 100644 --- a/substrate/bin/node/rpc/Cargo.toml +++ b/substrate/bin/node/rpc/Cargo.toml @@ -26,6 +26,7 @@ sc-consensus-babe = { path = "../../../client/consensus/babe" } sc-consensus-babe-rpc = { path = "../../../client/consensus/babe/rpc" } sc-consensus-beefy = { path = "../../../client/consensus/beefy" } sc-consensus-beefy-rpc = { path = "../../../client/consensus/beefy/rpc" } +sp-consensus-beefy =
{ path = "../../../primitives/consensus/beefy" } sc-consensus-grandpa = { path = "../../../client/consensus/grandpa" } sc-consensus-grandpa-rpc = { path = "../../../client/consensus/grandpa/rpc" } sc-mixnet = { path = "../../../client/mixnet" } @@ -41,6 +42,7 @@ sp-consensus = { path = "../../../primitives/consensus/common" } sp-consensus-babe = { path = "../../../primitives/consensus/babe" } sp-keystore = { path = "../../../primitives/keystore" } sp-runtime = { path = "../../../primitives/runtime" } +sp-application-crypto = { path = "../../../primitives/application-crypto" } sp-statement-store = { path = "../../../primitives/statement-store" } substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } substrate-state-trie-migration-rpc = { path = "../../../utils/frame/rpc/state-trie-migration-rpc" } diff --git a/substrate/bin/node/rpc/src/lib.rs b/substrate/bin/node/rpc/src/lib.rs index 4646524a25babfd7cb1276326d4704abdcc65622..52cd7f9561d2a9e8287e85e1fdc7dc80f4bf5a3f 100644 --- a/substrate/bin/node/rpc/src/lib.rs +++ b/substrate/bin/node/rpc/src/lib.rs @@ -47,10 +47,12 @@ pub use sc_rpc::SubscriptionTaskExecutor; pub use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; +use sp_application_crypto::RuntimeAppPublic; use sp_block_builder::BlockBuilder; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; +use sp_consensus_beefy::AuthorityIdBound; use sp_keystore::KeystorePtr; /// Extra dependencies for BABE. @@ -76,9 +78,9 @@ pub struct GrandpaDeps<B> { } /// Dependencies for BEEFY -pub struct BeefyDeps { +pub struct BeefyDeps<AuthorityId: AuthorityIdBound> { /// Receives notifications about finality proof events from BEEFY. - pub beefy_finality_proof_stream: BeefyVersionedFinalityProofStream<Block>, + pub beefy_finality_proof_stream: BeefyVersionedFinalityProofStream<Block, AuthorityId>, /// Receives notifications about best block events from BEEFY. pub beefy_best_block_stream: BeefyBestBlockStream<Block>, /// Executor to drive the subscription manager in the BEEFY RPC handler. @@ -86,7 +88,7 @@ pub struct BeefyDeps { } /// Full client dependencies. -pub struct FullDeps<C, P, SC, B> { +pub struct FullDeps<C, P, SC, B, AuthorityId: AuthorityIdBound> { /// The client instance to use. pub client: Arc<C>, /// Transaction pool instance. @@ -102,7 +104,7 @@ pub struct FullDeps<C, P, SC, B> { /// GRANDPA specific dependencies. pub grandpa: GrandpaDeps<B>, /// BEEFY specific dependencies. - pub beefy: BeefyDeps, + pub beefy: BeefyDeps<AuthorityId>, /// Shared statement store reference. pub statement_store: Arc<dyn sp_statement_store::StatementStore>, /// The backend used by the node. @@ -112,7 +114,7 @@ pub struct FullDeps<C, P, SC, B> { } /// Instantiate all Full RPC extensions.
-pub fn create_full<C, P, SC, B>( +pub fn create_full<C, P, SC, B, AuthorityId>( FullDeps { client, pool, @@ -125,7 +127,7 @@ pub fn create_full<C, P, SC, B>( statement_store, backend, mixnet_api, - }: FullDeps<C, P, SC, B>, + }: FullDeps<C, P, SC, B, AuthorityId>, ) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>> where C: ProvideRuntimeApi<Block> @@ -145,6 +147,8 @@ where SC: SelectChain<Block> + 'static, B: sc_client_api::Backend<Block> + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend<HashingFor<Block>>, + AuthorityId: AuthorityIdBound, + <AuthorityId as RuntimeAppPublic>::Signature: Send + Sync, { use mmr_rpc::{Mmr, MmrApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; @@ -223,7 +227,7 @@ where } io.merge( - Beefy::<Block>::new( + Beefy::<Block, AuthorityId>::new( beefy.beefy_finality_proof_stream, beefy.beefy_best_block_stream, beefy.subscription_executor, diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 7d661e808013ec5abe0e4c3206ea10b9f8c6bb99..e8cc7b3482b66ef8e850871670430b23f2923396 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ] } @@ -31,412 +31,45 @@ serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } # pallet-asset-conversion: turn on "num-traits" feature primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } -# primitives -sp-authority-discovery = { path = "../../../primitives/authority-discovery", default-features = false, features = ["serde"] } -sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false, features = ["serde"] } -sp-consensus-beefy = { path = "../../../primitives/consensus/beefy", default-features = false } -sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } -sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false } +polkadot-sdk = { path = "../../../../umbrella", features = ["runtime", "tuples-96"], default-features = false } + +# shared code between runtime and node node-primitives = { path = "../primitives", default-features = false } -sp-mixnet = { path = "../../../primitives/mixnet", default-features = false } -sp-offchain = { path = "../../../primitives/offchain", default-features = false } -sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"] } -sp-std = { path = "../../../primitives/std", default-features = false } -sp-api = { path = "../../../primitives/api", default-features = false } -sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } -sp-staking = { path = "../../../primitives/staking", default-features = false, features = ["serde"] } -sp-storage = { path = "../../../primitives/storage", default-features = false } -sp-session = { path = "../../../primitives/session", default-features = false } -sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } -sp-statement-store = { path = "../../../primitives/statement-store", default-features = false,
features = ["serde"] } -sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } -sp-io = { path = "../../../primitives/io", default-features = false } -# frame dependencies -frame-executive = { path = "../../../frame/executive", default-features = false } -frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false } -frame-benchmarking-pallet-pov = { path = "../../../frame/benchmarking/pov", default-features = false } -frame-support = { path = "../../../frame/support", default-features = false, features = ["experimental", "tuples-96"] } -frame-system = { path = "../../../frame/system", default-features = false } -frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } -frame-election-provider-support = { path = "../../../frame/election-provider-support", default-features = false } -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false } -frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } -pallet-alliance = { path = "../../../frame/alliance", default-features = false } -pallet-asset-conversion = { path = "../../../frame/asset-conversion", default-features = false } -pallet-asset-conversion-ops = { path = "../../../frame/asset-conversion/ops", default-features = false } -pallet-asset-rate = { path = "../../../frame/asset-rate", default-features = false } -pallet-assets = { path = "../../../frame/assets", default-features = false } -pallet-authority-discovery = { path = "../../../frame/authority-discovery", default-features = false } -pallet-authorship = { path = "../../../frame/authorship", default-features = false } -pallet-babe = { path = "../../../frame/babe", default-features = false } -pallet-bags-list = { path = "../../../frame/bags-list", default-features = false } -pallet-balances = { path = "../../../frame/balances", default-features = false } -pallet-beefy = { path = "../../../frame/beefy", default-features = false } -pallet-beefy-mmr = { path = "../../../frame/beefy-mmr", default-features = false } -pallet-bounties = { path = "../../../frame/bounties", default-features = false } -pallet-broker = { path = "../../../frame/broker", default-features = false } -pallet-child-bounties = { path = "../../../frame/child-bounties", default-features = false } -pallet-collective = { path = "../../../frame/collective", default-features = false } -pallet-contracts = { path = "../../../frame/contracts", default-features = false } -pallet-conviction-voting = { path = "../../../frame/conviction-voting", default-features = false } -pallet-core-fellowship = { path = "../../../frame/core-fellowship", default-features = false } -pallet-democracy = { path = "../../../frame/democracy", default-features = false } -pallet-election-provider-multi-phase = { path = "../../../frame/election-provider-multi-phase", default-features = false } -pallet-election-provider-support-benchmarking = { path = "../../../frame/election-provider-support/benchmarking", default-features = false, optional = true } -pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", default-features = false } -pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false } -pallet-fast-unstake = { path = "../../../frame/fast-unstake", default-features = false } -pallet-migrations = { path = "../../../frame/migrations", default-features = false } +# Example 
pallets that are not published: pallet-example-mbm = { path = "../../../frame/examples/multi-block-migrations", default-features = false } -pallet-nis = { path = "../../../frame/nis", default-features = false } -pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } -pallet-im-online = { path = "../../../frame/im-online", default-features = false } -pallet-indices = { path = "../../../frame/indices", default-features = false } -pallet-identity = { path = "../../../frame/identity", default-features = false } -pallet-lottery = { path = "../../../frame/lottery", default-features = false } -pallet-membership = { path = "../../../frame/membership", default-features = false } -pallet-message-queue = { path = "../../../frame/message-queue", default-features = false } -pallet-mixnet = { path = "../../../frame/mixnet", default-features = false } -pallet-mmr = { path = "../../../frame/merkle-mountain-range", default-features = false } -pallet-multisig = { path = "../../../frame/multisig", default-features = false } -pallet-nfts = { path = "../../../frame/nfts", default-features = false } -pallet-nfts-runtime-api = { path = "../../../frame/nfts/runtime-api", default-features = false } -pallet-nft-fractionalization = { path = "../../../frame/nft-fractionalization", default-features = false } -pallet-nomination-pools = { path = "../../../frame/nomination-pools", default-features = false } -pallet-nomination-pools-benchmarking = { path = "../../../frame/nomination-pools/benchmarking", default-features = false, optional = true } -pallet-nomination-pools-runtime-api = { path = "../../../frame/nomination-pools/runtime-api", default-features = false } -pallet-offences = { path = "../../../frame/offences", default-features = false } -pallet-offences-benchmarking = { path = "../../../frame/offences/benchmarking", default-features = false, optional = true } -pallet-glutton = { path = "../../../frame/glutton", default-features = false } -pallet-preimage = { path = "../../../frame/preimage", default-features = false } -pallet-proxy = { path = "../../../frame/proxy", default-features = false } -pallet-insecure-randomness-collective-flip = { path = "../../../frame/insecure-randomness-collective-flip", default-features = false } -pallet-ranked-collective = { path = "../../../frame/ranked-collective", default-features = false } -pallet-recovery = { path = "../../../frame/recovery", default-features = false } -pallet-referenda = { path = "../../../frame/referenda", default-features = false } -pallet-remark = { path = "../../../frame/remark", default-features = false } -pallet-root-testing = { path = "../../../frame/root-testing", default-features = false } -pallet-salary = { path = "../../../frame/salary", default-features = false } -pallet-session = { path = "../../../frame/session", default-features = false, features = ["historical"] } -pallet-session-benchmarking = { path = "../../../frame/session/benchmarking", default-features = false, optional = true } -pallet-staking = { path = "../../../frame/staking", default-features = false } -pallet-staking-reward-curve = { path = "../../../frame/staking/reward-curve", default-features = false } -pallet-staking-runtime-api = { path = "../../../frame/staking/runtime-api", default-features = false } -pallet-stake-tracker = { path = "../../../frame/staking/stake-tracker", default-features = false } -pallet-state-trie-migration = { path = "../../../frame/state-trie-migration", default-features = false } -pallet-statement = { path = 
"../../../frame/statement", default-features = false } -pallet-scheduler = { path = "../../../frame/scheduler", default-features = false } -pallet-society = { path = "../../../frame/society", default-features = false } -pallet-sudo = { path = "../../../frame/sudo", default-features = false } -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } -pallet-tips = { path = "../../../frame/tips", default-features = false } -pallet-treasury = { path = "../../../frame/treasury", default-features = false } -pallet-utility = { path = "../../../frame/utility", default-features = false } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } -pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment", default-features = false } -pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment", default-features = false } -pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false } -pallet-transaction-storage = { path = "../../../frame/transaction-storage", default-features = false } -pallet-uniques = { path = "../../../frame/uniques", default-features = false } -pallet-vesting = { path = "../../../frame/vesting", default-features = false } -pallet-whitelist = { path = "../../../frame/whitelist", default-features = false } -pallet-tx-pause = { path = "../../../frame/tx-pause", default-features = false } -pallet-safe-mode = { path = "../../../frame/safe-mode", default-features = false } -pallet-parameters = { path = "../../../frame/parameters", default-features = false } +pallet-example-tasks = { path = "../../../frame/examples/tasks", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] default = ["std"] -with-tracing = ["frame-executive/with-tracing"] +with-tracing = ["polkadot-sdk/with-tracing"] std = [ "codec/std", - "frame-benchmarking-pallet-pov/std", - "frame-benchmarking/std", - "frame-election-provider-support/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", "log/std", "node-primitives/std", - "pallet-alliance/std", - "pallet-asset-conversion-ops/std", - "pallet-asset-conversion-tx-payment/std", - "pallet-asset-conversion/std", - "pallet-asset-rate/std", - "pallet-asset-tx-payment/std", - "pallet-assets/std", - "pallet-authority-discovery/std", - "pallet-authorship/std", - "pallet-babe/std", - "pallet-bags-list/std", - "pallet-balances/std", - "pallet-beefy-mmr/std", - "pallet-beefy/std", - "pallet-bounties/std", - "pallet-broker/std", - "pallet-child-bounties/std", - "pallet-collective/std", - "pallet-contracts/std", - "pallet-conviction-voting/std", - "pallet-core-fellowship/std", - "pallet-democracy/std", - "pallet-election-provider-multi-phase/std", - "pallet-election-provider-support-benchmarking?/std", - "pallet-elections-phragmen/std", "pallet-example-mbm/std", "pallet-example-tasks/std", - "pallet-fast-unstake/std", - "pallet-glutton/std", - "pallet-grandpa/std", - "pallet-identity/std", - "pallet-im-online/std", - "pallet-indices/std", - "pallet-insecure-randomness-collective-flip/std", - "pallet-lottery/std", - 
"pallet-membership/std", - "pallet-message-queue/std", - "pallet-migrations/std", - "pallet-mixnet/std", - "pallet-mmr/std", - "pallet-multisig/std", - "pallet-nft-fractionalization/std", - "pallet-nfts-runtime-api/std", - "pallet-nfts/std", - "pallet-nis/std", - "pallet-nomination-pools-benchmarking?/std", - "pallet-nomination-pools-runtime-api/std", - "pallet-nomination-pools/std", - "pallet-offences-benchmarking?/std", - "pallet-offences/std", - "pallet-parameters/std", - "pallet-preimage/std", - "pallet-proxy/std", - "pallet-ranked-collective/std", - "pallet-recovery/std", - "pallet-referenda/std", - "pallet-remark/std", - "pallet-root-testing/std", - "pallet-safe-mode/std", - "pallet-salary/std", - "pallet-scheduler/std", - "pallet-session-benchmarking?/std", - "pallet-session/std", - "pallet-skip-feeless-payment/std", - "pallet-society/std", - "pallet-stake-tracker/std", - "pallet-staking-runtime-api/std", - "pallet-staking/std", - "pallet-state-trie-migration/std", - "pallet-statement/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-tips/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "pallet-transaction-storage/std", - "pallet-treasury/std", - "pallet-tx-pause/std", - "pallet-uniques/std", - "pallet-utility/std", - "pallet-vesting/std", - "pallet-whitelist/std", + "polkadot-sdk/std", "primitive-types/std", "scale-info/std", "serde_json/std", - "sp-api/std", - "sp-authority-discovery/std", - "sp-block-builder/std", - "sp-consensus-babe/std", - "sp-consensus-beefy/std", - "sp-consensus-grandpa/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-io/std", - "sp-mixnet/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-staking/std", - "sp-statement-store/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", "substrate-wasm-builder", ] runtime-benchmarks = [ - "frame-benchmarking-pallet-pov/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-election-provider-support/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-alliance/runtime-benchmarks", - "pallet-asset-conversion-ops/runtime-benchmarks", - "pallet-asset-conversion/runtime-benchmarks", - "pallet-asset-rate/runtime-benchmarks", - "pallet-asset-tx-payment/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-babe/runtime-benchmarks", - "pallet-bags-list/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-bounties/runtime-benchmarks", - "pallet-broker/runtime-benchmarks", - "pallet-child-bounties/runtime-benchmarks", - "pallet-collective/runtime-benchmarks", - "pallet-contracts/runtime-benchmarks", - "pallet-conviction-voting/runtime-benchmarks", - "pallet-core-fellowship/runtime-benchmarks", - "pallet-democracy/runtime-benchmarks", - "pallet-election-provider-multi-phase/runtime-benchmarks", - "pallet-election-provider-support-benchmarking/runtime-benchmarks", - "pallet-elections-phragmen/runtime-benchmarks", "pallet-example-mbm/runtime-benchmarks", "pallet-example-tasks/runtime-benchmarks", - "pallet-fast-unstake/runtime-benchmarks", - "pallet-glutton/runtime-benchmarks", - "pallet-grandpa/runtime-benchmarks", - "pallet-identity/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", - "pallet-indices/runtime-benchmarks", - "pallet-lottery/runtime-benchmarks", - "pallet-membership/runtime-benchmarks", - 
"pallet-message-queue/runtime-benchmarks", - "pallet-migrations/runtime-benchmarks", - "pallet-mixnet/runtime-benchmarks", - "pallet-mmr/runtime-benchmarks", - "pallet-multisig/runtime-benchmarks", - "pallet-nft-fractionalization/runtime-benchmarks", - "pallet-nfts/runtime-benchmarks", - "pallet-nis/runtime-benchmarks", - "pallet-nomination-pools-benchmarking/runtime-benchmarks", - "pallet-nomination-pools/runtime-benchmarks", - "pallet-offences-benchmarking/runtime-benchmarks", - "pallet-offences/runtime-benchmarks", - "pallet-parameters/runtime-benchmarks", - "pallet-preimage/runtime-benchmarks", - "pallet-proxy/runtime-benchmarks", - "pallet-ranked-collective/runtime-benchmarks", - "pallet-recovery/runtime-benchmarks", - "pallet-referenda/runtime-benchmarks", - "pallet-remark/runtime-benchmarks", - "pallet-safe-mode/runtime-benchmarks", - "pallet-salary/runtime-benchmarks", - "pallet-scheduler/runtime-benchmarks", - "pallet-session-benchmarking/runtime-benchmarks", - "pallet-skip-feeless-payment/runtime-benchmarks", - "pallet-society/runtime-benchmarks", - "pallet-stake-tracker/runtime-benchmarks", - "pallet-staking/runtime-benchmarks", - "pallet-state-trie-migration/runtime-benchmarks", - "pallet-sudo/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-tips/runtime-benchmarks", - "pallet-transaction-storage/runtime-benchmarks", - "pallet-treasury/runtime-benchmarks", - "pallet-tx-pause/runtime-benchmarks", - "pallet-uniques/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "pallet-vesting/runtime-benchmarks", - "pallet-whitelist/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "sp-staking/runtime-benchmarks", + "polkadot-sdk/runtime-benchmarks", ] try-runtime = [ - "frame-benchmarking-pallet-pov/try-runtime", - "frame-election-provider-support/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-alliance/try-runtime", - "pallet-asset-conversion-ops/try-runtime", - "pallet-asset-conversion-tx-payment/try-runtime", - "pallet-asset-conversion/try-runtime", - "pallet-asset-rate/try-runtime", - "pallet-asset-tx-payment/try-runtime", - "pallet-assets/try-runtime", - "pallet-authority-discovery/try-runtime", - "pallet-authorship/try-runtime", - "pallet-babe/try-runtime", - "pallet-bags-list/try-runtime", - "pallet-balances/try-runtime", - "pallet-beefy-mmr/try-runtime", - "pallet-beefy/try-runtime", - "pallet-bounties/try-runtime", - "pallet-broker/try-runtime", - "pallet-child-bounties/try-runtime", - "pallet-collective/try-runtime", - "pallet-contracts/try-runtime", - "pallet-conviction-voting/try-runtime", - "pallet-core-fellowship/try-runtime", - "pallet-democracy/try-runtime", - "pallet-election-provider-multi-phase/try-runtime", - "pallet-elections-phragmen/try-runtime", "pallet-example-mbm/try-runtime", "pallet-example-tasks/try-runtime", - "pallet-fast-unstake/try-runtime", - "pallet-glutton/try-runtime", - "pallet-grandpa/try-runtime", - "pallet-identity/try-runtime", - "pallet-im-online/try-runtime", - "pallet-indices/try-runtime", - "pallet-insecure-randomness-collective-flip/try-runtime", - "pallet-lottery/try-runtime", - "pallet-membership/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-migrations/try-runtime", - "pallet-mixnet/try-runtime", - "pallet-mmr/try-runtime", - "pallet-multisig/try-runtime", - "pallet-nft-fractionalization/try-runtime", - "pallet-nfts/try-runtime", - "pallet-nis/try-runtime", - 
"pallet-nomination-pools/try-runtime", - "pallet-offences/try-runtime", - "pallet-parameters/try-runtime", - "pallet-preimage/try-runtime", - "pallet-proxy/try-runtime", - "pallet-ranked-collective/try-runtime", - "pallet-recovery/try-runtime", - "pallet-referenda/try-runtime", - "pallet-remark/try-runtime", - "pallet-root-testing/try-runtime", - "pallet-safe-mode/try-runtime", - "pallet-salary/try-runtime", - "pallet-scheduler/try-runtime", - "pallet-session/try-runtime", - "pallet-skip-feeless-payment/try-runtime", - "pallet-society/try-runtime", - "pallet-stake-tracker/try-runtime", - "pallet-staking/try-runtime", - "pallet-state-trie-migration/try-runtime", - "pallet-statement/try-runtime", - "pallet-sudo/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-tips/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-transaction-storage/try-runtime", - "pallet-treasury/try-runtime", - "pallet-tx-pause/try-runtime", - "pallet-uniques/try-runtime", - "pallet-utility/try-runtime", - "pallet-vesting/try-runtime", - "pallet-whitelist/try-runtime", - "sp-runtime/try-runtime", + "polkadot-sdk/try-runtime", ] experimental = [ - "frame-support/experimental", - "frame-system/experimental", "pallet-example-tasks/experimental", ] + +metadata-hash = ["substrate-wasm-builder/metadata-hash"] diff --git a/substrate/bin/node/runtime/build.rs b/substrate/bin/node/runtime/build.rs index b7676a70dfe843e2cd47fc600ef599bbe7bff591..0e11c579f09ee04918c5d53d4f8779044b57ef30 100644 --- a/substrate/bin/node/runtime/build.rs +++ b/substrate/bin/node/runtime/build.rs @@ -15,13 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. +#[cfg(all(feature = "std", not(feature = "metadata-hash")))] fn main() { - #[cfg(feature = "std")] - { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build(); - } + substrate_wasm_builder::WasmBuilder::build_using_defaults() } + +#[cfg(all(feature = "std", feature = "metadata-hash"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("Test", 14) + .build() +} + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/substrate/bin/node/runtime/src/assets_api.rs b/substrate/bin/node/runtime/src/assets_api.rs index 792ed7c6576bd85485eae9abe84ea17d5749cd13..38ec56507113f561721887689a5f4540121e2403 100644 --- a/substrate/bin/node/runtime/src/assets_api.rs +++ b/substrate/bin/node/runtime/src/assets_api.rs @@ -18,6 +18,8 @@ //! Runtime API definition for assets. +use polkadot_sdk::*; + use codec::Codec; use sp_std::vec::Vec; diff --git a/substrate/bin/node/runtime/src/constants.rs b/substrate/bin/node/runtime/src/constants.rs index e4fafbf0fa4790121787788cef8aa2547b226cf3..d13dca48d1f125acdc38217b677279f259a35aa5 100644 --- a/substrate/bin/node/runtime/src/constants.rs +++ b/substrate/bin/node/runtime/src/constants.rs @@ -50,7 +50,7 @@ pub mod time { /// always be assigned, in which case `MILLISECS_PER_BLOCK` and /// `SLOT_DURATION` should have the same value. /// - /// + /// pub const MILLISECS_PER_BLOCK: Moment = 3000; pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; diff --git a/substrate/bin/node/runtime/src/impls.rs b/substrate/bin/node/runtime/src/impls.rs index 34f043b33a4edfe2d8cdbe154896e937826110ca..dbe562857c99fcdd4730edd0aa5d985ab2e6dd51 100644 --- a/substrate/bin/node/runtime/src/impls.rs +++ b/substrate/bin/node/runtime/src/impls.rs @@ -17,6 +17,8 @@ //! 
Some configurable implementations as associated type for the substrate runtime. +use polkadot_sdk::*; + use frame_support::{ pallet_prelude::*, traits::{ @@ -118,6 +120,7 @@ mod multiplier_tests { weights::{Weight, WeightToFee}, }; use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; + use polkadot_sdk::*; use sp_runtime::{ assert_eq_error_rate, traits::{Convert, One, Zero}, diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 7c6c29f2614a52dbc9fb7ab984d9aa45762ba2b3..1c461b726e40ae9f73846cfa960b7817e73de82f 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -22,6 +22,8 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limits. #![recursion_limit = "1024"] +use polkadot_sdk::*; + use codec::{Decode, Encode, MaxEncodedLen}; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, @@ -931,7 +933,7 @@ impl pallet_nomination_pools::Config for Runtime { type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakeAdapter = pallet_nomination_pools::adapter::TransferStake; type PostUnbondingPoolsWindow = PostUnbondPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; @@ -1362,6 +1364,9 @@ impl pallet_tips::Config for Runtime { } parameter_types! { + pub const DepositPerItem: Balance = deposit(1, 0); + pub const DepositPerByte: Balance = deposit(0, 1); + pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); pub Schedule: pallet_contracts::Schedule = Default::default(); pub CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(30); } @@ -1379,9 +1384,9 @@ impl pallet_contracts::Config for Runtime { /// change because that would break already deployed contracts. The `Call` structure itself /// is not allowed to change the indices of existing pallets, too. type CallFilter = Nothing; - type DepositPerItem = dynamic_params::contracts::DepositPerItem; - type DepositPerByte = dynamic_params::contracts::DepositPerByte; - type DefaultDepositLimit = dynamic_params::contracts::DefaultDepositLimit; + type DepositPerItem = DepositPerItem; + type DepositPerByte = DepositPerByte; + type DefaultDepositLimit = DefaultDepositLimit; type CallStack = [pallet_contracts::Frame; 5]; type WeightPrice = pallet_transaction_payment::Pallet; type WeightInfo = pallet_contracts::weights::SubstrateWeight; @@ -1455,6 +1460,7 @@ where tip, None, ), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -1890,6 +1896,7 @@ impl pallet_core_fellowship::Config for Runtime { type ApproveOrigin = EnsureRootWithSuccess>; type PromoteOrigin = EnsureRootWithSuccess>; type EvidenceSize = ConstU32<16_384>; + type MaxRank = ConstU32<9>; } parameter_types! { @@ -2159,7 +2166,7 @@ impl pallet_broker::Config for Runtime { type WeightInfo = (); type PalletId = BrokerPalletId; type AdminOrigin = EnsureRoot; - type PriceAdapter = pallet_broker::Linear; + type PriceAdapter = pallet_broker::CenterTargetPrice; } parameter_types! 
{ @@ -2202,19 +2209,6 @@ pub mod dynamic_params { #[codec(index = 1)] pub static ByteDeposit: Balance = 1 * CENTS; } - - #[dynamic_pallet_params] - #[codec(index = 1)] - pub mod contracts { - #[codec(index = 0)] - pub static DepositPerItem: Balance = deposit(1, 0); - - #[codec(index = 1)] - pub static DepositPerByte: Balance = deposit(0, 1); - - #[codec(index = 2)] - pub static DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); - } } #[cfg(feature = "runtime-benchmarks")] @@ -2240,10 +2234,6 @@ impl EnsureOriginWithArg for DynamicParamet frame_system::ensure_root(origin.clone()).map_err(|_| origin)?; return Ok(()) }, - RuntimeParametersKey::Contracts(_) => { - frame_system::ensure_root(origin.clone()).map_err(|_| origin)?; - return Ok(()) - }, } } @@ -2262,6 +2252,8 @@ impl pallet_parameters::Config for Runtime { #[frame_support::runtime] mod runtime { + use super::*; + #[runtime::runtime] #[runtime::derive( RuntimeCall, @@ -2554,6 +2546,7 @@ pub type SignedExtra = ( Runtime, pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, >, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. @@ -2609,7 +2602,7 @@ impl pallet_beefy::Config for Runtime { /// MMR helper types. mod mmr { - use super::Runtime; + use super::*; pub use pallet_mmr::primitives::*; pub type Leaf = <::LeafData as LeafDataProvider>::LeafData; @@ -2619,7 +2612,7 @@ mod mmr { #[cfg(feature = "runtime-benchmarks")] mod benches { - frame_benchmarking::define_benchmarks!( + polkadot_sdk::frame_benchmarking::define_benchmarks!( [frame_benchmarking, BaselineBench::] [frame_benchmarking_pallet_pov, Pov] [pallet_alliance, Alliance] @@ -2809,6 +2802,22 @@ impl_runtime_apis! { fn balance_to_points(pool_id: pallet_nomination_pools::PoolId, new_funds: Balance) -> Balance { NominationPools::api_balance_to_points(pool_id, new_funds) } + + fn pool_pending_slash(pool_id: pallet_nomination_pools::PoolId) -> Balance { + NominationPools::api_pool_pending_slash(pool_id) + } + + fn member_pending_slash(member: AccountId) -> Balance { + NominationPools::api_member_pending_slash(member) + } + + fn pool_needs_delegate_migration(pool_id: pallet_nomination_pools::PoolId) -> bool { + NominationPools::api_pool_needs_delegate_migration(pool_id) + } + + fn member_needs_delegate_migration(member: AccountId) -> bool { + NominationPools::api_member_needs_delegate_migration(member) + } } impl pallet_staking_runtime_api::StakingApi for Runtime { @@ -3122,7 +3131,7 @@ impl_runtime_apis! { fn generate_proof( block_numbers: Vec, best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Mmr::generate_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( @@ -3136,7 +3145,7 @@ impl_runtime_apis! { ) } - fn verify_proof(leaves: Vec, proof: mmr::Proof) + fn verify_proof(leaves: Vec, proof: mmr::LeafProof) -> Result<(), mmr::Error> { let leaves = leaves.into_iter().map(|leaf| @@ -3149,7 +3158,7 @@ impl_runtime_apis! 
{ fn verify_proof_stateless( root: mmr::Hash, leaves: Vec, - proof: mmr::Proof + proof: mmr::LeafProof ) -> Result<(), mmr::Error> { let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); pallet_mmr::verify_leaves_proof::(root, nodes, proof) diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index fa3f90193ba5d11325bfb068fa69780495ec8c90..3ba3f07510e006458cf23b246e293af4e288c624 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -16,11 +16,12 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } fs_extra = "1" futures = "0.3.30" log = { workspace = true, default-features = true } tempfile = "3.1.0" +frame-metadata-hash-extension = { path = "../../../frame/metadata-hash-extension" } frame-system = { path = "../../../frame/system" } node-cli = { package = "staging-node-cli", path = "../cli" } node-primitives = { path = "../primitives" } diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index e5c2563905e9ea59901db1564f2b0f34f6bd4aec..007d314684cf17faeda6adb4842513ea57030fdc 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -571,6 +571,8 @@ impl BenchKeyring { tx_version, genesis_hash, genesis_hash, + // metadata_hash + None::<()>, ); let key = self.accounts.get(&signed).expect("Account id not found in keyring"); let signature = payload.using_encoded(|b| { diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index f712191bed695031275cfb11c5e22c8fa2a26f78..eab088d9100ef266f26b600a501e78881e557efe 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -82,6 +82,7 @@ pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { pallet_skip_feeless_payment::SkipCheckIfFeeless::from( pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), ) } @@ -91,11 +92,19 @@ pub fn sign( spec_version: u32, tx_version: u32, genesis_hash: [u8; 32], + metadata_hash: Option<[u8; 32]>, ) -> UncheckedExtrinsic { match xt.signed { Some((signed, extra)) => { - let payload = - (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); + let payload = ( + xt.function, + extra.clone(), + spec_version, + tx_version, + genesis_hash, + genesis_hash, + metadata_hash, + ); let key = AccountKeyring::from_account_id(&signed).unwrap(); let signature = payload diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index 5c8a3ab4e89a55e5e61c371a41878ab8457ec12b..de06bbb3fff698e7bb700b19499e4fce30715df8 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -1,13 +1,14 @@ [package] name = "staging-chain-spec-builder" -version = "3.0.0" +version = "1.6.0" authors.workspace = true edition.workspace = true build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository.workspace = true -publish = false +publish = true +description = "Utility for building chain-specification files for Substrate-based runtimes based on `sp-genesis-builder`" [lints] workspace = true @@ 
-25,6 +26,6 @@ crate-type = ["rlib"] [dependencies] clap = { version = "4.5.3", features = ["derive"] } log = { workspace = true, default-features = true } -sc-chain-spec = { path = "../../../client/chain-spec" } +sc-chain-spec = { path = "../../../client/chain-spec", features = ["clap"] } serde_json = { workspace = true, default-features = true } sp-tracing = { path = "../../../primitives/tracing" } diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 7982da76227aeb70cc4683cc1887440cb2c0ecc0..0f7c003fc8c2da2709076ee3c6f517d9a2ae8beb 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -120,7 +120,7 @@ use std::{fs, path::PathBuf}; use clap::{Parser, Subcommand}; -use sc_chain_spec::{GenericChainSpec, GenesisConfigBuilderRuntimeCaller}; +use sc_chain_spec::{ChainType, GenericChainSpec, GenesisConfigBuilderRuntimeCaller}; use serde_json::Value; /// A utility to easily create a chain spec definition. @@ -154,6 +154,9 @@ pub struct CreateCmd { /// The chain id. #[arg(long, short = 'i', default_value = "custom")] chain_id: String, + /// The chain type. + #[arg(value_enum, short = 't', default_value = "live")] + chain_type: ChainType, /// The path to runtime wasm blob. #[arg(long, short)] runtime_wasm_path: PathBuf, @@ -206,9 +209,14 @@ struct NamedPresetCmd { /// /// The code field of the chain spec will be updated with the runtime provided in the /// command line. This operation supports both plain and raw formats. +/// +/// This command does not update the chain-spec file in-place. The result of this command will be +/// stored in the file given as the `-c/--chain-spec-path` command-line argument. #[derive(Parser, Debug, Clone)] pub struct UpdateCodeCmd { /// Chain spec to be updated. + /// + /// Please note that the file will not be updated in-place. pub input_chain_spec: PathBuf, /// The path to new runtime wasm blob to be stored into chain-spec. pub runtime_wasm_path: PathBuf, @@ -256,10 +264,12 @@ pub fn generate_chain_spec_for_runtime(cmd: &CreateCmd) -> Result::builder(&code[..], Default::default()) .with_name(&cmd.chain_name[..]) .with_id(&cmd.chain_id[..]) - .with_chain_type(sc_chain_spec::ChainType::Live); + .with_chain_type(chain_type.clone()); let builder = match cmd.action { GenesisBuildAction::NamedPreset(NamedPresetCmd { ref preset_name }) => diff --git a/substrate/bin/utils/subkey/README.md b/substrate/bin/utils/subkey/README.md index fc1053e232d70d746cf315d203a51dfd09e8447c..5c6dda37edf681b0425fa5e2c2072fd511200587 100644 --- a/substrate/bin/utils/subkey/README.md +++ b/substrate/bin/utils/subkey/README.md @@ -74,7 +74,7 @@ The output above shows a **secret phrase** (also called **mnemonic phrase**) and **Private Key**). Those 2 secrets are the pieces of information you MUST keep safe and secret. All the other information below can be derived from those secrets. -The output above also show the **public key** and the **Account ID**. Those are the independent from the network where +The output above also shows the **public key** and the **Account ID**. Those are independent from the network where you will use the key.
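To make the derivation concrete, here is a minimal sketch using `sp-core` (the cryptography crate `subkey` builds on). It is an editor's illustration rather than `subkey`'s actual code path: it assumes `sp-core` with its default `std` features, and the phrase is the well-known development mnemonic, shown purely for demonstration.

```rust
use sp_core::{crypto::Ss58Codec, sr25519, Pair};

fn main() {
	// The well-known development phrase; never use it to hold real funds.
	let phrase = "bottom drive obey lake curtain smoke basket hold race lonely fit walk";

	// Recover the keypair from the mnemonic. A derivation password (the
	// `extra_secret` discussed below) would replace `None`.
	let (pair, _seed) = sr25519::Pair::from_phrase(phrase, None).expect("valid BIP39 phrase");

	// The public key (the Account ID, for sr25519 keys) does not depend on any network...
	println!("public key: {:?}", pair.public());

	// ...while the SS58 address wraps that same key with a network prefix.
	println!("address (generic prefix 42): {}", pair.public().to_ss58check());
}
```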
The **SS58 address** (or **Public Address**) of a new account is a representation of the public keys of an account for @@ -152,7 +152,7 @@ subkey inspect "soup lyrics media market way crouch elevator put moon useful que which recovers the account `5Fe4sqj2K4fRuzEGvToi4KATqZfiDU7TqynjXG6PZE2dxwyh` and not `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC` as we expected. The additional user-defined **password** -(`extra_secret` in our example) is now required to fully recover the account. Let's inspect the the previous mnemonic, +(`extra_secret` in our example) is now required to fully recover the account. Let's inspect the previous mnemonic, this time passing also the required `password` as shown below: ```bash diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index fb650c5b532f2cbf841dc65e5f7f1a3c35dae733..147ea2bfbf5df83716c4a7f1f5fb2ade0c41d3f8 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } fnv = "1.0.6" diff --git a/substrate/client/api/src/in_mem.rs b/substrate/client/api/src/in_mem.rs index b933ed1f17e01a8822b958ac6a064fe2f3503926..ba89aede9147ff0ebe16beb6fedf39389ee9c700 100644 --- a/substrate/client/api/src/in_mem.rs +++ b/substrate/client/api/src/in_mem.rs @@ -419,20 +419,6 @@ impl blockchain::Backend for Blockchain { Ok(self.storage.read().leaves.hashes()) } - fn displaced_leaves_after_finalizing( - &self, - block_number: NumberFor, - ) -> sp_blockchain::Result> { - Ok(self - .storage - .read() - .leaves - .displaced_by_finalize_height(block_number) - .leaves() - .cloned() - .collect::>()) - } - fn children(&self, _parent_hash: Block::Hash) -> sp_blockchain::Result> { unimplemented!() } diff --git a/substrate/client/api/src/leaves.rs b/substrate/client/api/src/leaves.rs index a8a988771e2fdf346dd0c50983d0e8b32c74bafc..e129de8bf3fad30a0bb76569957c93138c631db8 100644 --- a/substrate/client/api/src/leaves.rs +++ b/substrate/client/api/src/leaves.rs @@ -49,7 +49,7 @@ pub struct FinalizationOutcome { removed: BTreeMap, Vec>, } -impl FinalizationOutcome { +impl FinalizationOutcome { /// Merge with another. This should only be used for displaced items that /// are produced within one transaction of each other. pub fn merge(&mut self, mut other: Self) { @@ -63,6 +63,16 @@ impl FinalizationOutcome { pub fn leaves(&self) -> impl Iterator { self.removed.values().flatten() } + + /// Constructor + pub fn new(new_displaced: impl Iterator) -> Self { + let mut removed = BTreeMap::, Vec>::new(); + for (hash, number) in new_displaced { + removed.entry(Reverse(number)).or_default().push(hash); + } + + FinalizationOutcome { removed } + } } /// list of leaf hashes ordered by number (descending). @@ -151,39 +161,12 @@ where Some(RemoveOutcome { inserted, removed: LeafSetItem { hash, number } }) } - /// Note a block height finalized, displacing all leaves with number less than the finalized - /// block's. - /// - /// Although it would be more technically correct to also prune out leaves at the - /// same number as the finalized block, but with different hashes, the current behavior - /// is simpler and our assumptions about how finalization works means that those leaves - /// will be pruned soon afterwards anyway. 
- pub fn finalize_height(&mut self, number: N) -> FinalizationOutcome { - let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() } - } else { - number - N::one() - }; - - let below_boundary = self.storage.split_off(&Reverse(boundary)); - FinalizationOutcome { removed: below_boundary } - } - - /// The same as [`Self::finalize_height`], but it only simulates the operation. - /// - /// This means that no changes are done. - /// - /// Returns the leaves that would be displaced by finalizing the given block. - pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationOutcome { - let boundary = if number == N::zero() { - return FinalizationOutcome { removed: BTreeMap::new() } - } else { - number - N::one() - }; - - let below_boundary = self.storage.range(&Reverse(boundary)..); - FinalizationOutcome { - removed: below_boundary.map(|(k, v)| (k.clone(), v.clone())).collect(), + /// Remove all leaves displaced by the last block finalization. + pub fn remove_displaced_leaves(&mut self, displaced_leaves: &FinalizationOutcome) { + for (number, hashes) in &displaced_leaves.removed { + for hash in hashes.iter() { + self.remove_leaf(number, hash); + } } } @@ -420,32 +403,6 @@ mod tests { assert!(set.contains(11, 11_2)); } - #[test] - fn finalization_works() { - let mut set = LeafSet::new(); - set.import(9_1u32, 9u32, 0u32); - set.import(10_1, 10, 9_1); - set.import(10_2, 10, 9_1); - set.import(11_1, 11, 10_1); - set.import(11_2, 11, 10_1); - set.import(12_1, 12, 11_2); - - let outcome = set.finalize_height(11); - assert_eq!(set.count(), 2); - assert!(set.contains(11, 11_1)); - assert!(set.contains(12, 12_1)); - assert_eq!( - outcome.removed, - [(Reverse(10), vec![10_2])].into_iter().collect::>(), - ); - - set.undo().undo_finalization(outcome); - assert_eq!(set.count(), 3); - assert!(set.contains(11, 11_1)); - assert!(set.contains(12, 12_1)); - assert!(set.contains(10, 10_2)); - } - #[test] fn flush_to_disk() { const PREFIX: &[u8] = b"abcdefg"; @@ -479,35 +436,4 @@ mod tests { assert!(set.contains(10, 1_2)); assert!(!set.contains(10, 1_3)); } - - #[test] - fn finalization_consistent_with_disk() { - const PREFIX: &[u8] = b"prefix"; - let db = Arc::new(sp_database::MemDb::default()); - - let mut set = LeafSet::new(); - set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_2); - set.import(11_2, 11, 10_2); - set.import(12_1, 12, 11_123); - - assert!(set.contains(10, 10_1)); - - let mut tx = Transaction::new(); - set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx).unwrap(); - - let _ = set.finalize_height(11); - let mut tx = Transaction::new(); - set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx).unwrap(); - - assert!(set.contains(11, 11_1)); - assert!(set.contains(11, 11_2)); - assert!(set.contains(12, 12_1)); - assert!(!set.contains(10, 10_1)); - - let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); - assert_eq!(set, set2); - } } diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index ac4537d5ba0a4f606a897fb4c98929427683023a..435ca88a80079c9da9510b0f505d21677afb52cf 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.12.4" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } futures = 
"0.3.30" futures-timer = "3.0.1" ip_network = "0.4.1" diff --git a/substrate/client/authority-discovery/src/error.rs b/substrate/client/authority-discovery/src/error.rs index 6f791237c2f229c3f94996e2a91c0c0de8800d0d..d2c567d77afcfb17f9af1cfde55ba232205e8167 100644 --- a/substrate/client/authority-discovery/src/error.rs +++ b/substrate/client/authority-discovery/src/error.rs @@ -35,7 +35,7 @@ pub enum Error { VerifyingDhtPayload, #[error("Failed to hash the authority id to be used as a dht key.")] - HashingAuthorityId(#[from] sc_network::multiaddr::multihash::Error), + HashingAuthorityId(#[from] sc_network_types::multihash::Error), #[error("Failed calling into the Substrate runtime: {0}")] CallingRuntime(#[from] sp_blockchain::Error), @@ -53,7 +53,7 @@ pub enum Error { EncodingDecodingScale(#[from] codec::Error), #[error("Failed to parse a libp2p multi address.")] - ParsingMultiaddress(#[from] sc_network::multiaddr::Error), + ParsingMultiaddress(#[from] sc_network::multiaddr::ParseError), #[error("Failed to parse a libp2p key: {0}")] ParsingLibp2pIdentity(String), diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index 53418d2d38c4a0ce0ab2c2b938b288360d2fdbbc..d89083100aa3c51c16e523a6f71ba6559cd04b62 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -35,7 +35,6 @@ use addr_cache::AddrCache; use codec::{Decode, Encode}; use ip_network::IpNetwork; use linked_hash_set::LinkedHashSet; -use multihash::{Code, Multihash, MultihashDigest}; use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; @@ -46,7 +45,10 @@ use sc_network::{ event::DhtEvent, multiaddr, KademliaKey, Multiaddr, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, }; -use sc_network_types::PeerId; +use sc_network_types::{ + multihash::{Code, Multihash}, + PeerId, +}; use sp_api::{ApiError, ProvideRuntimeApi}; use sp_authority_discovery::{ AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, diff --git a/substrate/client/authority-discovery/src/worker/addr_cache.rs b/substrate/client/authority-discovery/src/worker/addr_cache.rs index 6e3b3c8af20190f3711aac72fbd5e7cf3eb493c7..77cdfbd4f1502574cd5a1aa2434d3c0fd392fd51 100644 --- a/substrate/client/authority-discovery/src/worker/addr_cache.rs +++ b/substrate/client/authority-discovery/src/worker/addr_cache.rs @@ -176,8 +176,8 @@ fn addresses_to_peer_ids(addresses: &HashSet) -> HashSet { mod tests { use super::*; - use multihash::{self, Multihash}; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; + use sc_network_types::multihash::Multihash; use sp_authority_discovery::{AuthorityId, AuthorityPair}; use sp_core::crypto::Pair; diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index caeac56c54073e53026ec3f8c52c8e7f88958f2b..70107c89a851d3de33d6bce65515661ec0675af1 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -29,11 +29,15 @@ use futures::{ sink::SinkExt, task::LocalSpawn, }; -use libp2p::{core::multiaddr, identity::SigningError, kad::record::Key as KademliaKey, PeerId}; +use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; use prometheus_endpoint::prometheus::default_registry; use sc_client_api::HeaderBackend; use sc_network::{service::signature::Keypair, Signature}; +use 
sc_network_types::{ + multiaddr::{Multiaddr, Protocol}, + PeerId, +}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; @@ -168,7 +172,7 @@ impl NetworkSigner for TestNetwork { let public_key = libp2p::identity::PublicKey::try_decode_protobuf(&public_key) .map_err(|error| error.to_string())?; let peer_id: PeerId = peer_id.into(); - let remote: libp2p::PeerId = public_key.to_peer_id(); + let remote: PeerId = public_key.to_peer_id().into(); Ok(peer_id == remote && public_key.verify(message, signature)) } @@ -435,7 +439,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { let peer_id = PeerId::random(); let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - address.with(multiaddr::Protocol::P2p(peer_id.into())) + address.with(Protocol::P2p(peer_id.into())) }; let remote_key_store = MemoryKeystore::new(); let remote_public_key: AuthorityId = remote_key_store diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index 4890b66c9b2f91ef13ea74fa9f66c797d0beb32c..b75cb463b1a874c48a7b8c4511929eade4245704 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index e74d587d9b40fc57e0575b0fae7ecb0c5a177c15..62efe977e989c13bfc6e3fe0fb11d13ac3aca298 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", features = [ "derive", ] } sp-api = { path = "../../primitives/api" } diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index dd7bb3598c2c5d3fefdc588f21988cb3cc4c1c82..5b411b642a0e3aa410517621d99f4ab3cd245a74 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -16,7 +16,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +clap = { version = "4.5.3", features = ["derive"], optional = true } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } memmap2 = "0.9.3" serde = { features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } @@ -42,3 +43,4 @@ substrate-test-runtime = { path = "../../test-utils/runtime" } sp-keyring = { path = "../../primitives/keyring" } sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } sp-consensus-babe = { default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } +regex = "1.6.0" diff --git a/substrate/client/chain-spec/derive/src/impls.rs b/substrate/client/chain-spec/derive/src/impls.rs index 
c0624897c133e6ebd3ecda6556d628dccee3c53b..d8b20c5c2a8cca93bd2a6bc8da051ea3746c1e4e 100644 --- a/substrate/client/chain-spec/derive/src/impls.rs +++ b/substrate/client/chain-spec/derive/src/impls.rs @@ -19,7 +19,7 @@ use proc_macro2::{Span, TokenStream}; use proc_macro_crate::{crate_name, FoundCrate}; use quote::quote; -use syn::{DeriveInput, Error, Ident}; +use syn::{DeriveInput, Error, Ident, Path}; const CRATE_NAME: &str = "sc-chain-spec"; const ATTRIBUTE_NAME: &str = "forks"; @@ -143,7 +143,7 @@ pub fn group_derive(ast: &DeriveInput) -> proc_macro::TokenStream { pub fn derive( ast: &DeriveInput, derive: impl Fn( - &Ident, + &Path, &Ident, &syn::Generics, Vec<&Ident>, @@ -171,25 +171,28 @@ pub fn derive( }; let name = &ast.ident; - let crate_name = match crate_name(CRATE_NAME) { + let crate_path = match crate_name(CRATE_NAME) { Ok(FoundCrate::Itself) => CRATE_NAME.replace("-", "_"), Ok(FoundCrate::Name(chain_spec_name)) => chain_spec_name, - Err(e) => { - let err = Error::new(Span::call_site(), &e).to_compile_error(); - return quote!( #err ).into() + Err(e) => match crate_name("polkadot-sdk") { + Ok(FoundCrate::Name(sdk)) => format!("{sdk}::{CRATE_NAME}").replace("-", "_"), + _ => { + return Error::new(Span::call_site(), &e).to_compile_error().into(); + }, }, }; - let crate_name = Ident::new(&crate_name, Span::call_site()); + let crate_path = + syn::parse_str::(&crate_path).expect("crate_name returns valid path; qed"); let field_names = fields.named.iter().flat_map(|x| x.ident.as_ref()).collect::>(); let field_types = fields.named.iter().map(|x| &x.ty).collect::>(); - derive(&crate_name, name, &ast.generics, field_names, field_types, fields).into() + derive(&crate_path, name, &ast.generics, field_names, field_types, fields).into() } -fn generate_fork_fields(crate_name: &Ident, names: &[&Ident], types: &[&syn::Type]) -> TokenStream { - let crate_name = std::iter::repeat(crate_name); +fn generate_fork_fields(crate_path: &Path, names: &[&Ident], types: &[&syn::Type]) -> TokenStream { + let crate_path = std::iter::repeat(crate_path); quote! 
{ - #( pub #names: Option<<#types as #crate_name::Group>::Fork>, )* + #( pub #names: Option<<#types as #crate_path::Group>::Fork>, )* } } diff --git a/substrate/client/chain-spec/res/chain_spec_as_json_fails_with_invalid_config.err b/substrate/client/chain-spec/res/chain_spec_as_json_fails_with_invalid_config.err deleted file mode 100644 index c545b53b2bafea88af090236f825bbafe2baf8f8..0000000000000000000000000000000000000000 --- a/substrate/client/chain-spec/res/chain_spec_as_json_fails_with_invalid_config.err +++ /dev/null @@ -1,114 +0,0 @@ -Invalid JSON blob: unknown field `babex`, expected one of `system`, `babe`, `substrateTest`, `balances` at line 3 column 9 for blob: -{ - "system": {}, - "babex": { - "authorities": [ - [ - "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - 1 - ], - [ - "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", - 1 - ], - [ - "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", - 1 - ] - ], - "epochConfig": { - "c": [ - 3, - 10 - ], - "allowed_slots": "PrimaryAndSecondaryPlainSlots" - } - }, - "substrateTest": { - "authorities": [ - "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", - "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" - ] - }, - "balances": { - "balances": [ - [ - "5D34dL5prEUaGNQtPPZ3yN5Y6BnkfXunKXXz6fo7ZJbLwRRH", - 100000000000000000 - ], - [ - "5GBNeWRhZc2jXu7D55rBimKYDk8PGk8itRYFTPfC8RJLKG5o", - 100000000000000000 - ], - [ - "5Dfis6XL8J2P6JHUnUtArnFWndn62SydeP8ee8sG2ky9nfm9", - 100000000000000000 - ], - [ - "5F4H97f7nQovyrbiq4ZetaaviNwThSVcFobcA5aGab6167dK", - 100000000000000000 - ], - [ - "5DiDShBWa1fQx6gLzpf3SFBhMinCoyvHM1BWjPNsmXS8hkrW", - 100000000000000000 - ], - [ - "5EFb84yH9tpcFuiKUcsmdoF7xeeY3ajG1ZLQimxQoFt9HMKR", - 100000000000000000 - ], - [ - "5DZLHESsfGrJ5YzT3HuRPXsSNb589xQ4Unubh1mYLodzKdVY", - 100000000000000000 - ], - [ - "5GHJzqvG6tXnngCpG7B12qjUvbo5e4e9z8Xjidk3CQZHxTPZ", - 100000000000000000 - ], - [ - "5CUnSsgAyLND3bxxnfNhgWXSe9Wn676JzLpGLgyJv858qhoX", - 100000000000000000 - ], - [ - "5CVKn7HAZW1Ky4r7Vkgsr7VEW88C2sHgUNDiwHY9Ct2hjU8q", - 100000000000000000 - ], - [ - "5H673aukQ4PeDe1U2nuv1bi32xDEziimh3PZz7hDdYUB7TNz", - 100000000000000000 - ], - [ - "5HTe9L15LJryjUAt1jZXZCBPnzbbGnpvFwbjE3NwCWaAqovf", - 100000000000000000 - ], - [ - "5D7LFzGpMwHPyDBavkRbWSKWTtJhCaPPZ379wWLT23bJwXJz", - 100000000000000000 - ], - [ - "5CLepMARnEgtVR1EkUuJVUvKh97gzergpSxUU3yKGx1v6EwC", - 100000000000000000 - ], - [ - "5Chb2UhfvZpmjjEziHbFbotM4quX32ZscRV6QJBt1rUKzz51", - 100000000000000000 - ], - [ - "5HmRp3i3ZZk7xsAvbi8hyXVP6whSMnBJGebVC4FsiZVhx52e", - 100000000000000000 - ], - [ - "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - 100000000000000000 - ], - [ - "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", - 100000000000000000 - ], - [ - "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", - 100000000000000000 - ] - ] - } -} \ No newline at end of file diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index a9cdce4bf95580f6c00cbf27b0330779e4f39a28..883cd19adfd1c7543ee919a6e436df1838c6efe2 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -20,7 +20,7 @@ #![warn(missing_docs)] use crate::{ extension::GetExtension, genesis_config_builder::HostFunctions, ChainType, - GenesisConfigBuilderRuntimeCaller as RuntimeCaller, Properties, RuntimeGenesis, + GenesisConfigBuilderRuntimeCaller as RuntimeCaller, Properties, }; use sc_network::config::MultiaddrWithPeerId; use 
sc_telemetry::TelemetryEndpoints; @@ -37,7 +37,6 @@ use std::{ fs::File, marker::PhantomData, path::PathBuf, - sync::Arc, }; #[derive(Serialize, Deserialize)] @@ -58,37 +57,33 @@ impl Clone for GenesisBuildAction { } } -#[allow(deprecated)] -enum GenesisSource { +enum GenesisSource { File(PathBuf), Binary(Cow<'static, [u8]>), /// factory function + code - //Factory and G type parameter shall be removed together with `ChainSpec::from_genesis` - Factory(Arc G + Send + Sync>, Vec), Storage(Storage), /// build action + code GenesisBuilderApi(GenesisBuildAction, Vec), } -impl Clone for GenesisSource { +impl Clone for GenesisSource { fn clone(&self) -> Self { match *self { Self::File(ref path) => Self::File(path.clone()), Self::Binary(ref d) => Self::Binary(d.clone()), - Self::Factory(ref f, ref c) => Self::Factory(f.clone(), c.clone()), Self::Storage(ref s) => Self::Storage(s.clone()), Self::GenesisBuilderApi(ref s, ref c) => Self::GenesisBuilderApi(s.clone(), c.clone()), } } } -impl GenesisSource { - fn resolve(&self) -> Result, String> { +impl GenesisSource { + fn resolve(&self) -> Result { /// helper container for deserializing genesis from the JSON file (ChainSpec JSON file is /// also supported here) #[derive(Serialize, Deserialize)] - struct GenesisContainer { - genesis: Genesis, + struct GenesisContainer { + genesis: Genesis, } match self { @@ -105,19 +100,15 @@ impl GenesisSource { })? }; - let genesis: GenesisContainer = json::from_slice(&bytes) + let genesis: GenesisContainer = json::from_slice(&bytes) .map_err(|e| format!("Error parsing spec file: {}", e))?; Ok(genesis.genesis) }, Self::Binary(buf) => { - let genesis: GenesisContainer = json::from_reader(buf.as_ref()) + let genesis: GenesisContainer = json::from_reader(buf.as_ref()) .map_err(|e| format!("Error parsing embedded file: {}", e))?; Ok(genesis.genesis) }, - Self::Factory(f, code) => Ok(Genesis::RuntimeAndCode(RuntimeInnerWrapper { - runtime: f(), - code: code.clone(), - })), Self::Storage(storage) => Ok(Genesis::Raw(RawGenesis::from(storage.clone()))), Self::GenesisBuilderApi(GenesisBuildAction::Full(config), code) => Ok(Genesis::RuntimeGenesis(RuntimeGenesisInner { @@ -140,24 +131,12 @@ impl GenesisSource { } } -impl BuildStorage for ChainSpec +impl BuildStorage for ChainSpec where EHF: HostFunctions, { fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { match self.genesis.resolve()? { - #[allow(deprecated)] - Genesis::Runtime(runtime_genesis_config) => { - runtime_genesis_config.assimilate_storage(storage)?; - }, - #[allow(deprecated)] - Genesis::RuntimeAndCode(RuntimeInnerWrapper { - runtime: runtime_genesis_config, - code, - }) => { - runtime_genesis_config.assimilate_storage(storage)?; - storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); - }, Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => { storage.top.extend(map.into_iter().map(|(k, v)| (k.0, v.0))); children_map.into_iter().for_each(|(k, v)| { @@ -236,7 +215,7 @@ impl From for RawGenesis { } } -/// Inner representation of [`Genesis::RuntimeGenesis`] format +/// Inner representation of [`Genesis::RuntimeGenesis`] format #[derive(Serialize, Deserialize, Debug)] struct RuntimeGenesisInner { /// Runtime wasm code, expected to be hex-encoded in JSON. @@ -249,7 +228,7 @@ struct RuntimeGenesisInner { } /// Represents two possible variants of the contained JSON blob for the -/// [`Genesis::RuntimeGenesis`] format. +/// [`Genesis::RuntimeGenesis`] format. 
#[derive(Serialize, Deserialize, Debug)] #[serde(rename_all = "camelCase")] enum RuntimeGenesisConfigJson { @@ -265,31 +244,11 @@ enum RuntimeGenesisConfigJson { Patch(json::Value), } -/// Inner variant wrapper for deprecated runtime. -#[derive(Serialize, Deserialize, Debug)] -struct RuntimeInnerWrapper { - /// The native `RuntimeGenesisConfig` struct. - runtime: G, - /// Runtime code. - #[serde(with = "sp_core::bytes")] - code: Vec, -} - /// Represents the different formats of the genesis state within chain spec JSON blob. #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] -enum Genesis { - /// (Deprecated) Contains the JSON representation of G (the native type representing the - /// runtime's `RuntimeGenesisConfig` struct) (will be removed with `ChainSpec::from_genesis`) - /// without the runtime code. It is required to deserialize the legacy chainspecs generated - /// with `ChainsSpec::from_genesis` method. - Runtime(G), - /// (Deprecated) Contains the JSON representation of G (the native type representing the - /// runtime's `RuntimeGenesisConfig` struct) (will be removed with `ChainSpec::from_genesis`) - /// and the runtime code. It is required to create and deserialize JSON chainspecs created with - /// deprecated `ChainSpec::from_genesis` method. - RuntimeAndCode(RuntimeInnerWrapper), +enum Genesis { /// The genesis storage as raw data. Typically raw key-value entries in state. Raw(RawGenesis), /// State root hash of the genesis storage. @@ -343,7 +302,7 @@ struct ClientSpec { pub type NoExtension = Option<()>; /// Builder for creating [`ChainSpec`] instances. -pub struct ChainSpecBuilder { +pub struct ChainSpecBuilder { code: Vec, extensions: E, name: String, @@ -355,10 +314,9 @@ pub struct ChainSpecBuilder { protocol_id: Option, fork_id: Option, properties: Option, - _genesis: PhantomData, } -impl ChainSpecBuilder { +impl ChainSpecBuilder { /// Creates a new builder instance with no defaults. pub fn new(code: &[u8], extensions: E) -> Self { Self { @@ -373,7 +331,6 @@ impl ChainSpecBuilder { protocol_id: None, fork_id: None, properties: None, - _genesis: Default::default(), } } @@ -457,7 +414,7 @@ impl ChainSpecBuilder { } /// Builds a [`ChainSpec`] instance using the provided settings. - pub fn build(self) -> ChainSpec { + pub fn build(self) -> ChainSpec { let client_spec = ClientSpec { name: self.name, id: self.id, @@ -486,13 +443,13 @@ impl ChainSpecBuilder { /// The chain spec is generic over the native `RuntimeGenesisConfig` struct (`G`). It is also /// possible to parametrize chain spec over the extended host functions (EHF). It should be use if /// runtime is using the non-standard host function during genesis state creation. -pub struct ChainSpec { +pub struct ChainSpec { client_spec: ClientSpec, - genesis: GenesisSource, + genesis: GenesisSource, _host_functions: PhantomData, } -impl Clone for ChainSpec { +impl Clone for ChainSpec { fn clone(&self) -> Self { ChainSpec { client_spec: self.client_spec.clone(), @@ -502,7 +459,7 @@ impl Clone for ChainSpec { } } -impl ChainSpec { +impl ChainSpec { /// A list of bootnode addresses. pub fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { &self.client_spec.boot_nodes @@ -555,58 +512,18 @@ impl ChainSpec { &mut self.client_spec.extensions } - /// Create hardcoded spec. - #[deprecated( - note = "`from_genesis` is planned to be removed in May 2024. Use `builder()` instead." 
- )] - // deprecated note: Genesis::Runtime + GenesisSource::Factory shall also be removed - pub fn from_genesis G + 'static + Send + Sync>( - name: &str, - id: &str, - chain_type: ChainType, - constructor: F, - boot_nodes: Vec, - telemetry_endpoints: Option, - protocol_id: Option<&str>, - fork_id: Option<&str>, - properties: Option, - extensions: E, - code: &[u8], - ) -> Self { - let client_spec = ClientSpec { - name: name.to_owned(), - id: id.to_owned(), - chain_type, - boot_nodes, - telemetry_endpoints, - protocol_id: protocol_id.map(str::to_owned), - fork_id: fork_id.map(str::to_owned), - properties, - extensions, - consensus_engine: (), - genesis: Default::default(), - code_substitutes: BTreeMap::new(), - }; - - ChainSpec { - client_spec, - genesis: GenesisSource::Factory(Arc::new(constructor), code.into()), - _host_functions: Default::default(), - } - } - /// Type of the chain. fn chain_type(&self) -> ChainType { self.client_spec.chain_type.clone() } /// Provides a `ChainSpec` builder. - pub fn builder(code: &[u8], extensions: E) -> ChainSpecBuilder { + pub fn builder(code: &[u8], extensions: E) -> ChainSpecBuilder { ChainSpecBuilder::new(code, extensions) } } -impl ChainSpec { +impl ChainSpec { /// Parse json content into a `ChainSpec` pub fn from_json_bytes(json: impl Into>) -> Result { let json = json.into(); @@ -649,17 +566,17 @@ impl ChainS #[derive(Serialize, Deserialize)] // we cannot #[serde(deny_unknown_fields)]. Otherwise chain-spec-builder will fail on any // non-standard spec. -struct ChainSpecJsonContainer { +struct ChainSpecJsonContainer { #[serde(flatten)] client_spec: ClientSpec, - genesis: Genesis, + genesis: Genesis, } -impl ChainSpec +impl ChainSpec where EHF: HostFunctions, { - fn json_container(&self, raw: bool) -> Result, String> { + fn json_container(&self, raw: bool) -> Result, String> { let raw_genesis = match (raw, self.genesis.resolve()?) { ( true, @@ -685,20 +602,7 @@ where storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); RawGenesis::from(storage) }, - - #[allow(deprecated)] - (true, Genesis::RuntimeAndCode(RuntimeInnerWrapper { runtime: g, code })) => { - let mut storage = g.build_storage()?; - storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); - RawGenesis::from(storage) - }, - #[allow(deprecated)] - (true, Genesis::Runtime(g)) => { - let storage = g.build_storage()?; - RawGenesis::from(storage) - }, (true, Genesis::Raw(raw)) => raw, - (_, genesis) => return Ok(ChainSpecJsonContainer { client_spec: self.client_spec.clone(), genesis }), }; @@ -716,9 +620,8 @@ where } } -impl crate::ChainSpec for ChainSpec +impl crate::ChainSpec for ChainSpec where - G: RuntimeGenesis + 'static, E: GetExtension + serde::Serialize + Clone + Send + Sync + 'static, EHF: HostFunctions, { @@ -831,8 +734,8 @@ fn json_contains_path(doc: &json::Value, path: &mut VecDeque<&str>) -> bool { /// This function updates the code in given chain spec. /// -/// Function support updating the runtime code in provided JSON chain spec blob. `Genesis::Raw` -/// and `Genesis::RuntimeGenesis` formats are supported. +/// This function supports updating the runtime code in the provided JSON chain spec blob. +/// `Genesis::Raw` and `Genesis::RuntimeGenesis` formats are supported. /// /// If update was successful `true` is returned, otherwise `false`. Chain spec JSON is modified in place.
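The two supported layouts store the runtime code in different places, so a toy re-implementation may help to visualize the update. This is an editor's sketch, not the actual function; only the `genesis.runtimeGenesis.code` field and the `0x3a636f6465` (`:code`) storage key under `genesis.raw.top`, both of which appear in the tests below, are taken from the real formats.

```rust
use serde_json::{json, Value};

/// Toy stand-in for the real updater: patch the runtime code into whichever
/// genesis layout the spec uses, returning `true` on success.
fn update_code(spec: &mut Value, code_hex: &str) -> bool {
	// `Genesis::RuntimeGenesis`: the code sits beside the genesis config JSON.
	if let Some(genesis) = spec.pointer_mut("/genesis/runtimeGenesis") {
		genesis["code"] = json!(code_hex);
		return true;
	}
	// `Genesis::Raw`: the code is the `:code` storage entry (key `0x3a636f6465`).
	if let Some(top) = spec.pointer_mut("/genesis/raw/top") {
		top["0x3a636f6465"] = json!(code_hex);
		return true;
	}
	false
}

fn main() {
	let mut spec = json!({ "genesis": { "raw": { "top": {} } } });
	assert!(update_code(&mut spec, "0x000102040506"));
	assert_eq!(spec["genesis"]["raw"]["top"]["0x3a636f6465"], "0x000102040506");
}
```

The real function works on the same two layouts; the sketch only shows where the code bytes land in each.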
@@ -871,19 +774,7 @@ mod tests { use sp_core::storage::well_known_keys; use sp_keyring::AccountKeyring; - #[derive(Debug, Serialize, Deserialize)] - struct Genesis(BTreeMap); - - impl BuildStorage for Genesis { - fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { - storage.top.extend( - self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())), - ); - Ok(()) - } - } - - type TestSpec = ChainSpec; + type TestSpec = ChainSpec; #[test] fn should_deserialize_example_chain_spec() { @@ -919,7 +810,7 @@ mod tests { } } - type TestSpec2 = ChainSpec; + type TestSpec2 = ChainSpec; #[test] fn should_deserialize_chain_spec_with_extensions() { @@ -1137,10 +1028,10 @@ mod tests { #[test] fn chain_spec_as_json_fails_with_invalid_config() { - let expected_error_message = - include_str!("../res/chain_spec_as_json_fails_with_invalid_config.err"); - let j = - include_str!("../../../test-utils/runtime/res/default_genesis_config_invalid_2.json"); + let invalid_genesis_config = from_str::(include_str!( + "../../../test-utils/runtime/res/default_genesis_config_invalid_2.json" + )) + .unwrap(); let output = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), @@ -1148,12 +1039,25 @@ mod tests { .with_name("TestName") .with_id("test_id") .with_chain_type(ChainType::Local) - .with_genesis_config(from_str(j).unwrap()) + .with_genesis_config(invalid_genesis_config.clone()) .build(); - let result = output.as_json(true); + let result = output.as_json(true).unwrap_err(); + let mut result = result.lines(); - assert_eq!(result.err().unwrap(), expected_error_message); + let result_header = result.next().unwrap(); + let result_body = result.collect::>().join("\n"); + let result_body: Value = serde_json::from_str(&result_body).unwrap(); + + let re = regex::Regex::new(concat!( + r"^Invalid JSON blob: unknown field `babex`, expected one of `system`, `babe`, ", + r"`substrateTest`, `balances` at line \d+ column \d+ for blob:$" + )) + .unwrap(); + + assert_eq!(json!({"a":1,"b":2}), json!({"b":2,"a":1})); + assert!(re.is_match(result_header)); + assert_eq!(invalid_genesis_config, result_body); } #[test] @@ -1278,35 +1182,4 @@ mod tests { &|v| { *v == "0x000102040506" } )); } - - #[test] - fn generate_from_genesis_is_still_supported() { - #[allow(deprecated)] - let chain_spec: ChainSpec = ChainSpec::from_genesis( - "TestName", - "test", - ChainType::Local, - || Default::default(), - Vec::new(), - None, - None, - None, - None, - Default::default(), - &vec![0, 1, 2, 4, 5, 6], - ); - - let chain_spec_json = from_str::(&chain_spec.as_json(false).unwrap()).unwrap(); - assert!(json_eval_value_at_key( - &chain_spec_json, - &mut json_path!["genesis", "runtimeAndCode", "code"], - &|v| { *v == "0x000102040506" } - )); - let chain_spec_json = from_str::(&chain_spec.as_json(true).unwrap()).unwrap(); - assert!(json_eval_value_at_key( - &chain_spec_json, - &mut json_path!["genesis", "raw", "top", "0x3a636f6465"], - &|v| { *v == "0x000102040506" } - )); - } } diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index abe01dafd92483fbede02eb6d95205b29bddacb5..653c3c618b772a5b8044e7e21746a8d652897407 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -257,7 +257,7 @@ //! pub known_blocks: HashMap, //! } //! -//! pub type MyChainSpec = GenericChainSpec; +//! pub type MyChainSpec = GenericChainSpec; //! ``` //! 
Some parameters may require different values depending on the current blockchain height (a.k.a. //! forks). You can use the [`ChainSpecGroup`](macro@ChainSpecGroup) macro and the provided [`Forks`] @@ -286,10 +286,10 @@ //! pub type BlockNumber = u64; //! //! /// A chain spec supporting forkable `ClientParams`. -//! pub type MyChainSpec1 = GenericChainSpec>; +//! pub type MyChainSpec1 = GenericChainSpec>; //! //! /// A chain spec supporting forkable `Extension`. -//! pub type MyChainSpec2 = GenericChainSpec>; +//! pub type MyChainSpec2 = GenericChainSpec>; //! ``` //! It's also possible to have a set of parameters that are allowed to change with block numbers //! (i.e., they are forkable), and another set that is not subject to changes. This can also be @@ -316,7 +316,7 @@ //! pub pool: Forks, //! } //! -//! pub type MyChainSpec = GenericChainSpec; +//! pub type MyChainSpec = GenericChainSpec; //! ``` //! The chain spec can be extended with other fields that are opaque to the default chain spec. //! Specific node implementations will need to be able to deserialize these extensions. @@ -344,7 +344,6 @@ pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; -use serde::{de::DeserializeOwned, Serialize}; use sp_core::storage::Storage; use sp_runtime::BuildStorage; @@ -353,6 +352,7 @@ use sp_runtime::BuildStorage; /// This can be used by tools to determine the type of a chain for displaying /// additional information or enabling additional features. #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Clone)] +#[cfg_attr(feature = "clap", derive(clap::ValueEnum))] pub enum ChainType { /// A development chain that runs mainly on one node. Development, @@ -361,6 +361,7 @@ pub enum ChainType { /// A live chain. Live, /// Some custom chain type. + #[cfg_attr(feature = "clap", clap(skip))] Custom(String), } @@ -373,10 +374,6 @@ impl Default for ChainType { /// Arbitrary properties defined in chain spec as a JSON object pub type Properties = serde_json::map::Map; -/// A set of traits for the runtime genesis config. -pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} -impl RuntimeGenesis for T {} - /// Common interface of a chain specification. pub trait ChainSpec: BuildStorage + Send + Sync { /// Spec name. 
diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 317a344cf58e5399b062d40c3b9c9afdddb5d1fa..1f3bce799b2c436565cf948f54d2017338344732 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -25,7 +25,7 @@ itertools = "0.11" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = { workspace = true, default-features = true } names = { version = "0.14.0", default-features = false } -parity-scale-codec = "3.6.1" +parity-scale-codec = "3.6.12" rand = "0.8.5" regex = "1.6.0" rpassword = "7.0.0" diff --git a/substrate/client/cli/src/commands/insert_key.rs b/substrate/client/cli/src/commands/insert_key.rs index 3d89610b28b1bd0a031ffad6e94fef7fd604e20d..66dbec794865c00bed9e508a44e760299b2bc3ea 100644 --- a/substrate/client/cli/src/commands/insert_key.rs +++ b/substrate/client/cli/src/commands/insert_key.rs @@ -126,8 +126,10 @@ mod tests { } fn load_spec(&self, _: &str) -> std::result::Result, String> { + let builder = + GenericChainSpec::::builder(Default::default(), NoExtension::None); Ok(Box::new( - GenericChainSpec::<()>::builder(Default::default(), NoExtension::None) + builder .with_name("test") .with_id("test_id") .with_chain_type(ChainType::Development) diff --git a/substrate/client/cli/src/params/node_key_params.rs b/substrate/client/cli/src/params/node_key_params.rs index 7058af19f1d4a5edc31a8e93d062f7f93c065f1e..0e12c7a2a2d3742ef3d9aa3681f900036e67a467 100644 --- a/substrate/client/cli/src/params/node_key_params.rs +++ b/substrate/client/cli/src/params/node_key_params.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use clap::Args; -use sc_network::config::{identity::ed25519, NodeKeyConfig}; +use sc_network::config::{ed25519, NodeKeyConfig}; use sc_service::Role; use sp_core::H256; use std::{path::PathBuf, str::FromStr}; @@ -148,7 +148,7 @@ fn parse_ed25519_secret(hex: &str) -> error::Result::builder(Default::default(), NoExtension::None) - .with_name("test") - .with_id("test_id") - .with_chain_type(ChainType::Development) - .with_genesis_config_patch(Default::default()) - .build(), + GenericChainSpec::::builder( + Default::default(), + NoExtension::None, + ) + .with_name("test") + .with_id("test_id") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(Default::default()) + .build(), ), wasm_method: Default::default(), wasm_runtime_overrides: None, diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml index 64e2d16cd913aaea5d2688087e90088e5ff46ab8..d1460c45356d7ec86204b52c42c08ee28e9c5faf 100644 --- a/substrate/client/consensus/aura/Cargo.toml +++ b/substrate/client/consensus/aura/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" log = { workspace = true, default-features = true } thiserror = { workspace = true } diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index b001e3d117aa94c86e76484336b434f840ec3e69..c51082a018b5cfd558efe0b76985d6add890057d 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", 
version = "3.6.12", features = ["derive"] } futures = "0.3.30" log = { workspace = true, default-features = true } num-bigint = "0.4.3" diff --git a/substrate/client/consensus/babe/README.md b/substrate/client/consensus/babe/README.md index a3cf944b513b80eb96493342b821147bfc905592..47b5820ff71aab8a4a6c92640ebd8981c396fa09 100644 --- a/substrate/client/consensus/babe/README.md +++ b/substrate/client/consensus/babe/README.md @@ -43,6 +43,6 @@ primary blocks in the chain. We will pick the heaviest chain (more primary blocks) and will go with the longest one in case of a tie. An in-depth description and analysis of the protocol can be found here: - + License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index d10bdd8c7e4c4d108b78ef3286f475ba85e16a8a..0c85de24004031fce96be35bc413506069093eb1 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -61,7 +61,7 @@ //! blocks) and will go with the longest one in case of a tie. //! //! An in-depth description and analysis of the protocol can be found here: -//! +//! #![forbid(unsafe_code)] #![warn(missing_docs)] @@ -562,9 +562,10 @@ fn aux_storage_cleanup + HeaderBackend, Block: B // Cleans data for stale forks. let stale_forks = match client.expand_forks(¬ification.stale_heads) { Ok(stale_forks) => stale_forks, - Err((stale_forks, e)) => { + Err(e) => { warn!(target: LOG_TARGET, "{:?}", e); - stale_forks + + Default::default() }, }; hashes.extend(stale_forks.iter()); diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 38c9e1ff6ac25cc26151c668902838ec65ab6189..716067ae4000661beab6aeb90772087720d0a5ae 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -1094,8 +1094,8 @@ async fn obsolete_blocks_aux_data_cleanup() { assert!(aux_data_check(&fork1_hashes[2..3], false)); // Present: A4 assert!(aux_data_check(&fork1_hashes[3..], true)); - // Present C4, C5 - assert!(aux_data_check(&fork3_hashes, true)); + // Wiped C4, C5 + assert!(aux_data_check(&fork3_hashes, false)); } #[tokio::test] diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index 9336841146e74df3539bb64265314a4a7917dff9..cd183f6bc8b0562a334d59f86fa58699c63a7a73 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -15,7 +15,7 @@ workspace = true array-bytes = "6.2.2" async-channel = "1.8.0" async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } fnv = "1.0.6" futures = "0.3.30" log = { workspace = true, default-features = true } @@ -42,7 +42,6 @@ sp-keystore = { path = "../../../primitives/keystore" } sp-runtime = { path = "../../../primitives/runtime" } tokio = "1.37" - [dev-dependencies] serde = { workspace = true, default-features = true } tempfile = "3.1.0" diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index 0959424ba862d05c693d417fed45cdaa91d38be5..84f90622b5c14f8b7cba19749c877a6ce53ec226 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -12,7 +12,7 @@ homepage = "https://substrate.io" workspace = true [dependencies] -codec = { package = 
"parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.30" jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } log = { workspace = true, default-features = true } @@ -24,6 +24,7 @@ sp-consensus-beefy = { path = "../../../../primitives/consensus/beefy" } sc-rpc = { path = "../../../rpc" } sp-core = { path = "../../../../primitives/core" } sp-runtime = { path = "../../../../primitives/runtime" } +sp-application-crypto = { path = "../../../../primitives/application-crypto" } [dev-dependencies] serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/beefy/rpc/src/lib.rs b/substrate/client/consensus/beefy/rpc/src/lib.rs index f01baee2d6ece9a9d1dd36e1524a1d004e9b0401..66102eeb35c897a7bff4e50a12da1b90754eeeaa 100644 --- a/substrate/client/consensus/beefy/rpc/src/lib.rs +++ b/substrate/client/consensus/beefy/rpc/src/lib.rs @@ -21,9 +21,11 @@ #![warn(missing_docs)] use parking_lot::RwLock; +use sp_consensus_beefy::AuthorityIdBound; use std::sync::Arc; use sc_rpc::{utils::pipe_from_stream, SubscriptionTaskExecutor}; +use sp_application_crypto::RuntimeAppPublic; use sp_runtime::traits::Block as BlockT; use futures::{task::SpawnError, FutureExt, StreamExt}; @@ -98,19 +100,20 @@ pub trait BeefyApi { } /// Implements the BeefyApi RPC trait for interacting with BEEFY. -pub struct Beefy { - finality_proof_stream: BeefyVersionedFinalityProofStream, +pub struct Beefy { + finality_proof_stream: BeefyVersionedFinalityProofStream, beefy_best_block: Arc>>, executor: SubscriptionTaskExecutor, } -impl Beefy +impl Beefy where Block: BlockT, + AuthorityId: AuthorityIdBound, { /// Creates a new Beefy Rpc handler instance. 
pub fn new( - finality_proof_stream: BeefyVersionedFinalityProofStream, + finality_proof_stream: BeefyVersionedFinalityProofStream, best_block_stream: BeefyBestBlockStream, executor: SubscriptionTaskExecutor, ) -> Result { @@ -129,16 +132,18 @@ where } #[async_trait] -impl BeefyApiServer - for Beefy +impl BeefyApiServer + for Beefy where Block: BlockT, + AuthorityId: AuthorityIdBound, + ::Signature: Send + Sync, { fn subscribe_justifications(&self, pending: PendingSubscriptionSink) { let stream = self .finality_proof_stream .subscribe(100_000) - .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); + .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); sc_rpc::utils::spawn_subscription_task(&self.executor, pipe_from_stream(pending, stream)); } @@ -158,20 +163,26 @@ mod tests { communication::notification::BeefyVersionedFinalityProofSender, justification::BeefyVersionedFinalityProof, }; - use sp_consensus_beefy::{known_payloads, Payload, SignedCommitment}; + use sp_consensus_beefy::{ecdsa_crypto, known_payloads, Payload, SignedCommitment}; use sp_runtime::traits::{BlakeTwo256, Hash}; use substrate_test_runtime_client::runtime::Block; - fn setup_io_handler() -> (RpcModule>, BeefyVersionedFinalityProofSender) { + fn setup_io_handler() -> ( + RpcModule>, + BeefyVersionedFinalityProofSender, + ) { let (_, stream) = BeefyBestBlockStream::::channel(); setup_io_handler_with_best_block_stream(stream) } fn setup_io_handler_with_best_block_stream( best_block_stream: BeefyBestBlockStream, - ) -> (RpcModule>, BeefyVersionedFinalityProofSender) { + ) -> ( + RpcModule>, + BeefyVersionedFinalityProofSender, + ) { let (finality_proof_sender, finality_proof_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let handler = Beefy::new(finality_proof_stream, best_block_stream, sc_rpc::testing::test_executor()) @@ -250,10 +261,10 @@ mod tests { assert_eq!(response, expected); } - fn create_finality_proof() -> BeefyVersionedFinalityProof { + fn create_finality_proof() -> BeefyVersionedFinalityProof { let payload = Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); - BeefyVersionedFinalityProof::::V1(SignedCommitment { + BeefyVersionedFinalityProof::::V1(SignedCommitment { commitment: sp_consensus_beefy::Commitment { payload, block_number: 5, @@ -280,7 +291,7 @@ mod tests { // Inspect what we received let (bytes, recv_sub_id) = sub.next::().await.unwrap().unwrap(); - let recv_finality_proof: BeefyVersionedFinalityProof = + let recv_finality_proof: BeefyVersionedFinalityProof = Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(&recv_sub_id, sub.subscription_id()); assert_eq!(recv_finality_proof, finality_proof); diff --git a/substrate/client/consensus/beefy/rpc/src/notification.rs b/substrate/client/consensus/beefy/rpc/src/notification.rs index 690c511b999ac1b89627cd1e67dc2ce8e2343862..d4339058a6940df75959fbd7b9ded13a318bd5db 100644 --- a/substrate/client/consensus/beefy/rpc/src/notification.rs +++ b/substrate/client/consensus/beefy/rpc/src/notification.rs @@ -19,6 +19,7 @@ use codec::Encode; use serde::{Deserialize, Serialize}; +use sp_consensus_beefy::AuthorityIdBound; use sp_runtime::traits::Block as BlockT; /// An encoded finality proof proving that the given header has been finalized. 
@@ -28,11 +29,15 @@ use sp_runtime::traits::Block as BlockT; pub struct EncodedVersionedFinalityProof(sp_core::Bytes); impl EncodedVersionedFinalityProof { - pub fn new( - finality_proof: sc_consensus_beefy::justification::BeefyVersionedFinalityProof, + pub fn new( + finality_proof: sc_consensus_beefy::justification::BeefyVersionedFinalityProof< + Block, + AuthorityId, + >, ) -> Self where Block: BlockT, + AuthorityId: AuthorityIdBound, { EncodedVersionedFinalityProof(finality_proof.encode().into()) } diff --git a/substrate/client/consensus/beefy/src/aux_schema.rs b/substrate/client/consensus/beefy/src/aux_schema.rs index 534f668ae69c2996064bef086e2958b23f48caf0..1922494ad11207ae4f46931a5dcf673b3154001b 100644 --- a/substrate/client/consensus/beefy/src/aux_schema.rs +++ b/substrate/client/consensus/beefy/src/aux_schema.rs @@ -20,8 +20,10 @@ use crate::{error::Error, worker::PersistedState, LOG_TARGET}; use codec::{Decode, Encode}; -use log::{debug, trace}; +use log::{debug, trace, warn}; use sc_client_api::{backend::AuxStore, Backend}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_consensus_beefy::AuthorityIdBound; use sp_runtime::traits::Block as BlockT; const VERSION_KEY: &[u8] = b"beefy_auxschema_version"; @@ -36,26 +38,27 @@ pub(crate) fn write_current_version(backend: &BE) -> Result<(), Er } /// Write voter state. -pub(crate) fn write_voter_state( +pub(crate) fn write_voter_state( backend: &BE, - state: &PersistedState, -) -> Result<(), Error> { + state: &PersistedState, +) -> ClientResult<()> { trace!(target: LOG_TARGET, "🥩 persisting {:?}", state); AuxStore::insert_aux(backend, &[(WORKER_STATE_KEY, state.encode().as_slice())], &[]) - .map_err(|e| Error::Backend(e.to_string())) } -fn load_decode(backend: &BE, key: &[u8]) -> Result, Error> { - match backend.get_aux(key).map_err(|e| Error::Backend(e.to_string()))? { +fn load_decode(backend: &BE, key: &[u8]) -> ClientResult> { + match backend.get_aux(key)? { None => Ok(None), Some(t) => T::decode(&mut &t[..]) - .map_err(|e| Error::Backend(format!("BEEFY DB is corrupted: {}", e))) + .map_err(|e| ClientError::Backend(format!("BEEFY DB is corrupted: {}", e))) .map(Some), } } /// Load or initialize persistent data from backend. -pub(crate) fn load_persistent(backend: &BE) -> Result>, Error> +pub(crate) fn load_persistent( + backend: &BE, +) -> ClientResult>> where B: BlockT, BE: Backend, @@ -64,9 +67,14 @@ where match version { None => (), - Some(1) | Some(2) | Some(3) => (), // versions 1, 2 & 3 are obsolete and should be ignored - Some(4) => return load_decode::<_, PersistedState>(backend, WORKER_STATE_KEY), - other => return Err(Error::Backend(format!("Unsupported BEEFY DB version: {:?}", other))), + + Some(v) if 1 <= v && v <= 3 => + // versions 1, 2 & 3 are obsolete and should be ignored + warn!(target: LOG_TARGET, "🥩 backend contains a BEEFY state of an obsolete version {v}. ignoring..."), + Some(4) => + return load_decode::<_, PersistedState>(backend, WORKER_STATE_KEY), + other => + return Err(ClientError::Backend(format!("Unsupported BEEFY DB version: {:?}", other))), } // No persistent state found in DB. 
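The version handling above is an instance of a common aux-schema pattern: gate decoding on a stored schema version, treat obsolete versions as absent state (forcing re-initialization), and hard-error on versions newer than the binary understands. Below is a self-contained sketch of that pattern; the keys and version constant are invented for the example, a plain `HashMap` stands in for the client's `AuxStore`, and `codec` is `parity-scale-codec`.

```rust
use codec::{Decode, Encode};
use std::collections::HashMap;

const VERSION_KEY: &[u8] = b"schema_version";
const STATE_KEY: &[u8] = b"worker_state";
const CURRENT_VERSION: u32 = 4;

/// Load persisted state: `None` for absent or obsolete versions, an error for
/// corrupted bytes or an unknown (too new) schema version.
fn load_state<T: Decode>(db: &HashMap<Vec<u8>, Vec<u8>>) -> Result<Option<T>, String> {
	let version = db
		.get(VERSION_KEY)
		.map(|raw| u32::decode(&mut raw.as_slice()))
		.transpose()
		.map_err(|e| format!("corrupted version: {e}"))?;

	match version {
		None => Ok(None),
		// Obsolete schema: behave as if nothing was persisted, forcing re-init.
		Some(v) if v < CURRENT_VERSION => Ok(None),
		Some(CURRENT_VERSION) => db
			.get(STATE_KEY)
			.map(|raw| T::decode(&mut raw.as_slice()))
			.transpose()
			.map_err(|e| format!("corrupted state: {e}")),
		Some(other) => Err(format!("unsupported schema version: {other}")),
	}
}

fn main() {
	let mut db = HashMap::new();
	db.insert(VERSION_KEY.to_vec(), CURRENT_VERSION.encode());
	db.insert(STATE_KEY.to_vec(), 123u64.encode());
	assert_eq!(load_state::<u64>(&db), Ok(Some(123)));
}
```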
@@ -78,6 +86,7 @@ pub(crate) mod tests { use super::*; use crate::tests::BeefyTestNet; use sc_network_test::TestNetFactory; + use sp_consensus_beefy::ecdsa_crypto; // also used in tests.rs pub fn verify_persisted_version>(backend: &BE) -> bool { @@ -91,7 +100,7 @@ pub(crate) mod tests { let backend = net.peer(0).client().as_backend(); // version not available in db -> None - assert_eq!(load_persistent(&*backend).unwrap(), None); + assert_eq!(load_persistent::<_, _, ecdsa_crypto::AuthorityId>(&*backend).unwrap(), None); // populate version in db write_current_version(&*backend).unwrap(); @@ -99,7 +108,7 @@ pub(crate) mod tests { assert_eq!(load_decode(&*backend, VERSION_KEY).unwrap(), Some(CURRENT_VERSION)); // version is available in db but state isn't -> None - assert_eq!(load_persistent(&*backend).unwrap(), None); + assert_eq!(load_persistent::<_, _, ecdsa_crypto::AuthorityId>(&*backend).unwrap(), None); // full `PersistedState` load is tested in `tests.rs`. } diff --git a/substrate/client/consensus/beefy/src/communication/gossip.rs b/substrate/client/consensus/beefy/src/communication/gossip.rs index 947fe13856f476042858b1a5698aa5333b1d287b..95cac250b7c595e7a1f8875096b1f37d061a367f 100644 --- a/substrate/client/consensus/beefy/src/communication/gossip.rs +++ b/substrate/client/consensus/beefy/src/communication/gossip.rs @@ -36,10 +36,8 @@ use crate::{ keystore::BeefyKeystore, LOG_TARGET, }; -use sp_consensus_beefy::{ - ecdsa_crypto::{AuthorityId, Signature}, - ValidatorSet, ValidatorSetId, VoteMessage, -}; +use sp_application_crypto::RuntimeAppPublic; +use sp_consensus_beefy::{AuthorityIdBound, ValidatorSet, ValidatorSetId, VoteMessage}; // Timeout for rebroadcasting messages. #[cfg(not(test))] @@ -72,16 +70,19 @@ enum Consider { /// BEEFY gossip message type that gets encoded and sent on the network. #[derive(Debug, Encode, Decode)] -pub(crate) enum GossipMessage { +pub(crate) enum GossipMessage { /// BEEFY message with commitment and single signature. - Vote(VoteMessage, AuthorityId, Signature>), + Vote(VoteMessage, AuthorityId, ::Signature>), /// BEEFY justification with commitment and signatures. - FinalityProof(BeefyVersionedFinalityProof), + FinalityProof(BeefyVersionedFinalityProof), } -impl GossipMessage { +impl GossipMessage { /// Return inner vote if this message is a Vote. - pub fn unwrap_vote(self) -> Option, AuthorityId, Signature>> { + pub fn unwrap_vote( + self, + ) -> Option, AuthorityId, ::Signature>> + { match self { GossipMessage::Vote(vote) => Some(vote), GossipMessage::FinalityProof(_) => None, @@ -89,7 +90,7 @@ impl GossipMessage { } /// Return inner finality proof if this message is a FinalityProof. 
- pub fn unwrap_finality_proof(self) -> Option> { + pub fn unwrap_finality_proof(self) -> Option> { match self { GossipMessage::Vote(_) => None, GossipMessage::FinalityProof(proof) => Some(proof), @@ -114,33 +115,33 @@ where } #[derive(Clone, Debug)] -pub(crate) struct GossipFilterCfg<'a, B: Block> { +pub(crate) struct GossipFilterCfg<'a, B: Block, AuthorityId: AuthorityIdBound> { pub start: NumberFor, pub end: NumberFor, pub validator_set: &'a ValidatorSet, } #[derive(Clone, Debug)] -struct FilterInner { +struct FilterInner { pub start: NumberFor, pub end: NumberFor, pub validator_set: ValidatorSet, } -struct Filter { +struct Filter { // specifies live rounds - inner: Option>, + inner: Option>, // cache of seen valid justifications in active rounds rounds_with_valid_proofs: BTreeSet>, } -impl Filter { +impl Filter { pub fn new() -> Self { Self { inner: None, rounds_with_valid_proofs: BTreeSet::new() } } /// Update filter to new `start` and `set_id`. - fn update(&mut self, cfg: GossipFilterCfg) { + fn update(&mut self, cfg: GossipFilterCfg) { self.rounds_with_valid_proofs .retain(|&round| round >= cfg.start && round <= cfg.end); // only clone+overwrite big validator_set if set_id changed @@ -220,21 +221,22 @@ impl Filter { /// rejected/expired. /// ///All messaging is handled in a single BEEFY global topic. -pub(crate) struct GossipValidator +pub(crate) struct GossipValidator where B: Block, { votes_topic: B::Hash, justifs_topic: B::Hash, - gossip_filter: RwLock>, + gossip_filter: RwLock>, next_rebroadcast: Mutex, known_peers: Arc>>, network: Arc, } -impl GossipValidator +impl GossipValidator where B: Block, + AuthorityId: AuthorityIdBound, { pub(crate) fn new(known_peers: Arc>>, network: Arc) -> Self { Self { @@ -250,7 +252,7 @@ where /// Update gossip validator filter. /// /// Only votes for `set_id` and rounds `start <= round <= end` will be accepted. - pub(crate) fn update_filter(&self, filter: GossipFilterCfg) { + pub(crate) fn update_filter(&self, filter: GossipFilterCfg) { debug!( target: LOG_TARGET, "🥩 New gossip filter: start {:?}, end {:?}, validator set id {:?}", @@ -260,10 +262,11 @@ where } } -impl GossipValidator +impl GossipValidator where B: Block, N: NetworkPeers, + AuthorityId: AuthorityIdBound, { fn report(&self, who: PeerId, cost_benefit: ReputationChange) { self.network.report_peer(who, cost_benefit); @@ -271,7 +274,7 @@ where fn validate_vote( &self, - vote: VoteMessage, AuthorityId, Signature>, + vote: VoteMessage, AuthorityId, ::Signature>, sender: &PeerId, ) -> Action { let round = vote.commitment.block_number; @@ -299,7 +302,7 @@ where .unwrap_or(false) { debug!(target: LOG_TARGET, "Message from voter not in validator set: {}", vote.id); - return Action::Discard(cost::UNKNOWN_VOTER) + return Action::Discard(cost::UNKNOWN_VOTER); } } @@ -316,10 +319,10 @@ where fn validate_finality_proof( &self, - proof: BeefyVersionedFinalityProof, + proof: BeefyVersionedFinalityProof, sender: &PeerId, ) -> Action { - let (round, set_id) = proof_block_num_and_set_id::(&proof); + let (round, set_id) = proof_block_num_and_set_id::(&proof); self.known_peers.lock().note_vote_for(*sender, round); let action = { @@ -336,7 +339,7 @@ where } if guard.is_already_proven(round) { - return Action::Discard(benefit::NOT_INTERESTED) + return Action::Discard(benefit::NOT_INTERESTED); } // Verify justification signatures. 
@@ -344,7 +347,7 @@ where .validator_set() .map(|validator_set| { if let Err((_, signatures_checked)) = - verify_with_validator_set::(round, validator_set, &proof) + verify_with_validator_set::(round, validator_set, &proof) { debug!( target: LOG_TARGET, @@ -369,9 +372,10 @@ where } } -impl Validator for GossipValidator +impl Validator for GossipValidator where B: Block, + AuthorityId: AuthorityIdBound, N: NetworkPeers + Send + Sync, { fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, who: &PeerId) { @@ -385,7 +389,7 @@ where mut data: &[u8], ) -> ValidationResult { let raw = data; - let action = match GossipMessage::::decode_all(&mut data) { + let action = match GossipMessage::::decode_all(&mut data) { Ok(GossipMessage::Vote(msg)) => self.validate_vote(msg, sender), Ok(GossipMessage::FinalityProof(proof)) => self.validate_finality_proof(proof, sender), Err(e) => { @@ -414,26 +418,28 @@ where fn message_expired<'a>(&'a self) -> Box bool + 'a> { let filter = self.gossip_filter.read(); - Box::new(move |_topic, mut data| match GossipMessage::::decode_all(&mut data) { - Ok(GossipMessage::Vote(msg)) => { - let round = msg.commitment.block_number; - let set_id = msg.commitment.validator_set_id; - let expired = filter.consider_vote(round, set_id) != Consider::Accept; - trace!(target: LOG_TARGET, "🥩 Vote for round #{} expired: {}", round, expired); - expired - }, - Ok(GossipMessage::FinalityProof(proof)) => { - let (round, set_id) = proof_block_num_and_set_id::(&proof); - let expired = filter.consider_finality_proof(round, set_id) != Consider::Accept; - trace!( - target: LOG_TARGET, - "🥩 Finality proof for round #{} expired: {}", - round, + Box::new(move |_topic, mut data| { + match GossipMessage::::decode_all(&mut data) { + Ok(GossipMessage::Vote(msg)) => { + let round = msg.commitment.block_number; + let set_id = msg.commitment.validator_set_id; + let expired = filter.consider_vote(round, set_id) != Consider::Accept; + trace!(target: LOG_TARGET, "🥩 Vote for round #{} expired: {}", round, expired); expired - ); - expired - }, - Err(_) => true, + }, + Ok(GossipMessage::FinalityProof(proof)) => { + let (round, set_id) = proof_block_num_and_set_id::(&proof); + let expired = filter.consider_finality_proof(round, set_id) != Consider::Accept; + trace!( + target: LOG_TARGET, + "🥩 Finality proof for round #{} expired: {}", + round, + expired + ); + expired + }, + Err(_) => true, + } }) } @@ -455,10 +461,10 @@ where let filter = self.gossip_filter.read(); Box::new(move |_who, intent, _topic, mut data| { if let MessageIntent::PeriodicRebroadcast = intent { - return do_rebroadcast + return do_rebroadcast; } - match GossipMessage::::decode_all(&mut data) { + match GossipMessage::::decode_all(&mut data) { Ok(GossipMessage::Vote(msg)) => { let round = msg.commitment.block_number; let set_id = msg.commitment.validator_set_id; @@ -467,7 +473,7 @@ where allowed }, Ok(GossipMessage::FinalityProof(proof)) => { - let (round, set_id) = proof_block_num_and_set_id::(&proof); + let (round, set_id) = proof_block_num_and_set_id::(&proof); let allowed = filter.consider_finality_proof(round, set_id) == Consider::Accept; trace!( target: LOG_TARGET, @@ -490,8 +496,8 @@ pub(crate) mod tests { use sc_network_test::Block; use sp_application_crypto::key_types::BEEFY as BEEFY_KEY_TYPE; use sp_consensus_beefy::{ - ecdsa_crypto::Signature, known_payloads, test_utils::Keyring, Commitment, MmrRootHash, - Payload, SignedCommitment, VoteMessage, + ecdsa_crypto, known_payloads, test_utils::Keyring, Commitment, MmrRootHash, 
Payload, + SignedCommitment, VoteMessage, }; use sp_keystore::{testing::MemoryKeystore, Keystore}; @@ -607,16 +613,18 @@ pub(crate) mod tests { } pub fn sign_commitment( - who: &Keyring, + who: &Keyring, commitment: &Commitment, - ) -> Signature { + ) -> ecdsa_crypto::Signature { let store = MemoryKeystore::new(); store.ecdsa_generate_new(BEEFY_KEY_TYPE, Some(&who.to_seed())).unwrap(); - let beefy_keystore: BeefyKeystore = Some(store.into()).into(); + let beefy_keystore: BeefyKeystore = Some(store.into()).into(); beefy_keystore.sign(&who.public(), &commitment.encode()).unwrap() } - fn dummy_vote(block_number: u64) -> VoteMessage { + fn dummy_vote( + block_number: u64, + ) -> VoteMessage { let payload = Payload::from_single_entry( known_payloads::MMR_ROOT_ID, MmrRootHash::default().encode(), @@ -629,8 +637,8 @@ pub(crate) mod tests { pub fn dummy_proof( block_number: u64, - validator_set: &ValidatorSet, - ) -> BeefyVersionedFinalityProof { + validator_set: &ValidatorSet, + ) -> BeefyVersionedFinalityProof { let payload = Payload::from_single_entry( known_payloads::MMR_ROOT_ID, MmrRootHash::default().encode(), @@ -639,25 +647,29 @@ pub(crate) mod tests { let signatures = validator_set .validators() .iter() - .map(|validator: &AuthorityId| { + .map(|validator: &ecdsa_crypto::AuthorityId| { Some(sign_commitment( - &Keyring::::from_public(validator).unwrap(), + &Keyring::::from_public(validator).unwrap(), &commitment, )) }) .collect(); - BeefyVersionedFinalityProof::::V1(SignedCommitment { commitment, signatures }) + BeefyVersionedFinalityProof::::V1(SignedCommitment { + commitment, + signatures, + }) } #[test] fn should_validate_messages() { - let keys = vec![Keyring::::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), 0).unwrap(); + let keys = vec![Keyring::::Alice.public()]; + let validator_set = + ValidatorSet::::new(keys.clone(), 0).unwrap(); let (network, mut report_stream) = TestNetwork::new(); - let gv = GossipValidator::::new( + let gv = GossipValidator::::new( Arc::new(Mutex::new(KnownPeers::new())), Arc::new(network), ); @@ -678,7 +690,8 @@ pub(crate) mod tests { // verify votes validation let vote = dummy_vote(3); - let encoded = GossipMessage::::Vote(vote.clone()).encode(); + let encoded = + GossipMessage::::Vote(vote.clone()).encode(); // filter not initialized let res = gv.validate(&mut context, &sender, &encoded); @@ -696,7 +709,7 @@ pub(crate) mod tests { // reject vote, voter not in validator set let mut bad_vote = vote.clone(); bad_vote.id = Keyring::Bob.public(); - let bad_vote = GossipMessage::::Vote(bad_vote).encode(); + let bad_vote = GossipMessage::::Vote(bad_vote).encode(); let res = gv.validate(&mut context, &sender, &bad_vote); assert!(matches!(res, ValidationResult::Discard)); expected_report.cost_benefit = cost::UNKNOWN_VOTER; @@ -726,7 +739,8 @@ pub(crate) mod tests { // reject old proof let proof = dummy_proof(5, &validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::Discard)); expected_report.cost_benefit = cost::OUTDATED_MESSAGE; @@ -734,7 +748,8 @@ pub(crate) mod tests { // accept next proof with good set_id let proof = dummy_proof(7, &validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, 
&encoded_proof); assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); expected_report.cost_benefit = benefit::VALIDATED_PROOF; @@ -742,16 +757,18 @@ pub(crate) mod tests { // accept future proof with good set_id let proof = dummy_proof(20, &validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); expected_report.cost_benefit = benefit::VALIDATED_PROOF; assert_eq!(report_stream.try_next().unwrap().unwrap(), expected_report); // reject proof, future set_id - let bad_validator_set = ValidatorSet::::new(keys, 1).unwrap(); + let bad_validator_set = ValidatorSet::::new(keys, 1).unwrap(); let proof = dummy_proof(20, &bad_validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::Discard)); expected_report.cost_benefit = cost::FUTURE_MESSAGE; @@ -759,9 +776,10 @@ pub(crate) mod tests { // reject proof, bad signatures (Bob instead of Alice) let bad_validator_set = - ValidatorSet::::new(vec![Keyring::Bob.public()], 0).unwrap(); + ValidatorSet::::new(vec![Keyring::Bob.public()], 0).unwrap(); let proof = dummy_proof(21, &bad_validator_set); - let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); assert!(matches!(res, ValidationResult::Discard)); expected_report.cost_benefit = cost::INVALID_PROOF; @@ -772,8 +790,9 @@ pub(crate) mod tests { #[test] fn messages_allowed_and_expired() { let keys = vec![Keyring::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), 0).unwrap(); - let gv = GossipValidator::::new( + let validator_set = + ValidatorSet::::new(keys.clone(), 0).unwrap(); + let gv = GossipValidator::::new( Arc::new(Mutex::new(KnownPeers::new())), Arc::new(TestNetwork::new().0), ); @@ -793,58 +812,70 @@ pub(crate) mod tests { // inactive round 1 -> expired let vote = dummy_vote(1); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(expired(topic, &mut encoded_vote)); let proof = dummy_proof(1, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(expired(topic, &mut encoded_proof)); // active round 2 -> !expired - concluded but still gossiped let vote = dummy_vote(2); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(!expired(topic, &mut encoded_vote)); let proof = dummy_proof(2, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(!expired(topic, &mut encoded_proof)); // using wrong set_id -> !allowed, expired - let bad_validator_set = 
ValidatorSet::::new(keys.clone(), 1).unwrap(); + let bad_validator_set = + ValidatorSet::::new(keys.clone(), 1).unwrap(); let proof = dummy_proof(2, &bad_validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(expired(topic, &mut encoded_proof)); // in progress round 3 -> !expired let vote = dummy_vote(3); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(!expired(topic, &mut encoded_vote)); let proof = dummy_proof(3, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(!expired(topic, &mut encoded_proof)); // unseen round 4 -> !expired let vote = dummy_vote(4); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(!expired(topic, &mut encoded_vote)); let proof = dummy_proof(4, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(!expired(topic, &mut encoded_proof)); // future round 11 -> expired let vote = dummy_vote(11); - let mut encoded_vote = GossipMessage::::Vote(vote).encode(); + let mut encoded_vote = + GossipMessage::::Vote(vote).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_vote)); assert!(expired(topic, &mut encoded_vote)); // future proofs allowed while same set_id -> allowed let proof = dummy_proof(11, &validator_set); - let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); + let mut encoded_proof = + GossipMessage::::FinalityProof(proof).encode(); assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(!expired(topic, &mut encoded_proof)); } @@ -852,8 +883,9 @@ pub(crate) mod tests { #[test] fn messages_rebroadcast() { let keys = vec![Keyring::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), 0).unwrap(); - let gv = GossipValidator::::new( + let validator_set = + ValidatorSet::::new(keys.clone(), 0).unwrap(); + let gv = GossipValidator::::new( Arc::new(Mutex::new(KnownPeers::new())), Arc::new(TestNetwork::new().0), ); diff --git a/substrate/client/consensus/beefy/src/communication/notification.rs b/substrate/client/consensus/beefy/src/communication/notification.rs index a4486e523c301f6b64af3936a5bdba3456303ad2..8bb5d848b4faab9ee5a25032b32db0015324d4bc 100644 --- a/substrate/client/consensus/beefy/src/communication/notification.rs +++ b/substrate/client/consensus/beefy/src/communication/notification.rs @@ -32,13 +32,15 @@ pub type BeefyBestBlockStream = /// The sending half of the notifications channel(s) used to send notifications /// about versioned finality proof generated at the end of a BEEFY round. 
-pub type BeefyVersionedFinalityProofSender<B> =
-	NotificationSender<BeefyVersionedFinalityProof<B>>;
+pub type BeefyVersionedFinalityProofSender<B, AuthorityId> =
+	NotificationSender<BeefyVersionedFinalityProof<B, AuthorityId>>;
 
 /// The receiving half of a notifications channel used to receive notifications
 /// about versioned finality proof generated at the end of a BEEFY round.
-pub type BeefyVersionedFinalityProofStream<B> =
-	NotificationStream<BeefyVersionedFinalityProof<B>, BeefyVersionedFinalityProofTracingKey>;
+pub type BeefyVersionedFinalityProofStream<B, AuthorityId> = NotificationStream<
+	BeefyVersionedFinalityProof<B, AuthorityId>,
+	BeefyVersionedFinalityProofTracingKey,
+>;
 
 /// Provides tracing key for BEEFY best block stream.
 #[derive(Clone)]
diff --git a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs
index 7893066a01e06c967893bdedbd25ad3c2da9c3bd..350e7a271bc3921a91380baae0cf2e14e1d6debd 100644
--- a/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs
+++ b/substrate/client/consensus/beefy/src/communication/request_response/incoming_requests_handler.rs
@@ -87,9 +87,9 @@ impl<B: Block> IncomingRequest<B> {
 					sent_feedback: None,
 				};
 				if let Err(_) = pending_response.send(response) {
-					return Err(Error::DecodingErrorNoReputationChange(peer, err))
+					return Err(Error::DecodingErrorNoReputationChange(peer, err));
 				}
-				return Err(Error::DecodingError(peer, err))
+				return Err(Error::DecodingError(peer, err));
 			},
 		};
 		Ok(Self::new(peer, payload, pending_response))
diff --git a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs
index 2ab072960900817155b42434f69f29236fcda54b..4d40656375ec8231f5fa0b92839c73d2f187a7b4 100644
--- a/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs
+++ b/substrate/client/consensus/beefy/src/communication/request_response/outgoing_requests_engine.rs
@@ -27,7 +27,7 @@ use sc_network::{
 	NetworkRequest, ProtocolName,
 };
 use sc_network_types::PeerId;
-use sp_consensus_beefy::{ecdsa_crypto::AuthorityId, ValidatorSet};
+use sp_consensus_beefy::{AuthorityIdBound, ValidatorSet};
 use sp_runtime::traits::{Block, NumberFor};
 use std::{collections::VecDeque, result::Result, sync::Arc};
 
@@ -49,38 +49,38 @@ type Response = Result<(Vec<u8>, ProtocolName), RequestFailure>;
 type ResponseReceiver = oneshot::Receiver<Response>;
 
 #[derive(Clone, Debug)]
-struct RequestInfo<B: Block> {
+struct RequestInfo<B: Block, AuthorityId: AuthorityIdBound> {
 	block: NumberFor<B>,
 	active_set: ValidatorSet<AuthorityId>,
 }
 
-enum State<B: Block> {
+enum State<B: Block, AuthorityId: AuthorityIdBound> {
 	Idle,
-	AwaitingResponse(PeerId, RequestInfo<B>, ResponseReceiver),
+	AwaitingResponse(PeerId, RequestInfo<B, AuthorityId>, ResponseReceiver),
 }
 
 /// Possible engine responses.
-pub(crate) enum ResponseInfo<B: Block> {
+pub(crate) enum ResponseInfo<B: Block, AuthorityId: AuthorityIdBound> {
 	/// No peer response available yet.
 	Pending,
 	/// Valid justification provided alongside peer reputation changes.
-	ValidProof(BeefyVersionedFinalityProof<B>, PeerReport),
+	ValidProof(BeefyVersionedFinalityProof<B, AuthorityId>, PeerReport),
 	/// No justification yet, only peer reputation changes.
 	PeerReport(PeerReport),
 }
 
-pub struct OnDemandJustificationsEngine<B: Block> {
+pub struct OnDemandJustificationsEngine<B: Block, AuthorityId: AuthorityIdBound> {
 	network: Arc<dyn NetworkRequest + Send + Sync>,
 	protocol_name: ProtocolName,
 	live_peers: Arc<Mutex<KnownPeers<B>>>,
 	peers_cache: VecDeque<PeerId>,
-	state: State<B>,
+	state: State<B, AuthorityId>,
 	metrics: Option<OnDemandOutgoingRequestsMetrics>,
 }
 
-impl<B: Block> OnDemandJustificationsEngine<B> {
+impl<B: Block, AuthorityId: AuthorityIdBound> OnDemandJustificationsEngine<B, AuthorityId> {
 	pub fn new(
@@ -106,13 +106,13 @@ impl<B: Block> OnDemandJustificationsEngine<B> {
 		let live = self.live_peers.lock();
 		while let Some(peer) = self.peers_cache.pop_front() {
 			if live.contains(&peer) {
-				return Some(peer)
+				return Some(peer);
 			}
 		}
 		None
 	}
 
-	fn request_from_peer(&mut self, peer: PeerId, req_info: RequestInfo<B>) {
+	fn request_from_peer(&mut self, peer: PeerId, req_info: RequestInfo<B, AuthorityId>) {
 		debug!(
 			target: BEEFY_SYNC_LOG_TARGET,
 			"🥩 requesting justif #{:?} from peer {:?}", req_info.block, peer,
@@ -140,7 +140,7 @@ impl<B: Block> OnDemandJustificationsEngine<B> {
 	pub fn request(&mut self, block: NumberFor<B>, active_set: ValidatorSet<AuthorityId>) {
 		// ignore new requests while there's already one pending
 		if matches!(self.state, State::AwaitingResponse(_, _, _)) {
-			return
+			return;
 		}
 		self.reset_peers_cache_for_block(block);
@@ -174,9 +174,9 @@ impl<B: Block> OnDemandJustificationsEngine<B> {
 	fn process_response(
 		&mut self,
 		peer: &PeerId,
-		req_info: &RequestInfo<B>,
+		req_info: &RequestInfo<B, AuthorityId>,
 		response: Result<Response, Canceled>,
-	) -> Result<BeefyVersionedFinalityProof<B>, Error> {
+	) -> Result<BeefyVersionedFinalityProof<B, AuthorityId>, Error> {
 		response
 			.map_err(|e| {
 				debug!(
@@ -207,7 +207,7 @@ impl<B: Block> OnDemandJustificationsEngine<B> {
 				}
 			})
 			.and_then(|(encoded, _)| {
-				decode_and_verify_finality_proof::<B>(
+				decode_and_verify_finality_proof::<B, AuthorityId>(
 					&encoded[..],
 					req_info.block,
 					&req_info.active_set,
@@ -227,11 +227,11 @@ impl<B: Block> OnDemandJustificationsEngine<B> {
 		})
 	}
 
-	pub(crate) async fn next(&mut self) -> ResponseInfo<B> {
+	pub(crate) async fn next(&mut self) -> ResponseInfo<B, AuthorityId> {
 		let (peer, req_info, resp) = match &mut self.state {
 			State::Idle => {
 				futures::future::pending::<()>().await;
-				return ResponseInfo::Pending
+				return ResponseInfo::Pending;
 			},
 			State::AwaitingResponse(peer, req_info, receiver) => {
 				let resp = receiver.await;
diff --git a/substrate/client/consensus/beefy/src/error.rs b/substrate/client/consensus/beefy/src/error.rs
index b4773f940193e07d4f6f6962b69096c91805bbeb..9cd09cb99332a28f3f1ec731be3c5af6504c91e0 100644
--- a/substrate/client/consensus/beefy/src/error.rs
+++ b/substrate/client/consensus/beefy/src/error.rs
@@ -20,6 +20,7 @@
 //!
 //! Used for BEEFY gadget internal error handling only
 
+use sp_blockchain::Error as ClientError;
 use std::fmt::Debug;
 
 #[derive(Debug, thiserror::Error)]
@@ -48,6 +49,12 @@ pub enum Error {
 	VotesGossipStreamTerminated,
 }
 
+impl From<ClientError> for Error {
+	fn from(e: ClientError) -> Self {
+		Self::Backend(e.to_string())
+	}
+}
+
 #[cfg(test)]
 impl PartialEq for Error {
 	fn eq(&self, other: &Self) -> bool {
diff --git a/substrate/client/consensus/beefy/src/fisherman.rs b/substrate/client/consensus/beefy/src/fisherman.rs
index a2b4c8f945d1c224c45e522bc7e7b0461a3c00ce..073fee0bdbdbecb3ee6d869a3e727d3ec62b9c9b 100644
--- a/substrate/client/consensus/beefy/src/fisherman.rs
+++ b/substrate/client/consensus/beefy/src/fisherman.rs
@@ -20,11 +20,11 @@ use crate::{error::Error, keystore::BeefyKeystore, round::Rounds, LOG_TARGET};
 use log::{debug, error, warn};
 use sc_client_api::Backend;
 use sp_api::ProvideRuntimeApi;
+use sp_application_crypto::RuntimeAppPublic;
 use sp_blockchain::HeaderBackend;
 use sp_consensus_beefy::{
-	check_equivocation_proof,
-	ecdsa_crypto::{AuthorityId, Signature},
-	BeefyApi, BeefySignatureHasher, DoubleVotingProof, OpaqueKeyOwnershipProof, ValidatorSetId,
+	check_equivocation_proof, AuthorityIdBound, BeefyApi, BeefySignatureHasher, DoubleVotingProof,
+	OpaqueKeyOwnershipProof, ValidatorSetId,
 };
 use sp_runtime::{
 	generic::BlockId,
@@ -33,13 +33,13 @@ use sp_runtime::{
 use std::{marker::PhantomData, sync::Arc};
 
 /// Helper struct containing the id and the key ownership proof for a validator.
-pub struct ProvedValidator<'a> {
+pub struct ProvedValidator<'a, AuthorityId: AuthorityIdBound> {
 	pub id: &'a AuthorityId,
 	pub key_owner_proof: OpaqueKeyOwnershipProof,
 }
 
 /// Helper used to check and report equivocations.
-pub struct Fisherman<B, BE, R> {
+pub struct Fisherman<B, BE, R, AuthorityId: AuthorityIdBound> {
 	backend: Arc<BE>,
 	runtime: Arc<R>,
 	key_store: Arc<BeefyKeystore<AuthorityId>>,
@@ -47,9 +47,11 @@ pub struct Fisherman<B, BE, R> {
 	_phantom: PhantomData<B>,
 }
 
-impl<B: BlockT, BE: Backend<B>, RuntimeApi: ProvideRuntimeApi<B>> Fisherman<B, BE, RuntimeApi>
+impl<B: BlockT, BE: Backend<B>, RuntimeApi: ProvideRuntimeApi<B>, AuthorityId>
+	Fisherman<B, BE, RuntimeApi, AuthorityId>
 where
 	RuntimeApi::Api: BeefyApi<B, AuthorityId>,
+	AuthorityId: AuthorityIdBound,
 {
 	pub fn new(
@@ -64,7 +66,7 @@ where
 		at: BlockId<B>,
 		offender_ids: impl Iterator<Item = &'a AuthorityId>,
 		validator_set_id: ValidatorSetId,
-	) -> Result<Vec<ProvedValidator<'a>>, Error> {
+	) -> Result<Vec<ProvedValidator<'a, AuthorityId>>, Error> {
 		let hash = match at {
 			BlockId::Hash(hash) => hash,
 			BlockId::Number(number) => self
@@ -119,8 +121,12 @@ where
 	/// isn't necessarily the best block if there are pending authority set changes.
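With the `From<ClientError>` conversion added to error.rs above, `?` now bridges client/backend results into the gadget's `Error`, which is what lets the reworked aux-schema return `ClientResult` without a `map_err` at every call site. A sketch of the call-site effect under stated assumptions (the `Error` enum is reduced here to the one relevant variant, and the `load` helper is hypothetical):

```rust
use sc_client_api::AuxStore;
use sp_blockchain::Error as ClientError;

#[derive(Debug, thiserror::Error)]
pub enum Error {
	#[error("Backend: {0}")]
	Backend(String),
}

impl From<ClientError> for Error {
	fn from(e: ClientError) -> Self {
		Self::Backend(e.to_string())
	}
}

// Hypothetical caller: a `ClientResult` can now be propagated with `?`
// instead of `.map_err(|e| Error::Backend(e.to_string()))?` at every site.
fn load(backend: &impl AuxStore) -> Result<Option<Vec<u8>>, Error> {
	Ok(backend.get_aux(b"some_key")?)
}
```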
pub fn report_double_voting( &self, - proof: DoubleVotingProof, AuthorityId, Signature>, - active_rounds: &Rounds, + proof: DoubleVotingProof< + NumberFor, + AuthorityId, + ::Signature, + >, + active_rounds: &Rounds, ) -> Result<(), Error> { let (validators, validator_set_id) = (active_rounds.validators(), active_rounds.validator_set_id()); @@ -128,13 +134,13 @@ where if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { debug!(target: LOG_TARGET, "🥩 Skipping report for bad equivocation {:?}", proof); - return Ok(()) + return Ok(()); } if let Some(local_id) = self.key_store.authority_id(validators) { if offender_id == &local_id { warn!(target: LOG_TARGET, "🥩 Skipping report for own equivocation"); - return Ok(()) + return Ok(()); } } diff --git a/substrate/client/consensus/beefy/src/import.rs b/substrate/client/consensus/beefy/src/import.rs index ed8ed68c4e8d0d378728ba87d1ffe726f6c4c11a..c01fb3db4845eb9e413e0dbe781ae395a3b31217 100644 --- a/substrate/client/consensus/beefy/src/import.rs +++ b/substrate/client/consensus/beefy/src/import.rs @@ -22,7 +22,7 @@ use log::debug; use sp_api::ProvideRuntimeApi; use sp_consensus::Error as ConsensusError; -use sp_consensus_beefy::{ecdsa_crypto::AuthorityId, BeefyApi, BEEFY_ENGINE_ID}; +use sp_consensus_beefy::{AuthorityIdBound, BeefyApi, BEEFY_ENGINE_ID}; use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor}, EncodedJustification, @@ -45,15 +45,17 @@ use crate::{ /// Wraps a `inner: BlockImport` and ultimately defers to it. /// /// When using BEEFY, the block import worker should be using this block import object. -pub struct BeefyBlockImport { +pub struct BeefyBlockImport { backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefyVersionedFinalityProofSender, + justification_sender: BeefyVersionedFinalityProofSender, metrics: Option, } -impl Clone for BeefyBlockImport { +impl Clone + for BeefyBlockImport +{ fn clone(&self) -> Self { BeefyBlockImport { backend: self.backend.clone(), @@ -65,32 +67,35 @@ impl Clone for BeefyBlockImport BeefyBlockImport { +impl + BeefyBlockImport +{ /// Create a new BeefyBlockImport. pub fn new( backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefyVersionedFinalityProofSender, + justification_sender: BeefyVersionedFinalityProofSender, metrics: Option, - ) -> BeefyBlockImport { + ) -> BeefyBlockImport { BeefyBlockImport { backend, runtime, inner, justification_sender, metrics } } } -impl BeefyBlockImport +impl BeefyBlockImport where Block: BlockT, BE: Backend, Runtime: ProvideRuntimeApi, Runtime::Api: BeefyApi + Send, + AuthorityId: AuthorityIdBound, { fn decode_and_verify( &self, encoded: &EncodedJustification, number: NumberFor, hash: ::Hash, - ) -> Result, ConsensusError> { + ) -> Result, ConsensusError> { use ConsensusError::ClientImport as ImportError; let beefy_genesis = self .runtime @@ -99,7 +104,7 @@ where .map_err(|e| ImportError(e.to_string()))? .ok_or_else(|| ImportError("Unknown BEEFY genesis".to_string()))?; if number < beefy_genesis { - return Err(ImportError("BEEFY genesis is set for future block".to_string())) + return Err(ImportError("BEEFY genesis is set for future block".to_string())); } let validator_set = self .runtime @@ -108,19 +113,21 @@ where .map_err(|e| ImportError(e.to_string()))? 
.ok_or_else(|| ImportError("Unknown validator set".to_string()))?; - decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) + decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) .map_err(|(err, _)| err) } } #[async_trait::async_trait] -impl BlockImport for BeefyBlockImport +impl BlockImport + for BeefyBlockImport where Block: BlockT, BE: Backend, I: BlockImport + Send + Sync, Runtime: ProvideRuntimeApi + Send + Sync, Runtime::Api: BeefyApi, + AuthorityId: AuthorityIdBound, { type Error = ConsensusError; @@ -148,7 +155,7 @@ where // The block is imported as part of some chain sync. // The voter doesn't need to process it now. // It will be detected and processed as part of the voter state init. - return Ok(inner_import_result) + return Ok(inner_import_result); }, } diff --git a/substrate/client/consensus/beefy/src/justification.rs b/substrate/client/consensus/beefy/src/justification.rs index 886368c9d7cb096c0178819e1bad23f06c82f3fb..9ff7c3cf54f687c8f937ff0cecd9aea821c2ddaa 100644 --- a/substrate/client/consensus/beefy/src/justification.rs +++ b/substrate/client/consensus/beefy/src/justification.rs @@ -17,18 +17,20 @@ // along with this program. If not, see . use codec::DecodeAll; +use sp_application_crypto::RuntimeAppPublic; use sp_consensus::Error as ConsensusError; use sp_consensus_beefy::{ - ecdsa_crypto::{AuthorityId, Signature}, - BeefySignatureHasher, KnownSignature, ValidatorSet, ValidatorSetId, VersionedFinalityProof, + AuthorityIdBound, BeefySignatureHasher, KnownSignature, ValidatorSet, ValidatorSetId, + VersionedFinalityProof, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; /// A finality proof with matching BEEFY authorities' signatures. -pub type BeefyVersionedFinalityProof = VersionedFinalityProof, Signature>; +pub type BeefyVersionedFinalityProof = + VersionedFinalityProof, ::Signature>; -pub(crate) fn proof_block_num_and_set_id( - proof: &BeefyVersionedFinalityProof, +pub(crate) fn proof_block_num_and_set_id( + proof: &BeefyVersionedFinalityProof, ) -> (NumberFor, ValidatorSetId) { match proof { VersionedFinalityProof::V1(sc) => @@ -37,23 +39,26 @@ pub(crate) fn proof_block_num_and_set_id( } /// Decode and verify a Beefy FinalityProof. -pub(crate) fn decode_and_verify_finality_proof( +pub(crate) fn decode_and_verify_finality_proof( encoded: &[u8], target_number: NumberFor, validator_set: &ValidatorSet, -) -> Result, (ConsensusError, u32)> { - let proof = >::decode_all(&mut &*encoded) +) -> Result, (ConsensusError, u32)> { + let proof = >::decode_all(&mut &*encoded) .map_err(|_| (ConsensusError::InvalidJustification, 0))?; - verify_with_validator_set::(target_number, validator_set, &proof)?; + verify_with_validator_set::(target_number, validator_set, &proof)?; Ok(proof) } /// Verify the Beefy finality proof against the validator set at the block it was generated. 
-pub(crate) fn verify_with_validator_set<'a, Block: BlockT>( +pub(crate) fn verify_with_validator_set<'a, Block: BlockT, AuthorityId: AuthorityIdBound>( target_number: NumberFor, validator_set: &'a ValidatorSet, - proof: &'a BeefyVersionedFinalityProof, -) -> Result>, (ConsensusError, u32)> { + proof: &'a BeefyVersionedFinalityProof, +) -> Result< + Vec::Signature>>, + (ConsensusError, u32), +> { match proof { VersionedFinalityProof::V1(signed_commitment) => { let signatories = signed_commitment @@ -78,7 +83,7 @@ pub(crate) fn verify_with_validator_set<'a, Block: BlockT>( pub(crate) mod tests { use codec::Encode; use sp_consensus_beefy::{ - known_payloads, test_utils::Keyring, Commitment, Payload, SignedCommitment, + ecdsa_crypto, known_payloads, test_utils::Keyring, Commitment, Payload, SignedCommitment, VersionedFinalityProof, }; use substrate_test_runtime_client::runtime::Block; @@ -88,9 +93,9 @@ pub(crate) mod tests { pub(crate) fn new_finality_proof( block_num: NumberFor, - validator_set: &ValidatorSet, - keys: &[Keyring], - ) -> BeefyVersionedFinalityProof { + validator_set: &ValidatorSet, + keys: &[Keyring], + ) -> BeefyVersionedFinalityProof { let commitment = Commitment { payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: block_num, @@ -112,11 +117,20 @@ pub(crate) mod tests { let good_proof = proof.clone().into(); // should verify successfully - verify_with_validator_set::(block_num, &validator_set, &good_proof).unwrap(); + verify_with_validator_set::( + block_num, + &validator_set, + &good_proof, + ) + .unwrap(); // wrong block number -> should fail verification let good_proof = proof.clone().into(); - match verify_with_validator_set::(block_num + 1, &validator_set, &good_proof) { + match verify_with_validator_set::( + block_num + 1, + &validator_set, + &good_proof, + ) { Err((ConsensusError::InvalidJustification, 0)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -124,7 +138,11 @@ pub(crate) mod tests { // wrong validator set id -> should fail verification let good_proof = proof.clone().into(); let other = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - match verify_with_validator_set::(block_num, &other, &good_proof) { + match verify_with_validator_set::( + block_num, + &other, + &good_proof, + ) { Err((ConsensusError::InvalidJustification, 0)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -136,7 +154,11 @@ pub(crate) mod tests { VersionedFinalityProof::V1(ref mut sc) => sc, }; bad_signed_commitment.signatures.pop().flatten().unwrap(); - match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { + match verify_with_validator_set::( + block_num + 1, + &validator_set, + &bad_proof.into(), + ) { Err((ConsensusError::InvalidJustification, 0)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -148,7 +170,11 @@ pub(crate) mod tests { }; // remove a signature (but same length) *bad_signed_commitment.signatures.first_mut().unwrap() = None; - match verify_with_validator_set::(block_num, &validator_set, &bad_proof.into()) { + match verify_with_validator_set::( + block_num, + &validator_set, + &bad_proof.into(), + ) { Err((ConsensusError::InvalidJustification, 2)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -159,9 +185,15 @@ pub(crate) mod tests { VersionedFinalityProof::V1(ref mut sc) => sc, }; // change a signature to a different key - *bad_signed_commitment.signatures.first_mut().unwrap() = - Some(Keyring::::Dave.sign(&bad_signed_commitment.commitment.encode())); 
- match verify_with_validator_set::(block_num, &validator_set, &bad_proof.into()) { + *bad_signed_commitment.signatures.first_mut().unwrap() = Some( + Keyring::::Dave + .sign(&bad_signed_commitment.commitment.encode()), + ); + match verify_with_validator_set::( + block_num, + &validator_set, + &bad_proof.into(), + ) { Err((ConsensusError::InvalidJustification, 3)) => (), e => assert!(false, "Got unexpected {:?}", e), }; @@ -175,12 +207,17 @@ pub(crate) mod tests { // build valid justification let proof = new_finality_proof(block_num, &validator_set, keys); - let versioned_proof: BeefyVersionedFinalityProof = proof.into(); + let versioned_proof: BeefyVersionedFinalityProof = + proof.into(); let encoded = versioned_proof.encode(); // should successfully decode and verify - let verified = - decode_and_verify_finality_proof::(&encoded, block_num, &validator_set).unwrap(); + let verified = decode_and_verify_finality_proof::( + &encoded, + block_num, + &validator_set, + ) + .unwrap(); assert_eq!(verified, versioned_proof); } } diff --git a/substrate/client/consensus/beefy/src/keystore.rs b/substrate/client/consensus/beefy/src/keystore.rs index 9582c2661c30b431a5934e2bcc592b4a56faf3b3..8daf3440c7d2c745cc0ef85bc3d7436a6d91e1f4 100644 --- a/substrate/client/consensus/beefy/src/keystore.rs +++ b/substrate/client/consensus/beefy/src/keystore.rs @@ -15,19 +15,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use codec::Decode; +use log::warn; use sp_application_crypto::{key_types::BEEFY as BEEFY_KEY_TYPE, AppCrypto, RuntimeAppPublic}; -use sp_consensus_beefy::{AuthorityIdBound, BeefyAuthorityId, BeefySignatureHasher}; -use sp_core::ecdsa; #[cfg(feature = "bls-experimental")] use sp_core::ecdsa_bls377; -use sp_crypto_hashing::keccak_256; -use sp_keystore::KeystorePtr; +use sp_core::{ecdsa, keccak_256}; -use codec::Decode; -use log::warn; +use sp_keystore::KeystorePtr; use std::marker::PhantomData; +use sp_consensus_beefy::{AuthorityIdBound, BeefyAuthorityId, BeefySignatureHasher}; + use crate::{error, LOG_TARGET}; /// A BEEFY specific keystore implemented as a `Newtype`. This is basically a @@ -175,10 +175,7 @@ impl BeefyKeystore { } } -impl From> for BeefyKeystore -where - ::Signature: Send + Sync, -{ +impl From> for BeefyKeystore { fn from(store: Option) -> BeefyKeystore { BeefyKeystore(store, PhantomData) } diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 0e49839f0fd2dae76e26738823ea9e5af775acb7..4cb014b00d5bedf36c7ce74b5649f28bd0d55f60 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -43,8 +43,7 @@ use sp_api::ProvideRuntimeApi; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_consensus_beefy::{ - ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, PayloadProvider, ValidatorSet, - BEEFY_ENGINE_ID, + AuthorityIdBound, BeefyApi, ConsensusLog, PayloadProvider, ValidatorSet, BEEFY_ENGINE_ID, }; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block, Header as HeaderT, NumberFor, Zero}; @@ -118,50 +117,55 @@ where /// Links between the block importer, the background voter and the RPC layer, /// to be used by the voter. #[derive(Clone)] -pub struct BeefyVoterLinks { +pub struct BeefyVoterLinks { // BlockImport -> Voter links /// Stream of BEEFY signed commitments from block import to voter. 
- pub from_block_import_justif_stream: BeefyVersionedFinalityProofStream, + pub from_block_import_justif_stream: BeefyVersionedFinalityProofStream, // Voter -> RPC links /// Sends BEEFY signed commitments from voter to RPC. - pub to_rpc_justif_sender: BeefyVersionedFinalityProofSender, + pub to_rpc_justif_sender: BeefyVersionedFinalityProofSender, /// Sends BEEFY best block hashes from voter to RPC. pub to_rpc_best_block_sender: BeefyBestBlockSender, } /// Links used by the BEEFY RPC layer, from the BEEFY background voter. #[derive(Clone)] -pub struct BeefyRPCLinks { +pub struct BeefyRPCLinks { /// Stream of signed commitments coming from the voter. - pub from_voter_justif_stream: BeefyVersionedFinalityProofStream, + pub from_voter_justif_stream: BeefyVersionedFinalityProofStream, /// Stream of BEEFY best block hashes coming from the voter. pub from_voter_best_beefy_stream: BeefyBestBlockStream, } /// Make block importer and link half necessary to tie the background voter to it. -pub fn beefy_block_import_and_links( +pub fn beefy_block_import_and_links( wrapped_block_import: I, backend: Arc, runtime: Arc, prometheus_registry: Option, -) -> (BeefyBlockImport, BeefyVoterLinks, BeefyRPCLinks) +) -> ( + BeefyBlockImport, + BeefyVoterLinks, + BeefyRPCLinks, +) where B: Block, BE: Backend, I: BlockImport + Send + Sync, RuntimeApi: ProvideRuntimeApi + Send + Sync, RuntimeApi::Api: BeefyApi, + AuthorityId: AuthorityIdBound, { // Voter -> RPC links let (to_rpc_justif_sender, from_voter_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = BeefyBestBlockStream::::channel(); // BlockImport -> Voter links let (to_voter_justif_sender, from_block_import_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let metrics = register_metrics(prometheus_registry); // BlockImport @@ -201,7 +205,7 @@ pub struct BeefyNetworkParams { } /// BEEFY gadget initialization parameters. -pub struct BeefyParams { +pub struct BeefyParams { /// BEEFY client pub client: Arc, /// Client Backend @@ -219,7 +223,7 @@ pub struct BeefyParams { /// Prometheus metric registry pub prometheus_registry: Option, /// Links between the block importer, the background voter and the RPC layer. - pub links: BeefyVoterLinks, + pub links: BeefyVoterLinks, /// Handler for incoming BEEFY justifications requests from a remote peer. pub on_demand_justifications_handler: BeefyJustifsRequestHandler, /// Whether running under "Authority" role. @@ -228,10 +232,10 @@ pub struct BeefyParams { /// Helper object holding BEEFY worker communication/gossip components. /// /// These are created once, but will be reused if worker is restarted/reinitialized. -pub(crate) struct BeefyComms { +pub(crate) struct BeefyComms { pub gossip_engine: GossipEngine, - pub gossip_validator: Arc>, - pub on_demand_justifications: OnDemandJustificationsEngine, + pub gossip_validator: Arc>, + pub on_demand_justifications: OnDemandJustificationsEngine, } /// Helper builder object for building [worker::BeefyWorker]. @@ -240,22 +244,23 @@ pub(crate) struct BeefyComms { /// for certain chain and backend conditions, and while sleeping we still need to pump the /// GossipEngine. Once initialization is done, the GossipEngine (and other pieces) are added to get /// the complete [worker::BeefyWorker] object. 
-pub(crate) struct BeefyWorkerBuilder { +pub(crate) struct BeefyWorkerBuilder { // utilities backend: Arc, runtime: Arc, key_store: BeefyKeystore, // voter metrics metrics: Option, - persisted_state: PersistedState, + persisted_state: PersistedState, } -impl BeefyWorkerBuilder +impl BeefyWorkerBuilder where B: Block + codec::Codec, BE: Backend, R: ProvideRuntimeApi, R::Api: BeefyApi, + AuthorityId: AuthorityIdBound, { /// This will wait for the chain to enable BEEFY (if not yet enabled) and also wait for the /// backend to sync all headers required by the voter to build a contiguous chain of mandatory @@ -269,7 +274,7 @@ where key_store: BeefyKeystore, metrics: Option, min_block_delta: u32, - gossip_validator: Arc>, + gossip_validator: Arc>, finality_notifications: &mut Fuse>, is_authority: bool, ) -> Result { @@ -301,11 +306,11 @@ where self, payload_provider: P, sync: Arc, - comms: BeefyComms, - links: BeefyVoterLinks, - pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + comms: BeefyComms, + links: BeefyVoterLinks, + pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, is_authority: bool, - ) -> BeefyWorker { + ) -> BeefyWorker { let key_store = Arc::new(self.key_store); BeefyWorker { backend: self.backend.clone(), @@ -334,7 +339,7 @@ where min_block_delta: u32, backend: Arc, runtime: Arc, - ) -> Result, Error> { + ) -> Result, Error> { let blockchain = backend.blockchain(); let beefy_genesis = runtime @@ -378,7 +383,7 @@ where beefy_genesis, ) .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?; - break state + break state; } if *header.number() == beefy_genesis { @@ -401,10 +406,10 @@ where min_block_delta, beefy_genesis, ) - .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))? + .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?; } - if let Some(active) = find_authorities_change::(&header) { + if let Some(active) = find_authorities_change::(&header) { debug!( target: LOG_TARGET, "🥩 Marking block {:?} as BEEFY Mandatory.", @@ -431,7 +436,7 @@ where key_store: &BeefyKeystore, metrics: &Option, is_authority: bool, - ) -> Result, Error> { + ) -> Result, Error> { // Initialize voter state from AUX DB if compatible. if let Some(mut state) = crate::aux_schema::load_persistent(backend.as_ref())? // Verify state pallet genesis matches runtime. @@ -448,7 +453,7 @@ where let mut header = best_grandpa.clone(); while *header.number() > state.best_beefy() { if state.voting_oracle().can_add_session(*header.number()) { - if let Some(active) = find_authorities_change::(&header) { + if let Some(active) = find_authorities_change::(&header) { new_sessions.push((active, *header.number())); } } @@ -471,7 +476,7 @@ where is_authority, ); } - return Ok(state) + return Ok(state); } // No valid voter-state persisted, re-initialize from pallet genesis. @@ -482,8 +487,8 @@ where /// Start the BEEFY gadget. /// /// This is a thin shim around running and awaiting a BEEFY worker. -pub async fn start_beefy_gadget( - beefy_params: BeefyParams, +pub async fn start_beefy_gadget( + beefy_params: BeefyParams, ) where B: Block, BE: Backend, @@ -493,6 +498,7 @@ pub async fn start_beefy_gadget( R::Api: BeefyApi, N: GossipNetwork + NetworkRequest + Send + Sync + 'static, S: GossipSyncing + SyncOracle + 'static, + AuthorityId: AuthorityIdBound, { let BeefyParams { client, @@ -598,15 +604,17 @@ pub async fn start_beefy_gadget( futures::future::Either::Left(((error::Error::ConsensusReset, reuse_comms), _)) => { error!(target: LOG_TARGET, "🥩 Error: {:?}. 
Restarting voter.", error::Error::ConsensusReset); beefy_comms = reuse_comms; - continue + continue; }, // On other errors, bring down / finish the task. - futures::future::Either::Left(((worker_err, _), _)) => - error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", worker_err), - futures::future::Either::Right((odj_handler_err, _)) => - error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", odj_handler_err), + futures::future::Either::Left(((worker_err, _), _)) => { + error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", worker_err) + }, + futures::future::Either::Right((odj_handler_err, _)) => { + error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", odj_handler_err) + }, }; - return + return; } } @@ -651,7 +659,7 @@ where /// Wait for BEEFY runtime pallet to be available, return active validator set. /// Should be called only once during worker initialization. -async fn wait_for_runtime_pallet( +async fn wait_for_runtime_pallet( runtime: &R, finality: &mut Fuse>, ) -> Result<(NumberFor, ::Header), Error> @@ -676,7 +684,7 @@ where "🥩 BEEFY pallet available: block {:?} beefy genesis {:?}", notif.header.number(), start ); - return Ok((start, notif.header)) + return Ok((start, notif.header)); } } } @@ -687,7 +695,7 @@ where /// /// Note: function will `async::sleep()` when walking back the chain if some needed header hasn't /// been synced yet (as it happens when warp syncing when headers are synced in the background). -async fn expect_validator_set( +async fn expect_validator_set( runtime: &R, backend: &BE, at_header: &B::Header, @@ -711,9 +719,9 @@ where loop { debug!(target: LOG_TARGET, "🥩 Looking for auth set change at block number: {:?}", *header.number()); if let Ok(Some(active)) = runtime.runtime_api().validator_set(header.hash()) { - return Ok(active) + return Ok(active); } else { - match find_authorities_change::(&header) { + match find_authorities_change::(&header) { Some(active) => return Ok(active), // Move up the chain. Ultimately we'll get it from chain genesis state, or error out // there. @@ -728,9 +736,12 @@ where /// Scan the `header` digest log for a BEEFY validator set change. Return either the new /// validator set or `None` in case no validator set change has been signaled. -pub(crate) fn find_authorities_change(header: &B::Header) -> Option> +pub(crate) fn find_authorities_change( + header: &B::Header, +) -> Option> where B: Block, + AuthorityId: AuthorityIdBound, { let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); diff --git a/substrate/client/consensus/beefy/src/round.rs b/substrate/client/consensus/beefy/src/round.rs index 5dae80cb1830ddc2d6402625865af6e965046897..31cfe4c10c2e7d3e7112c353aff1c0572a7bd52b 100644 --- a/substrate/client/consensus/beefy/src/round.rs +++ b/substrate/client/consensus/beefy/src/round.rs @@ -20,9 +20,10 @@ use crate::LOG_TARGET; use codec::{Decode, Encode}; use log::{debug, info}; +use sp_application_crypto::RuntimeAppPublic; use sp_consensus_beefy::{ - ecdsa_crypto::{AuthorityId, Signature}, - Commitment, DoubleVotingProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage, + AuthorityIdBound, Commitment, DoubleVotingProof, SignedCommitment, ValidatorSet, + ValidatorSetId, VoteMessage, }; use sp_runtime::traits::{Block, NumberFor}; use std::collections::BTreeMap; @@ -31,15 +32,24 @@ use std::collections::BTreeMap; /// whether the local `self` validator has voted/signed. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). 
-#[derive(Debug, Decode, Default, Encode, PartialEq)] -pub(crate) struct RoundTracker { - votes: BTreeMap, +#[derive(Debug, Decode, Encode, PartialEq)] +pub(crate) struct RoundTracker { + votes: BTreeMap::Signature>, +} + +impl Default for RoundTracker { + fn default() -> Self { + Self { votes: Default::default() } + } } -impl RoundTracker { - fn add_vote(&mut self, vote: (AuthorityId, Signature)) -> bool { +impl RoundTracker { + fn add_vote( + &mut self, + vote: (AuthorityId, ::Signature), + ) -> bool { if self.votes.contains_key(&vote.0) { - return false + return false; } self.votes.insert(vote.0, vote.1); @@ -58,10 +68,12 @@ pub fn threshold(authorities: usize) -> usize { } #[derive(Debug, PartialEq)] -pub enum VoteImportResult { +pub enum VoteImportResult { Ok, - RoundConcluded(SignedCommitment, Signature>), - DoubleVoting(DoubleVotingProof, AuthorityId, Signature>), + RoundConcluded(SignedCommitment, ::Signature>), + DoubleVoting( + DoubleVotingProof, AuthorityId, ::Signature>, + ), Invalid, Stale, } @@ -71,19 +83,22 @@ pub enum VoteImportResult { /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). #[derive(Debug, Decode, Encode, PartialEq)] -pub(crate) struct Rounds { - rounds: BTreeMap>, RoundTracker>, - previous_votes: - BTreeMap<(AuthorityId, NumberFor), VoteMessage, AuthorityId, Signature>>, +pub(crate) struct Rounds { + rounds: BTreeMap>, RoundTracker>, + previous_votes: BTreeMap< + (AuthorityId, NumberFor), + VoteMessage, AuthorityId, ::Signature>, + >, session_start: NumberFor, validator_set: ValidatorSet, mandatory_done: bool, best_done: Option>, } -impl Rounds +impl Rounds where B: Block, + AuthorityId: AuthorityIdBound, { pub(crate) fn new( session_start: NumberFor, @@ -121,14 +136,14 @@ where pub(crate) fn add_vote( &mut self, - vote: VoteMessage, AuthorityId, Signature>, - ) -> VoteImportResult { + vote: VoteMessage, AuthorityId, ::Signature>, + ) -> VoteImportResult { let num = vote.commitment.block_number; let vote_key = (vote.id.clone(), num); if num < self.session_start || Some(num) <= self.best_done { debug!(target: LOG_TARGET, "🥩 received vote for old stale round {:?}, ignoring", num); - return VoteImportResult::Stale + return VoteImportResult::Stale; } else if vote.commitment.validator_set_id != self.validator_set_id() { debug!( target: LOG_TARGET, @@ -136,14 +151,14 @@ where self.validator_set_id(), vote, ); - return VoteImportResult::Invalid + return VoteImportResult::Invalid; } else if !self.validators().iter().any(|id| &vote.id == id) { debug!( target: LOG_TARGET, "🥩 received vote {:?} from validator that is not in the validator set, ignoring", vote ); - return VoteImportResult::Invalid + return VoteImportResult::Invalid; } if let Some(previous_vote) = self.previous_votes.get(&vote_key) { @@ -156,7 +171,7 @@ where return VoteImportResult::DoubleVoting(DoubleVotingProof { first: previous_vote.clone(), second: vote, - }) + }); } } else { // this is the first vote sent by `id` for `num`, all good @@ -169,7 +184,7 @@ where round.is_done(threshold(self.validator_set.len())) { if let Some(round) = self.rounds.remove_entry(&vote.commitment) { - return VoteImportResult::RoundConcluded(self.signed_commitment(round)) + return VoteImportResult::RoundConcluded(self.signed_commitment(round)); } } VoteImportResult::Ok @@ -177,8 +192,8 @@ where fn signed_commitment( &mut self, - round: (Commitment>, RoundTracker), - ) -> SignedCommitment, Signature> { + round: (Commitment>, RoundTracker), + ) -> SignedCommitment, ::Signature> 
{ let votes = round.1.votes; let signatures = self .validators() @@ -207,14 +222,14 @@ mod tests { use sc_network_test::Block; use sp_consensus_beefy::{ - known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, DoubleVotingProof, Payload, - SignedCommitment, ValidatorSet, VoteMessage, + ecdsa_crypto, known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, + DoubleVotingProof, Payload, SignedCommitment, ValidatorSet, VoteMessage, }; - use super::{threshold, AuthorityId, Block as BlockT, RoundTracker, Rounds}; + use super::{threshold, Block as BlockT, RoundTracker, Rounds}; use crate::round::VoteImportResult; - impl Rounds + impl Rounds where B: BlockT, { @@ -225,8 +240,11 @@ mod tests { #[test] fn round_tracker() { - let mut rt = RoundTracker::default(); - let bob_vote = (Keyring::Bob.public(), Keyring::::Bob.sign(b"I am committed")); + let mut rt = RoundTracker::::default(); + let bob_vote = ( + Keyring::::Bob.public(), + Keyring::::Bob.sign(b"I am committed"), + ); let threshold = 2; // adding new vote allowed @@ -237,8 +255,10 @@ mod tests { // vote is not done assert!(!rt.is_done(threshold)); - let alice_vote = - (Keyring::Alice.public(), Keyring::::Alice.sign(b"I am committed")); + let alice_vote = ( + Keyring::::Alice.public(), + Keyring::::Alice.sign(b"I am committed"), + ); // adding new vote (self vote this time) allowed assert!(rt.add_vote(alice_vote)); @@ -260,22 +280,22 @@ mod tests { fn new_rounds() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], 42, ) .unwrap(); let session_start = 1u64.into(); - let rounds = Rounds::::new(session_start, validators); + let rounds = Rounds::::new(session_start, validators); assert_eq!(42, rounds.validator_set_id()); assert_eq!(1, rounds.session_start()); assert_eq!( &vec![ - Keyring::::Alice.public(), - Keyring::::Bob.public(), - Keyring::::Charlie.public() + Keyring::::Alice.public(), + Keyring::::Bob.public(), + Keyring::::Charlie.public() ], rounds.validators() ); @@ -285,7 +305,7 @@ mod tests { fn add_and_conclude_votes() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![ Keyring::Alice.public(), Keyring::Bob.public(), @@ -298,7 +318,7 @@ mod tests { let validator_set_id = validators.id(); let session_start = 1u64.into(); - let mut rounds = Rounds::::new(session_start, validators); + let mut rounds = Rounds::::new(session_start, validators); let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![]); let block_number = 1; @@ -306,7 +326,7 @@ mod tests { let mut vote = VoteMessage { id: Keyring::Alice.public(), commitment: commitment.clone(), - signature: Keyring::::Alice.sign(b"I am committed"), + signature: Keyring::::Alice.sign(b"I am committed"), }; // add 1st good vote assert_eq!(rounds.add_vote(vote.clone()), VoteImportResult::Ok); @@ -315,26 +335,26 @@ mod tests { assert_eq!(rounds.add_vote(vote.clone()), VoteImportResult::Ok); vote.id = Keyring::Dave.public(); - vote.signature = Keyring::::Dave.sign(b"I am committed"); + vote.signature = Keyring::::Dave.sign(b"I am committed"); // invalid vote (Dave is not a validator) assert_eq!(rounds.add_vote(vote.clone()), VoteImportResult::Invalid); vote.id = Keyring::Bob.public(); - vote.signature = Keyring::::Bob.sign(b"I am committed"); + vote.signature = Keyring::::Bob.sign(b"I am committed"); // add 2nd good vote assert_eq!(rounds.add_vote(vote.clone()), 
VoteImportResult::Ok); vote.id = Keyring::Charlie.public(); - vote.signature = Keyring::::Charlie.sign(b"I am committed"); + vote.signature = Keyring::::Charlie.sign(b"I am committed"); // add 3rd good vote -> round concluded -> signatures present assert_eq!( rounds.add_vote(vote.clone()), VoteImportResult::RoundConcluded(SignedCommitment { commitment, signatures: vec![ - Some(Keyring::::Alice.sign(b"I am committed")), - Some(Keyring::::Bob.sign(b"I am committed")), - Some(Keyring::::Charlie.sign(b"I am committed")), + Some(Keyring::::Alice.sign(b"I am committed")), + Some(Keyring::::Bob.sign(b"I am committed")), + Some(Keyring::::Charlie.sign(b"I am committed")), None, ] }) @@ -342,7 +362,7 @@ mod tests { rounds.conclude(block_number); vote.id = Keyring::Eve.public(); - vote.signature = Keyring::::Eve.sign(b"I am committed"); + vote.signature = Keyring::::Eve.sign(b"I am committed"); // Eve is a validator, but round was concluded, adding vote disallowed assert_eq!(rounds.add_vote(vote), VoteImportResult::Stale); } @@ -351,7 +371,7 @@ mod tests { fn old_rounds_not_accepted() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], 42, ) @@ -360,7 +380,7 @@ mod tests { // active rounds starts at block 10 let session_start = 10u64.into(); - let mut rounds = Rounds::::new(session_start, validators); + let mut rounds = Rounds::::new(session_start, validators); // vote on round 9 let block_number = 9; @@ -369,7 +389,7 @@ mod tests { let mut vote = VoteMessage { id: Keyring::Alice.public(), commitment, - signature: Keyring::::Alice.sign(b"I am committed"), + signature: Keyring::::Alice.sign(b"I am committed"), }; // add vote for previous session, should fail assert_eq!(rounds.add_vote(vote.clone()), VoteImportResult::Stale); @@ -397,7 +417,7 @@ mod tests { fn multiple_rounds() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], Default::default(), ) @@ -405,29 +425,29 @@ mod tests { let validator_set_id = validators.id(); let session_start = 1u64.into(); - let mut rounds = Rounds::::new(session_start, validators); + let mut rounds = Rounds::::new(session_start, validators); let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![]); let commitment = Commitment { block_number: 1, payload, validator_set_id }; let mut alice_vote = VoteMessage { id: Keyring::Alice.public(), commitment: commitment.clone(), - signature: Keyring::::Alice.sign(b"I am committed"), + signature: Keyring::::Alice.sign(b"I am committed"), }; let mut bob_vote = VoteMessage { id: Keyring::Bob.public(), commitment: commitment.clone(), - signature: Keyring::::Bob.sign(b"I am committed"), + signature: Keyring::::Bob.sign(b"I am committed"), }; let mut charlie_vote = VoteMessage { id: Keyring::Charlie.public(), commitment, - signature: Keyring::::Charlie.sign(b"I am committed"), + signature: Keyring::::Charlie.sign(b"I am committed"), }; let expected_signatures = vec![ - Some(Keyring::::Alice.sign(b"I am committed")), - Some(Keyring::::Bob.sign(b"I am committed")), - Some(Keyring::::Charlie.sign(b"I am committed")), + Some(Keyring::::Alice.sign(b"I am committed")), + Some(Keyring::::Bob.sign(b"I am committed")), + Some(Keyring::::Charlie.sign(b"I am committed")), ]; // round 1 - only 2 out of 3 vote @@ -472,14 +492,14 @@ mod tests { fn 
should_provide_equivocation_proof() { sp_tracing::try_init_simple(); - let validators = ValidatorSet::::new( + let validators = ValidatorSet::::new( vec![Keyring::Alice.public(), Keyring::Bob.public()], Default::default(), ) .unwrap(); let validator_set_id = validators.id(); let session_start = 1u64.into(); - let mut rounds = Rounds::::new(session_start, validators); + let mut rounds = Rounds::::new(session_start, validators); let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![1, 1, 1, 1]); let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![2, 2, 2, 2]); @@ -489,7 +509,7 @@ mod tests { let alice_vote1 = VoteMessage { id: Keyring::Alice.public(), commitment: commitment1, - signature: Keyring::::Alice.sign(b"I am committed"), + signature: Keyring::::Alice.sign(b"I am committed"), }; let mut alice_vote2 = alice_vote1.clone(); alice_vote2.commitment = commitment2; diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 2bb145d660df061561efd2196b2193b5ef70b1df..681e11a0c5310f7bfa67aab824e861ef447f3941 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -55,6 +55,7 @@ use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_application_crypto::key_types::BEEFY as BEEFY_KEY_TYPE; use sp_consensus::BlockOrigin; use sp_consensus_beefy::{ + ecdsa_crypto, ecdsa_crypto::{AuthorityId, Signature}, known_payloads, mmr::{find_mmr_root_digest, MmrRootProvider}, @@ -89,6 +90,7 @@ type BeefyBlockImport = crate::BeefyBlockImport< substrate_test_runtime_client::Backend, TestApi, BlockImportAdapter, + AuthorityId, >; pub(crate) type BeefyValidatorSet = ValidatorSet; @@ -107,8 +109,8 @@ impl BuildStorage for Genesis { #[derive(Default)] pub(crate) struct PeerData { - pub(crate) beefy_rpc_links: Mutex>>, - pub(crate) beefy_voter_links: Mutex>>, + pub(crate) beefy_rpc_links: Mutex>>, + pub(crate) beefy_voter_links: Mutex>>, pub(crate) beefy_justif_req_handler: Mutex>>, } @@ -371,7 +373,7 @@ async fn voter_init_setup( net: &mut BeefyTestNet, finality: &mut futures::stream::Fuse>, api: &TestApi, -) -> Result, Error> { +) -> Result, Error> { let backend = net.peer(0).client().as_backend(); let (beefy_genesis, best_grandpa) = wait_for_runtime_pallet(api, finality).await.unwrap(); let key_store = None.into(); @@ -446,7 +448,7 @@ where on_demand_justifications_handler: on_demand_justif_handler, is_authority: true, }; - let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _>(beefy_params); + let task = crate::start_beefy_gadget::<_, _, _, _, _, _, _, _>(beefy_params); fn assert_send(_: &T) {} assert_send(&task); @@ -472,8 +474,10 @@ pub(crate) fn get_beefy_streams( net: &mut BeefyTestNet, // peer index and key peers: impl Iterator)>, -) -> (Vec>, Vec>>) -{ +) -> ( + Vec>, + Vec>>, +) { let mut best_block_streams = Vec::new(); let mut versioned_finality_proof_streams = Vec::new(); peers.for_each(|(index, _)| { @@ -511,7 +515,7 @@ async fn wait_for_best_beefy_blocks( } async fn wait_for_beefy_signed_commitments( - streams: Vec>>, + streams: Vec>>, net: &Arc>, expected_commitment_block_nums: &[u64], ) { @@ -1417,7 +1421,7 @@ async fn beefy_reports_equivocations() { for wait_ms in [250, 500, 1250, 3000] { run_for(Duration::from_millis(wait_ms), &net).await; if !api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty() { - break + break; } } @@ -1457,7 +1461,7 @@ async fn gossipped_finality_proofs() { // Charlie will run just the gossip engine and not the full voter. 
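// Illustrative sketch (simplified stand-in types, not the crate's code): the
// `should_provide_equivocation_proof` test above relies on vote import flagging two
// conflicting votes from the same signer. The real `Rounds::add_vote` returns a
// `VoteImportResult::DoubleVoting` carrying a `DoubleVotingProof` built from both votes.
#[derive(Clone, Debug, PartialEq)]
struct DemoVote {
	id: u8,           // voter identity
	block: u64,       // round (block number) voted on
	payload: Vec<u8>, // what was voted for
}

struct DemoDoubleVotingProof {
	first: DemoVote,
	second: DemoVote,
}

fn demo_check_double_vote(
	previous: &DemoVote,
	incoming: &DemoVote,
) -> Option<DemoDoubleVotingProof> {
	// Same signer, same round, different payload => equivocation; both votes form the proof.
	(previous.id == incoming.id &&
		previous.block == incoming.block &&
		previous.payload != incoming.payload)
		.then(|| DemoDoubleVotingProof { first: previous.clone(), second: incoming.clone() })
}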
let gossip_validator = GossipValidator::new(known_peers, Arc::new(TestNetwork::new().0)); let charlie_gossip_validator = Arc::new(gossip_validator); - charlie_gossip_validator.update_filter(GossipFilterCfg:: { + charlie_gossip_validator.update_filter(GossipFilterCfg:: { start: 1, end: 10, validator_set: &validator_set, @@ -1501,7 +1505,7 @@ async fn gossipped_finality_proofs() { let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); // Charlie gossips finality proof for #1 -> Alice and Bob also finalize. let proof = crate::communication::gossip::tests::dummy_proof(1, &validator_set); - let gossip_proof = GossipMessage::::FinalityProof(proof); + let gossip_proof = GossipMessage::::FinalityProof(proof); let encoded_proof = gossip_proof.encode(); charlie_gossip_engine.gossip_message(proofs_topic::(), encoded_proof, true); // Expect #1 is finalized. @@ -1526,7 +1530,8 @@ async fn gossipped_finality_proofs() { let commitment = Commitment { payload, block_number, validator_set_id: validator_set.id() }; let signature = sign_commitment(&BeefyKeyring::Charlie, &commitment); let vote_message = VoteMessage { commitment, id: BeefyKeyring::Charlie.public(), signature }; - let encoded_vote = GossipMessage::::Vote(vote_message).encode(); + let encoded_vote = + GossipMessage::::Vote(vote_message).encode(); charlie_gossip_engine.gossip_message(votes_topic::(), encoded_vote, true); // Expect #2 is finalized. @@ -1538,12 +1543,15 @@ async fn gossipped_finality_proofs() { charlie_gossip_engine .messages_for(proofs_topic::()) .filter_map(|notification| async move { - GossipMessage::::decode(&mut ¬ification.message[..]).ok().and_then( - |message| match message { - GossipMessage::::Vote(_) => unreachable!(), - GossipMessage::::FinalityProof(proof) => Some(proof), - }, + GossipMessage::::decode( + &mut ¬ification.message[..], ) + .ok() + .and_then(|message| match message { + GossipMessage::::Vote(_) => unreachable!(), + GossipMessage::::FinalityProof(proof) => + Some(proof), + }) }) .fuse(), ); @@ -1561,7 +1569,7 @@ async fn gossipped_finality_proofs() { // verify finality proof has been gossipped proof = charlie_gossip_proofs.next() => { let proof = proof.unwrap(); - let (round, _) = proof_block_num_and_set_id::(&proof); + let (round, _) = proof_block_num_and_set_id::(&proof); match round { 1 => continue, // finality proof generated by Charlie in the previous round 2 => break, // finality proof generated by Alice or Bob and gossiped to Charlie diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index cfbb3d63aea446d5b6a9a20a0eb9b2cf8c391e79..3ce4da7ecd56adea78b92a905546c4fa619403d7 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -31,6 +31,8 @@ use crate::{ round::{Rounds, VoteImportResult}, BeefyComms, BeefyVoterLinks, LOG_TARGET, }; +use sp_application_crypto::RuntimeAppPublic; + use codec::{Codec, Decode, DecodeAll, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, trace, warn}; @@ -40,9 +42,8 @@ use sp_api::ProvideRuntimeApi; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; use sp_consensus::SyncOracle; use sp_consensus_beefy::{ - ecdsa_crypto::{AuthorityId, Signature}, - BeefyApi, Commitment, DoubleVotingProof, PayloadProvider, ValidatorSet, VersionedFinalityProof, - VoteMessage, BEEFY_ENGINE_ID, + AuthorityIdBound, BeefyApi, Commitment, DoubleVotingProof, PayloadProvider, ValidatorSet, + 
VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; use sp_runtime::{ generic::BlockId, @@ -52,6 +53,7 @@ use sp_runtime::{ use std::{ collections::{BTreeMap, VecDeque}, fmt::Debug, + marker::PhantomData, sync::Arc, }; @@ -72,7 +74,7 @@ pub(crate) enum RoundAction { /// Note: this is part of `PersistedState` so any changes here should also bump /// aux-db schema version. #[derive(Debug, Decode, Encode, PartialEq)] -pub(crate) struct VoterOracle { +pub(crate) struct VoterOracle { /// Queue of known sessions. Keeps track of voting rounds (block numbers) within each session. /// /// There are three voter states corresponding to three queue states: @@ -82,19 +84,23 @@ pub(crate) struct VoterOracle { /// 3. lagging behind GRANDPA: queue has [1, N] elements, where all `mandatory_done == false`. /// In this state, every time a session gets its mandatory block BEEFY finalized, it's /// popped off the queue, eventually getting to state `2. up-to-date`. - sessions: VecDeque>, + sessions: VecDeque>, /// Min delta in block numbers between two blocks, BEEFY should vote on. min_block_delta: u32, /// Best block we received a GRANDPA finality for. best_grandpa_block_header: ::Header, /// Best block a BEEFY voting round has been concluded for. best_beefy_block: NumberFor, + _phantom: PhantomData AuthorityId>, } -impl VoterOracle { +impl VoterOracle +where + AuthorityId: AuthorityIdBound, +{ /// Verify provided `sessions` satisfies requirements, then build `VoterOracle`. pub fn checked_new( - sessions: VecDeque>, + sessions: VecDeque>, min_block_delta: u32, grandpa_header: ::Header, best_beefy: NumberFor, @@ -105,24 +111,24 @@ impl VoterOracle { let mut validate = || -> bool { let best_grandpa = *grandpa_header.number(); if sessions.is_empty() || best_beefy > best_grandpa { - return false + return false; } for (idx, session) in sessions.iter().enumerate() { let start = session.session_start(); if session.validators().is_empty() { - return false + return false; } if start > best_grandpa || start <= prev_start { - return false + return false; } #[cfg(not(test))] if let Some(prev_id) = prev_validator_id { if session.validator_set_id() <= prev_id { - return false + return false; } } if idx != 0 && session.mandatory_done() { - return false + return false; } prev_start = session.session_start(); prev_validator_id = Some(session.validator_set_id()); @@ -136,6 +142,7 @@ impl VoterOracle { min_block_delta: min_block_delta.max(1), best_grandpa_block_header: grandpa_header, best_beefy_block: best_beefy, + _phantom: PhantomData, }) } else { error!( @@ -151,13 +158,13 @@ impl VoterOracle { // Return reference to rounds pertaining to first session in the queue. // Voting will always happen at the head of the queue. - fn active_rounds(&self) -> Result<&Rounds, Error> { + fn active_rounds(&self) -> Result<&Rounds, Error> { self.sessions.front().ok_or(Error::UninitSession) } // Return mutable reference to rounds pertaining to first session in the queue. // Voting will always happen at the head of the queue. - fn active_rounds_mut(&mut self) -> Result<&mut Rounds, Error> { + fn active_rounds_mut(&mut self) -> Result<&mut Rounds, Error> { self.sessions.front_mut().ok_or(Error::UninitSession) } @@ -183,7 +190,7 @@ impl VoterOracle { } /// Add new observed session to the Oracle. - pub fn add_session(&mut self, rounds: Rounds) { + pub fn add_session(&mut self, rounds: Rounds) { self.sessions.push_back(rounds); // Once we add a new session we can drop/prune previous session if it's been finalized. 
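// Aside on the `_phantom: PhantomData<fn() -> AuthorityId>` field introduced in `VoterOracle`
// above: the marker ties the struct to `AuthorityId` without storing a value of that type, and
// the `fn() -> T` form keeps the struct `Send + Sync` regardless of `T`. `DemoOracle` below is
// a hypothetical stand-in for illustration, not the crate's type.
use std::marker::PhantomData;

struct DemoOracle<AuthorityId> {
	sessions: Vec<u64>,
	_phantom: PhantomData<fn() -> AuthorityId>,
}

impl<AuthorityId> DemoOracle<AuthorityId> {
	fn new() -> Self {
		Self { sessions: Vec::new(), _phantom: PhantomData }
	}
}

fn demo_assert_send_sync<T: Send + Sync>(_: &T) {}

fn demo_phantom() {
	// `*mut ()` is neither Send nor Sync, yet the oracle still is, thanks to `fn() -> T`.
	let oracle: DemoOracle<*mut ()> = DemoOracle::new();
	demo_assert_send_sync(&oracle);
}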
self.try_prune(); @@ -267,21 +274,21 @@ impl VoterOracle { /// /// Note: Any changes here should also bump aux-db schema version. #[derive(Debug, Decode, Encode, PartialEq)] -pub(crate) struct PersistedState { +pub(crate) struct PersistedState { /// Best block we voted on. best_voted: NumberFor, /// Chooses which incoming votes to accept and which votes to generate. /// Keeps track of voting seen for current and future rounds. - voting_oracle: VoterOracle, + voting_oracle: VoterOracle, /// Pallet-beefy genesis block - block number when BEEFY consensus started for this chain. pallet_genesis: NumberFor, } -impl PersistedState { +impl PersistedState { pub fn checked_new( grandpa_header: ::Header, best_beefy: NumberFor, - sessions: VecDeque>, + sessions: VecDeque>, min_block_delta: u32, pallet_genesis: NumberFor, ) -> Option { @@ -314,11 +321,11 @@ impl PersistedState { self.voting_oracle.best_grandpa_block_header = best_grandpa; } - pub fn voting_oracle(&self) -> &VoterOracle { + pub fn voting_oracle(&self) -> &VoterOracle { &self.voting_oracle } - pub(crate) fn gossip_filter_config(&self) -> Result, Error> { + pub(crate) fn gossip_filter_config(&self) -> Result, Error> { let (start, end) = self.voting_oracle.accepted_interval()?; let validator_set = self.voting_oracle.current_validator_set()?; Ok(GossipFilterCfg { start, end, validator_set }) @@ -373,34 +380,34 @@ impl PersistedState { } /// A BEEFY worker/voter that follows the BEEFY protocol -pub(crate) struct BeefyWorker { +pub(crate) struct BeefyWorker { // utilities pub backend: Arc, pub runtime: Arc, pub key_store: Arc>, pub payload_provider: P, pub sync: Arc, - pub fisherman: Arc>, + pub fisherman: Arc>, // communication (created once, but returned and reused if worker is restarted/reinitialized) - pub comms: BeefyComms, + pub comms: BeefyComms, // channels /// Links between the block importer, the background voter and the RPC layer. - pub links: BeefyVoterLinks, + pub links: BeefyVoterLinks, // voter state /// Buffer holding justifications for future processing. - pub pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + pub pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, /// Persisted voter state. - pub persisted_state: PersistedState, + pub persisted_state: PersistedState, /// BEEFY voter metrics pub metrics: Option, /// Node runs under "Authority" role. pub is_authority: bool, } -impl BeefyWorker +impl BeefyWorker where B: Block + Codec, BE: Backend, @@ -408,17 +415,18 @@ where S: SyncOracle, R: ProvideRuntimeApi, R::Api: BeefyApi, + AuthorityId: AuthorityIdBound, { fn best_grandpa_block(&self) -> NumberFor { *self.persisted_state.voting_oracle.best_grandpa_block_header.number() } - fn voting_oracle(&self) -> &VoterOracle { + fn voting_oracle(&self) -> &VoterOracle { &self.persisted_state.voting_oracle } #[cfg(test)] - fn active_rounds(&mut self) -> Result<&Rounds, Error> { + fn active_rounds(&mut self) -> Result<&Rounds, Error> { self.persisted_state.voting_oracle.active_rounds() } @@ -476,7 +484,8 @@ where }) .chain(std::iter::once(header.clone())) { - if let Some(new_validator_set) = find_authorities_change::(&header) { + if let Some(new_validator_set) = find_authorities_change::(&header) + { self.init_session_at(new_validator_set, *header.number()); new_session_added = true; } @@ -503,13 +512,17 @@ where /// Based on [VoterOracle] this vote is either processed here or discarded. 
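// A rough sketch of the triage decision `triage_incoming_vote` below delegates to the oracle
// (plain numbers instead of the crate's `NumberFor<B>`; not the exact implementation): votes
// inside the currently accepted round interval are processed, future ones are buffered, and
// anything older is dropped as stale.
enum DemoRoundAction {
	Process,
	Enqueue,
	Drop,
}

fn demo_triage_round(round: u64, accepted_begin: u64, accepted_end: u64) -> DemoRoundAction {
	if round < accepted_begin {
		DemoRoundAction::Drop // stale: predates the sessions we still vote on
	} else if round <= accepted_end {
		DemoRoundAction::Process // inside the accepted window
	} else {
		DemoRoundAction::Enqueue // future session/round: keep for later
	}
}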
fn triage_incoming_vote( &mut self, - vote: VoteMessage, AuthorityId, Signature>, - ) -> Result<(), Error> { + vote: VoteMessage, AuthorityId, ::Signature>, + ) -> Result<(), Error> + where + ::Signature: Encode + Decode, + { let block_num = vote.commitment.block_number; match self.voting_oracle().triage_round(block_num)? { RoundAction::Process => if let Some(finality_proof) = self.handle_vote(vote)? { - let gossip_proof = GossipMessage::::FinalityProof(finality_proof); + let gossip_proof = + GossipMessage::::FinalityProof(finality_proof); let encoded_proof = gossip_proof.encode(); self.comms.gossip_engine.gossip_message( proofs_topic::(), @@ -528,7 +541,7 @@ where /// Expects `justification` to be valid. fn triage_incoming_justif( &mut self, - justification: BeefyVersionedFinalityProof, + justification: BeefyVersionedFinalityProof, ) -> Result<(), Error> { let signed_commitment = match justification { VersionedFinalityProof::V1(ref sc) => sc, @@ -560,8 +573,8 @@ where fn handle_vote( &mut self, - vote: VoteMessage, AuthorityId, Signature>, - ) -> Result>, Error> { + vote: VoteMessage, AuthorityId, ::Signature>, + ) -> Result>, Error> { let rounds = self.persisted_state.voting_oracle.active_rounds_mut()?; let block_number = vote.commitment.block_number; @@ -576,7 +589,7 @@ where // New state is persisted after finalization. self.finalize(finality_proof.clone())?; metric_inc!(self.metrics, beefy_good_votes_processed); - return Ok(Some(finality_proof)) + return Ok(Some(finality_proof)); }, VoteImportResult::Ok => { // Persist state after handling mandatory block vote. @@ -608,14 +621,17 @@ where /// 4. Send best block hash and `finality_proof` to RPC worker. /// /// Expects `finality proof` to be valid and for a block > current-best-beefy. - fn finalize(&mut self, finality_proof: BeefyVersionedFinalityProof) -> Result<(), Error> { + fn finalize( + &mut self, + finality_proof: BeefyVersionedFinalityProof, + ) -> Result<(), Error> { let block_num = match finality_proof { VersionedFinalityProof::V1(ref sc) => sc.commitment.block_number, }; if block_num <= self.persisted_state.voting_oracle.best_beefy_block { // we've already finalized this round before, short-circuit. - return Ok(()) + return Ok(()); } // Finalize inner round and update voting_oracle state. @@ -740,7 +756,7 @@ where hash } else { warn!(target: LOG_TARGET, "🥩 No MMR root digest found for: {:?}", target_hash); - return Ok(()) + return Ok(()); }; let rounds = self.persisted_state.voting_oracle.active_rounds_mut()?; @@ -754,7 +770,7 @@ where target: LOG_TARGET, "🥩 Missing validator id - can't vote for: {:?}", target_hash ); - return Ok(()) + return Ok(()); }; let commitment = Commitment { payload, block_number: target_number, validator_set_id }; @@ -764,7 +780,7 @@ where Ok(sig) => sig, Err(err) => { warn!(target: LOG_TARGET, "🥩 Error signing commitment: {:?}", err); - return Ok(()) + return Ok(()); }, }; @@ -780,14 +796,15 @@ where error!(target: LOG_TARGET, "🥩 Error handling self vote: {}", err); err })? 
{ - let encoded_proof = GossipMessage::::FinalityProof(finality_proof).encode(); + let encoded_proof = + GossipMessage::::FinalityProof(finality_proof).encode(); self.comms .gossip_engine .gossip_message(proofs_topic::(), encoded_proof, true); } else { metric_inc!(self.metrics, beefy_votes_sent); debug!(target: LOG_TARGET, "🥩 Sent vote message: {:?}", vote); - let encoded_vote = GossipMessage::::Vote(vote).encode(); + let encoded_vote = GossipMessage::::Vote(vote).encode(); self.comms.gossip_engine.gossip_message(votes_topic::(), encoded_vote, false); } @@ -825,9 +842,11 @@ where /// Should never end, returns `Error` otherwise. pub(crate) async fn run( mut self, - block_import_justif: &mut Fuse>>, + block_import_justif: &mut Fuse< + NotificationReceiver>, + >, finality_notifications: &mut Fuse>, - ) -> (Error, BeefyComms) { + ) -> (Error, BeefyComms) { info!( target: LOG_TARGET, "🥩 run BEEFY worker, best grandpa: #{:?}.", @@ -839,9 +858,10 @@ where .gossip_engine .messages_for(votes_topic::()) .filter_map(|notification| async move { - let vote = GossipMessage::::decode_all(&mut ¬ification.message[..]) - .ok() - .and_then(|message| message.unwrap_vote()); + let vote = + GossipMessage::::decode_all(&mut ¬ification.message[..]) + .ok() + .and_then(|message| message.unwrap_vote()); trace!(target: LOG_TARGET, "🥩 Got vote message: {:?}", vote); vote }) @@ -852,9 +872,10 @@ where .gossip_engine .messages_for(proofs_topic::()) .filter_map(|notification| async move { - let proof = GossipMessage::::decode_all(&mut ¬ification.message[..]) - .ok() - .and_then(|message| message.unwrap_finality_proof()); + let proof = + GossipMessage::::decode_all(&mut ¬ification.message[..]) + .ok() + .and_then(|message| message.unwrap_finality_proof()); trace!(target: LOG_TARGET, "🥩 Got gossip proof message: {:?}", proof); proof }) @@ -945,7 +966,11 @@ where /// Report the given equivocation to the BEEFY runtime module. 
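// The hunks above thread `AuthorityId` through `GossipMessage` before SCALE-encoding it for
// the gossip engine. A minimal round-trip sketch of that encode/decode pattern, using a
// simplified two-variant enum rather than the crate's actual `GossipMessage`:
use codec::{Decode, DecodeAll, Encode};

#[derive(Encode, Decode, Debug, PartialEq)]
enum DemoGossipMessage {
	Vote(u64),
	FinalityProof(Vec<u8>),
}

fn demo_gossip_roundtrip() {
	let encoded = DemoGossipMessage::Vote(42).encode();
	// `decode_all` rejects trailing bytes, which is the safe choice for network input.
	let decoded = DemoGossipMessage::decode_all(&mut &encoded[..]).unwrap();
	assert_eq!(decoded, DemoGossipMessage::Vote(42));
}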
fn report_double_voting( &self, - proof: DoubleVotingProof, AuthorityId, Signature>, + proof: DoubleVotingProof< + NumberFor, + AuthorityId, + ::Signature, + >, ) -> Result<(), Error> { let rounds = self.persisted_state.voting_oracle.active_rounds()?; self.fisherman.report_double_voting(proof, rounds) @@ -1011,7 +1036,7 @@ pub(crate) mod tests { use sc_network_test::TestNetFactory; use sp_blockchain::Backend as BlockchainBackendT; use sp_consensus_beefy::{ - known_payloads, + ecdsa_crypto, known_payloads, known_payloads::MMR_ROOT_ID, mmr::MmrRootProvider, test_utils::{generate_equivocation_proof, Keyring}, @@ -1023,8 +1048,8 @@ pub(crate) mod tests { Backend, }; - impl PersistedState { - pub fn active_round(&self) -> Result<&Rounds, Error> { + impl PersistedState { + pub fn active_round(&self) -> Result<&Rounds, Error> { self.voting_oracle.active_rounds() } @@ -1033,17 +1058,17 @@ pub(crate) mod tests { } } - impl VoterOracle { - pub fn sessions(&self) -> &VecDeque> { + impl VoterOracle { + pub fn sessions(&self) -> &VecDeque> { &self.sessions } } fn create_beefy_worker( peer: &mut BeefyPeer, - key: &Keyring, + key: &Keyring, min_block_delta: u32, - genesis_validator_set: ValidatorSet, + genesis_validator_set: ValidatorSet, ) -> BeefyWorker< Block, Backend, @@ -1051,15 +1076,16 @@ pub(crate) mod tests { TestApi, Arc>, TestNetwork, + ecdsa_crypto::AuthorityId, > { let keystore = create_beefy_keystore(key); let (to_rpc_justif_sender, from_voter_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = BeefyBestBlockStream::::channel(); let (_, from_block_import_justif_stream) = - BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let beefy_rpc_links = BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream }; @@ -1115,7 +1141,8 @@ pub(crate) mod tests { .unwrap(); let payload_provider = MmrRootProvider::new(api.clone()); let comms = BeefyComms { gossip_engine, gossip_validator, on_demand_justifications }; - let key_store: Arc> = Arc::new(Some(keystore).into()); + let key_store: Arc> = + Arc::new(Some(keystore).into()); BeefyWorker { backend: backend.clone(), runtime: api.clone(), @@ -1233,13 +1260,14 @@ pub(crate) mod tests { Default::default(), Digest::default(), ); - let mut oracle = VoterOracle:: { + let mut oracle = VoterOracle:: { best_beefy_block: 0, best_grandpa_block_header: header, min_block_delta: 1, sessions: VecDeque::new(), + _phantom: PhantomData, }; - let voting_target_with = |oracle: &mut VoterOracle, + let voting_target_with = |oracle: &mut VoterOracle, best_beefy: NumberFor, best_grandpa: NumberFor| -> Option> { @@ -1295,18 +1323,20 @@ pub(crate) mod tests { Default::default(), Digest::default(), ); - let mut oracle = VoterOracle:: { + let mut oracle = VoterOracle:: { best_beefy_block: 0, best_grandpa_block_header: header, min_block_delta: 1, sessions: VecDeque::new(), + _phantom: PhantomData, }; - let accepted_interval_with = |oracle: &mut VoterOracle, - best_grandpa: NumberFor| - -> Result<(NumberFor, NumberFor), Error> { - oracle.best_grandpa_block_header.number = best_grandpa; - oracle.accepted_interval() - }; + let accepted_interval_with = + |oracle: &mut VoterOracle, + best_grandpa: NumberFor| + -> Result<(NumberFor, NumberFor), Error> { + oracle.best_grandpa_block_header.number = best_grandpa; + oracle.accepted_interval() + }; // rounds not initialized -> should accept votes: `None` 
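// The `voting_target_with` closure above probes round selection. A deliberately simplified
// model of that rule (the real `VoterOracle` additionally snaps the step size and always votes
// on a session's mandatory first block before anything else): never vote past the best GRANDPA
// block, and advance at least `min_block_delta` past the last BEEFY-finalized block.
fn demo_voting_target(best_grandpa: u64, best_beefy: u64, min_block_delta: u64) -> Option<u64> {
	let target = best_beefy + min_block_delta.max(1);
	(target <= best_grandpa).then_some(target)
}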
assert!(accepted_interval_with(&mut oracle, 1).is_err()); @@ -1377,18 +1407,19 @@ pub(crate) mod tests { ); // verify empty digest shows nothing - assert!(find_authorities_change::(&header).is_none()); + assert!(find_authorities_change::(&header).is_none()); let peers = &[Keyring::One, Keyring::Two]; let id = 42; let validator_set = ValidatorSet::new(make_beefy_ids(peers), id).unwrap(); header.digest_mut().push(DigestItem::Consensus( BEEFY_ENGINE_ID, - ConsensusLog::::AuthoritiesChange(validator_set.clone()).encode(), + ConsensusLog::::AuthoritiesChange(validator_set.clone()) + .encode(), )); // verify validator set is correctly extracted from digest - let extracted = find_authorities_change::(&header); + let extracted = find_authorities_change::(&header); assert_eq!(extracted, Some(validator_set)); } diff --git a/substrate/client/consensus/epochs/Cargo.toml b/substrate/client/consensus/epochs/Cargo.toml index ff6bf86a6a441d49a8950c38d2333bd0e067724d..e409e171e477c2452903a09ee78916808a210011 100644 --- a/substrate/client/consensus/epochs/Cargo.toml +++ b/substrate/client/consensus/epochs/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } fork-tree = { path = "../../../utils/fork-tree" } sc-client-api = { path = "../../api" } sc-consensus = { path = "../common" } diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 235017d20ceaac42e67d05ba5dc277502b77e377..9099761fbceb4a30624eef4f636982f3e3b9921b 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -25,7 +25,7 @@ finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } -parity-scale-codec = { version = "3.6.1", features = ["derive"] } +parity-scale-codec = { version = "3.6.12", features = ["derive"] } parking_lot = "0.12.1" rand = "0.8.5" serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index 9b73418c958e5def75e96416523f9ecaacde14f8..d4e72baef3e7df47baf948cba59e35b2687c76e1 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -17,7 +17,7 @@ finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.30" jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } log = { workspace = true, default-features = true } -parity-scale-codec = { version = "3.6.1", features = ["derive"] } +parity-scale-codec = { version = "3.6.12", features = ["derive"] } serde = { features = ["derive"], workspace = true, default-features = true } thiserror = { workspace = true } sc-client-api = { path = "../../../api" } diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs index bc3023fc0281d150864cb3bb21214c3648a73c42..d7153a79ce0b991887b302265508d35de418f0a6 100644 --- a/substrate/client/consensus/grandpa/src/communication/tests.rs +++ b/substrate/client/consensus/grandpa/src/communication/tests.rs @@ -706,25 +706,12 @@ fn peer_with_higher_view_leads_to_catch_up_request() { } fn 
local_chain_spec() -> Box { - use sc_chain_spec::{ChainSpec, GenericChainSpec}; - use serde::{Deserialize, Serialize}; - use sp_runtime::{BuildStorage, Storage}; - - #[derive(Debug, Serialize, Deserialize)] - struct Genesis(std::collections::BTreeMap); - impl BuildStorage for Genesis { - fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { - storage.top.extend( - self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())), - ); - Ok(()) - } - } - let chain_spec = GenericChainSpec::::from_json_bytes( - &include_bytes!("../../../../chain-spec/res/chain_spec.json")[..], - ) - .unwrap(); - chain_spec.cloned_box() + let chain_spec = + sc_chain_spec::GenericChainSpec::::from_json_bytes( + &include_bytes!("../../../../chain-spec/res/chain_spec.json")[..], + ) + .unwrap(); + sc_chain_spec::ChainSpec::cloned_box(&chain_spec) } #[test] diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index 7aa8df248b7c1486b61116c7ae0ec5cbb87292a6..33f5bf1f8c1501e0e366edab6c3606716aeb47ab 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } assert_matches = "1.3.0" async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml index ecfa29aa194d424d35eba858a0b829fb58a610e3..51a2be1b6cf5d4be2d5a5c3af6b0e6ea2bc25406 100644 --- a/substrate/client/consensus/pow/Cargo.toml +++ b/substrate/client/consensus/pow/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml index 4ac6ce90713798d2cce4172f3544c6a116736a84..8e88ee68d7d739a888f3b0e32b7a8fee3ac1e41c 100644 --- a/substrate/client/consensus/slots/Cargo.toml +++ b/substrate/client/consensus/slots/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" futures-timer = "3.0.1" log = { workspace = true, default-features = true } diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index f67a662949abc19da3c5408177bd49731d08ccba..b10c42d50f0bcbf9aed764a2df85cffe7c6baf66 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", features = [ "derive", ] } hash-db = "0.16.0" diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 
0faa90dfc4f925db8776c6914fcf864813b790a5..36f9aea817c9c7031ae69a588a9385e88e786b0c 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -68,8 +68,8 @@ use sc_client_api::{ use sc_state_db::{IsPruned, LastCanonicalized, StateDb}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{ - Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend, HeaderMetadata, - HeaderMetadataCache, Result as ClientResult, + Backend as _, CachedHeaderMetadata, DisplacedLeavesAfterFinalization, Error as ClientError, + HeaderBackend, HeaderMetadata, HeaderMetadataCache, Result as ClientResult, }; use sp_core::{ offchain::OffchainOverlayedChange, @@ -747,19 +747,6 @@ impl sc_client_api::blockchain::Backend for BlockchainDb, - ) -> ClientResult> { - Ok(self - .leaves - .read() - .displaced_by_finalize_height(block_number) - .leaves() - .cloned() - .collect::>()) - } - fn children(&self, parent_hash: Block::Hash) -> ClientResult> { children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash) } @@ -1813,14 +1800,13 @@ impl Backend { apply_state_commit(transaction, commit); } - let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); - self.prune_blocks( - transaction, - f_num, - f_hash, - &new_displaced, - current_transaction_justifications, - )?; + let new_displaced = self.blockchain.displaced_leaves_after_finalizing(f_hash, f_num)?; + let finalization_outcome = + FinalizationOutcome::new(new_displaced.displaced_leaves.clone().into_iter()); + + self.blockchain.leaves.write().remove_displaced_leaves(&finalization_outcome); + + self.prune_blocks(transaction, f_num, &new_displaced, current_transaction_justifications)?; Ok(()) } @@ -1829,8 +1815,7 @@ impl Backend { &self, transaction: &mut Transaction, finalized_number: NumberFor, - finalized_hash: Block::Hash, - displaced: &FinalizationOutcome>, + displaced: &DisplacedLeavesAfterFinalization, current_transaction_justifications: &mut HashMap, ) -> ClientResult<()> { match self.blocks_pruning { @@ -1858,10 +1843,10 @@ impl Backend { self.prune_block(transaction, BlockId::::number(number))?; } - self.prune_displaced_branches(transaction, finalized_hash, displaced)?; + self.prune_displaced_branches(transaction, displaced)?; }, BlocksPruning::KeepFinalized => { - self.prune_displaced_branches(transaction, finalized_hash, displaced)?; + self.prune_displaced_branches(transaction, displaced)?; }, } Ok(()) @@ -1870,21 +1855,13 @@ impl Backend { fn prune_displaced_branches( &self, transaction: &mut Transaction, - finalized: Block::Hash, - displaced: &FinalizationOutcome>, + displaced: &DisplacedLeavesAfterFinalization, ) -> ClientResult<()> { // Discard all blocks from displaced branches - for h in displaced.leaves() { - match sp_blockchain::tree_route(&self.blockchain, *h, finalized) { - Ok(tree_route) => - for r in tree_route.retracted() { - self.blockchain.insert_persisted_body_if_pinned(r.hash)?; - self.prune_block(transaction, BlockId::::hash(r.hash))?; - }, - Err(sp_blockchain::Error::UnknownBlock(_)) => { - // Sometimes routes can't be calculated. E.g. after warp sync. 
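// Conceptual sketch of what `displaced_leaves_after_finalizing` (used above) computes: a leaf
// is displaced when walking its ancestry never reaches the just-finalized block, i.e. it sits
// on a dead fork whose branch can be pruned. Toy parent-map version, not the actual
// `sp_blockchain` implementation:
use std::collections::HashMap;

fn demo_is_displaced(leaf: u32, finalized: u32, parent_of: &HashMap<u32, u32>) -> bool {
	let mut current = leaf;
	loop {
		if current == finalized {
			return false; // the leaf extends the finalized block
		}
		match parent_of.get(&current) {
			Some(&parent) => current = parent,
			// Reached genesis (or an untracked block) without meeting `finalized`.
			None => return true,
		}
	}
}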
- }, - Err(e) => Err(e)?, + for (_, tree_route) in displaced.tree_routes.iter() { + for r in tree_route.retracted() { + self.blockchain.insert_persisted_body_if_pinned(r.hash)?; + self.prune_block(transaction, BlockId::::hash(r.hash))?; } } Ok(()) @@ -3190,6 +3167,9 @@ pub(crate) mod tests { #[test] fn test_leaves_pruned_on_finality() { + // / 1b - 2b - 3b + // 0 - 1a - 2a + // \ 1c let backend: Backend = Backend::new_test(10, 10); let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); @@ -3201,18 +3181,16 @@ pub(crate) mod tests { let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); let block2_b = insert_header(&backend, 2, block1_b, None, Default::default()); - let block2_c = insert_header(&backend, 2, block1_b, None, [1; 32].into()); - assert_eq!( - backend.blockchain().leaves().unwrap(), - vec![block2_a, block2_b, block2_c, block1_c] - ); + let block3_b = insert_header(&backend, 3, block2_b, None, [3; 32].into()); + + assert_eq!(backend.blockchain().leaves().unwrap(), vec![block3_b, block2_a, block1_c]); backend.finalize_block(block1_a, None).unwrap(); backend.finalize_block(block2_a, None).unwrap(); - // leaves at same height stay. Leaves at lower heights pruned. - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c]); + // All leaves are pruned that are known to not belong to canonical branch + assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); } #[test] diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index c08a7f5af342274ab41d4029f531581c7f65ee37..1f54b82030ff226b179afe8f167e134417b859e9 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -21,7 +21,7 @@ parking_lot = "0.12.1" schnellru = "0.2.1" tracing = "0.1.29" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } sc-executor-common = { path = "common" } sc-executor-polkavm = { path = "polkavm" } sc-executor-wasmtime = { path = "wasmtime" } diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index f3fef4046914128dff2ad0904d445e89cba11c6a..d3d670650db789b2b9b854a5fda8724a506833ee 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ b/substrate/client/executor/wasmtime/Cargo.toml @@ -50,5 +50,5 @@ sc-runtime-test = { path = "../runtime-test" } sp-io = { path = "../../../primitives/io" } tempfile = "3.3.0" paste = "1.0" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } cargo_metadata = "0.15.4" diff --git a/substrate/client/executor/wasmtime/src/lib.rs b/substrate/client/executor/wasmtime/src/lib.rs index 82e62b4a5dd3cd6d7b009e019f0c24c28551743a..8e8e92017df91260d7d79f8f237200c1ef9936c0 100644 --- a/substrate/client/executor/wasmtime/src/lib.rs +++ b/substrate/client/executor/wasmtime/src/lib.rs @@ -41,3 +41,7 @@ pub use runtime::{ prepare_runtime_artifact, Config, DeterministicStackLimit, InstantiationStrategy, Semantics, WasmtimeRuntime, }; +pub use sc_executor_common::{ + runtime_blob::RuntimeBlob, + wasm_runtime::{HeapAllocStrategy, WasmModule}, +}; diff --git a/substrate/client/merkle-mountain-range/Cargo.toml b/substrate/client/merkle-mountain-range/Cargo.toml index 46b7a1011c465f1aced062ba6978580a5f85c43c..3cf3cdd15dad9bb30fe2a9a70b6adc9f4541e56e 100644 --- a/substrate/client/merkle-mountain-range/Cargo.toml +++ 
b/substrate/client/merkle-mountain-range/Cargo.toml @@ -14,7 +14,7 @@ workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" log = { workspace = true, default-features = true } sp-api = { path = "../../primitives/api" } diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml index ec7907906785c0c6210627da80035ba83a877927..25e6e316a8be0ab6ec0ff70f38a05f603ebf8404 100644 --- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml +++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } serde = { features = ["derive"], workspace = true, default-features = true } sp-api = { path = "../../../primitives/api" } diff --git a/substrate/client/merkle-mountain-range/rpc/src/lib.rs b/substrate/client/merkle-mountain-range/rpc/src/lib.rs index b4da9848de54cbc10ee549c3bd8cf1b033559722..41e73a5b8d75d6940c669786533e423fcc81b5ac 100644 --- a/substrate/client/merkle-mountain-range/rpc/src/lib.rs +++ b/substrate/client/merkle-mountain-range/rpc/src/lib.rs @@ -36,7 +36,7 @@ use sp_core::{ offchain::{storage::OffchainDb, OffchainDbExt, OffchainStorage}, Bytes, }; -use sp_mmr_primitives::{Error as MmrError, Proof}; +use sp_mmr_primitives::{Error as MmrError, LeafProof}; use sp_runtime::traits::{Block as BlockT, NumberFor}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; @@ -52,17 +52,17 @@ pub struct LeavesProof { pub block_hash: BlockHash, /// SCALE-encoded vector of `LeafData`. pub leaves: Bytes, - /// SCALE-encoded proof data. See [sp_mmr_primitives::Proof]. + /// SCALE-encoded proof data. See [sp_mmr_primitives::LeafProof]. pub proof: Bytes, } impl LeavesProof { /// Create new `LeavesProof` from a given vector of `Leaf` and a - /// [sp_mmr_primitives::Proof]. + /// [sp_mmr_primitives::LeafProof]. 
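// Context for `LeavesProof::new` just below: the RPC type ships both the leaves and the
// renamed `LeafProof` as opaque SCALE-encoded bytes, which clients decode against known types.
// A minimal sketch of that wrapping with a stand-in proof struct (the real code uses
// `sp_core::Bytes` and `sp_mmr_primitives::LeafProof`; field names mirror the test fixtures
// further below):
use codec::Encode;

#[derive(Encode)]
struct DemoLeafProof {
	leaf_indices: Vec<u64>,
	leaf_count: u64,
	items: Vec<[u8; 32]>,
}

fn demo_wrap_for_rpc(leaves: Vec<Vec<u8>>, proof: DemoLeafProof) -> (Vec<u8>, Vec<u8>) {
	// Both halves travel as SCALE blobs; the JSON-RPC layer only sees opaque bytes.
	(leaves.encode(), proof.encode())
}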
pub fn new( block_hash: BlockHash, leaves: Vec, - proof: Proof, + proof: LeafProof, ) -> Self where Leaf: Encode, @@ -258,7 +258,7 @@ mod tests { fn should_serialize_leaf_proof() { // given let leaf = vec![1_u8, 2, 3, 4]; - let proof = Proof { + let proof = LeafProof { leaf_indices: vec![1], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], @@ -281,7 +281,7 @@ mod tests { // given let leaf_a = vec![1_u8, 2, 3, 4]; let leaf_b = vec![2_u8, 2, 3, 4]; - let proof = Proof { + let proof = LeafProof { leaf_indices: vec![1, 2], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], @@ -306,7 +306,7 @@ mod tests { block_hash: H256::repeat_byte(0), leaves: Bytes(vec![vec![1_u8, 2, 3, 4]].encode()), proof: Bytes( - Proof { + LeafProof { leaf_indices: vec![1], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], @@ -333,7 +333,7 @@ mod tests { block_hash: H256::repeat_byte(0), leaves: Bytes(vec![vec![1_u8, 2, 3, 4], vec![2_u8, 2, 3, 4]].encode()), proof: Bytes( - Proof { + LeafProof { leaf_indices: vec![1, 2], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], diff --git a/substrate/client/merkle-mountain-range/src/offchain_mmr.rs b/substrate/client/merkle-mountain-range/src/offchain_mmr.rs index 3c3f0beb6c6a9196bfb8bf6d1108b737869d346c..94593f9c2c7ba265fe2ec1b0b50a6e1b962788a7 100644 --- a/substrate/client/merkle-mountain-range/src/offchain_mmr.rs +++ b/substrate/client/merkle-mountain-range/src/offchain_mmr.rs @@ -33,7 +33,7 @@ use sp_runtime::{ traits::{Block, Header, NumberFor, One}, Saturating, }; -use std::{collections::VecDeque, sync::Arc}; +use std::{collections::VecDeque, default::Default, sync::Arc}; /// `OffchainMMR` exposes MMR offchain canonicalization and pruning logic. pub struct OffchainMmr, C> { @@ -273,12 +273,11 @@ where self.write_gadget_state_or_log(); // Remove offchain MMR nodes for stale forks. - let stale_forks = self.client.expand_forks(¬ification.stale_heads).unwrap_or_else( - |(stale_forks, e)| { - warn!(target: LOG_TARGET, "{:?}", e); - stale_forks - }, - ); + let stale_forks = self.client.expand_forks(¬ification.stale_heads).unwrap_or_else(|e| { + warn!(target: LOG_TARGET, "{:?}", e); + + Default::default() + }); for hash in stale_forks.iter() { self.prune_branch(hash); } diff --git a/substrate/client/merkle-mountain-range/src/test_utils.rs b/substrate/client/merkle-mountain-range/src/test_utils.rs index 5775b4cfe67cd49878508521ec72ba9af413a187..fcf9fa25b593c86eb780d8b563e6ecc55ecb50a7 100644 --- a/substrate/client/merkle-mountain-range/src/test_utils.rs +++ b/substrate/client/merkle-mountain-range/src/test_utils.rs @@ -309,11 +309,11 @@ sp_api::mock_impl_runtime_apis! { &self, _block_numbers: Vec, _best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_proof(_leaves: Vec, _proof: mmr::Proof) + fn verify_proof(_leaves: Vec, _proof: mmr::LeafProof) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) @@ -322,7 +322,7 @@ sp_api::mock_impl_runtime_apis! 
{ fn verify_proof_stateless( _root: MmrHash, _leaves: Vec, - _proof: mmr::Proof + _proof: mmr::LeafProof ) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 2ea152221ac5a4989004bd80b9c013a7944f8d84..1626305639498855148b515e666e1e91a603643f 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -20,7 +20,7 @@ array-bytes = "6.2.2" arrayvec = "0.7.2" blake2 = "0.10.4" bytes = "1" -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.2" log = { workspace = true, default-features = true } diff --git a/substrate/client/mixnet/src/sync_with_runtime.rs b/substrate/client/mixnet/src/sync_with_runtime.rs index 46c2334ceb46d8ef7966b666e480e83075a51703..0071ce13b33a3ff015569ce66530215f1d9ac5b6 100644 --- a/substrate/client/mixnet/src/sync_with_runtime.rs +++ b/substrate/client/mixnet/src/sync_with_runtime.rs @@ -25,8 +25,10 @@ use mixnet::core::{ Mixnet, Mixnode as CoreMixnode, MixnodesErr as CoreMixnodesErr, RelSessionIndex, SessionPhase as CoreSessionPhase, SessionStatus as CoreSessionStatus, }; -use multiaddr::{multiaddr, Multiaddr, Protocol}; -use sc_network_types::PeerId; +use sc_network_types::{ + multiaddr::{multiaddr, Multiaddr, Protocol}, + PeerId, +}; use sp_api::{ApiError, ApiRef}; use sp_mixnet::{ runtime_api::MixnetApi, @@ -196,7 +198,6 @@ where #[cfg(test)] mod tests { use super::*; - use multiaddr::multiaddr; #[test] fn fixup_empty_external_addresses() { diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index ad81381edea1800035a96b608681c5f3691d2d05..3eeea6651186162ed2f28bdf12ec08ad184df906 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -34,6 +34,6 @@ sp-runtime = { path = "../../primitives/runtime" } [dev-dependencies] tokio = "1.37" async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } quickcheck = { version = "1.0.3", default-features = false } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index f5f6479c41f1c805da622e9fd5ce5ccca9a1274a..29b14a4511cac655096b3e3dded5013462ae7e9e 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -26,7 +26,7 @@ async-trait = "0.1.79" asynchronous-codec = "0.6" bytes = "1" cid = "0.9.0" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } either = "1.5.3" fnv = "1.0.6" futures = "0.3.30" @@ -59,7 +59,7 @@ sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } wasm-timer = "0.2" -litep2p = { git = "https://github.com/paritytech/litep2p", rev = "e03a6023882db111beeb24d8c0ceaac0721d3f0f" } +litep2p = "0.5.0" once_cell = "1.18.0" void = "1.0.2" schnellru = "0.2.1" diff --git a/substrate/client/network/common/Cargo.toml b/substrate/client/network/common/Cargo.toml index 
ca510a2ae705e123e3935827ca97be24a360f0a2..9a1bf5b88ea1a97cf6bdfe358a44338561500b50 100644 --- a/substrate/client/network/common/Cargo.toml +++ b/substrate/client/network/common/Cargo.toml @@ -21,7 +21,7 @@ prost-build = "0.12.4" [dependencies] async-trait = "0.1.79" bitflags = "1.3.2" -codec = { package = "parity-scale-codec", version = "3.6.1", features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", features = [ "derive", ] } futures = "0.3.30" diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index 2abefd4f8e29d9d159c0cf958786e2e328b4cc69..baaed578b884172bdcfba5eb2a66e48ac32c56a5 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -21,7 +21,7 @@ prost-build = "0.12.4" [dependencies] async-channel = "1.8.0" array-bytes = "6.2.2" -codec = { package = "parity-scale-codec", version = "3.6.1", features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", features = [ "derive", ] } futures = "0.3.30" diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs index e6cc9de56942700940da2644b2c5b57c0abd144d..100a1e9dfb38ea23d4bb7d846aed0295f1cdaaea 100644 --- a/substrate/client/network/src/config.rs +++ b/substrate/client/network/src/config.rs @@ -35,12 +35,11 @@ pub use crate::{ types::ProtocolName, }; -pub use libp2p::{ - build_multiaddr, - identity::{self, ed25519, Keypair}, - multiaddr, Multiaddr, +pub use sc_network_types::{build_multiaddr, ed25519}; +use sc_network_types::{ + multiaddr::{self, Multiaddr}, + PeerId, }; -use sc_network_types::PeerId; use crate::service::{ensure_addresses_consistent_with_transport, traits::NetworkBackend}; use codec::Encode; @@ -100,7 +99,7 @@ impl fmt::Debug for ProtocolId { /// # Example /// /// ``` -/// # use libp2p::{Multiaddr, PeerId}; +/// # use sc_network_types::{multiaddr::Multiaddr, PeerId}; /// use sc_network::config::parse_str_addr; /// let (peer_id, addr) = parse_str_addr( /// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" @@ -131,7 +130,7 @@ pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> /// # Example /// /// ``` -/// # use libp2p::{Multiaddr, PeerId}; +/// # use sc_network_types::{multiaddr::Multiaddr, PeerId}; /// use sc_network::config::MultiaddrWithPeerId; /// let addr: MultiaddrWithPeerId = /// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap(); @@ -187,7 +186,7 @@ impl TryFrom for MultiaddrWithPeerId { #[derive(Debug)] pub enum ParseErr { /// Error while parsing the multiaddress. - MultiaddrParse(multiaddr::Error), + MultiaddrParse(multiaddr::ParseError), /// Multihash of the peer ID is invalid. InvalidPeerId, /// The peer ID is missing from the address. @@ -214,8 +213,8 @@ impl std::error::Error for ParseErr { } } -impl From for ParseErr { - fn from(err: multiaddr::Error) -> ParseErr { +impl From for ParseErr { + fn from(err: multiaddr::ParseError) -> ParseErr { Self::MultiaddrParse(err) } } @@ -343,10 +342,10 @@ impl NodeKeyConfig { /// /// * If the secret is configured to be new, it is generated and the corresponding keypair is /// returned. 
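// A condensed model of the `NodeKeyConfig` resolution performed by `into_keypair` below: the
// ed25519 secret is either freshly generated, supplied in memory, or read from a file (and
// written there first if the file does not exist yet). Toy byte-array stand-ins for the real
// `ed25519::SecretKey`/`Keypair` types:
use std::{fs, io, path::PathBuf};

enum DemoSecret {
	New,
	Input([u8; 32]),
	File(PathBuf),
}

fn demo_resolve_secret(secret: DemoSecret) -> io::Result<[u8; 32]> {
	match secret {
		DemoSecret::New => Ok(demo_generate()),
		DemoSecret::Input(bytes) => Ok(bytes),
		DemoSecret::File(path) => match fs::read(&path) {
			Ok(data) => data.try_into().map_err(|_| io::Error::from(io::ErrorKind::InvalidData)),
			Err(e) if e.kind() == io::ErrorKind::NotFound => {
				let fresh = demo_generate();
				fs::write(&path, fresh)?; // persist so the peer id is stable across restarts
				Ok(fresh)
			},
			Err(e) => Err(e),
		},
	}
}

fn demo_generate() -> [u8; 32] {
	[7u8; 32] // stand-in for a CSPRNG draw (`ed25519::SecretKey::generate` in the real code)
}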
- pub fn into_keypair(self) -> io::Result { + pub fn into_keypair(self) -> io::Result { use NodeKeyConfig::*; match self { - Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()), + Ed25519(Secret::New) => Ok(ed25519::Keypair::generate()), Ed25519(Secret::Input(k)) => Ok(ed25519::Keypair::from(k).into()), @@ -365,8 +364,7 @@ impl NodeKeyConfig { ed25519::SecretKey::generate, |b| b.as_ref().to_vec(), ) - .map(ed25519::Keypair::from) - .map(Keypair::from), + .map(ed25519::Keypair::from), } } } @@ -887,7 +885,7 @@ impl> FullNetworkConfig .find(|o| o.peer_id != bootnode.peer_id) { Err(crate::error::Error::DuplicateBootnode { - address: bootnode.multiaddr.clone(), + address: bootnode.multiaddr.clone().into(), first_id: bootnode.peer_id.into(), second_id: other.peer_id.into(), }) @@ -947,14 +945,8 @@ mod tests { tempfile::Builder::new().prefix(prefix).tempdir().unwrap() } - fn secret_bytes(kp: Keypair) -> Vec { - kp.try_into_ed25519() - .expect("ed25519 keypair") - .secret() - .as_ref() - .iter() - .cloned() - .collect() + fn secret_bytes(kp: ed25519::Keypair) -> Vec { + kp.secret().to_bytes().into() } #[test] diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 4e2121c5540d6c7c6d895e2756b266f6b5a945e8..7d4481b0d06f947619b66ab60a5ab60b8fe15a99 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -105,7 +105,8 @@ pub struct DiscoveryConfig { discovery_only_if_under_num: u64, enable_mdns: bool, kademlia_disjoint_query_paths: bool, - kademlia_protocols: Vec>, + kademlia_protocol: Vec, + kademlia_legacy_protocol: Vec, kademlia_replication_factor: NonZeroUsize, } @@ -121,7 +122,8 @@ impl DiscoveryConfig { discovery_only_if_under_num: std::u64::MAX, enable_mdns: false, kademlia_disjoint_query_paths: false, - kademlia_protocols: Vec::new(), + kademlia_protocol: Vec::new(), + kademlia_legacy_protocol: Vec::new(), kademlia_replication_factor: NonZeroUsize::new(DEFAULT_KADEMLIA_REPLICATION_FACTOR) .expect("value is a constant; constant is non-zero; qed."), } @@ -177,9 +179,8 @@ impl DiscoveryConfig { fork_id: Option<&str>, protocol_id: &ProtocolId, ) -> &mut Self { - self.kademlia_protocols = Vec::new(); - self.kademlia_protocols.push(kademlia_protocol_name(genesis_hash, fork_id)); - self.kademlia_protocols.push(legacy_kademlia_protocol_name(protocol_id)); + self.kademlia_protocol = kademlia_protocol_name(genesis_hash, fork_id); + self.kademlia_legacy_protocol = legacy_kademlia_protocol_name(protocol_id); self } @@ -207,14 +208,19 @@ impl DiscoveryConfig { discovery_only_if_under_num, enable_mdns, kademlia_disjoint_query_paths, - kademlia_protocols, + kademlia_protocol, + kademlia_legacy_protocol, kademlia_replication_factor, } = self; - let kademlia = if !kademlia_protocols.is_empty() { + let kademlia = if !kademlia_protocol.is_empty() { let mut config = KademliaConfig::default(); config.set_replication_factor(kademlia_replication_factor); + // Populate kad with both the legacy and the new protocol names. + // Remove the legacy protocol: + // https://github.com/paritytech/polkadot-sdk/issues/504 + let kademlia_protocols = [kademlia_protocol.clone(), kademlia_legacy_protocol]; config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect()); // By default Kademlia attempts to insert all peers into its routing table once a // dialing attempt succeeds. 
In order to control which peer is added, disable the @@ -266,6 +272,7 @@ impl DiscoveryConfig { .expect("value is a constant; constant is non-zero; qed."), ), records_to_publish: Default::default(), + kademlia_protocol, } } } @@ -309,6 +316,11 @@ pub struct DiscoveryBehaviour { /// did not return the record(in `FinishedWithNoAdditionalRecord`). We will then put the record /// to these peers. records_to_publish: HashMap, + /// The chain based kademlia protocol name (including genesis hash and fork id). + /// + /// Remove when all nodes are upgraded to genesis hash and fork ID-based Kademlia: + /// . + kademlia_protocol: Vec, } impl DiscoveryBehaviour { @@ -366,23 +378,29 @@ impl DiscoveryBehaviour { return } - if let Some(matching_protocol) = supported_protocols + // The supported protocols must include the chain-based Kademlia protocol. + // + // Extract the chain-based Kademlia protocol from `kademlia.protocol_name()` + // when all nodes are upgraded to genesis hash and fork ID-based Kademlia: + // https://github.com/paritytech/polkadot-sdk/issues/504. + if !supported_protocols .iter() - .find(|p| kademlia.protocol_names().iter().any(|k| k.as_ref() == p.as_ref())) + .any(|p| p.as_ref() == self.kademlia_protocol.as_slice()) { - trace!( - target: "sub-libp2p", - "Adding self-reported address {} from {} to Kademlia DHT {}.", - addr, peer_id, String::from_utf8_lossy(matching_protocol.as_ref()), - ); - kademlia.add_address(peer_id, addr.clone()); - } else { trace!( target: "sub-libp2p", "Ignoring self-reported address {} from {} as remote node is not part of the \ Kademlia DHT supported by the local node.", addr, peer_id, ); + return } + + trace!( + target: "sub-libp2p", + "Adding self-reported address {} from {} to Kademlia DHT.", + addr, peer_id + ); + kademlia.add_address(peer_id, addr.clone()); } } @@ -1075,17 +1093,20 @@ mod tests { .unwrap(); // Test both genesis hash-based and legacy // protocol names. - let protocol_name = if swarm_n % 2 == 0 { - kademlia_protocol_name(genesis_hash, fork_id) + let protocol_names = if swarm_n % 2 == 0 { + vec![kademlia_protocol_name(genesis_hash, fork_id)] } else { - legacy_kademlia_protocol_name(&protocol_id) + vec![ + legacy_kademlia_protocol_name(&protocol_id), + kademlia_protocol_name(genesis_hash, fork_id), + ] }; swarms[swarm_n] .0 .behaviour_mut() .add_self_reported_address( &other, - &[protocol_name], + protocol_names.as_slice(), addr, ); @@ -1181,9 +1202,56 @@ mod tests { &[kademlia_protocol_name(supported_genesis_hash, None)], remote_addr.clone(), ); + { + let kademlia = discovery.kademlia.as_mut().unwrap(); + assert!( + !kademlia + .kbucket(remote_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect peer with supported protocol to be added." + ); + } + + let unsupported_peer_id = predictable_peer_id(b"00000000000000000000000000000002"); + let unsupported_peer_addr: Multiaddr = "/memory/2".parse().unwrap(); + + // Check the unsupported peer is not present before and after the call. + { + let kademlia = discovery.kademlia.as_mut().unwrap(); + assert!( + kademlia + .kbucket(unsupported_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect unsupported peer not to be added." + ); + } + // Note: legacy protocol is not supported without genesis hash and fork ID, + // if the legacy is the only protocol supported, then the peer will not be added. 
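// The gate these discovery tests exercise, in miniature: a peer's self-reported address only
// enters the Kademlia routing table if its advertised protocols include our chain-specific
// (genesis-hash based) Kademlia protocol name; the legacy name alone no longer qualifies.
// Stand-alone simplification of the `add_self_reported_address` check above:
fn demo_should_add_to_dht(supported: &[Vec<u8>], chain_protocol: &[u8]) -> bool {
	supported.iter().any(|p| p.as_slice() == chain_protocol)
}

fn demo_dht_gate() {
	let chain = b"/90c9a.../kad".to_vec(); // hypothetical genesis-hash based name
	let legacy = b"/dot/kad".to_vec();
	assert!(demo_should_add_to_dht(&[legacy.clone(), chain.clone()], &chain));
	assert!(!demo_should_add_to_dht(&[legacy], &chain)); // legacy-only peer is ignored
}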
+ discovery.add_self_reported_address( + &unsupported_peer_id, + &[legacy_kademlia_protocol_name(&supported_protocol_id)], + unsupported_peer_addr.clone(), + ); + { + let kademlia = discovery.kademlia.as_mut().unwrap(); + assert!( + kademlia + .kbucket(unsupported_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect unsupported peer not to be added." + ); + } + + // Supported legacy and genesis based protocols are allowed to be added. discovery.add_self_reported_address( &another_peer_id, - &[legacy_kademlia_protocol_name(&supported_protocol_id)], + &[ + legacy_kademlia_protocol_name(&supported_protocol_id), + kademlia_protocol_name(supported_genesis_hash, None), + ], another_addr.clone(), ); @@ -1194,6 +1262,13 @@ mod tests { kademlia.kbuckets().fold(0, |acc, bucket| acc + bucket.num_entries()), "Expect peers with supported protocol to be added." ); + assert!( + !kademlia + .kbucket(another_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect peer with supported protocol to be added." + ); } } } diff --git a/substrate/client/network/src/error.rs b/substrate/client/network/src/error.rs index b776e3e1ad9de8f84118f293f28ad00c88969ba0..376b8461be4e57a72ad45c7f29763bba19e40ac9 100644 --- a/substrate/client/network/src/error.rs +++ b/substrate/client/network/src/error.rs @@ -20,7 +20,7 @@ use crate::{config::TransportConfig, types::ProtocolName}; -use libp2p::{Multiaddr, PeerId}; +use sc_network_types::{multiaddr::Multiaddr, PeerId}; use std::fmt; diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs index 8f479825c8d77c7684333981d1763099b8a537c9..99a972f914e261e60ef8692fc3fab7a53051d3df 100644 --- a/substrate/client/network/src/lib.rs +++ b/substrate/client/network/src/lib.rs @@ -272,6 +272,10 @@ pub use sc_network_common::{ role::{ObservedRole, Roles}, types::ReputationChange, }; +pub use sc_network_types::{ + multiaddr::{self, Multiaddr}, + PeerId, +}; pub use service::{ metrics::NotificationMetrics, signature::Signature, @@ -285,7 +289,7 @@ pub use service::{ DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, OutboundFailure, PublicKey, }; -pub use types::{multiaddr, Multiaddr, PeerId, ProtocolName}; +pub use types::ProtocolName; /// The maximum allowed number of established connections per peer. /// diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index 47a620db132e1b025277eeae0fc3f30cfe31cde1..ff5f492df246a6e4165700132d080c183b91b79a 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -20,9 +20,7 @@ use crate::{ config::{NetworkConfiguration, ProtocolId}, - multiaddr::Protocol, peer_store::PeerStoreProvider, - Multiaddr, }; use array_bytes::bytes2hex; @@ -36,12 +34,13 @@ use litep2p::{ identify::{Config as IdentifyConfig, IdentifyEvent}, kademlia::{ Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, KademliaEvent, - KademliaHandle, QueryId, Quorum, Record, RecordKey, + KademliaHandle, QueryId, Quorum, Record, RecordKey, RecordsType, }, ping::{Config as PingConfig, PingEvent}, }, mdns::{Config as MdnsConfig, MdnsEvent}, }, + types::multiaddr::{Multiaddr, Protocol}, PeerId, ProtocolName, }; use parking_lot::RwLock; @@ -124,8 +123,8 @@ pub enum DiscoveryEvent { /// Query ID. query_id: QueryId, - /// Record. - record: Record, + /// Records. 
+ records: RecordsType, }, /// Record was successfully stored on the DHT. @@ -227,7 +226,7 @@ impl Discovery { let (identify_config, identify_event_stream) = IdentifyConfig::new( "/substrate/1.0".to_string(), Some(user_agent), - config.public_addresses.clone(), + config.public_addresses.clone().into_iter().map(Into::into).collect(), ); let (mdns_config, mdns_event_stream) = match config.transport { @@ -266,12 +265,12 @@ impl Discovery { duration_to_next_find_query: Duration::from_secs(1), address_confirmations: LruMap::new(ByLength::new(8)), allow_non_global_addresses: config.allow_non_globals_in_dht, - public_addresses: config.public_addresses.iter().cloned().collect(), + public_addresses: config.public_addresses.iter().cloned().map(Into::into).collect(), next_kad_query: Some(Delay::new(KADEMLIA_QUERY_INTERVAL)), - local_protocols: HashSet::from_iter([ - kademlia_protocol_name(genesis_hash, fork_id), - legacy_kademlia_protocol_name(protocol_id), - ]), + local_protocols: HashSet::from_iter([kademlia_protocol_name( + genesis_hash, + fork_id, + )]), }, ping_config, identify_config, @@ -295,6 +294,11 @@ impl Discovery { addresses: Vec, ) { if self.local_protocols.is_disjoint(&supported_protocols) { + log::trace!( + target: "sub-libp2p", + "Ignoring self-reported address of peer {peer} as remote node is not part of the \ + Kademlia DHT supported by the local node.", + ); return } @@ -456,16 +460,13 @@ impl Stream for Discovery { peers: peers.into_iter().collect(), })) }, - Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id, record })) => { + Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id, records })) => { log::trace!( target: LOG_TARGET, - "`GET_RECORD` succeeded for {query_id:?}: {record:?}", + "`GET_RECORD` succeeded for {query_id:?}: {records:?}", ); - return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { - query_id, - record: record.record, - })); + return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id, records })); }, Poll::Ready(Some(KademliaEvent::PutRecordSucess { query_id, key: _ })) => return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id })), diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs index 1137c73b56db8292fdee437ee3a656f38e789f51..ae287052b2d44934a8b4827e53c77cae0d707be9 100644 --- a/substrate/client/network/src/litep2p/mod.rs +++ b/substrate/client/network/src/litep2p/mod.rs @@ -38,7 +38,6 @@ use crate::{ request_response::{RequestResponseConfig, RequestResponseProtocol}, }, }, - multiaddr::{Multiaddr, Protocol}, peer_store::PeerStoreProvider, protocol, service::{ @@ -54,17 +53,23 @@ use futures::StreamExt; use libp2p::kad::RecordKey; use litep2p::{ config::ConfigBuilder, - crypto::ed25519::{Keypair, SecretKey}, + crypto::ed25519::Keypair, executor::Executor, protocol::{ - libp2p::{bitswap::Config as BitswapConfig, kademlia::QueryId}, + libp2p::{ + bitswap::Config as BitswapConfig, + kademlia::{QueryId, RecordsType}, + }, request_response::ConfigBuilder as RequestResponseConfigBuilder, }, transport::{ tcp::config::Config as TcpTransportConfig, websocket::config::Config as WebSocketTransportConfig, Endpoint, }, - types::ConnectionId, + types::{ + multiaddr::{Multiaddr, Protocol}, + ConnectionId, + }, Error as Litep2pError, Litep2p, Litep2pEvent, ProtocolName as Litep2pProtocolName, }; use parking_lot::RwLock; @@ -81,7 +86,7 @@ use std::{ collections::{hash_map::Entry, HashMap, HashSet}, fs, future::Future, - io, iter, + iter, pin::Pin, sync::{ atomic::{AtomicUsize, 
Ordering}, @@ -200,12 +205,12 @@ impl Litep2pNetworkBackend { Protocol::Ip4(_), ) => match address.iter().find(|protocol| std::matches!(protocol, Protocol::P2p(_))) { - Some(Protocol::P2p(multihash)) => PeerId::from_multihash(multihash) + Some(Protocol::P2p(multihash)) => PeerId::from_multihash(multihash.into()) .map_or(None, |peer| Some((peer, Some(address)))), _ => None, }, Some(Protocol::P2p(multihash)) => - PeerId::from_multihash(multihash).map_or(None, |peer| Some((peer, None))), + PeerId::from_multihash(multihash.into()).map_or(None, |peer| Some((peer, None))), _ => None, }) .fold(HashMap::new(), |mut acc, (peer, maybe_address)| { @@ -244,16 +249,9 @@ impl Litep2pNetworkBackend { impl Litep2pNetworkBackend { /// Get `litep2p` keypair from `NodeKeyConfig`. fn get_keypair(node_key: &NodeKeyConfig) -> Result<(Keypair, litep2p::PeerId), Error> { - let secret = libp2p::identity::Keypair::try_into_ed25519(node_key.clone().into_keypair()?) - .map_err(|error| { - log::error!(target: LOG_TARGET, "failed to convert to ed25519: {error:?}"); - Error::Io(io::ErrorKind::InvalidInput.into()) - })? - .secret(); - - let mut secret = secret.as_ref().iter().cloned().collect::>(); - let secret = SecretKey::from_bytes(&mut secret) - .map_err(|_| Error::Io(io::ErrorKind::InvalidInput.into()))?; + let secret: litep2p::crypto::ed25519::SecretKey = + node_key.clone().into_keypair()?.secret().into(); + let local_identity = Keypair::from(secret); let local_public = local_identity.public(); let local_peer_id = local_public.to_peer_id(); @@ -327,6 +325,8 @@ impl Litep2pNetworkBackend { .listen_addresses .iter() .filter_map(|address| { + use sc_network_types::multiaddr::Protocol; + let mut iter = address.iter(); match iter.next() { @@ -367,12 +367,12 @@ impl Litep2pNetworkBackend { config_builder .with_websocket(WebSocketTransportConfig { - listen_addresses: websocket.into_iter().flatten().collect(), + listen_addresses: websocket.into_iter().flatten().map(Into::into).collect(), yamux_config: yamux_config.clone(), ..Default::default() }) .with_tcp(TcpTransportConfig { - listen_addresses: tcp.into_iter().flatten().collect(), + listen_addresses: tcp.into_iter().flatten().map(Into::into).collect(), yamux_config, ..Default::default() }) @@ -522,6 +522,8 @@ impl NetworkBackend for Litep2pNetworkBac // collect known addresses let known_addresses: HashMap> = known_addresses.into_iter().fold(HashMap::new(), |mut acc, (peer, address)| { + use sc_network_types::multiaddr::Protocol; + let address = match address.iter().last() { Some(Protocol::Ws(_) | Protocol::Wss(_) | Protocol::Tcp(_)) => address.with(Protocol::P2p(peer.into())), @@ -529,7 +531,7 @@ impl NetworkBackend for Litep2pNetworkBac _ => return acc, }; - acc.entry(peer.into()).or_default().push(address); + acc.entry(peer.into()).or_default().push(address.into()); peer_store_handle.add_known_peer(peer); acc @@ -567,7 +569,7 @@ impl NetworkBackend for Litep2pNetworkBac Litep2p::new(config_builder.build()).map_err(|error| Error::Litep2p(error))?; let external_addresses: Arc>> = Arc::new(RwLock::new( - HashSet::from_iter(network_config.public_addresses.iter().cloned()), + HashSet::from_iter(network_config.public_addresses.iter().cloned().map(Into::into)), )); litep2p.listen_addresses().for_each(|address| { log::debug!(target: LOG_TARGET, "listening on: {address}"); @@ -713,7 +715,7 @@ impl NetworkBackend for Litep2pNetworkBac protocol, peers, } => { - let peers = self.add_addresses(peers.into_iter()); + let peers = self.add_addresses(peers.into_iter().map(Into::into)); 
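Stepping back to the `get_keypair` simplification above: the old code round-tripped through `libp2p::identity::Keypair` and raw bytes with two fallible conversions, while the new `From` impls on sc-network-types' ed25519 types make the whole chain infallible. A minimal sketch of that chain, assuming only the conversions added in this PR:

```rust
// Sketch: sc-network-types ed25519 keypair -> litep2p keypair.
// `secret()` demotes the keypair to its 32-byte secret key, `into()` crosses
// the crate boundary, and `Keypair::from` re-derives the public half.
use litep2p::crypto::ed25519 as lp_ed25519;

fn to_litep2p_keypair(kp: sc_network_types::ed25519::Keypair) -> lp_ed25519::Keypair {
    let secret: lp_ed25519::SecretKey = kp.secret().into();
    lp_ed25519::Keypair::from(secret)
}
```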
match self.peerset_handles.get(&protocol) { Some(handle) => { @@ -722,9 +724,11 @@ impl NetworkBackend for Litep2pNetworkBac None => log::warn!(target: LOG_TARGET, "protocol {protocol} doens't exist"), }; } - NetworkServiceCommand::AddKnownAddress { peer, mut address } => { + NetworkServiceCommand::AddKnownAddress { peer, address } => { + let mut address: Multiaddr = address.into(); + if !address.iter().any(|protocol| std::matches!(protocol, Protocol::P2p(_))) { - address.push(Protocol::P2p(peer.into())); + address.push(Protocol::P2p(litep2p::PeerId::from(peer).into())); } if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize { @@ -735,7 +739,7 @@ impl NetworkBackend for Litep2pNetworkBac } }, NetworkServiceCommand::SetReservedPeers { protocol, peers } => { - let peers = self.add_addresses(peers.into_iter()); + let peers = self.add_addresses(peers.into_iter().map(Into::into)); match self.peerset_handles.get(&protocol) { Some(handle) => { @@ -795,23 +799,30 @@ impl NetworkBackend for Litep2pNetworkBac self.peerstore_handle.add_known_peer(peer.into()); } } - Some(DiscoveryEvent::GetRecordSuccess { query_id, record }) => { + Some(DiscoveryEvent::GetRecordSuccess { query_id, records }) => { match self.pending_get_values.remove(&query_id) { None => log::warn!( target: LOG_TARGET, "`GET_VALUE` succeeded for a non-existent query", ), - Some((_key, started)) => { + Some((key, started)) => { log::trace!( target: LOG_TARGET, "`GET_VALUE` for {:?} ({query_id:?}) succeeded", - record.key, + key, ); - self.event_streams.send(Event::Dht( - DhtEvent::ValueFound(vec![ + let value_found = match records { + RecordsType::LocalStore(record) => vec![ (libp2p::kad::RecordKey::new(&record.key), record.value) - ]) + ], + RecordsType::Network(records) => records.into_iter().map(|peer_record| { + (libp2p::kad::RecordKey::new(&peer_record.record.key), peer_record.record.value) + }).collect(), + }; + + self.event_streams.send(Event::Dht( + DhtEvent::ValueFound(value_found) )); if let Some(ref metrics) = self.metrics { diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs index 86f11aa6e142e2a1bdc161e7a92e34f5833cd1bb..09b869abdf5f5e38050f3b275b6a95da0a342745 100644 --- a/substrate/client/network/src/litep2p/service.rs +++ b/substrate/client/network/src/litep2p/service.rs @@ -24,7 +24,6 @@ use crate::{ notification::{config::ProtocolControlHandle, peerset::PeersetCommand}, request_response::OutboundRequest, }, - multiaddr::Protocol, network_state::NetworkState, peer_store::PeerStoreProvider, service::out_events, @@ -35,15 +34,18 @@ use crate::{ use codec::DecodeAll; use futures::{channel::oneshot, stream::BoxStream}; -use libp2p::{identity::SigningError, kad::record::Key as KademliaKey, Multiaddr}; -use litep2p::crypto::ed25519::Keypair; +use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; +use litep2p::{crypto::ed25519::Keypair, types::multiaddr::Multiaddr as LiteP2pMultiaddr}; use parking_lot::RwLock; use sc_network_common::{ role::{ObservedRole, Roles}, types::ReputationChange, }; -use sc_network_types::PeerId; +use sc_network_types::{ + multiaddr::{Multiaddr, Protocol}, + PeerId, +}; use sc_utils::mpsc::TracingUnboundedSender; use std::{ @@ -165,10 +167,10 @@ pub struct Litep2pNetworkService { request_response_protocols: HashMap>, /// Listen addresses. - listen_addresses: Arc>>, + listen_addresses: Arc>>, /// External addresses. 
- external_addresses: Arc>>, + external_addresses: Arc>>, } impl Litep2pNetworkService { @@ -181,8 +183,8 @@ impl Litep2pNetworkService { peerset_handles: HashMap, block_announce_protocol: ProtocolName, request_response_protocols: HashMap>, - listen_addresses: Arc>>, - external_addresses: Arc>>, + listen_addresses: Arc>>, + external_addresses: Arc>>, ) -> Self { Self { local_peer_id, @@ -322,7 +324,7 @@ impl NetworkPeers for Litep2pNetworkService { fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::AddPeersToReservedSet { protocol: self.block_announce_protocol.clone(), - peers: HashSet::from_iter([peer.concat()]), + peers: HashSet::from_iter([peer.concat().into()]), }); Ok(()) @@ -415,11 +417,11 @@ impl NetworkEventStream for Litep2pNetworkService { impl NetworkStateInfo for Litep2pNetworkService { fn external_addresses(&self) -> Vec { - self.external_addresses.read().iter().cloned().collect() + self.external_addresses.read().iter().cloned().map(Into::into).collect() } fn listen_addresses(&self) -> Vec { - self.listen_addresses.read().iter().cloned().collect() + self.listen_addresses.read().iter().cloned().map(Into::into).collect() } fn local_peer_id(&self) -> PeerId { diff --git a/substrate/client/network/src/peer_store.rs b/substrate/client/network/src/peer_store.rs index a4c739f1448e685aad34819b627333e4c250d957..987405500dc9b22e8a19d667cc4e1fe26f18c16d 100644 --- a/substrate/client/network/src/peer_store.rs +++ b/substrate/client/network/src/peer_store.rs @@ -19,8 +19,9 @@ //! [`PeerStore`] manages peer reputations and provides connection candidates to //! [`crate::protocol_controller::ProtocolController`]. -use crate::{service::traits::PeerStore as PeerStoreT, PeerId}; +use crate::service::traits::PeerStore as PeerStoreT; +use libp2p::PeerId; use log::trace; use parking_lot::Mutex; use partial_sort::PartialSort; diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs index 15d289d170ee83e52476d4b5a1c5a10bde3274c8..4f6d32ae3b356f8813bb38cab5b80050f600931d 100644 --- a/substrate/client/network/src/protocol/notifications/service/mod.rs +++ b/substrate/client/network/src/protocol/notifications/service/mod.rs @@ -28,13 +28,13 @@ use crate::{ }, }, types::ProtocolName, - PeerId, }; use futures::{ stream::{FuturesUnordered, Stream}, StreamExt, }; +use libp2p::PeerId; use parking_lot::Mutex; use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::ReceiverStream; diff --git a/substrate/client/network/src/protocol/notifications/service/tests.rs b/substrate/client/network/src/protocol/notifications/service/tests.rs index f0157f6d28dd10d7382427b7464e4f478d912c03..32ccb3348adfbce266561645c75fea466023ed72 100644 --- a/substrate/client/network/src/protocol/notifications/service/tests.rs +++ b/substrate/client/network/src/protocol/notifications/service/tests.rs @@ -200,7 +200,7 @@ async fn send_async_notification_to_non_existent_peer() { if let Err(error::Error::PeerDoesntExist(peer_id)) = notif.send_async_notification(&peer.into(), vec![1, 3, 3, 7]).await { - assert_eq!(peer, peer_id); + assert_eq!(peer, peer_id.into()); } else { panic!("invalid error received from `send_async_notification()`"); } diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index 2c3e6744e328f3900790e747a3a8d998f4e6964e..da51a7a4f9f43f4ac68632a7701d62983f08ec26 100644 --- 
a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -41,12 +41,10 @@ //! Even though this does not guarantee that `ProtocolController` and `Notifications` have the same //! view of the peers' states at any given moment, the eventual consistency is maintained. -use crate::{ - peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT}, - PeerId, -}; +use crate::peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT}; use futures::{channel::oneshot, future::Either, FutureExt, StreamExt}; +use libp2p::PeerId; use log::{debug, error, trace, warn}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_arithmetic::traits::SaturatedConversion; @@ -860,8 +858,9 @@ mod tests { use super::*; use crate::{ peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT}, - PeerId, ReputationChange, + ReputationChange, }; + use libp2p::PeerId; use sc_network_common::role::ObservedRole; use sc_utils::mpsc::{tracing_unbounded, TryRecvError}; use std::collections::HashSet; diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index 807c5b5a80afa682ef07b9ba2326a528b5d1c8f5..27de12bc1ec9a50ca8c5773de8a6bdbda61d35ab 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -55,24 +55,26 @@ use crate::{ }, transport, types::ProtocolName, - Multiaddr, NotificationService, PeerId, ReputationChange, + NotificationService, ReputationChange, }; use codec::DecodeAll; use either::Either; use futures::{channel::oneshot, prelude::*}; +use libp2p::identity::ed25519; #[allow(deprecated)] use libp2p::{ connection_limits::Exceeded, core::{upgrade, ConnectedPoint, Endpoint}, identify::Info as IdentifyInfo, kad::record::Key as KademliaKey, - multiaddr, + multiaddr::{self, Multiaddr}, ping::Failure as PingFailure, swarm::{ AddressScore, ConnectionError, ConnectionId, ConnectionLimits, DialError, Executor, ListenError, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent, THandlerErr, }, + PeerId, }; use log::{debug, error, info, trace, warn}; use metrics::{Histogram, MetricSources, Metrics}; @@ -269,6 +271,15 @@ where let local_public = local_identity.public(); let local_peer_id = local_public.to_peer_id(); + // Convert to libp2p types. 
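+	// The worker keeps sc-network-types as its public vocabulary and converts
+	// once, here, at the libp2p boundary: the keypair and public key via the
+	// new ed25519 `From` impls, the peer id via `Into<libp2p::PeerId>`, and
+	// addresses via `Into<libp2p::Multiaddr>`. None of these conversions
+	// returns an error.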
+ let local_identity: ed25519::Keypair = local_identity.into(); + let local_public: ed25519::PublicKey = local_public.into(); + let local_peer_id: PeerId = local_peer_id.into(); + let listen_addresses: Vec = + network_config.listen_addresses.iter().cloned().map(Into::into).collect(); + let public_addresses: Vec = + network_config.public_addresses.iter().cloned().map(Into::into).collect(); + network_config.boot_nodes = network_config .boot_nodes .into_iter() @@ -370,7 +381,7 @@ where }; transport::build_transport( - local_identity.clone(), + local_identity.clone().into(), config_mem, network_config.yamux_window_size, yamux_maximum_buffer_size, @@ -462,7 +473,7 @@ where .find(|o| o.peer_id != bootnode.peer_id) { Err(Error::DuplicateBootnode { - address: bootnode.multiaddr.clone(), + address: bootnode.multiaddr.clone().into(), first_id: bootnode.peer_id.into(), second_id: other.peer_id.into(), }) @@ -478,7 +489,7 @@ where boot_node_ids .entry(bootnode.peer_id.into()) .or_default() - .push(bootnode.multiaddr.clone()); + .push(bootnode.multiaddr.clone().into()); } let boot_node_ids = Arc::new(boot_node_ids); @@ -502,11 +513,11 @@ where format!("{} ({})", network_config.client_version, network_config.node_name); let discovery_config = { - let mut config = DiscoveryConfig::new(local_public.to_peer_id()); + let mut config = DiscoveryConfig::new(local_peer_id); config.with_permanent_addresses( known_addresses .iter() - .map(|(peer, address)| (peer.into(), address.clone())) + .map(|(peer, address)| (peer.into(), address.clone().into())) .collect::>(), ); config.discovery_limit(u64::from(network_config.default_peers_set.out_peers) + 15); @@ -544,7 +555,7 @@ where let result = Behaviour::new( protocol, user_agent, - local_public, + local_public.into(), discovery_config, request_response_protocols, Arc::clone(&peer_store_handle), @@ -581,7 +592,7 @@ where crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING, )), ) - .substream_upgrade_protocol_override(upgrade::Version::V1Lazy) + .substream_upgrade_protocol_override(upgrade::Version::V1) .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) // NOTE: 24 is somewhat arbitrary and should be tuned in the future if necessary. // See @@ -604,14 +615,14 @@ where }; // Listen on multiaddresses. - for addr in &network_config.listen_addresses { + for addr in &listen_addresses { if let Err(err) = Swarm::>::listen_on(&mut swarm, addr.clone()) { warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) } } // Add external addresses. - for addr in &network_config.public_addresses { + for addr in &public_addresses { Swarm::>::add_external_address( &mut swarm, addr.clone(), @@ -619,15 +630,15 @@ where ); } - let listen_addresses = Arc::new(Mutex::new(HashSet::new())); + let listen_addresses_set = Arc::new(Mutex::new(HashSet::new())); let service = Arc::new(NetworkService { bandwidth, external_addresses, - listen_addresses: listen_addresses.clone(), + listen_addresses: listen_addresses_set.clone(), num_connected: num_connected.clone(), local_peer_id, - local_identity, + local_identity: local_identity.into(), to_worker, notification_protocol_ids, protocol_handles, @@ -638,7 +649,7 @@ where }); Ok(NetworkWorker { - listen_addresses, + listen_addresses: listen_addresses_set, num_connected, network_service: swarm, service, @@ -880,13 +891,13 @@ where H: ExHashT, { /// Returns the local external addresses. 
- fn external_addresses(&self) -> Vec { - self.external_addresses.lock().iter().cloned().collect() + fn external_addresses(&self) -> Vec { + self.external_addresses.lock().iter().cloned().map(Into::into).collect() } /// Returns the listener addresses (without trailing `/p2p/` with our `PeerId`). - fn listen_addresses(&self) -> Vec { - self.listen_addresses.lock().iter().cloned().collect() + fn listen_addresses(&self) -> Vec { + self.listen_addresses.lock().iter().cloned().map(Into::into).collect() } /// Returns the local Peer ID. @@ -998,10 +1009,14 @@ where self.sync_protocol_handle.set_reserved_only(reserved_only); } - fn add_known_address(&self, peer_id: sc_network_types::PeerId, addr: Multiaddr) { + fn add_known_address( + &self, + peer_id: sc_network_types::PeerId, + addr: sc_network_types::multiaddr::Multiaddr, + ) { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.into(), addr)); + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.into(), addr.into())); } fn report_peer(&self, peer_id: sc_network_types::PeerId, cost_benefit: ReputationChange) { @@ -1034,7 +1049,7 @@ where let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddKnownAddress( peer.peer_id.into(), - peer.multiaddr, + peer.multiaddr.into(), )); self.sync_protocol_handle.add_reserved_peer(peer.peer_id.into()); @@ -1048,16 +1063,16 @@ where fn set_reserved_peers( &self, protocol: ProtocolName, - peers: HashSet, + peers: HashSet, ) -> Result<(), String> { let Some(set_id) = self.notification_protocol_ids.get(&protocol) else { return Err(format!("Cannot set reserved peers for unknown protocol: {}", protocol)) }; + let peers: HashSet = peers.into_iter().map(Into::into).collect(); let peers_addrs = self.split_multiaddr_and_peer_id(peers)?; - let mut peers: HashSet = - HashSet::with_capacity(peers_addrs.len()); + let mut peers: HashSet = HashSet::with_capacity(peers_addrs.len()); for (peer_id, addr) in peers_addrs.into_iter() { // Make sure the local peer ID is never added to the PSM. 
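Both reserved-peer setters above now accept `sc_network_types::multiaddr::Multiaddr` and rely on `split_multiaddr_and_peer_id` to peel the trailing `/p2p/...` component off before peer ids reach the protocol handles. A hedged sketch of that splitting step using only the `Multiaddr`/`Protocol` API introduced by this PR (not the service's actual helper):

```rust
use sc_network_types::{
    multiaddr::{Multiaddr, Protocol},
    PeerId,
};

// Pop a trailing `/p2p/<peer-id>` component, returning the peer id and the
// bare transport address. Assumes a `PeerId::from_multihash` constructor
// like the one used by the litep2p backend above.
fn split_peer_id(mut addr: Multiaddr) -> Option<(PeerId, Multiaddr)> {
    match addr.pop()? {
        Protocol::P2p(multihash) =>
            PeerId::from_multihash(multihash).ok().map(|peer| (peer, addr)),
        _ => None,
    }
}
```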
@@ -1074,8 +1089,7 @@ where } } - self.protocol_handles[usize::from(*set_id)] - .set_reserved_peers(peers.iter().map(|peer| (*peer).into()).collect()); + self.protocol_handles[usize::from(*set_id)].set_reserved_peers(peers); Ok(()) } @@ -1083,7 +1097,7 @@ where fn add_peers_to_reserved_set( &self, protocol: ProtocolName, - peers: HashSet, + peers: HashSet, ) -> Result<(), String> { let Some(set_id) = self.notification_protocol_ids.get(&protocol) else { return Err(format!( @@ -1092,6 +1106,7 @@ where )) }; + let peers: HashSet = peers.into_iter().map(Into::into).collect(); let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, addr) in peers.into_iter() { @@ -1723,8 +1738,8 @@ where { if let DialError::WrongPeerId { obtained, endpoint } = &error { if let ConnectedPoint::Dialer { address, role_override: _ } = endpoint { - let address_without_peer_id = parse_addr(address.clone()) - .map_or_else(|_| address.clone(), |r| r.1); + let address_without_peer_id = parse_addr(address.clone().into()) + .map_or_else(|_| address.clone(), |r| r.1.into()); // Only report for address of boot node that was added at startup of // the node and not for any address that the node learned of the @@ -1860,14 +1875,14 @@ where } pub(crate) fn ensure_addresses_consistent_with_transport<'a>( - addresses: impl Iterator, + addresses: impl Iterator, transport: &TransportConfig, ) -> Result<(), Error> { + use sc_network_types::multiaddr::Protocol; + if matches!(transport, TransportConfig::MemoryOnly) { let addresses: Vec<_> = addresses - .filter(|x| { - x.iter().any(|y| !matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) - }) + .filter(|x| x.iter().any(|y| !matches!(y, Protocol::Memory(_)))) .cloned() .collect(); @@ -1879,7 +1894,7 @@ pub(crate) fn ensure_addresses_consistent_with_transport<'a>( } } else { let addresses: Vec<_> = addresses - .filter(|x| x.iter().any(|y| matches!(y, libp2p::core::multiaddr::Protocol::Memory(_)))) + .filter(|x| x.iter().any(|y| matches!(y, Protocol::Memory(_)))) .cloned() .collect(); diff --git a/substrate/client/network/src/service/traits.rs b/substrate/client/network/src/service/traits.rs index 9bbaeb1026f9304d43d347893de5795fd0bb93b4..d1ea9a2ed568f7ee35416164e5e7f1a9d89d5c3b 100644 --- a/substrate/client/network/src/service/traits.rs +++ b/substrate/client/network/src/service/traits.rs @@ -28,7 +28,7 @@ use crate::{ request_responses::{IfDisconnected, RequestFailure}, service::{metrics::NotificationMetrics, signature::Signature, PeerStoreProvider}, types::ProtocolName, - Multiaddr, ReputationChange, + ReputationChange, }; use futures::{channel::oneshot, Stream}; @@ -36,7 +36,7 @@ use prometheus_endpoint::Registry; use sc_client_api::BlockBackend; use sc_network_common::{role::ObservedRole, ExHashT}; -use sc_network_types::PeerId; +use sc_network_types::{multiaddr::Multiaddr, PeerId}; use sp_runtime::traits::Block as BlockT; use std::{collections::HashSet, fmt::Debug, future::Future, pin::Pin, sync::Arc, time::Duration}; diff --git a/substrate/client/network/statement/Cargo.toml b/substrate/client/network/statement/Cargo.toml index bcfcf24864cff9492255e342b78b12215470ccc5..0dfaa491b65c93acb50f44bb002d655043317d99 100644 --- a/substrate/client/network/statement/Cargo.toml +++ b/substrate/client/network/statement/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.2.2" async-channel = "1.8.0" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", 
version = "3.6.12", features = ["derive"] } futures = "0.3.30" libp2p = "0.51.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index b25a3657b6abbd8d955905b00469e4ba19cdd513..964090444b22afdeca073ebc25d0be71817f1248 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -22,7 +22,7 @@ prost-build = "0.12.4" array-bytes = "6.2.2" async-channel = "1.8.0" async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.30" futures-timer = "3.0.2" libp2p = "0.51.4" diff --git a/substrate/client/network/sync/src/block_announce_validator.rs b/substrate/client/network/sync/src/block_announce_validator.rs index 3c994dd69442a0a7b0f29820b8044f0308436c48..cb1d5ee6b22ee92104faa061fbc8958ce20d316d 100644 --- a/substrate/client/network/sync/src/block_announce_validator.rs +++ b/substrate/client/network/sync/src/block_announce_validator.rs @@ -156,7 +156,7 @@ impl BlockAnnounceValidator { return }, AllocateSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { - warn!( + debug!( target: LOG_TARGET, "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots for this peer are occupied.", number, diff --git a/substrate/client/network/sync/src/service/mock.rs b/substrate/client/network/sync/src/service/mock.rs index 2e7e12af53d5373f4335361a108cd3980c7c3dcb..141edc7c884144455e43f5b7c5b2b62ae246bbab 100644 --- a/substrate/client/network/sync/src/service/mock.rs +++ b/substrate/client/network/sync/src/service/mock.rs @@ -23,10 +23,10 @@ use sc_network::{ config::MultiaddrWithPeerId, request_responses::{IfDisconnected, RequestFailure}, types::ProtocolName, - Multiaddr, NetworkPeers, NetworkRequest, NetworkSyncForkRequest, ReputationChange, + NetworkPeers, NetworkRequest, NetworkSyncForkRequest, ReputationChange, }; use sc_network_common::role::ObservedRole; -use sc_network_types::PeerId; +use sc_network_types::{multiaddr::Multiaddr, PeerId}; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::collections::HashSet; diff --git a/substrate/client/network/test/src/fuzz.rs b/substrate/client/network/test/src/fuzz.rs index 69d08d47d26a9ea4368ffdfea456711345d15d8b..b0cd6dcf999391a0fedd5a29a38b89d370e25771 100644 --- a/substrate/client/network/test/src/fuzz.rs +++ b/substrate/client/network/test/src/fuzz.rs @@ -20,6 +20,7 @@ //! and `PeerStore` to discover possible inconsistencies in peer management. 
use futures::prelude::*; +use libp2p::PeerId; use rand::{ distributions::{Distribution, Uniform, WeightedIndex}, seq::IteratorRandom, @@ -27,7 +28,7 @@ use rand::{ use sc_network::{ peer_store::{PeerStore, PeerStoreProvider}, protocol_controller::{IncomingIndex, Message, ProtoSetConfig, ProtocolController, SetId}, - PeerId, ReputationChange, + ReputationChange, }; use sc_utils::mpsc::tracing_unbounded; use std::{ diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 48a4b3d6e6e16a4281c6e504d1c0d9511d2eb677..8a8f9608051af0bcb523b405585ad7c0b81b4ae2 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -35,7 +35,7 @@ use std::{ }; use futures::{channel::oneshot, future::BoxFuture, pin_mut, prelude::*}; -use libp2p::{build_multiaddr, PeerId}; +use libp2p::PeerId; use log::trace; use parking_lot::Mutex; use sc_block_builder::{BlockBuilder, BlockBuilderBuilder}; @@ -57,8 +57,8 @@ use sc_network::{ peer_store::PeerStore, request_responses::ProtocolConfig as RequestResponseConfig, types::ProtocolName, - Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest, - NetworkWorker, NotificationMetrics, NotificationService, + NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest, NetworkWorker, + NotificationMetrics, NotificationService, }; use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -71,6 +71,7 @@ use sc_network_sync::{ }, warp_request_handler, }; +use sc_network_types::{build_multiaddr, multiaddr::Multiaddr}; use sc_service::client::Client; use sp_blockchain::{ Backend as BlockchainBackend, HeaderBackend, Info as BlockchainInfo, Result as ClientResult, @@ -985,7 +986,7 @@ pub trait TestNetFactory: Default + Sized + Send { for peer in peers.iter_mut() { peer.network.add_known_address( network.service().local_peer_id().into(), - listen_addr.clone(), + listen_addr.clone().into(), ); } diff --git a/substrate/client/network/transactions/Cargo.toml b/substrate/client/network/transactions/Cargo.toml index 7510db808f4c198ea18e313b5b13f8466ead6cc8..d871b59b37bb120783aa2e27a23aa9ee814b3ddf 100644 --- a/substrate/client/network/transactions/Cargo.toml +++ b/substrate/client/network/transactions/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.2.2" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } futures = "0.3.30" libp2p = "0.51.4" log = { workspace = true, default-features = true } diff --git a/substrate/client/network/types/Cargo.toml b/substrate/client/network/types/Cargo.toml index 8815ccdca3c02c07815dcc699258cac90429c260..a9334aaa1705987a35322921616188113f011ed8 100644 --- a/substrate/client/network/types/Cargo.toml +++ b/substrate/client/network/types/Cargo.toml @@ -11,9 +11,14 @@ documentation = "https://docs.rs/sc-network-types" [dependencies] bs58 = "0.5.0" +ed25519-dalek = "2.1" libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } -litep2p = { git = "https://github.com/paritytech/litep2p", rev = "e03a6023882db111beeb24d8c0ceaac0721d3f0f" } +litep2p = "0.5.0" multiaddr = "0.17.0" multihash = { version = "0.17.0", default-features = false, features = ["identity", "multihash-impl", "sha2", "std"] } rand = "0.8.5" thiserror = "1.0.48" +zeroize = { version = "1.7.0", default-features = false } + 
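`zeroize` is pulled in so the new `ed25519::SecretKey` can wipe key material from memory when it is parsed or dropped. The pattern, reduced to a self-contained sketch (the actual implementation is in `src/ed25519.rs` below):

```rust
use zeroize::Zeroize;

// A 32-byte secret that overwrites its key bytes before the memory is freed.
struct Secret([u8; 32]);

impl Drop for Secret {
    fn drop(&mut self) {
        self.0.zeroize();
    }
}
```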
+[dev-dependencies] +quickcheck = "1.0.3" diff --git a/substrate/client/network/types/src/ed25519.rs b/substrate/client/network/types/src/ed25519.rs new file mode 100644 index 0000000000000000000000000000000000000000..e85f405b13066b8be1eed82f8ca783aa62c777d9 --- /dev/null +++ b/substrate/client/network/types/src/ed25519.rs @@ -0,0 +1,551 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Ed25519 keys. + +use crate::PeerId; +use core::{cmp, fmt, hash}; +use ed25519_dalek::{self as ed25519, Signer as _, Verifier as _}; +use libp2p_identity::ed25519 as libp2p_ed25519; +use litep2p::crypto::ed25519 as litep2p_ed25519; +use zeroize::Zeroize; + +/// An Ed25519 keypair. +#[derive(Clone)] +pub struct Keypair(ed25519::SigningKey); + +impl Keypair { + /// Generate a new random Ed25519 keypair. + pub fn generate() -> Keypair { + Keypair::from(SecretKey::generate()) + } + + /// Convert the keypair into a byte array by concatenating the bytes + /// of the secret scalar and the compressed public point, + /// an informal standard for encoding Ed25519 keypairs. + pub fn to_bytes(&self) -> [u8; 64] { + self.0.to_keypair_bytes() + } + + /// Try to parse a keypair from the [binary format](https://datatracker.ietf.org/doc/html/rfc8032#section-5.1.5) + /// produced by [`Keypair::to_bytes`], zeroing the input on success. + /// + /// Note that this binary format is the same as `ed25519_dalek`'s and `ed25519_zebra`'s. + pub fn try_from_bytes(kp: &mut [u8]) -> Result { + let bytes = <[u8; 64]>::try_from(&*kp) + .map_err(|e| DecodingError::KeypairParseError(Box::new(e)))?; + + ed25519::SigningKey::from_keypair_bytes(&bytes) + .map(|k| { + kp.zeroize(); + Keypair(k) + }) + .map_err(|e| DecodingError::KeypairParseError(Box::new(e))) + } + + /// Sign a message using the private key of this keypair. + pub fn sign(&self, msg: &[u8]) -> Vec { + self.0.sign(msg).to_bytes().to_vec() + } + + /// Get the public key of this keypair. + pub fn public(&self) -> PublicKey { + PublicKey(self.0.verifying_key()) + } + + /// Get the secret key of this keypair. 
+ pub fn secret(&self) -> SecretKey { + SecretKey(self.0.to_bytes()) + } +} + +impl fmt::Debug for Keypair { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Keypair").field("public", &self.0.verifying_key()).finish() + } +} + +impl From for Keypair { + fn from(kp: litep2p_ed25519::Keypair) -> Self { + Self::try_from_bytes(&mut kp.encode()) + .expect("ed25519_dalek in substrate & litep2p to use the same format") + } +} + +impl From for litep2p_ed25519::Keypair { + fn from(kp: Keypair) -> Self { + Self::decode(&mut kp.to_bytes()) + .expect("ed25519_dalek in substrate & litep2p to use the same format") + } +} + +impl From for Keypair { + fn from(kp: libp2p_ed25519::Keypair) -> Self { + Self::try_from_bytes(&mut kp.to_bytes()) + .expect("ed25519_dalek in substrate & libp2p to use the same format") + } +} + +impl From for libp2p_ed25519::Keypair { + fn from(kp: Keypair) -> Self { + Self::try_from_bytes(&mut kp.to_bytes()) + .expect("ed25519_dalek in substrate & libp2p to use the same format") + } +} + +/// Demote an Ed25519 keypair to a secret key. +impl From for SecretKey { + fn from(kp: Keypair) -> SecretKey { + SecretKey(kp.0.to_bytes()) + } +} + +/// Promote an Ed25519 secret key into a keypair. +impl From for Keypair { + fn from(sk: SecretKey) -> Keypair { + let signing = ed25519::SigningKey::from_bytes(&sk.0); + Keypair(signing) + } +} + +/// An Ed25519 public key. +#[derive(Eq, Clone)] +pub struct PublicKey(ed25519::VerifyingKey); + +impl fmt::Debug for PublicKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("PublicKey(compressed): ")?; + for byte in self.0.as_bytes() { + write!(f, "{byte:x}")?; + } + Ok(()) + } +} + +impl cmp::PartialEq for PublicKey { + fn eq(&self, other: &Self) -> bool { + self.0.as_bytes().eq(other.0.as_bytes()) + } +} + +impl hash::Hash for PublicKey { + fn hash(&self, state: &mut H) { + self.0.as_bytes().hash(state); + } +} + +impl cmp::PartialOrd for PublicKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl cmp::Ord for PublicKey { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.0.as_bytes().cmp(other.0.as_bytes()) + } +} + +impl PublicKey { + /// Verify the Ed25519 signature on a message using the public key. + pub fn verify(&self, msg: &[u8], sig: &[u8]) -> bool { + ed25519::Signature::try_from(sig).and_then(|s| self.0.verify(msg, &s)).is_ok() + } + + /// Convert the public key to a byte array in compressed form, i.e. + /// where one coordinate is represented by a single bit. + pub fn to_bytes(&self) -> [u8; 32] { + self.0.to_bytes() + } + + /// Try to parse a public key from a byte array containing the actual key as produced by + /// `to_bytes`. + pub fn try_from_bytes(k: &[u8]) -> Result { + let k = + <[u8; 32]>::try_from(k).map_err(|e| DecodingError::PublicKeyParseError(Box::new(e)))?; + ed25519::VerifyingKey::from_bytes(&k) + .map_err(|e| DecodingError::PublicKeyParseError(Box::new(e))) + .map(PublicKey) + } + + /// Convert public key to `PeerId`. 
+ pub fn to_peer_id(&self) -> PeerId { + litep2p::PeerId::from(litep2p::crypto::PublicKey::Ed25519(self.clone().into())).into() + } +} + +impl From for PublicKey { + fn from(k: litep2p_ed25519::PublicKey) -> Self { + Self::try_from_bytes(&k.encode()) + .expect("ed25519_dalek in substrate & litep2p to use the same format") + } +} + +impl From for litep2p_ed25519::PublicKey { + fn from(k: PublicKey) -> Self { + Self::decode(&k.to_bytes()) + .expect("ed25519_dalek in substrate & litep2p to use the same format") + } +} + +impl From for PublicKey { + fn from(k: libp2p_ed25519::PublicKey) -> Self { + Self::try_from_bytes(&k.to_bytes()) + .expect("ed25519_dalek in substrate & libp2p to use the same format") + } +} + +impl From for libp2p_ed25519::PublicKey { + fn from(k: PublicKey) -> Self { + Self::try_from_bytes(&k.to_bytes()) + .expect("ed25519_dalek in substrate & libp2p to use the same format") + } +} + +/// An Ed25519 secret key. +#[derive(Clone)] +pub struct SecretKey(ed25519::SecretKey); + +/// View the bytes of the secret key. +impl AsRef<[u8]> for SecretKey { + fn as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +impl fmt::Debug for SecretKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SecretKey") + } +} + +impl SecretKey { + /// Generate a new Ed25519 secret key. + pub fn generate() -> SecretKey { + let signing = ed25519::SigningKey::generate(&mut rand::rngs::OsRng); + SecretKey(signing.to_bytes()) + } + + /// Try to parse an Ed25519 secret key from a byte slice + /// containing the actual key, zeroing the input on success. + /// If the bytes do not constitute a valid Ed25519 secret key, an error is + /// returned. + pub fn try_from_bytes(mut sk_bytes: impl AsMut<[u8]>) -> Result { + let sk_bytes = sk_bytes.as_mut(); + let secret = <[u8; 32]>::try_from(&*sk_bytes) + .map_err(|e| DecodingError::SecretKeyParseError(Box::new(e)))?; + sk_bytes.zeroize(); + Ok(SecretKey(secret)) + } + + pub fn to_bytes(&self) -> [u8; 32] { + self.0 + } +} + +impl Drop for SecretKey { + fn drop(&mut self) { + self.0.zeroize(); + } +} + +impl From for SecretKey { + fn from(sk: litep2p_ed25519::SecretKey) -> Self { + Self::try_from_bytes(&mut sk.to_bytes()).expect("Ed25519 key to be 32 bytes length") + } +} + +impl From for litep2p_ed25519::SecretKey { + fn from(sk: SecretKey) -> Self { + Self::from_bytes(&mut sk.to_bytes()) + .expect("litep2p `SecretKey` to accept 32 bytes as Ed25519 key") + } +} + +impl From for SecretKey { + fn from(sk: libp2p_ed25519::SecretKey) -> Self { + Self::try_from_bytes(&mut sk.as_ref().to_owned()) + .expect("Ed25519 key to be 32 bytes length") + } +} + +impl From for libp2p_ed25519::SecretKey { + fn from(sk: SecretKey) -> Self { + Self::try_from_bytes(&mut sk.to_bytes()) + .expect("libp2p `SecretKey` to accept 32 bytes as Ed25519 key") + } +} + +/// Error when decoding `ed25519`-related types. 
+#[derive(Debug, thiserror::Error)] +pub enum DecodingError { + #[error("failed to parse Ed25519 keypair: {0}")] + KeypairParseError(Box), + #[error("failed to parse Ed25519 secret key: {0}")] + SecretKeyParseError(Box), + #[error("failed to parse Ed25519 public key: {0}")] + PublicKeyParseError(Box), +} + +#[cfg(test)] +mod tests { + use super::*; + use quickcheck::*; + + fn eq_keypairs(kp1: &Keypair, kp2: &Keypair) -> bool { + kp1.public() == kp2.public() && kp1.0.to_bytes() == kp2.0.to_bytes() + } + + #[test] + fn ed25519_keypair_encode_decode() { + fn prop() -> bool { + let kp1 = Keypair::generate(); + let mut kp1_enc = kp1.to_bytes(); + let kp2 = Keypair::try_from_bytes(&mut kp1_enc).unwrap(); + eq_keypairs(&kp1, &kp2) && kp1_enc.iter().all(|b| *b == 0) + } + QuickCheck::new().tests(10).quickcheck(prop as fn() -> _); + } + + #[test] + fn ed25519_keypair_from_secret() { + fn prop() -> bool { + let kp1 = Keypair::generate(); + let mut sk = kp1.0.to_bytes(); + let kp2 = Keypair::from(SecretKey::try_from_bytes(&mut sk).unwrap()); + eq_keypairs(&kp1, &kp2) && sk == [0u8; 32] + } + QuickCheck::new().tests(10).quickcheck(prop as fn() -> _); + } + + #[test] + fn ed25519_signature() { + let kp = Keypair::generate(); + let pk = kp.public(); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + assert!(pk.verify(msg, &sig)); + + let mut invalid_sig = sig.clone(); + invalid_sig[3..6].copy_from_slice(&[10, 23, 42]); + assert!(!pk.verify(msg, &invalid_sig)); + + let invalid_msg = "h3ll0 w0rld".as_bytes(); + assert!(!pk.verify(invalid_msg, &sig)); + } + + #[test] + fn substrate_kp_to_libs() { + let kp = Keypair::generate(); + let kp_bytes = kp.to_bytes(); + let kp1: libp2p_ed25519::Keypair = kp.clone().into(); + let kp2: litep2p_ed25519::Keypair = kp.clone().into(); + let kp3 = libp2p_ed25519::Keypair::try_from_bytes(&mut kp_bytes.clone()).unwrap(); + let kp4 = litep2p_ed25519::Keypair::decode(&mut kp_bytes.clone()).unwrap(); + + assert_eq!(kp_bytes, kp1.to_bytes()); + assert_eq!(kp_bytes, kp2.encode()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + let sig1 = kp1.sign(msg); + let sig2 = kp2.sign(msg); + let sig3 = kp3.sign(msg); + let sig4 = kp4.sign(msg); + + assert_eq!(sig, sig1); + assert_eq!(sig, sig2); + assert_eq!(sig, sig3); + assert_eq!(sig, sig4); + + let pk1 = kp1.public(); + let pk2 = kp2.public(); + let pk3 = kp3.public(); + let pk4 = kp4.public(); + + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + assert!(pk3.verify(msg, &sig)); + assert!(pk4.verify(msg, &sig)); + } + + #[test] + fn litep2p_kp_to_substrate_kp() { + let kp = litep2p_ed25519::Keypair::generate(); + let kp1: Keypair = kp.clone().into(); + let kp2 = Keypair::try_from_bytes(&mut kp.encode()).unwrap(); + + assert_eq!(kp.encode(), kp1.to_bytes()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + let sig1 = kp1.sign(msg); + let sig2 = kp2.sign(msg); + + assert_eq!(sig, sig1); + assert_eq!(sig, sig2); + + let pk1 = kp1.public(); + let pk2 = kp2.public(); + + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + } + + #[test] + fn libp2p_kp_to_substrate_kp() { + let kp = libp2p_ed25519::Keypair::generate(); + let kp1: Keypair = kp.clone().into(); + let kp2 = Keypair::try_from_bytes(&mut kp.to_bytes()).unwrap(); + + assert_eq!(kp.to_bytes(), kp1.to_bytes()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + let sig1 = kp1.sign(msg); + let sig2 = kp2.sign(msg); + + assert_eq!(sig, sig1); + assert_eq!(sig, sig2); + + let pk1 
= kp1.public(); + let pk2 = kp2.public(); + + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + } + + #[test] + fn substrate_pk_to_libs() { + let kp = Keypair::generate(); + let pk = kp.public(); + let pk_bytes = pk.to_bytes(); + let pk1: libp2p_ed25519::PublicKey = pk.clone().into(); + let pk2: litep2p_ed25519::PublicKey = pk.clone().into(); + let pk3 = libp2p_ed25519::PublicKey::try_from_bytes(&pk_bytes).unwrap(); + let pk4 = litep2p_ed25519::PublicKey::decode(&pk_bytes).unwrap(); + + assert_eq!(pk_bytes, pk1.to_bytes()); + assert_eq!(pk_bytes, pk2.encode()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert!(pk.verify(msg, &sig)); + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + assert!(pk3.verify(msg, &sig)); + assert!(pk4.verify(msg, &sig)); + } + + #[test] + fn litep2p_pk_to_substrate_pk() { + let kp = litep2p_ed25519::Keypair::generate(); + let pk = kp.public(); + let pk_bytes = pk.clone().encode(); + let pk1: PublicKey = pk.clone().into(); + let pk2 = PublicKey::try_from_bytes(&pk_bytes).unwrap(); + + assert_eq!(pk_bytes, pk1.to_bytes()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert!(pk.verify(msg, &sig)); + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + } + + #[test] + fn libp2p_pk_to_substrate_pk() { + let kp = libp2p_ed25519::Keypair::generate(); + let pk = kp.public(); + let pk_bytes = pk.clone().to_bytes(); + let pk1: PublicKey = pk.clone().into(); + let pk2 = PublicKey::try_from_bytes(&pk_bytes).unwrap(); + + assert_eq!(pk_bytes, pk1.to_bytes()); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert!(pk.verify(msg, &sig)); + assert!(pk1.verify(msg, &sig)); + assert!(pk2.verify(msg, &sig)); + } + + #[test] + fn substrate_sk_to_libs() { + let sk = SecretKey::generate(); + let sk_bytes = sk.to_bytes(); + let sk1: libp2p_ed25519::SecretKey = sk.clone().into(); + let sk2: litep2p_ed25519::SecretKey = sk.clone().into(); + let sk3 = libp2p_ed25519::SecretKey::try_from_bytes(&mut sk_bytes.clone()).unwrap(); + let sk4 = litep2p_ed25519::SecretKey::from_bytes(&mut sk_bytes.clone()).unwrap(); + + let kp: Keypair = sk.into(); + let kp1: libp2p_ed25519::Keypair = sk1.into(); + let kp2: litep2p_ed25519::Keypair = sk2.into(); + let kp3: libp2p_ed25519::Keypair = sk3.into(); + let kp4: litep2p_ed25519::Keypair = sk4.into(); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert_eq!(sig, kp1.sign(msg)); + assert_eq!(sig, kp2.sign(msg)); + assert_eq!(sig, kp3.sign(msg)); + assert_eq!(sig, kp4.sign(msg)); + } + + #[test] + fn litep2p_sk_to_substrate_sk() { + let sk = litep2p_ed25519::SecretKey::generate(); + let sk1: SecretKey = sk.clone().into(); + let sk2 = SecretKey::try_from_bytes(&mut sk.to_bytes()).unwrap(); + + let kp: litep2p_ed25519::Keypair = sk.into(); + let kp1: Keypair = sk1.into(); + let kp2: Keypair = sk2.into(); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert_eq!(sig, kp1.sign(msg)); + assert_eq!(sig, kp2.sign(msg)); + } + + #[test] + fn libp2p_sk_to_substrate_sk() { + let sk = libp2p_ed25519::SecretKey::generate(); + let sk_bytes = sk.as_ref().to_owned(); + let sk1: SecretKey = sk.clone().into(); + let sk2 = SecretKey::try_from_bytes(sk_bytes).unwrap(); + + let kp: libp2p_ed25519::Keypair = sk.into(); + let kp1: Keypair = sk1.into(); + let kp2: Keypair = sk2.into(); + + let msg = "hello world".as_bytes(); + let sig = kp.sign(msg); + + assert_eq!(sig, kp1.sign(msg)); + assert_eq!(sig, 
kp2.sign(msg)); + } +} diff --git a/substrate/client/network/types/src/lib.rs b/substrate/client/network/types/src/lib.rs index 9a126c48c7eab66ab39dd55a081a3a938aa16b92..5684e38ab2e8474e171a24260a63c22fcc30694c 100644 --- a/substrate/client/network/types/src/lib.rs +++ b/substrate/client/network/types/src/lib.rs @@ -13,6 +13,12 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -mod peer_id; +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +pub mod ed25519; +pub mod multiaddr; +pub mod multihash; +mod peer_id; pub use peer_id::PeerId; diff --git a/substrate/client/network/types/src/multiaddr.rs b/substrate/client/network/types/src/multiaddr.rs new file mode 100644 index 0000000000000000000000000000000000000000..312bef9baab1254a963d5d886994d0812ea93382 --- /dev/null +++ b/substrate/client/network/types/src/multiaddr.rs @@ -0,0 +1,251 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use litep2p::types::multiaddr::{ + Error as LiteP2pError, Iter as LiteP2pIter, Multiaddr as LiteP2pMultiaddr, + Protocol as LiteP2pProtocol, +}; +use std::{ + fmt::{self, Debug, Display}, + str::FromStr, +}; + +mod protocol; +pub use protocol::Protocol; + +// Re-export the macro under shorter name under `multiaddr`. +pub use crate::build_multiaddr as multiaddr; + +/// [`Multiaddr`] type used in Substrate. Converted to libp2p's `Multiaddr` +/// or litep2p's `Multiaddr` when passed to the corresponding network backend. + +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +pub struct Multiaddr { + multiaddr: LiteP2pMultiaddr, +} + +impl Multiaddr { + /// Create a new, empty multiaddress. + pub fn empty() -> Self { + Self { multiaddr: LiteP2pMultiaddr::empty() } + } + + /// Adds an address component to the end of this multiaddr. + pub fn push(&mut self, p: Protocol<'_>) { + self.multiaddr.push(p.into()) + } + + /// Pops the last `Protocol` of this multiaddr, or `None` if the multiaddr is empty. + pub fn pop<'a>(&mut self) -> Option> { + self.multiaddr.pop().map(Into::into) + } + + /// Like [`Multiaddr::push`] but consumes `self`. + pub fn with(self, p: Protocol<'_>) -> Self { + self.multiaddr.with(p.into()).into() + } + + /// Returns the components of this multiaddress. + pub fn iter(&self) -> Iter<'_> { + self.multiaddr.iter().into() + } + + /// Return a copy of this [`Multiaddr`]'s byte representation. + pub fn to_vec(&self) -> Vec { + self.multiaddr.to_vec() + } +} + +impl Display for Multiaddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Display::fmt(&self.multiaddr, f) + } +} + +/// Remove an extra layer of nestedness by deferring to the wrapped value's [`Debug`]. 
+impl Debug for Multiaddr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Debug::fmt(&self.multiaddr, f) + } +} + +impl AsRef<[u8]> for Multiaddr { + fn as_ref(&self) -> &[u8] { + self.multiaddr.as_ref() + } +} + +impl From for Multiaddr { + fn from(multiaddr: LiteP2pMultiaddr) -> Self { + Self { multiaddr } + } +} + +impl From for LiteP2pMultiaddr { + fn from(multiaddr: Multiaddr) -> Self { + multiaddr.multiaddr + } +} + +impl TryFrom> for Multiaddr { + type Error = ParseError; + + fn try_from(v: Vec) -> Result { + let multiaddr = LiteP2pMultiaddr::try_from(v)?; + Ok(Self { multiaddr }) + } +} + +/// Error when parsing a [`Multiaddr`] from string. +#[derive(Debug, thiserror::Error)] +pub enum ParseError { + /// Less data provided than indicated by length. + #[error("less data than indicated by length")] + DataLessThanLen, + /// Invalid multiaddress. + #[error("invalid multiaddress")] + InvalidMultiaddr, + /// Invalid protocol specification. + #[error("invalid protocol string")] + InvalidProtocolString, + /// Unknown protocol string identifier. + #[error("unknown protocol '{0}'")] + UnknownProtocolString(String), + /// Unknown protocol numeric id. + #[error("unknown protocol id {0}")] + UnknownProtocolId(u32), + /// Failed to decode unsigned varint. + #[error("failed to decode unsigned varint: {0}")] + InvalidUvar(Box), + /// Other error emitted when parsing into the wrapped type. + #[error("multiaddr parsing error: {0}")] + ParsingError(Box), +} + +impl From for ParseError { + fn from(error: LiteP2pError) -> Self { + match error { + LiteP2pError::DataLessThanLen => ParseError::DataLessThanLen, + LiteP2pError::InvalidMultiaddr => ParseError::InvalidMultiaddr, + LiteP2pError::InvalidProtocolString => ParseError::InvalidProtocolString, + LiteP2pError::UnknownProtocolString(s) => ParseError::UnknownProtocolString(s), + LiteP2pError::UnknownProtocolId(n) => ParseError::UnknownProtocolId(n), + LiteP2pError::InvalidUvar(e) => ParseError::InvalidUvar(Box::new(e)), + LiteP2pError::ParsingError(e) => ParseError::ParsingError(e), + error => ParseError::ParsingError(Box::new(error)), + } + } +} + +impl FromStr for Multiaddr { + type Err = ParseError; + + fn from_str(s: &str) -> Result { + let multiaddr = LiteP2pMultiaddr::from_str(s)?; + Ok(Self { multiaddr }) + } +} + +impl TryFrom for Multiaddr { + type Error = ParseError; + + fn try_from(s: String) -> Result { + Self::from_str(&s) + } +} + +impl<'a> TryFrom<&'a str> for Multiaddr { + type Error = ParseError; + + fn try_from(s: &'a str) -> Result { + Self::from_str(s) + } +} + +/// Iterator over `Multiaddr` [`Protocol`]s. +pub struct Iter<'a>(LiteP2pIter<'a>); + +impl<'a> Iterator for Iter<'a> { + type Item = Protocol<'a>; + + fn next(&mut self) -> Option { + self.0.next().map(Into::into) + } +} + +impl<'a> From> for Iter<'a> { + fn from(iter: LiteP2pIter<'a>) -> Self { + Self(iter) + } +} + +impl<'a> IntoIterator for &'a Multiaddr { + type Item = Protocol<'a>; + type IntoIter = Iter<'a>; + + fn into_iter(self) -> Iter<'a> { + self.multiaddr.into_iter().into() + } +} + +impl<'a> FromIterator> for Multiaddr { + fn from_iter(iter: T) -> Self + where + T: IntoIterator>, + { + LiteP2pMultiaddr::from_iter(iter.into_iter().map(Into::into)).into() + } +} + +impl<'a> From> for Multiaddr { + fn from(p: Protocol<'a>) -> Multiaddr { + let protocol: LiteP2pProtocol = p.into(); + let multiaddr: LiteP2pMultiaddr = protocol.into(); + multiaddr.into() + } +} + +/// Easy way for a user to create a `Multiaddr`. 
+/// +/// Example: +/// +/// ```rust +/// use sc_network_types::build_multiaddr; +/// let addr = build_multiaddr!(Ip4([127, 0, 0, 1]), Tcp(10500u16)); +/// ``` +/// +/// Each element passed to `multiaddr!` should be a variant of the `Protocol` enum. The +/// optional parameter is turned into the proper type with the `Into` trait. +/// +/// For example, `Ip4([127, 0, 0, 1])` works because `Ipv4Addr` implements `From<[u8; 4]>`. +#[macro_export] +macro_rules! build_multiaddr { + ($($comp:ident $(($param:expr))*),+) => { + { + use std::iter; + let elem = iter::empty::<$crate::multiaddr::Protocol>(); + $( + let elem = { + let cmp = $crate::multiaddr::Protocol::$comp $(( $param.into() ))*; + elem.chain(iter::once(cmp)) + }; + )+ + elem.collect::<$crate::multiaddr::Multiaddr>() + } + } +} diff --git a/substrate/client/network/types/src/multiaddr/protocol.rs b/substrate/client/network/types/src/multiaddr/protocol.rs new file mode 100644 index 0000000000000000000000000000000000000000..800d08fe36bd657b855869bcb69a808961e32a5a --- /dev/null +++ b/substrate/client/network/types/src/multiaddr/protocol.rs @@ -0,0 +1,138 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::multihash::Multihash; +use litep2p::types::multiaddr::Protocol as LiteP2pProtocol; +use std::{ + borrow::Cow, + net::{Ipv4Addr, Ipv6Addr}, +}; + +/// [`Protocol`] describes all possible multiaddress protocols. +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Protocol<'a> { + Dccp(u16), + Dns(Cow<'a, str>), + Dns4(Cow<'a, str>), + Dns6(Cow<'a, str>), + Dnsaddr(Cow<'a, str>), + Http, + Https, + Ip4(Ipv4Addr), + Ip6(Ipv6Addr), + P2pWebRtcDirect, + P2pWebRtcStar, + WebRTC, + Certhash(Multihash), + P2pWebSocketStar, + /// Contains the "port" to contact. Similar to TCP or UDP, 0 means "assign me a port". 
+	Memory(u64),
+	Onion(Cow<'a, [u8; 10]>, u16),
+	Onion3(Cow<'a, [u8; 35]>, u16),
+	P2p(Multihash),
+	P2pCircuit,
+	Quic,
+	QuicV1,
+	Sctp(u16),
+	Tcp(u16),
+	Tls,
+	Noise,
+	Udp(u16),
+	Udt,
+	Unix(Cow<'a, str>),
+	Utp,
+	Ws(Cow<'a, str>),
+	Wss(Cow<'a, str>),
+}
+
+impl<'a> From<LiteP2pProtocol<'a>> for Protocol<'a> {
+	fn from(protocol: LiteP2pProtocol<'a>) -> Self {
+		match protocol {
+			LiteP2pProtocol::Dccp(port) => Protocol::Dccp(port),
+			LiteP2pProtocol::Dns(str) => Protocol::Dns(str),
+			LiteP2pProtocol::Dns4(str) => Protocol::Dns4(str),
+			LiteP2pProtocol::Dns6(str) => Protocol::Dns6(str),
+			LiteP2pProtocol::Dnsaddr(str) => Protocol::Dnsaddr(str),
+			LiteP2pProtocol::Http => Protocol::Http,
+			LiteP2pProtocol::Https => Protocol::Https,
+			LiteP2pProtocol::Ip4(ipv4_addr) => Protocol::Ip4(ipv4_addr),
+			LiteP2pProtocol::Ip6(ipv6_addr) => Protocol::Ip6(ipv6_addr),
+			LiteP2pProtocol::P2pWebRtcDirect => Protocol::P2pWebRtcDirect,
+			LiteP2pProtocol::P2pWebRtcStar => Protocol::P2pWebRtcStar,
+			LiteP2pProtocol::WebRTC => Protocol::WebRTC,
+			LiteP2pProtocol::Certhash(multihash) => Protocol::Certhash(multihash.into()),
+			LiteP2pProtocol::P2pWebSocketStar => Protocol::P2pWebSocketStar,
+			LiteP2pProtocol::Memory(port) => Protocol::Memory(port),
+			LiteP2pProtocol::Onion(str, port) => Protocol::Onion(str, port),
+			LiteP2pProtocol::Onion3(addr) =>
+				Protocol::Onion3(Cow::Owned(*addr.hash()), addr.port()),
+			LiteP2pProtocol::P2p(multihash) => Protocol::P2p(multihash.into()),
+			LiteP2pProtocol::P2pCircuit => Protocol::P2pCircuit,
+			LiteP2pProtocol::Quic => Protocol::Quic,
+			LiteP2pProtocol::QuicV1 => Protocol::QuicV1,
+			LiteP2pProtocol::Sctp(port) => Protocol::Sctp(port),
+			LiteP2pProtocol::Tcp(port) => Protocol::Tcp(port),
+			LiteP2pProtocol::Tls => Protocol::Tls,
+			LiteP2pProtocol::Noise => Protocol::Noise,
+			LiteP2pProtocol::Udp(port) => Protocol::Udp(port),
+			LiteP2pProtocol::Udt => Protocol::Udt,
+			LiteP2pProtocol::Unix(str) => Protocol::Unix(str),
+			LiteP2pProtocol::Utp => Protocol::Utp,
+			LiteP2pProtocol::Ws(str) => Protocol::Ws(str),
+			LiteP2pProtocol::Wss(str) => Protocol::Wss(str),
+		}
+	}
+}
+
+impl<'a> From<Protocol<'a>> for LiteP2pProtocol<'a> {
+	fn from(protocol: Protocol<'a>) -> Self {
+		match protocol {
+			Protocol::Dccp(port) => LiteP2pProtocol::Dccp(port),
+			Protocol::Dns(str) => LiteP2pProtocol::Dns(str),
+			Protocol::Dns4(str) => LiteP2pProtocol::Dns4(str),
+			Protocol::Dns6(str) => LiteP2pProtocol::Dns6(str),
+			Protocol::Dnsaddr(str) => LiteP2pProtocol::Dnsaddr(str),
+			Protocol::Http => LiteP2pProtocol::Http,
+			Protocol::Https => LiteP2pProtocol::Https,
+			Protocol::Ip4(ipv4_addr) => LiteP2pProtocol::Ip4(ipv4_addr),
+			Protocol::Ip6(ipv6_addr) => LiteP2pProtocol::Ip6(ipv6_addr),
+			Protocol::P2pWebRtcDirect => LiteP2pProtocol::P2pWebRtcDirect,
+			Protocol::P2pWebRtcStar => LiteP2pProtocol::P2pWebRtcStar,
+			Protocol::WebRTC => LiteP2pProtocol::WebRTC,
+			Protocol::Certhash(multihash) => LiteP2pProtocol::Certhash(multihash.into()),
+			Protocol::P2pWebSocketStar => LiteP2pProtocol::P2pWebSocketStar,
+			Protocol::Memory(port) => LiteP2pProtocol::Memory(port),
+			Protocol::Onion(str, port) => LiteP2pProtocol::Onion(str, port),
+			Protocol::Onion3(str, port) => LiteP2pProtocol::Onion3((str.into_owned(), port).into()),
+			Protocol::P2p(multihash) => LiteP2pProtocol::P2p(multihash.into()),
+			Protocol::P2pCircuit => LiteP2pProtocol::P2pCircuit,
+			Protocol::Quic => LiteP2pProtocol::Quic,
+			Protocol::QuicV1 => LiteP2pProtocol::QuicV1,
+			Protocol::Sctp(port) => LiteP2pProtocol::Sctp(port),
+			Protocol::Tcp(port) => LiteP2pProtocol::Tcp(port),
+			Protocol::Tls => LiteP2pProtocol::Tls,
+			Protocol::Noise => LiteP2pProtocol::Noise,
+			Protocol::Udp(port) => LiteP2pProtocol::Udp(port),
+			Protocol::Udt => LiteP2pProtocol::Udt,
+			Protocol::Unix(str) => LiteP2pProtocol::Unix(str),
+			Protocol::Utp => LiteP2pProtocol::Utp,
+			Protocol::Ws(str) => LiteP2pProtocol::Ws(str),
+			Protocol::Wss(str) => LiteP2pProtocol::Wss(str),
+		}
+	}
+}
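The two `From` impls are symmetric, so a component survives a round trip between the wrapper and the litep2p representation unchanged. A small sketch, assuming `litep2p` is available as a direct dependency of the test crate:

```rust
use litep2p::types::multiaddr::Protocol as LiteP2pProtocol;
use sc_network_types::multiaddr::Protocol;

fn main() {
	// Convert into the litep2p representation and back again.
	let original = Protocol::Tcp(30333);
	let lite: LiteP2pProtocol = original.clone().into();
	let back: Protocol = lite.into();

	// Every variant maps one-to-one, so equality holds after the round trip.
	assert_eq!(original, back);
}
```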
+ #[error("other error: {0}")] + Other(Box), +} + +impl From for Error { + fn from(error: LiteP2pError) -> Self { + match error { + LiteP2pError::InvalidSize(s) => Self::InvalidSize(s), + LiteP2pError::UnsupportedCode(c) => Self::UnsupportedCode(c), + e => Self::Other(Box::new(e)), + } + } +} + +impl From for LiteP2pCode { + fn from(code: Code) -> Self { + match code { + Code::Identity => LiteP2pCode::Identity, + Code::Sha2_256 => LiteP2pCode::Sha2_256, + } + } +} + +impl TryFrom for Code { + type Error = Error; + + fn try_from(code: LiteP2pCode) -> Result { + match code { + LiteP2pCode::Identity => Ok(Code::Identity), + LiteP2pCode::Sha2_256 => Ok(Code::Sha2_256), + _ => Err(Error::UnsupportedCode(code.into())), + } + } +} + +impl TryFrom for Code { + type Error = Error; + + fn try_from(code: u64) -> Result { + match LiteP2pCode::try_from(code) { + Ok(code) => code.try_into(), + Err(e) => Err(e.into()), + } + } +} + +impl From for u64 { + fn from(code: Code) -> Self { + LiteP2pCode::from(code).into() + } +} + +#[derive(Clone, Copy, Hash, PartialEq, Eq, Ord, PartialOrd)] +pub struct Multihash { + multihash: LiteP2pMultihash, +} + +impl Multihash { + /// Multihash code. + pub fn code(&self) -> u64 { + self.multihash.code() + } + + /// Multihash digest. + pub fn digest(&self) -> &[u8] { + self.multihash.digest() + } + + /// Wraps the digest in a multihash. + pub fn wrap(code: u64, input_digest: &[u8]) -> Result { + LiteP2pMultihash::wrap(code, input_digest).map(Into::into).map_err(Into::into) + } + + /// Parses a multihash from bytes. + /// + /// You need to make sure the passed in bytes have the length of 64. + pub fn from_bytes(bytes: &[u8]) -> Result { + LiteP2pMultihash::from_bytes(bytes).map(Into::into).map_err(Into::into) + } + + /// Returns the bytes of a multihash. + pub fn to_bytes(&self) -> Vec { + self.multihash.to_bytes() + } +} + +/// Remove extra layer of nestedness by deferring to the wrapped value's [`Debug`]. +impl Debug for Multihash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Debug::fmt(&self.multihash, f) + } +} + +impl From for Multihash { + fn from(multihash: LiteP2pMultihash) -> Self { + Multihash { multihash } + } +} + +impl From for LiteP2pMultihash { + fn from(multihash: Multihash) -> Self { + multihash.multihash + } +} + +// TODO: uncomment this after upgrading `multihash` crate to v0.19.1. 
+// TODO: uncomment this after upgrading `multihash` crate to v0.19.1.
+//
+// impl From<multihash::MultihashGeneric<64>> for Multihash {
+// 	fn from(generic: multihash::MultihashGeneric<64>) -> Self {
+// 		LiteP2pMultihash::wrap(generic.code(), generic.digest())
+// 			.expect("both have size 64; qed")
+// 			.into()
+// 	}
+// }
+//
+// impl From<Multihash> for multihash::Multihash<64> {
+// 	fn from(multihash: Multihash) -> Self {
+// 		multihash::Multihash::<64>::wrap(multihash.code(), multihash.digest())
+// 			.expect("both have size 64; qed")
+// 	}
+// }
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn code_from_u64() {
+		assert_eq!(Code::try_from(0x00).unwrap(), Code::Identity);
+		assert_eq!(Code::try_from(0x12).unwrap(), Code::Sha2_256);
+		assert!(matches!(Code::try_from(0x01).unwrap_err(), Error::UnsupportedCode(0x01)));
+	}
+
+	#[test]
+	fn code_into_u64() {
+		assert_eq!(u64::from(Code::Identity), 0x00);
+		assert_eq!(u64::from(Code::Sha2_256), 0x12);
+	}
+}
diff --git a/substrate/client/network/types/src/peer_id.rs b/substrate/client/network/types/src/peer_id.rs
index 14ac4a1e9aae8028e6197327a9fe7eec633e40b4..076be0a66c7b79ae09b4a31738a1869035181b86 100644
--- a/substrate/client/network/types/src/peer_id.rs
+++ b/substrate/client/network/types/src/peer_id.rs
@@ -16,8 +16,10 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use multiaddr::{Multiaddr, Protocol};
-use multihash::{Code, Error, Multihash};
+use crate::{
+	multiaddr::{Multiaddr, Protocol},
+	multihash::{Code, Error, Multihash},
+};
 use rand::Rng;
 use std::{fmt, hash::Hash, str::FromStr};
 
@@ -185,7 +187,7 @@ pub enum ParseError {
 	#[error("unsupported multihash code '{0}'")]
 	UnsupportedCode(u64),
 	#[error("invalid multihash")]
-	InvalidMultihash(#[from] multihash::Error),
+	InvalidMultihash(#[from] crate::multihash::Error),
 }
 
 impl FromStr for PeerId {
diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml
index c4d07ceec1afdc83eec538c71d0e45b5456d4df0..2944ff7f4f49dd879b35a1ba70b214824fbdd089 100644
--- a/substrate/client/offchain/Cargo.toml
+++ b/substrate/client/offchain/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 array-bytes = "6.2.2"
 bytes = "1.1"
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 fnv = "1.0.6"
 futures = "0.3.30"
 futures-timer = "3.0.2"
diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml
index c5613662b9f2fb8d3827005778437d49dd9fe3cb..d8f833e2b8d45eb217fe6f4f9d835038e6167705 100644
--- a/substrate/client/rpc-api/Cargo.toml
+++ b/substrate/client/rpc-api/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { features = ["derive"], workspace = true, default-features = true }
 serde_json = { workspace = true, default-features = true }
diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml
index e1f799e337200fa7b1a8b8d747a58aa8807ba946..8977c842d03806c50372fc58452d5c35206ae163 100644
--- a/substrate/client/rpc-spec-v2/Cargo.toml
+++ b/substrate/client/rpc-spec-v2/Cargo.toml
@@ -30,7 +30,7 @@ sp-version = { path = "../../primitives/version" }
 sc-client-api = { path = "../api" }
 sc-utils = { path = "../utils" }
 sc-rpc = { path = "../rpc" }
"../rpc" } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } thiserror = { workspace = true } serde = { workspace = true, default-features = true } hex = "0.4" diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 363d11235dda4e5bc8595df0303eb7951a0903da..b195e05b6649abb524c973b350b52833a34fd794 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -2487,6 +2487,7 @@ async fn follow_report_multiple_pruned_block() { client.finalize_block(block_3_hash, None).unwrap(); // Finalizing block 3 directly will also result in block 1 and 2 being finalized. + // It will also mark block 2 and block 3 from the fork as pruned. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![ @@ -2494,7 +2495,7 @@ async fn follow_report_multiple_pruned_block() { format!("{:?}", block_2_hash), format!("{:?}", block_3_hash), ], - pruned_block_hashes: vec![], + pruned_block_hashes: vec![format!("{:?}", block_2_f_hash), format!("{:?}", block_3_f_hash)], }); assert_eq!(event, expected); @@ -2504,7 +2505,6 @@ async fn follow_report_multiple_pruned_block() { // ^^^ finalized // -> block 1 -> block 2_f -> block 3_f // - // Mark block 4 as finalized to force block 2_f and 3_f to get pruned. let block_4 = BlockBuilderBuilder::new(&*client) .on_parent_block(block_3.hash()) @@ -2535,11 +2535,11 @@ async fn follow_report_multiple_pruned_block() { }); assert_eq!(event, expected); - // Block 4 and 5 be reported as pruned, not just the stale head (block 5). + // Blocks from the fork were pruned earlier. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![format!("{:?}", block_4_hash)], - pruned_block_hashes: vec![format!("{:?}", block_2_f_hash), format!("{:?}", block_3_f_hash)], + pruned_block_hashes: vec![], }); assert_eq!(event, expected); } @@ -3714,16 +3714,8 @@ async fn follow_unique_pruned_blocks() { // The chainHead will see block 5 as the best block. However, the // client will finalize the block 6, which is on another fork. // - // When the block 6 is finalized, block 2 block 3 block 4 and block 5 are placed on an invalid - // fork. However, pruning of blocks happens on level N - 1. - // Therefore, no pruned blocks are reported yet. + // When the block 6 is finalized all blocks from the stale forks (2, 3, 4, 5) are pruned. // - // When the block 7 is finalized, block 3 is detected as stale. At this step, block 2 and 3 - // are reported as pruned. - // - // When the block 8 is finalized, block 5 block 4 and block 2 are detected as stale. However, - // only blocks 5 and 4 are reported as pruned. This is because the block 2 was previously - // reported. // Initial setup steps: let block_1_hash = @@ -3776,16 +3768,33 @@ async fn follow_unique_pruned_blocks() { }); assert_eq!(event, expected); - // Block 2 must be reported as pruned, even if it was the previous best. - let event: FollowEvent = get_next_event(&mut sub).await; + // All blocks from stale forks are pruned when we finalize block 6. + let mut event: FollowEvent = get_next_event(&mut sub).await; + + // Sort pruned block hashes to counter flaky test caused by event generation (get_pruned_hashes) + if let FollowEvent::Finalized(Finalized { pruned_block_hashes, .. 
}) = &mut event { + pruned_block_hashes.sort(); + } + let expected_pruned_block_hashes = { + let mut hashes = vec![ + format!("{:?}", block_2_hash), + format!("{:?}", block_3_hash), + format!("{:?}", block_4_hash), + format!("{:?}", block_5_hash), + ]; + hashes.sort(); + hashes + }; + let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![ format!("{:?}", block_1_hash), format!("{:?}", block_2_f_hash), format!("{:?}", block_6_hash), ], - pruned_block_hashes: vec![], + pruned_block_hashes: expected_pruned_block_hashes, }); + assert_eq!(event, expected); // Pruned hash can be unpinned. @@ -3802,9 +3811,10 @@ async fn follow_unique_pruned_blocks() { client.finalize_block(block_7_hash, None).unwrap(); let event: FollowEvent = get_next_event(&mut sub).await; + // All necessary blocks were pruned on block 6 finalization. let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![format!("{:?}", block_7_hash)], - pruned_block_hashes: vec![format!("{:?}", block_2_hash), format!("{:?}", block_3_hash)], + pruned_block_hashes: vec![], }); assert_eq!(event, expected); @@ -3815,10 +3825,11 @@ async fn follow_unique_pruned_blocks() { // Finalize the block 8. client.finalize_block(block_8_hash, None).unwrap(); + // All necessary blocks were pruned on block 6 finalization. let event: FollowEvent = get_next_event(&mut sub).await; let expected = FollowEvent::Finalized(Finalized { finalized_block_hashes: vec![format!("{:?}", block_8_hash)], - pruned_block_hashes: vec![format!("{:?}", block_4_hash), format!("{:?}", block_5_hash)], + pruned_block_hashes: vec![], }); assert_eq!(event, expected); } diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml index dff34215b025c26fa3e6f4fa2949dcd0d2e4203a..7dd46b2ab4c31e7be4168e588a8c95e900cb213a 100644 --- a/substrate/client/rpc/Cargo.toml +++ b/substrate/client/rpc/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" jsonrpsee = { version = "0.22", features = ["server"] } log = { workspace = true, default-features = true } diff --git a/substrate/client/rpc/src/utils.rs b/substrate/client/rpc/src/utils.rs index 6ec48efef846c719db4e009b3a14926db0577d5f..3b5372615e733dba860faa453063548189fc6354 100644 --- a/substrate/client/rpc/src/utils.rs +++ b/substrate/client/rpc/src/utils.rs @@ -143,7 +143,7 @@ async fn inner_pipe_from_stream( // // Process remaining items and terminate. 
 			Either::Right((Either::Right((None, pending_fut)), _)) => {
-				if pending_fut.await.is_err() {
+				if !pending_fut.is_terminated() && pending_fut.await.is_err() {
 					return;
 				}
 
@@ -231,4 +231,28 @@ mod tests {
 		_ = rx.next().await.unwrap();
 		assert!(sub.next::().await.is_none());
 	}
+
+	#[tokio::test]
+	async fn subscription_is_dropped_when_stream_is_empty() {
+		let notify_rx = std::sync::Arc::new(tokio::sync::Notify::new());
+		let notify_tx = notify_rx.clone();
+
+		let mut module = RpcModule::new(notify_tx);
+		module
+			.register_subscription("sub", "my_sub", "unsub", |_, pending, notify_tx| async move {
+				// emulate empty stream for simplicity: otherwise we need some mechanism
+				// to sync buffer and channel send operations
+				let stream = futures::stream::empty::<()>();
+				// this should exit immediately
+				pipe_from_stream(pending, stream).await;
+				// notify that the `pipe_from_stream` has returned
+				notify_tx.notify_one();
+				Ok(())
+			})
+			.unwrap();
+		module.subscribe("sub", EmptyServerParams::new(), 1).await.unwrap();
+
+		// it should fire once `pipe_from_stream` returns
+		notify_rx.notified().await;
+	}
 }
diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml
index b93196e86f1d3674b2894a21783e269831a27656..dfdd485f15c00550119d9aebf8594b33cdba4df1 100644
--- a/substrate/client/service/Cargo.toml
+++ b/substrate/client/service/Cargo.toml
@@ -63,7 +63,7 @@ sc-chain-spec = { path = "../chain-spec" }
 sc-client-api = { path = "../api" }
 sp-api = { path = "../../primitives/api" }
 sc-client-db = { path = "../db", default-features = false }
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 sc-executor = { path = "../executor" }
 sc-transaction-pool = { path = "../transaction-pool" }
 sp-transaction-pool = { path = "../../primitives/transaction-pool" }
diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs
index 35e8b53a09cf2d802a0c4a873f7ecb8099764e83..3c25c233775bea6ed8297d144bcadf823b918e7e 100644
--- a/substrate/client/service/src/client/client.rs
+++ b/substrate/client/service/src/client/client.rs
@@ -978,8 +978,12 @@ where
 
 		// The stale heads are the leaves that will be displaced after the
 		// block is finalized.
-		let stale_heads =
-			self.backend.blockchain().displaced_leaves_after_finalizing(block_number)?;
+		let stale_heads = self
+			.backend
+			.blockchain()
+			.displaced_leaves_after_finalizing(hash, block_number)?
+			.hashes()
+			.collect();
 
 		let header = self
 			.backend
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs
index 444cb4a06eb9e755c318971c0b851194593769fe..a51bb4012d5d8ac0a4fb39e73c5a872a33e5fa7d 100644
--- a/substrate/client/service/src/lib.rs
+++ b/substrate/client/service/src/lib.rs
@@ -54,10 +54,11 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
 
 pub use self::{
 	builder::{
-		build_network, new_client, new_db_backend, new_full_client, new_full_parts,
-		new_full_parts_record_import, new_full_parts_with_genesis_builder, new_wasm_executor,
-		spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams,
-		TFullBackend, TFullCallExecutor, TFullClient,
+		build_network, gen_rpc_module, init_telemetry, new_client, new_db_backend, new_full_client,
+		new_full_parts, new_full_parts_record_import, new_full_parts_with_genesis_builder,
+		new_wasm_executor, propagate_transaction_notifications, spawn_tasks, BuildNetworkParams,
+		KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor,
+		TFullClient,
 	},
 	client::{ClientConfig, LocalCallExecutor},
 	error::Error,
@@ -75,7 +76,7 @@ pub use config::{
 };
 pub use sc_chain_spec::{
 	ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension,
-	Properties, RuntimeGenesis,
+	Properties,
 };
 pub use sc_consensus::ImportQueue;
diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml
index 8766868cedeaa6cbe4d9329ee58e21c0fa8d2bc5..e95e06cee267e68ca4b87f7179c8c66b9de40f61 100644
--- a/substrate/client/service/test/Cargo.toml
+++ b/substrate/client/service/test/Cargo.toml
@@ -20,7 +20,7 @@ array-bytes = "6.2.2"
 fdlimit = "0.3.0"
 futures = "0.3.30"
 log = { workspace = true, default-features = true }
-parity-scale-codec = "3.6.1"
+parity-scale-codec = "3.6.12"
 parking_lot = "0.12.1"
 tempfile = "3.1.0"
 tokio = { version = "1.22.0", features = ["time"] }
diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs
index 4fcb7c160cb420e00655716f551e37ca5864a6f5..6542830c998b3a4f29df2f6a32f9d71c0aee688a 100644
--- a/substrate/client/service/test/src/client/mod.rs
+++ b/substrate/client/service/test/src/client/mod.rs
@@ -1164,7 +1164,7 @@ fn finalizing_diverged_block_should_trigger_reorg() {
 
 	// G -> A1 -> A2
 	//   \
-	//    -> B1 -> B2
+	//    -> B1 -> B2 -> B3
 
 	let mut finality_notifications = client.finality_notification_stream();
 
@@ -1249,8 +1249,8 @@ fn finalizing_diverged_block_should_trigger_reorg() {
 
 	ClientExt::finalize_block(&client, b3.hash(), None).unwrap();
 
-	finality_notification_check(&mut finality_notifications, &[b1.hash()], &[]);
-	finality_notification_check(&mut finality_notifications, &[b2.hash(), b3.hash()], &[a2.hash()]);
+	finality_notification_check(&mut finality_notifications, &[b1.hash()], &[a2.hash()]);
+	finality_notification_check(&mut finality_notifications, &[b2.hash(), b3.hash()], &[]);
 	assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty));
 }
 
@@ -1371,8 +1371,12 @@ fn finality_notifications_content() {
 	// Import and finalize D4
 	block_on(client.import_as_final(BlockOrigin::Own, d4.clone())).unwrap();
 
-	finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[c1.hash()]);
-	finality_notification_check(&mut finality_notifications, &[d3.hash(), d4.hash()], &[b2.hash()]);
+	finality_notification_check(
+		&mut finality_notifications,
+		&[a1.hash(), a2.hash()],
+		&[c1.hash(), b2.hash()],
+	);
+ finality_notification_check(&mut finality_notifications, &[d3.hash(), d4.hash()], &[a3.hash()]); assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty)); } @@ -1601,9 +1605,9 @@ fn doesnt_import_blocks_that_revert_finality() { block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); ClientExt::finalize_block(&client, a3.hash(), None).unwrap(); - finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); + finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[b2.hash()]); - finality_notification_check(&mut finality_notifications, &[a3.hash()], &[b2.hash()]); + finality_notification_check(&mut finality_notifications, &[a3.hash()], &[]); assert!(matches!(finality_notifications.try_recv().unwrap_err(), TryRecvError::Empty)); } diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index f19b5a19739e2b9c90f5848e2332e487d0ccd5fe..e60bd9410c6453594469c21661875334d1501c68 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -31,7 +31,7 @@ use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig, RpcBatchRequestConfig}, BlocksPruning, ChainSpecExtension, Configuration, Error, GenericChainSpec, Role, - RuntimeGenesis, SpawnTaskHandle, TaskManager, + SpawnTaskHandle, TaskManager, }; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderBackend; @@ -46,16 +46,16 @@ mod client; /// Maximum duration of single wait call. const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); -struct TestNet { +struct TestNet { runtime: Runtime, authority_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, full_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, - chain_spec: GenericChainSpec, + chain_spec: GenericChainSpec, base_port: u16, nodes: usize, } -impl Drop for TestNet { +impl Drop for TestNet { fn drop(&mut self) { // Drop the nodes before dropping the runtime, as the runtime otherwise waits for all // futures to be ended and we run into a dead lock. 
@@ -162,7 +162,7 @@ where
 	}
 }
 
-impl<G, E, F, U> TestNet<G, E, F, U>
+impl<E, F, U> TestNet<E, F, U>
 where
 	F: Clone + Send + 'static,
 	U: Clone + Send + 'static,
@@ -193,12 +193,9 @@ where
 	}
 }
 
-fn node_config<
-	G: RuntimeGenesis + 'static,
-	E: ChainSpecExtension + Clone + 'static + Send + Sync,
->(
+fn node_config<E: ChainSpecExtension + Clone + 'static + Send + Sync>(
 	index: usize,
-	spec: &GenericChainSpec<G, E>,
+	spec: &GenericChainSpec<E>,
 	role: Role,
 	tokio_handle: tokio::runtime::Handle,
 	key_seed: Option<String>,
@@ -272,19 +269,18 @@ fn node_config(
 	}
 }
 
-impl<G, E, F, U> TestNet<G, E, F, U>
+impl<E, F, U> TestNet<E, F, U>
 where
 	F: TestNetNode,
 	E: ChainSpecExtension + Clone + 'static + Send + Sync,
-	G: RuntimeGenesis + 'static,
 {
 	fn new(
 		temp: &TempDir,
-		spec: GenericChainSpec<G, E>,
+		spec: GenericChainSpec<E>,
 		full: impl Iterator<Item = impl FnOnce(Configuration) -> Result<(F, U), Error>>,
 		authorities: impl Iterator<Item = (String, impl FnOnce(Configuration) -> Result<(F, U), Error>)>,
 		base_port: u16,
-	) -> TestNet<G, E, F, U> {
+	) -> TestNet<E, F, U> {
 		sp_tracing::try_init_simple();
 		fdlimit::raise_fd_limit().unwrap();
 		let runtime = Runtime::new().expect("Error creating tokio runtime");
@@ -365,10 +361,9 @@ fn tempdir_with_prefix(prefix: &str) -> TempDir {
 		.expect("Error creating test dir")
}
 
-pub fn connectivity<G, E, Fb, F>(spec: GenericChainSpec<G, E>, full_builder: Fb)
+pub fn connectivity<E, Fb, F>(spec: GenericChainSpec<E>, full_builder: Fb)
 where
 	E: ChainSpecExtension + Clone + 'static + Send + Sync,
-	G: RuntimeGenesis + 'static,
 	Fb: Fn(Configuration) -> Result<F, Error>,
 	F: TestNetNode,
 {
@@ -442,8 +437,8 @@ where
 	}
 }
 
-pub fn sync<G, E, Fb, F, B, ExF, U>(
-	spec: GenericChainSpec<G, E>,
+pub fn sync<E, Fb, F, B, ExF, U>(
+	spec: GenericChainSpec<E>,
 	full_builder: Fb,
 	mut make_block_and_import: B,
 	mut extrinsic_factory: ExF,
@@ -454,7 +449,6 @@ pub fn sync(
 	ExF: FnMut(&F, &U) -> <F::Block as BlockT>::Extrinsic,
 	U: Clone + Send + 'static,
 	E: ChainSpecExtension + Clone + 'static + Send + Sync,
-	G: RuntimeGenesis + 'static,
 {
 	const NUM_FULL_NODES: usize = 10;
 	const NUM_BLOCKS: usize = 512;
@@ -513,15 +507,14 @@ pub fn sync(
 	network.run_until_all_full(|_index, service| service.transaction_pool().ready().count() == 1);
 }
 
-pub fn consensus<G, E, Fb, F>(
-	spec: GenericChainSpec<G, E>,
+pub fn consensus<E, Fb, F>(
+	spec: GenericChainSpec<E>,
 	full_builder: Fb,
 	authorities: impl IntoIterator<Item = String>,
 ) where
 	Fb: Fn(Configuration) -> Result<F, Error>,
 	F: TestNetNode,
 	E: ChainSpecExtension + Clone + 'static + Send + Sync,
-	G: RuntimeGenesis + 'static,
 {
 	const NUM_FULL_NODES: usize = 10;
 	const NUM_BLOCKS: usize = 10; // 10 * 2 sec block production time = ~20 seconds
diff --git a/substrate/client/state-db/Cargo.toml b/substrate/client/state-db/Cargo.toml
index 400dda20c223443687292315b4974e5125570553..e203eb5a3282fa1ed73c75426b6039f046bef7d7 100644
--- a/substrate/client/state-db/Cargo.toml
+++ b/substrate/client/state-db/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 log = { workspace = true, default-features = true }
 parking_lot = "0.12.1"
 sp-core = { path = "../../primitives/core" }
diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml
index fd053d326e93fa72f39419831773d33dae6c0fd2..d5bdc920f7c9b48991354f5df917a3b3d72c86fa 100644
--- a/substrate/client/sync-state-rpc/Cargo.toml
+++ b/substrate/client/sync-state-rpc/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] }
 serde = { features = ["derive"], workspace = true, default-features = true }
features = ["derive"], workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } diff --git a/substrate/client/telemetry/src/endpoints.rs b/substrate/client/telemetry/src/endpoints.rs index c7a60726a5656108dfb588d96c8ede33bb4de391..c49b114152ae1473a22bd3ef76e808ddfaef6bbd 100644 --- a/substrate/client/telemetry/src/endpoints.rs +++ b/substrate/client/telemetry/src/endpoints.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_network::{multiaddr, Multiaddr}; +use libp2p::multiaddr::{self, Multiaddr}; use serde::{Deserialize, Deserializer, Serialize}; /// List of telemetry servers we want to talk to. Contains the URL of the server, and the diff --git a/substrate/client/telemetry/src/lib.rs b/substrate/client/telemetry/src/lib.rs index f8a201e7611c2fa50a871e663e5f6c21f76ba07a..7e3a4ee8639308bd4678288db583e31c5085af5b 100644 --- a/substrate/client/telemetry/src/lib.rs +++ b/substrate/client/telemetry/src/lib.rs @@ -37,9 +37,9 @@ #![warn(missing_docs)] use futures::{channel::mpsc, prelude::*}; +use libp2p::Multiaddr; use log::{error, warn}; use parking_lot::Mutex; -use sc_network::Multiaddr; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use serde::Serialize; use std::{ diff --git a/substrate/client/telemetry/src/node.rs b/substrate/client/telemetry/src/node.rs index 9b2443799d3deb23967973aa6670c6d92ea0bbfd..0bbdbfb622ef1ba478b62174d277e49ac836ff64 100644 --- a/substrate/client/telemetry/src/node.rs +++ b/substrate/client/telemetry/src/node.rs @@ -18,9 +18,8 @@ use crate::TelemetryPayload; use futures::{channel::mpsc, prelude::*}; -use libp2p::core::transport::Transport; +use libp2p::{core::transport::Transport, Multiaddr}; use rand::Rng as _; -use sc_network::Multiaddr; use std::{ fmt, mem, pin::Pin, diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index cad59ef91e46d426c05d4c1c0d431eda8d44dec0..df674d24c6dd79b36ef7aa7aa36d389a7702678d 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] ansi_term = "0.12.1" is-terminal = "0.4.9" chrono = "0.4.31" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } lazy_static = "1.4.0" libc = "0.2.152" log = { workspace = true, default-features = true } diff --git a/substrate/client/tracing/src/logging/mod.rs b/substrate/client/tracing/src/logging/mod.rs index 8b2ad9b598b5be905ccb97c15817f249022f2b78..05ec9fcf6ef08bc06a77a9f543322d57dc091400 100644 --- a/substrate/client/tracing/src/logging/mod.rs +++ b/substrate/client/tracing/src/logging/mod.rs @@ -141,6 +141,14 @@ where .add_directive( parse_default_directive("libp2p_mdns::behaviour::iface=off") .expect("provided directive is valid"), + ) + // Disable annoying log messages from rustls + .add_directive( + parse_default_directive("rustls::common_state=off") + .expect("provided directive is valid"), + ) + .add_directive( + parse_default_directive("rustls::conn=off").expect("provided directive is valid"), ); if let Ok(lvl) = std::env::var("RUST_LOG") { diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 5f0b90ffe5d3e7d26796832dacc1325c403cd8ac..351650297ffc599a0081dff9cf0514dc6c6fc810 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ 
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 async-trait = "0.1.79"
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 futures = "0.3.30"
 futures-timer = "3.0.2"
 linked-hash-map = "0.5.4"
diff --git a/substrate/client/transaction-pool/README.md b/substrate/client/transaction-pool/README.md
index 7a53727d576103800375a99d0544b3b67efad79e..30a3a8118b5233cd76856f0cf1c2eab6e94a22ed 100644
--- a/substrate/client/transaction-pool/README.md
+++ b/substrate/client/transaction-pool/README.md
@@ -49,7 +49,7 @@ pool, it's broadcasting status, block inclusion, finality, etc.
 
 ## Transaction Validity details
 
-Information retrieved from the the runtime are encapsulated in the `TransactionValidity`
+Information retrieved from the runtime are encapsulated in the `TransactionValidity`
 type.
 
 ```rust
diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml
index 1bb72ef55442216e321c564838a93a75a44f62eb..be80a7706b3efe653ceaf071f42a4e017bc428db 100644
--- a/substrate/client/transaction-pool/api/Cargo.toml
+++ b/substrate/client/transaction-pool/api/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 
 [dependencies]
 async-trait = "0.1.79"
-codec = { package = "parity-scale-codec", version = "3.6.1" }
+codec = { package = "parity-scale-codec", version = "3.6.12" }
 futures = "0.3.30"
 log = { workspace = true, default-features = true }
 serde = { features = ["derive"], workspace = true, default-features = true }
diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml
index 44e8d681b01cc611e714857d002a65d3261730c0..3942f06ce6eec966dd2883e679e3cfe570251b35 100644
--- a/substrate/frame/Cargo.toml
+++ b/substrate/frame/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 # external deps
-parity-scale-codec = { version = "3.2.2", default-features = false, features = [
+parity-scale-codec = { version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = [
diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml
index cd91ea7979651d0e1e0059adfec77f24b3c0620b..10e2feba62376d9cd8346f44531dbd3e6bc47870 100644
--- a/substrate/frame/alliance/Cargo.toml
+++ b/substrate/frame/alliance/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 array-bytes = { version = "6.2.2", optional = true }
 log = { workspace = true }
 
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 
 sp-std = { path = "../../primitives/std", default-features = false }
diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs
index 7116e69efa17ec88134699570b2235b8b0973d7c..a9cfd6d0fde0eb149e4f33db86823e7e711426cd 100644
--- a/substrate/frame/alliance/src/mock.rs
+++ b/substrate/frame/alliance/src/mock.rs
@@ -42,7 +42,6 @@
 type BlockNumber = u64;
 type AccountId = u64;
 
 parameter_types! {
-	pub const BlockHashCount: BlockNumber = 250;
 	pub BlockWeights: frame_system::limits::BlockWeights =
 		frame_system::limits::BlockWeights::simple_max(Weight::MAX);
 }
diff --git a/substrate/frame/asset-conversion/Cargo.toml b/substrate/frame/asset-conversion/Cargo.toml
index cf50d7b22af9b11a82011fe8e23c78ccef1112fa..bfcda2299d5a116eca62a5870a84823876bc89f5 100644
--- a/substrate/frame/asset-conversion/Cargo.toml
+++ b/substrate/frame/asset-conversion/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { version = "0.4.20", default-features = false }
 frame-support = { path = "../support", default-features = false }
 frame-system = { path = "../system", default-features = false }
diff --git a/substrate/frame/asset-conversion/ops/Cargo.toml b/substrate/frame/asset-conversion/ops/Cargo.toml
index e421e904a3a17933ebeefc3affea7e5d9cc5d702..c5efbf9f6f442b0430cecab10badb4ca13e49d0a 100644
--- a/substrate/frame/asset-conversion/ops/Cargo.toml
+++ b/substrate/frame/asset-conversion/ops/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { version = "0.4.20", default-features = false }
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml
index cd502148a8d8f116afcbcde8969490a454da81b7..4662469e46ce423edaab234041a852a42b0711bb 100644
--- a/substrate/frame/asset-rate/Cargo.toml
+++ b/substrate/frame/asset-rate/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml
index ed6df77e15232d7249fdcdb5da41a673ef420994..9647ae4db6baa6ada63350153b8a10993e940401 100644
--- a/substrate/frame/assets/Cargo.toml
+++ b/substrate/frame/assets/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 sp-std = { path = "../../primitives/std", default-features = false }
diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml
index c641071df902d0fa78f4794292bacd824f9712bb..8083c12d4b39fb0b860a738f700168bcad0bc2f3 100644
--- a/substrate/frame/atomic-swap/Cargo.toml
+++ b/substrate/frame/atomic-swap/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
"parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index 92ff3a0c56589d204cabe9770281f34d30f1eca9..9264d2f4a643cdedd02fa17f81f90d949c2c0729 100644 --- a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index a7aba711a5686088241e4507e686683114641bf7..c21f9b5c904556f8a0c01a0e5f1e468e32685ee5 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/authority-discovery/src/lib.rs b/substrate/frame/authority-discovery/src/lib.rs index ed9240d99e8d5bb2b814fe80e0211c601378651c..16f71960d693bec3de2babe446470e2db94bb8b9 100644 --- a/substrate/frame/authority-discovery/src/lib.rs +++ b/substrate/frame/authority-discovery/src/lib.rs @@ -48,13 +48,11 @@ pub mod pallet { } #[pallet::storage] - #[pallet::getter(fn keys)] /// Keys of the current authority set. pub(super) type Keys = StorageValue<_, WeakBoundedVec, ValueQuery>; #[pallet::storage] - #[pallet::getter(fn next_keys)] /// Keys of the next authority set. 
 	pub(super) type NextKeys<T: Config> =
 		StorageValue<_, WeakBoundedVec<AuthorityId, T::MaxAuthorities>, ValueQuery>;
diff --git a/substrate/frame/authorship/Cargo.toml b/substrate/frame/authorship/Cargo.toml
index 2bfd59a48e10e0cee249d62680e66d7ce426cc3f..dd78e3404ef0b3c30adfa5b24bd4444c2fec10d1 100644
--- a/substrate/frame/authorship/Cargo.toml
+++ b/substrate/frame/authorship/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 impl-trait-for-tuples = "0.2.2"
diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml
index 9f6ef2bc05ea18e96786bd59b697c0290fd2d398..d06b7f7454648ea529595aa91e1e3a0f9e2b5486 100644
--- a/substrate/frame/babe/Cargo.toml
+++ b/substrate/frame/babe/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml
index 5deb504d0a4f2846748bd0fb4970cfbfdde41733..3429d2f28a6cc7445e383a28e5f817b876251f2b 100644
--- a/substrate/frame/bags-list/Cargo.toml
+++ b/substrate/frame/bags-list/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 # parity
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = [
diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml
index 1cc9ac5d8fd25c318351ede5e934d1ef93d89db6..4da14aea12809f09c797d0eb0925e1c43bfdc8d6 100644
--- a/substrate/frame/balances/Cargo.toml
+++ b/substrate/frame/balances/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
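The first `pallet-balances` hunk below moves the `#[cfg]` from the `integrity_test` function onto the assertion itself, so the hook still compiles, and its remaining checks still run, when `insecure_zero_ed` is enabled. Schematically, with plain values standing in for the pallet types (a hypothetical free function, not the pallet code):

```rust
fn integrity_test(existential_deposit: u128, max_freezes: u32, freeze_variants: u32) {
	// Only this assertion is feature-gated now, not the whole function.
	#[cfg(not(feature = "insecure_zero_ed"))]
	assert!(existential_deposit > 0, "The existential deposit must be greater than zero!");

	// This check runs under every feature combination.
	assert!(max_freezes >= freeze_variants, "Too few freezes for the number of freeze reasons");
}

fn main() {
	integrity_test(1, 8, 4);
}
```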
diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs
index 8d904d3d21b8acbf8469b8213d1220c277d3dd11..56eb81b49e2db695abbb8d705db7cef21f599416 100644
--- a/substrate/frame/balances/src/lib.rs
+++ b/substrate/frame/balances/src/lib.rs
@@ -542,8 +542,8 @@ pub mod pallet {
 
 	#[pallet::hooks]
 	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
-		#[cfg(not(feature = "insecure_zero_ed"))]
 		fn integrity_test() {
+			#[cfg(not(feature = "insecure_zero_ed"))]
 			assert!(
 				!<T as Config<I>>::ExistentialDeposit::get().is_zero(),
 				"The existential deposit must be greater than zero!"
@@ -555,6 +555,29 @@ pub mod pallet {
 				T::MaxFreezes::get(),
 				<T::RuntimeFreezeReason as VariantCount>::VARIANT_COUNT,
 			);
 		}
+
+		#[cfg(feature = "try-runtime")]
+		fn try_state(_n: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+			Holds::<T, I>::iter_keys().try_for_each(|k| {
+				if Holds::<T, I>::decode_len(k).unwrap_or(0) >
+					T::RuntimeHoldReason::VARIANT_COUNT as usize
+				{
+					Err("Found `Hold` with too many elements")
+				} else {
+					Ok(())
+				}
+			})?;
+
+			Freezes::<T, I>::iter_keys().try_for_each(|k| {
+				if Freezes::<T, I>::decode_len(k).unwrap_or(0) > T::MaxFreezes::get() as usize {
+					Err("Found `Freeze` with too many elements")
+				} else {
+					Ok(())
+				}
+			})?;
+
+			Ok(())
+		}
 	}
 
 	#[pallet::call(weight(<T as Config<I>>::WeightInfo))]
diff --git a/substrate/frame/balances/src/migration.rs b/substrate/frame/balances/src/migration.rs
index 38d9c07ff7e002f6a7a57189b7ebce6830b52c75..568c3fbb7cf0926f9f23b2bdd23d840fa85bad5c 100644
--- a/substrate/frame/balances/src/migration.rs
+++ b/substrate/frame/balances/src/migration.rs
@@ -91,7 +91,7 @@ impl<T: Config<I>, I: 'static> OnRuntimeUpgrade for ResetInactive<T, I> {
 			StorageVersion::new(0).put::<Pallet<T, I>>();
 			log::info!(target: LOG_TARGET, "Storage to version 0");
 
-			T::DbWeight::get().reads_writes(1, 2)
+			T::DbWeight::get().reads_writes(1, 3)
 		} else {
 			log::info!(
 				target: LOG_TARGET,
diff --git a/substrate/frame/balances/src/tests/general_tests.rs b/substrate/frame/balances/src/tests/general_tests.rs
index 0f3e015d0a89242dd321285b84a134bd907702ec..a855fae5616af369776454955bd83ffaa5656303 100644
--- a/substrate/frame/balances/src/tests/general_tests.rs
+++ b/substrate/frame/balances/src/tests/general_tests.rs
@@ -109,3 +109,35 @@ fn regression_historic_acc_does_not_evaporate_reserve() {
 		});
 	});
 }
+
+#[cfg(feature = "try-runtime")]
+#[test]
+fn try_state_works() {
+	use crate::{Config, Freezes, Holds};
+	use frame_support::{
+		storage,
+		traits::{Get, Hooks, VariantCount},
+	};
+
+	ExtBuilder::default().build_and_execute_with(|| {
+		storage::unhashed::put(
+			&Holds::<Test>::hashed_key_for(1),
+			&vec![0u8; <Test as Config>::RuntimeHoldReason::VARIANT_COUNT as usize + 1],
+		);
+
+		assert!(format!("{:?}", Balances::try_state(0).unwrap_err())
+			.contains("Found `Hold` with too many elements"));
+	});
+
+	ExtBuilder::default().build_and_execute_with(|| {
+		let max_freezes: u32 = <Test as Config>::MaxFreezes::get();
+
+		storage::unhashed::put(
+			&Freezes::<Test>::hashed_key_for(1),
+			&vec![0u8; max_freezes as usize + 1],
+		);
+
+		assert!(format!("{:?}", Balances::try_state(0).unwrap_err())
+			.contains("Found `Freeze` with too many elements"));
+	});
+}
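The `try_state` checks above can stay cheap because `decode_len` never decodes the stored elements: a SCALE-encoded `Vec`/`BoundedVec` starts with a compact-encoded length prefix. A standalone illustration using `parity-scale-codec` directly (a sketch, not part of the patch):

```rust
use parity_scale_codec::{Compact, Decode, Encode};

fn main() {
	// A SCALE-encoded Vec<u8> is a compact length prefix followed by the elements.
	let encoded = vec![1u8, 2, 3].encode();

	// Reading just the prefix recovers the element count without touching the rest;
	// this is the property that `decode_len` relies on.
	let len = Compact::<u32>::decode(&mut &encoded[..]).unwrap().0;
	assert_eq!(len, 3);
}
```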
"https://substrate.io" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } serde = { optional = true, workspace = true, default-features = true } diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index 04a10314a959f496436d6b988e8a3a56b747c35a..b5824ab2ec2eeaa2126bd72f6832e87736a8f914 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } linregress = { version = "0.5.1", optional = true } log = { workspace = true } paste = "1.0" diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml index 5d3aaa7890488018eb3715ed13845a7e3d130b50..e4f3c272a63e4938aa2efd6fa30784543383c82a 100644 --- a/substrate/frame/benchmarking/pov/Cargo.toml +++ b/substrate/frame/benchmarking/pov/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "..", default-features = false } frame-support = { path = "../../support", default-features = false } diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml index 3307e47e9818934d629ff1989aff08ff03de9acf..fac0054359060fd3c63741eaa1897c66990f5523 100644 --- a/substrate/frame/bounties/Cargo.toml +++ b/substrate/frame/bounties/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } log = { workspace = true } diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index c099fc48b7a3bceb94e8b026386c84aed4edf818..c930868bf10159326bae978553eadcbd3fe03128 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -303,12 +303,10 @@ pub mod pallet { /// Number of bounty proposals that have been made. #[pallet::storage] - #[pallet::getter(fn bounty_count)] pub type BountyCount, I: 'static = ()> = StorageValue<_, BountyIndex, ValueQuery>; /// Bounties that have been made. #[pallet::storage] - #[pallet::getter(fn bounties)] pub type Bounties, I: 'static = ()> = StorageMap< _, Twox64Concat, @@ -318,13 +316,11 @@ pub mod pallet { /// The description of each bounty. #[pallet::storage] - #[pallet::getter(fn bounty_descriptions)] pub type BountyDescriptions, I: 'static = ()> = StorageMap<_, Twox64Concat, BountyIndex, BoundedVec>; /// Bounty indices that have been approved but not yet funded. 
diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs
index de747db53749902ad6164265e3cad139ddefa7da..a89f4ff9fbf301a8d03d9ad040a1b4009e7b18f6 100644
--- a/substrate/frame/bounties/src/tests.rs
+++ b/substrate/frame/bounties/src/tests.rs
@@ -534,7 +534,7 @@ fn propose_bounty_works() {
 		assert_eq!(Balances::free_balance(0), 100 - deposit);
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee: 0,
@@ -545,9 +545,12 @@ fn propose_bounty_works() {
 			}
 		);
 
-		assert_eq!(Bounties::bounty_descriptions(0).unwrap(), b"1234567890".to_vec());
+		assert_eq!(
+			pallet_bounties::BountyDescriptions::<Test>::get(0).unwrap(),
+			b"1234567890".to_vec()
+		);
 
-		assert_eq!(Bounties::bounty_count(), 1);
+		assert_eq!(pallet_bounties::BountyCount::<Test>::get(), 1);
 	});
 }
 
@@ -598,10 +601,10 @@ fn close_bounty_works() {
 		assert_eq!(Balances::reserved_balance(0), 0);
 		assert_eq!(Balances::free_balance(0), 100 - deposit);
 
-		assert_eq!(Bounties::bounties(0), None);
+		assert_eq!(pallet_bounties::Bounties::<Test>::get(0), None);
 		assert!(!pallet_treasury::Proposals::<Test>::contains_key(0));
 
-		assert_eq!(Bounties::bounty_descriptions(0), None);
+		assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0), None);
 	});
 }
 
@@ -622,7 +625,7 @@ fn approve_bounty_works() {
 		let deposit: u64 = 80 + 5;
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee: 0,
@@ -632,7 +635,7 @@ fn approve_bounty_works() {
 				status: BountyStatus::Approved,
 			}
 		);
-		assert_eq!(Bounties::bounty_approvals(), vec![0]);
+		assert_eq!(pallet_bounties::BountyApprovals::<Test>::get(), vec![0]);
 
 		assert_noop!(
 			Bounties::close_bounty(RuntimeOrigin::root(), 0),
@@ -650,7 +653,7 @@ fn approve_bounty_works() {
 		assert_eq!(Balances::free_balance(0), 100);
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee: 0,
@@ -693,7 +696,7 @@ fn assign_curator_works() {
 		assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee));
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee,
@@ -720,7 +723,7 @@ fn assign_curator_works() {
 		let expected_deposit = Bounties::calculate_curator_deposit(&fee);
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee,
@@ -755,7 +758,7 @@ fn unassign_curator_works() {
 		assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(4), 0));
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee,
@@ -773,7 +776,7 @@ fn unassign_curator_works() {
 		assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0));
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee,
@@ -817,7 +820,7 @@ fn award_and_claim_bounty_works() {
 		assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 3));
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee,
@@ -851,8 +854,8 @@ fn award_and_claim_bounty_works() {
 		assert_eq!(Balances::free_balance(3), 56);
 		assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0);
 
-		assert_eq!(Bounties::bounties(0), None);
-		assert_eq!(Bounties::bounty_descriptions(0), None);
+		assert_eq!(pallet_bounties::Bounties::<Test>::get(0), None);
+		assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0), None);
 	});
 }
 
@@ -892,8 +895,8 @@ fn claim_handles_high_fee() {
 		assert_eq!(Balances::free_balance(3), 0);
 		assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0);
 
-		assert_eq!(Bounties::bounties(0), None);
-		assert_eq!(Bounties::bounty_descriptions(0), None);
+		assert_eq!(pallet_bounties::Bounties::<Test>::get(0), None);
+		assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0), None);
 	});
 }
 
@@ -918,7 +921,7 @@ fn cancel_and_refund() {
 		));
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee: 0,
@@ -978,8 +981,8 @@ fn award_and_cancel() {
 		assert_eq!(Balances::free_balance(0), 95);
 		assert_eq!(Balances::reserved_balance(0), 0);
 
-		assert_eq!(Bounties::bounties(0), None);
-		assert_eq!(Bounties::bounty_descriptions(0), None);
+		assert_eq!(pallet_bounties::Bounties::<Test>::get(0), None);
+		assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0), None);
 	});
 }
 
@@ -1015,7 +1018,7 @@ fn expire_and_unassign() {
 		assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(0), 0));
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee: 10,
@@ -1065,7 +1068,7 @@ fn extend_expiry() {
 		assert_ok!(Bounties::extend_bounty_expiry(RuntimeOrigin::signed(4), 0, Vec::new()));
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee: 10,
@@ -1079,7 +1082,7 @@ fn extend_expiry() {
 		assert_ok!(Bounties::extend_bounty_expiry(RuntimeOrigin::signed(4), 0, Vec::new()));
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee: 10,
@@ -1190,7 +1193,7 @@ fn unassign_curator_self() {
 		assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(1), 0));
 
 		assert_eq!(
-			Bounties::bounties(0).unwrap(),
+			pallet_bounties::Bounties::<Test>::get(0).unwrap(),
 			Bounty {
 				proposer: 0,
 				fee: 10,
diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml
index ce8d41530451f978873a15a9a2e5538976ae706e..8a84fbfdfb701c87cde1dfb935ad1dcb6cf56e04 100644
--- a/substrate/frame/broker/Cargo.toml
+++ b/substrate/frame/broker/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 log = { workspace = true }
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 bitvec = { version = "1.0.0", default-features = false }
 sp-api = { path = "../../primitives/api", default-features = false }
@@ -30,6 +30,7 @@ frame-system = { path = "../system", default-features = false }
 
 [dev-dependencies]
 sp-io = { path = "../../primitives/io" }
+sp-tracing = { path = "../../primitives/tracing" }
 pretty_assertions = "1.3.0"
 
 [features]
diff --git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml
index ce8d41530451f978873a15a9a2e5538976ae706e..8a84fbfdfb701c87cde1dfb935ad1dcb6cf56e04 100644
--- a/substrate/frame/broker/Cargo.toml
+++ b/substrate/frame/broker/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
 log = { workspace = true }
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 bitvec = { version = "1.0.0", default-features = false }
 sp-api = { path = "../../primitives/api", default-features = false }
@@ -30,6 +30,7 @@ frame-system = { path = "../system", default-features = false }

 [dev-dependencies]
 sp-io = { path = "../../primitives/io" }
+sp-tracing = { path = "../../primitives/tracing" }
 pretty_assertions = "1.3.0"

 [features]
diff --git a/substrate/frame/broker/src/adapt_price.rs b/substrate/frame/broker/src/adapt_price.rs
index fbcd7afdf0da059fb7023de3422f15b2344d5407..9b2e1dd8997bd68adfe8aef71bdad74f8c0ff529 100644
--- a/substrate/frame/broker/src/adapt_price.rs
+++ b/substrate/frame/broker/src/adapt_price.rs
@@ -17,59 +17,122 @@

 #![deny(missing_docs)]

-use crate::CoreIndex;
+use crate::{CoreIndex, SaleInfoRecord};
 use sp_arithmetic::{traits::One, FixedU64};
-use sp_runtime::Saturating;
+use sp_runtime::{FixedPointNumber, FixedPointOperand, Saturating};
+
+/// Performance of a past sale.
+#[derive(Copy, Clone)]
+pub struct SalePerformance<Balance> {
+	/// The price at which the last core was sold.
+	///
+	/// Will be `None` if no cores have been offered.
+	pub sellout_price: Option<Balance>,
+
+	/// The minimum price that was achieved in this sale.
+	pub end_price: Balance,
+
+	/// The number of cores we want to sell, ideally.
+	pub ideal_cores_sold: CoreIndex,
+
+	/// Number of cores which are/have been offered for sale.
+	pub cores_offered: CoreIndex,
+
+	/// Number of cores which have been sold; never more than `cores_offered`.
+	pub cores_sold: CoreIndex,
+}
+
+/// Result of `AdaptPrice::adapt_price`.
+#[derive(Copy, Clone)]
+pub struct AdaptedPrices<Balance> {
+	/// New minimum price to use.
+	pub end_price: Balance,
+
+	/// Price the controller is optimizing for.
+	///
+	/// This is the price "expected" by the controller based on the previous sale. Assuming
+	/// stable market conditions, sales in this period should happen around this price.
+	///
+	/// Think of it as the expected market price. It can be used to determine what to charge
+	/// for renewals that don't yet have any price information, e.g. for expired legacy leases.
+	pub target_price: Balance,
+}
+
+impl<Balance: Copy> SalePerformance<Balance> {
+	/// Construct performance via data from a `SaleInfoRecord`.
+	pub fn from_sale<BlockNumber>(record: &SaleInfoRecord<Balance, BlockNumber>) -> Self {
+		Self {
+			sellout_price: record.sellout_price,
+			end_price: record.end_price,
+			ideal_cores_sold: record.ideal_cores_sold,
+			cores_offered: record.cores_offered,
+			cores_sold: record.cores_sold,
+		}
+	}
+
+	#[cfg(test)]
+	fn new(sellout_price: Option<Balance>, end_price: Balance) -> Self {
+		Self { sellout_price, end_price, ideal_cores_sold: 0, cores_offered: 0, cores_sold: 0 }
+	}
+}

 /// Type for determining how to set price.
-pub trait AdaptPrice {
+pub trait AdaptPrice<Balance> {
 	/// Return the factor by which the regular price must be multiplied during the leadin period.
 	///
 	/// - `when`: The amount through the leadin period; between zero and one.
 	fn leadin_factor_at(when: FixedU64) -> FixedU64;
-	/// Return the correction factor by which the regular price must be multiplied based on market
-	/// performance.
-	///
-	/// - `sold`: The number of cores sold.
-	/// - `target`: The target number of cores to be sold (must be larger than zero).
-	/// - `limit`: The maximum number of cores to be sold.
-	fn adapt_price(sold: CoreIndex, target: CoreIndex, limit: CoreIndex) -> FixedU64;
+
+	/// Return adapted prices for the next sale, based on the previous sale's performance.
+	fn adapt_price(performance: SalePerformance<Balance>) -> AdaptedPrices<Balance>;
 }

-impl AdaptPrice for () {
+impl<Balance: Copy> AdaptPrice<Balance> for () {
 	fn leadin_factor_at(_: FixedU64) -> FixedU64 {
 		FixedU64::one()
 	}
-	fn adapt_price(_: CoreIndex, _: CoreIndex, _: CoreIndex) -> FixedU64 {
-		FixedU64::one()
+	fn adapt_price(performance: SalePerformance<Balance>) -> AdaptedPrices<Balance> {
+		let price = performance.sellout_price.unwrap_or(performance.end_price);
+		AdaptedPrices { end_price: price, target_price: price }
 	}
 }

-/// Simple implementation of `AdaptPrice` giving a monotonic leadin and a linear price change based
-/// on cores sold.
-pub struct Linear;
-impl AdaptPrice for Linear {
+/// Simple implementation of `AdaptPrice` with two linear phases:
+///
+/// A steep one down to the target price, which is 1/10 of the maximum price, and a flatter one
+/// down to the minimum price, which is 1/100 of the maximum price.
+pub struct CenterTargetPrice<Balance>(core::marker::PhantomData<Balance>);
+
+impl<Balance: FixedPointOperand> AdaptPrice<Balance> for CenterTargetPrice<Balance> {
 	fn leadin_factor_at(when: FixedU64) -> FixedU64 {
-		FixedU64::from(2).saturating_sub(when)
-	}
-	fn adapt_price(sold: CoreIndex, target: CoreIndex, limit: CoreIndex) -> FixedU64 {
-		if sold <= target {
-			// Range of [0.5, 1.0].
-			FixedU64::from_rational(1, 2).saturating_add(FixedU64::from_rational(
-				sold.into(),
-				target.saturating_mul(2).into(),
-			))
+		if when <= FixedU64::from_rational(1, 2) {
+			FixedU64::from(100).saturating_sub(when.saturating_mul(180.into()))
 		} else {
-			// Range of (1.0, 2].
-
-			// Unchecked math: In this branch we know that sold > target. The limit must be >= sold
-			// by construction, and thus target must be < limit.
-			FixedU64::one().saturating_add(FixedU64::from_rational(
-				(sold - target).into(),
-				(limit - target).into(),
-			))
+			FixedU64::from(19).saturating_sub(when.saturating_mul(18.into()))
 		}
 	}
+
+	fn adapt_price(performance: SalePerformance<Balance>) -> AdaptedPrices<Balance> {
+		let Some(sellout_price) = performance.sellout_price else {
+			return AdaptedPrices {
+				end_price: performance.end_price,
+				target_price: FixedU64::from(10).saturating_mul_int(performance.end_price),
+			}
+		};
+
+		let price = FixedU64::from_rational(1, 10).saturating_mul_int(sellout_price);
+		let price = if price == Balance::zero() {
+			// We could never recover from a price of zero, so keep the full sell-out price.
+			sellout_price
+		} else {
+			price
+		};
+
+		AdaptedPrices { end_price: price, target_price: sellout_price }
+	}
+}
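To make the new interface concrete, here is a minimal sketch (not part of the PR) of a hypothetical `AdaptPrice` implementation that dampens adjustments by averaging the old floor with a tenth of the sell-out price. It assumes the trait and helper types above are re-exported at the crate root, as they are for this pallet:

```rust
use core::marker::PhantomData;
use pallet_broker::{AdaptPrice, AdaptedPrices, SalePerformance};
use sp_arithmetic::FixedU64;
use sp_runtime::{FixedPointNumber, FixedPointOperand, Saturating};

/// Hypothetical damped controller: moves the end price only half-way
/// towards a tenth of the sell-out price instead of jumping there.
pub struct Damped<Balance>(PhantomData<Balance>);

impl<Balance: FixedPointOperand> AdaptPrice<Balance> for Damped<Balance> {
	fn leadin_factor_at(when: FixedU64) -> FixedU64 {
		// Simple linear leadin from 2x down to 1x.
		FixedU64::from(2).saturating_sub(when)
	}

	fn adapt_price(performance: SalePerformance<Balance>) -> AdaptedPrices<Balance> {
		let target = performance.sellout_price.unwrap_or(performance.end_price);
		// Average the old floor with a tenth of the new target.
		let tenth = FixedU64::from_rational(1, 10).saturating_mul_int(target);
		let end_price = FixedU64::from_rational(1, 2)
			.saturating_mul_int(performance.end_price.saturating_add(tenth));
		AdaptedPrices { end_price, target_price: target }
	}
}
```
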
 
 #[cfg(test)]
@@ -78,37 +141,103 @@ mod tests {

 	#[test]
 	fn linear_no_panic() {
-		for limit in 0..10 {
-			for target in 1..10 {
-				for sold in 0..=limit {
-					let price = Linear::adapt_price(sold, target, limit);
-
-					if sold > target {
-						assert!(price > FixedU64::one());
-					} else {
-						assert!(price <= FixedU64::one());
-					}
-				}
+		for sellout in 0..=11 {
+			for price in 0..10 {
+				let sellout_price = if sellout == 11 { None } else { Some(sellout) };
+				CenterTargetPrice::adapt_price(SalePerformance::new(sellout_price, price));
 			}
 		}
 	}

 	#[test]
-	fn linear_bound_check() {
-		// Using constraints from pallet implementation i.e. `limit >= sold`.
-		// Check extremes
-		let limit = 10;
-		let target = 5;
-
-		// Maximally sold: `sold == limit`
-		assert_eq!(Linear::adapt_price(limit, target, limit), FixedU64::from_float(2.0));
-		// Ideally sold: `sold == target`
-		assert_eq!(Linear::adapt_price(target, target, limit), FixedU64::one());
-		// Minimally sold: `sold == 0`
-		assert_eq!(Linear::adapt_price(0, target, limit), FixedU64::from_float(0.5));
-		// Optimistic target: `target == limit`
-		assert_eq!(Linear::adapt_price(limit, limit, limit), FixedU64::one());
-		// Pessimistic target: `target == 0`
-		assert_eq!(Linear::adapt_price(limit, 0, limit), FixedU64::from_float(2.0));
+	fn leadin_price_bound_check() {
+		assert_eq!(
+			CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::from(0)),
+			FixedU64::from(100)
+		);
+		assert_eq!(
+			CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::from_rational(1, 4)),
+			FixedU64::from(55)
+		);
+
+		assert_eq!(
+			CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::from_float(0.5)),
+			FixedU64::from(10)
+		);
+
+		assert_eq!(
+			CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::from_rational(3, 4)),
+			FixedU64::from_float(5.5)
+		);
+		assert_eq!(CenterTargetPrice::<u64>::leadin_factor_at(FixedU64::one()), FixedU64::one());
+	}
+
+	#[test]
+	fn no_op_sale_is_good() {
+		let prices = CenterTargetPrice::adapt_price(SalePerformance::new(None, 1));
+		assert_eq!(prices.target_price, 10);
+		assert_eq!(prices.end_price, 1);
+	}
+
+	#[test]
+	fn price_stays_stable_on_optimal_sale() {
+		// Check that the price stays stable if sold at the optimal price:
+		let mut performance = SalePerformance::new(Some(1000), 100);
+		for _ in 0..10 {
+			let prices = CenterTargetPrice::adapt_price(performance);
+			performance.sellout_price = Some(1000);
+			performance.end_price = prices.end_price;
+
+			assert!(prices.end_price <= 101);
+			assert!(prices.end_price >= 99);
+			assert!(prices.target_price <= 1001);
+			assert!(prices.target_price >= 999);
+		}
+	}
+
+	#[test]
+	fn price_adjusts_correctly_upwards() {
+		let performance = SalePerformance::new(Some(10_000), 100);
+		let prices = CenterTargetPrice::adapt_price(performance);
+		assert_eq!(prices.target_price, 10_000);
+		assert_eq!(prices.end_price, 1000);
+	}
+
+	#[test]
+	fn price_adjusts_correctly_downwards() {
+		let performance = SalePerformance::new(Some(100), 100);
+		let prices = CenterTargetPrice::adapt_price(performance);
+		assert_eq!(prices.target_price, 100);
+		assert_eq!(prices.end_price, 10);
+	}
+
+	#[test]
+	fn price_never_goes_to_zero_and_recovers() {
+		// Check that the price neither reaches zero nor gets stuck at the bottom:
+		let sellout_price = 1;
+		let mut performance = SalePerformance::new(Some(sellout_price), 1);
+		for _ in 0..11 {
+			let prices = CenterTargetPrice::adapt_price(performance);
+			performance.sellout_price = Some(sellout_price);
+			performance.end_price = prices.end_price;
+
+			assert!(prices.end_price <= sellout_price);
+			assert!(prices.end_price > 0);
+		}
+	}
+
+	#[test]
+	fn renewal_price_is_correct_on_no_sale() {
+		let performance = SalePerformance::new(None, 100);
+		let prices = CenterTargetPrice::adapt_price(performance);
+		assert_eq!(prices.target_price, 1000);
+		assert_eq!(prices.end_price, 100);
+	}
+
+	#[test]
+	fn renewal_price_is_sell_out() {
+		let performance = SalePerformance::new(Some(1000), 100);
+		let prices = CenterTargetPrice::adapt_price(performance);
+		assert_eq!(prices.target_price, 1000);
+	}
 }
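The `leadin_price_bound_check` test pins down the shape of the new price curve: over the first half of the leadin the factor falls steeply from 100 to 10 (the target price), and over the second half it falls gently from 10 to 1 (the end price). A standalone sketch of the same piecewise-linear curve using plain floats, purely for illustration (the pallet itself works in `FixedU64`):

```rust
/// Piecewise-linear leadin factor: 100 -> 10 over the first half, 10 -> 1 over the second.
fn leadin_factor(when: f64) -> f64 {
	assert!((0.0..=1.0).contains(&when));
	if when <= 0.5 {
		100.0 - 180.0 * when
	} else {
		19.0 - 18.0 * when
	}
}

fn main() {
	assert_eq!(leadin_factor(0.0), 100.0); // the sale opens at 100x the end price
	assert_eq!(leadin_factor(0.25), 55.0);
	assert_eq!(leadin_factor(0.5), 10.0); // the target price is 10x the end price
	assert_eq!(leadin_factor(0.75), 5.5);
	assert_eq!(leadin_factor(1.0), 1.0); // the leadin closes at the end price
}
```
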
diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs
index 7533e3dc68c41fa54d39b7bb234ecaafe4e56fab..9cb5ad096c83b74e50a77993c2c3e0049ec2c876 100644
--- a/substrate/frame/broker/src/benchmarking.rs
+++ b/substrate/frame/broker/src/benchmarking.rs
@@ -214,8 +214,8 @@ mod benches {
 			Event::SaleInitialized {
 				sale_start: 2u32.into(),
 				leadin_length: 1u32.into(),
-				start_price: 20u32.into(),
-				regular_price: 10u32.into(),
+				start_price: 1000u32.into(),
+				end_price: 10u32.into(),
 				region_begin: latest_region_begin + config.region_length,
 				region_end: latest_region_begin + config.region_length * 2,
 				ideal_cores_sold: 0,
@@ -288,8 +288,8 @@ mod benches {
 		#[extrinsic_call]
 		_(RawOrigin::Signed(caller), region.core);

-		let id = AllowedRenewalId { core: region.core, when: region.begin + region_len * 2 };
-		assert!(AllowedRenewals::<T>::get(id).is_some());
+		let id = PotentialRenewalId { core: region.core, when: region.begin + region_len * 2 };
+		assert!(PotentialRenewals::<T>::get(id).is_some());

 		Ok(())
 	}
@@ -670,20 +670,20 @@ mod benches {
 			(T::TimeslicePeriod::get() * (region_len * 3).into()).try_into().ok().unwrap(),
 		);

-		let id = AllowedRenewalId { core, when };
-		let record = AllowedRenewalRecord {
+		let id = PotentialRenewalId { core, when };
+		let record = PotentialRenewalRecord {
 			price: 1u32.into(),
 			completion: CompletionStatus::Complete(new_schedule()),
 		};
-		AllowedRenewals::<T>::insert(id, record);
+		PotentialRenewals::<T>::insert(id, record);

 		let caller: T::AccountId = whitelisted_caller();

 		#[extrinsic_call]
 		_(RawOrigin::Signed(caller), core, when);

-		assert!(AllowedRenewals::<T>::get(id).is_none());
-		assert_last_event::<T>(Event::AllowedRenewalDropped { core, when }.into());
+		assert!(PotentialRenewals::<T>::get(id).is_none());
+		assert_last_event::<T>(Event::PotentialRenewalDropped { core, when }.into());

 		Ok(())
 	}
@@ -776,12 +776,12 @@ mod benches {
 		let config = new_config_record::<T>();

 		let now = frame_system::Pallet::<T>::block_number();
-		let price = 10u32.into();
+		let end_price = 10u32.into();
 		let commit_timeslice = Broker::<T>::latest_timeslice_ready_to_commit(&config);
 		let sale = SaleInfoRecordOf::<T> {
 			sale_start: now,
 			leadin_length: Zero::zero(),
-			price,
+			end_price,
 			sellout_price: None,
 			region_begin: commit_timeslice,
 			region_end: commit_timeslice.saturating_add(config.region_length),
@@ -815,8 +815,8 @@ mod benches {
 			Event::SaleInitialized {
 				sale_start: 2u32.into(),
 				leadin_length: 1u32.into(),
-				start_price: 20u32.into(),
-				regular_price: 10u32.into(),
+				start_price: 1000u32.into(),
+				end_price: 10u32.into(),
 				region_begin: sale.region_begin + config.region_length,
 				region_end: sale.region_end + config.region_length,
 				ideal_cores_sold: 0,
diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs
index 45a0a514c307adaeb8be65f514e6c46251210d06..79c1a1f79796330a7377f06ef5f1a62045eb2848 100644
--- a/substrate/frame/broker/src/dispatchable_impls.rs
+++ b/substrate/frame/broker/src/dispatchable_impls.rs
@@ -70,7 +70,10 @@ impl<T: Config> Pallet<T> {
 		Ok(())
 	}

-	pub(crate) fn do_start_sales(price: BalanceOf<T>, extra_cores: CoreIndex) -> DispatchResult {
+	pub(crate) fn do_start_sales(
+		end_price: BalanceOf<T>,
+		extra_cores: CoreIndex,
+	) -> DispatchResult {
 		let config = Configuration::<T>::get().ok_or(Error::<T>::Uninitialized)?;

 		// Determine the core count
@@ -93,7 +96,7 @@ impl<T: Config> Pallet<T> {
 		let old_sale = SaleInfoRecord {
 			sale_start: now,
 			leadin_length: Zero::zero(),
-			price,
+			end_price,
 			sellout_price: None,
 			region_begin: commit_timeslice,
 			region_end: commit_timeslice.saturating_add(config.region_length),
@@ -102,7 +105,7 @@ impl<T: Config> Pallet<T> {
 			cores_offered: 0,
 			cores_sold: 0,
 		};
-		Self::deposit_event(Event::<T>::SalesStarted { price, core_count });
+		Self::deposit_event(Event::<T>::SalesStarted { price: end_price, core_count });
 		Self::rotate_sale(old_sale, &config, &status);
 		Status::<T>::put(&status);
 		Ok(())
@@ -121,12 +124,8 @@ impl<T: Config> Pallet<T> {
 		let price = Self::sale_price(&sale, now);
 		ensure!(price_limit >= price, Error::<T>::Overpriced);

-		Self::charge(&who, price)?;
-		let core = sale.first_core.saturating_add(sale.cores_sold);
-		sale.cores_sold.saturating_inc();
-		if sale.cores_sold <= sale.ideal_cores_sold || sale.sellout_price.is_none() {
-			sale.sellout_price = Some(price);
-		}
+		let core = Self::purchase_core(&who, price, &mut sale)?;
+
 		SaleInfo::<T>::put(&sale);
 		let id =
 			Self::issue(core, sale.region_begin, sale.region_end, Some(who.clone()), Some(price));
@@ -135,7 +134,7 @@ impl<T: Config> Pallet<T> {
 		Ok(id)
 	}

-	/// Must be called on a core in `AllowedRenewals` whose value is a timeslice equal to the
+	/// Must be called on a core in `PotentialRenewals` whose value is a timeslice equal to the
 	/// current sale status's `region_end`.
 	pub(crate) fn do_renew(who: T::AccountId, core: CoreIndex) -> Result<CoreIndex, DispatchError> {
 		let config = Configuration::<T>::get().ok_or(Error::<T>::Uninitialized)?;
@@ -143,14 +142,15 @@ impl<T: Config> Pallet<T> {
 		let mut sale = SaleInfo::<T>::get().ok_or(Error::<T>::NoSales)?;
 		Self::ensure_cores_for_sale(&status, &sale)?;

-		let renewal_id = AllowedRenewalId { core, when: sale.region_begin };
-		let record = AllowedRenewals::<T>::get(renewal_id).ok_or(Error::<T>::NotAllowed)?;
+		let renewal_id = PotentialRenewalId { core, when: sale.region_begin };
+		let record = PotentialRenewals::<T>::get(renewal_id).ok_or(Error::<T>::NotAllowed)?;
 		let workload =
 			record.completion.drain_complete().ok_or(Error::<T>::IncompleteAssignment)?;

 		let old_core = core;
-		let core = sale.first_core.saturating_add(sale.cores_sold);
-		Self::charge(&who, record.price)?;
+
+		let core = Self::purchase_core(&who, record.price, &mut sale)?;
+
 		Self::deposit_event(Event::Renewed {
 			who,
 			old_core,
@@ -161,19 +161,24 @@ impl<T: Config> Pallet<T> {
 			workload: workload.clone(),
 		});

-		sale.cores_sold.saturating_inc();
-
 		Workplan::<T>::insert((sale.region_begin, core), &workload);

 		let begin = sale.region_end;
 		let price_cap = record.price + config.renewal_bump * record.price;
 		let now = frame_system::Pallet::<T>::block_number();
 		let price = Self::sale_price(&sale, now).min(price_cap);
-		let new_record = AllowedRenewalRecord { price, completion: Complete(workload) };
-		AllowedRenewals::<T>::remove(renewal_id);
-		AllowedRenewals::<T>::insert(AllowedRenewalId { core, when: begin }, &new_record);
+		log::debug!(
+			"Renew with: sale price: {:?}, price cap: {:?}, old price: {:?}",
+			price,
+			price_cap,
+			record.price
+		);
+		let new_record = PotentialRenewalRecord { price, completion: Complete(workload) };
+		PotentialRenewals::<T>::remove(renewal_id);
+		PotentialRenewals::<T>::insert(PotentialRenewalId { core, when: begin }, &new_record);
 		SaleInfo::<T>::put(&sale);
 		if let Some(workload) = new_record.completion.drain_complete() {
+			log::debug!("Recording renewable price for next run: {:?}", price);
 			Self::deposit_event(Event::Renewable { core, price, begin, workload });
 		}
 		Ok(core)
@@ -281,17 +286,19 @@ impl<T: Config> Pallet<T> {
 		let duration = region.end.saturating_sub(region_id.begin);
 		if duration == config.region_length && finality == Finality::Final {
 			if let Some(price) = region.paid {
-				let renewal_id = AllowedRenewalId { core: region_id.core, when: region.end };
-				let assigned = match AllowedRenewals::<T>::get(renewal_id) {
-					Some(AllowedRenewalRecord { completion: Partial(w), price: p })
+				let renewal_id = PotentialRenewalId { core: region_id.core, when: region.end };
+				let assigned = match PotentialRenewals::<T>::get(renewal_id) {
+					Some(PotentialRenewalRecord { completion: Partial(w), price: p })
 						if price == p => w,
 					_ => CoreMask::void(),
 				} | region_id.mask;
 				let workload =
 					if assigned.is_complete() { Complete(workplan) } else { Partial(assigned) };
-				let record = AllowedRenewalRecord { price, completion: workload };
-				AllowedRenewals::<T>::insert(&renewal_id, &record);
+				let record = PotentialRenewalRecord { price, completion: workload };
+				// Note: This entry alone does not yet actually allow renewals (the completion
+				// status has to be complete for `do_renew` to accept it).
+				PotentialRenewals::<T>::insert(&renewal_id, &record);
 				if let Some(workload) = record.completion.drain_complete() {
 					Self::deposit_event(Event::Renewable {
 						core: region_id.core,
@@ -444,10 +451,10 @@ impl<T: Config> Pallet<T> {
 	pub(crate) fn do_drop_renewal(core: CoreIndex, when: Timeslice) -> DispatchResult {
 		let status = Status::<T>::get().ok_or(Error::<T>::Uninitialized)?;
 		ensure!(status.last_committed_timeslice >= when, Error::<T>::StillValid);
-		let id = AllowedRenewalId { core, when };
-		ensure!(AllowedRenewals::<T>::contains_key(id), Error::<T>::UnknownRenewal);
-		AllowedRenewals::<T>::remove(id);
-		Self::deposit_event(Event::AllowedRenewalDropped { core, when });
+		let id = PotentialRenewalId { core, when };
+		ensure!(PotentialRenewals::<T>::contains_key(id), Error::<T>::UnknownRenewal);
+		PotentialRenewals::<T>::remove(id);
+		Self::deposit_event(Event::PotentialRenewalDropped { core, when });
 		Ok(())
 	}
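The renewal path above caps the next renewal price at the old price plus `renewal_bump`, while still letting it follow the open-market sale price downwards. A small sketch of that capping rule in isolation, with hypothetical numbers (10% bump, as in the test configuration further below):

```rust
use sp_arithmetic::Perbill;

/// Next renewal price: the current open-market sale price, capped at the old
/// price bumped by `renewal_bump`.
fn renewal_price(old_price: u64, sale_price: u64) -> u64 {
	let renewal_bump = Perbill::from_percent(10);
	let price_cap = old_price + renewal_bump * old_price;
	sale_price.min(price_cap)
}

fn main() {
	// Hot market: the open-market price exceeds the cap, so the cap applies.
	assert_eq!(renewal_price(1000, 5000), 1100);
	// Cooling market: renewals follow the open market downwards.
	assert_eq!(renewal_price(1000, 700), 700);
}
```
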
diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs
index d59c4c9c6b24e4e576580715378a32b48f61eb91..0774c02e1cf10e7360c89dfc6c1dc11f28b563ac 100644
--- a/substrate/frame/broker/src/lib.rs
+++ b/substrate/frame/broker/src/lib.rs
@@ -65,7 +65,7 @@ pub mod pallet {
 	use sp_runtime::traits::{Convert, ConvertBack};
 	use sp_std::vec::Vec;

-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);

 	#[pallet::pallet]
 	#[pallet::storage_version(STORAGE_VERSION)]
@@ -92,7 +92,7 @@ pub mod pallet {
 		type Coretime: CoretimeInterface;

 		/// The algorithm to determine the next price on the basis of market performance.
-		type PriceAdapter: AdaptPrice;
+		type PriceAdapter: AdaptPrice<BalanceOf<Self>>;

 		/// Reversible conversion from local balance to Relay-chain balance. This will typically be
 		/// the `Identity`, but provided just in case the chains use different representations.
@@ -136,10 +136,12 @@ pub mod pallet {
 	#[pallet::storage]
 	pub type SaleInfo<T> = StorageValue<_, SaleInfoRecordOf<T>, OptionQuery>;

-	/// Records of allowed renewals.
+	/// Records of potential renewals.
+	///
+	/// A renewal will only actually be allowed if the `CompletionStatus` is `Complete`.
 	#[pallet::storage]
-	pub type AllowedRenewals<T> =
-		StorageMap<_, Twox64Concat, AllowedRenewalId, AllowedRenewalRecordOf<T>, OptionQuery>;
+	pub type PotentialRenewals<T> =
+		StorageMap<_, Twox64Concat, PotentialRenewalId, PotentialRenewalRecordOf<T>, OptionQuery>;

 	/// The current (unassigned or provisionally assigned) Regions.
 	#[pallet::storage]
@@ -290,14 +292,13 @@ pub mod pallet {
 			/// The price of Bulk Coretime at the beginning of the Leadin Period.
 			start_price: BalanceOf<T>,
 			/// The price of Bulk Coretime after the Leadin Period.
-			regular_price: BalanceOf<T>,
+			end_price: BalanceOf<T>,
 			/// The first timeslice of the Regions which are being sold in this sale.
 			region_begin: Timeslice,
 			/// The timeslice on which the Regions which are being sold in the sale terminate.
 			/// (i.e. One after the last timeslice which the Regions control.)
 			region_end: Timeslice,
-			/// The number of cores we want to sell, ideally. Selling this amount would result in
-			/// no change to the price for the next sale.
+			/// The number of cores we want to sell, ideally.
 			ideal_cores_sold: CoreIndex,
 			/// Number of cores which are/have been offered for sale.
 			cores_offered: CoreIndex,
@@ -413,7 +414,7 @@ pub mod pallet {
 			assignment: Vec<(CoreAssignment, PartsOf57600)>,
 		},
 		/// Some historical Instantaneous Core Pool payment record has been dropped.
-		AllowedRenewalDropped {
+		PotentialRenewalDropped {
 			/// The timeslice whose renewal is no longer available.
 			when: Timeslice,
 			/// The core whose workload is no longer available to be renewed for `when`.
 			core: CoreIndex,
 		},
@@ -558,7 +559,7 @@ pub mod pallet {
 		/// Begin the Bulk Coretime sales rotation.
 		///
 		/// - `origin`: Must be Root or pass `AdminOrigin`.
-		/// - `initial_price`: The price of Bulk Coretime in the first sale.
+		/// - `end_price`: The price of Bulk Coretime after the leadin period in the first sale.
 		/// - `extra_cores`: Number of extra cores that should be requested on top of the cores
 		///   required for `Reservations` and `Leases`.
 		///
@@ -570,11 +571,11 @@ pub mod pallet {
 		))]
 		pub fn start_sales(
 			origin: OriginFor<T>,
-			initial_price: BalanceOf<T>,
+			end_price: BalanceOf<T>,
 			extra_cores: CoreIndex,
 		) -> DispatchResultWithPostInfo {
 			T::AdminOrigin::ensure_origin_or_root(origin)?;
-			Self::do_start_sales(initial_price, extra_cores)?;
+			Self::do_start_sales(end_price, extra_cores)?;
 			Ok(Pays::No.into())
 		}
diff --git a/substrate/frame/broker/src/migration.rs b/substrate/frame/broker/src/migration.rs
index 95aa28250a628e8352f6e4852a5588cbdad2a5e8..f354e447fe84eba4df56d02e82c69519ba67cf7d 100644
--- a/substrate/frame/broker/src/migration.rs
+++ b/substrate/frame/broker/src/migration.rs
@@ -77,6 +77,57 @@ mod v1 {
 	}
 }

+mod v2 {
+	use super::*;
+	use frame_support::{
+		pallet_prelude::{OptionQuery, Twox64Concat},
+		storage_alias,
+	};
+
+	#[storage_alias]
+	pub type AllowedRenewals<T: Config> = StorageMap<
+		Pallet<T>,
+		Twox64Concat,
+		PotentialRenewalId,
+		PotentialRenewalRecordOf<T>,
+		OptionQuery,
+	>;
+
+	pub struct MigrateToV2Impl<T>(PhantomData<T>);
+
+	impl<T: Config> UncheckedOnRuntimeUpgrade for MigrateToV2Impl<T> {
+		fn on_runtime_upgrade() -> frame_support::weights::Weight {
+			let mut count = 0;
+			for (renewal_id, renewal) in AllowedRenewals::<T>::drain() {
+				PotentialRenewals::<T>::insert(renewal_id, renewal);
+				count += 1;
+			}
+
+			log::info!(
+				target: LOG_TARGET,
+				"Storage migration v2 for pallet-broker finished.",
+			);
+
+			// calculate and return migration weights
+			T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1)
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
+			Ok((AllowedRenewals::<T>::iter_keys().count() as u32).encode())
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
+			let old_count = u32::decode(&mut &state[..]).expect("Known good");
+			let new_count = PotentialRenewals::<T>::iter_values().count() as u32;
+
+			ensure!(old_count == new_count, "Renewal count should not change");
+			Ok(())
+		}
+	}
+}
+
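The `MigrateV1ToV2` alias defined just below wraps `MigrateToV2Impl` in `VersionedMigration`, so the drain-and-reinsert above only runs when the on-chain storage version is exactly 1, and bumps it to 2 afterwards. A runtime would typically schedule it with its other migrations, roughly like this (a sketch only; `Runtime`, `Block`, and the surrounding `Executive` setup are assumed to exist in the runtime crate):

```rust
/// All migrations expected to run at the next runtime upgrade (sketch).
pub type Migrations = (
	// Re-keys `AllowedRenewals` into `PotentialRenewals` and bumps
	// pallet-broker's storage version from 1 to 2.
	pallet_broker::migration::MigrateV1ToV2<Runtime>,
);

/// `Executive` then runs them as part of `on_runtime_upgrade`.
pub type Executive = frame_executive::Executive<
	Runtime,
	Block,
	frame_system::ChainContext<Runtime>,
	Runtime,
	AllPalletsWithSystem,
	Migrations,
>;
```
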
 /// Migrate the pallet storage from `0` to `1`.
 pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration<
 	0,
@@ -85,3 +136,11 @@ pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration<
 	Pallet<T>,
 	<T as frame_system::Config>::DbWeight,
 >;
+
+pub type MigrateV1ToV2<T> = frame_support::migrations::VersionedMigration<
+	1,
+	2,
+	v2::MigrateToV2Impl<T>,
+	Pallet<T>,
+	<T as frame_system::Config>::DbWeight,
+>;
diff --git a/substrate/frame/broker/src/mock.rs b/substrate/frame/broker/src/mock.rs
index 6219b4eff1b457294c9f3656e4be145c3639e2e0..6fff6aa10080c5a888ba35b4c3aeb1d180f9361b 100644
--- a/substrate/frame/broker/src/mock.rs
+++ b/substrate/frame/broker/src/mock.rs
@@ -199,7 +199,7 @@ impl crate::Config for Test {
 	type WeightInfo = ();
 	type PalletId = TestBrokerId;
 	type AdminOrigin = EnsureOneOrRoot;
-	type PriceAdapter = Linear;
+	type PriceAdapter = CenterTargetPrice<BalanceOf<Test>>;
 }

 pub fn advance_to(b: u64) {
@@ -255,6 +255,10 @@ impl TestExt {
 		Self(new_config())
 	}

+	pub fn new_with_config(config: ConfigRecordOf<Test>) -> Self {
+		Self(config)
+	}
+
 	pub fn advance_notice(mut self, advance_notice: Timeslice) -> Self {
 		self.0.advance_notice = advance_notice as u64;
 		self
diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs
index f929f0d50dcfaf3d8e7e77d07e260d54c75b1a27..e953afd6dc3c8818338d810d0c9d9d37c0b742af 100644
--- a/substrate/frame/broker/src/tests.rs
+++ b/substrate/frame/broker/src/tests.rs
@@ -25,7 +25,7 @@ use frame_support::{
 };
 use frame_system::RawOrigin::Root;
 use pretty_assertions::assert_eq;
-use sp_runtime::{traits::Get, TokenError};
+use sp_runtime::{traits::Get, Perbill, TokenError};
 use CoreAssignment::*;
 use CoretimeTraceItem::*;
 use Finality::*;
@@ -78,9 +78,9 @@ fn drop_renewal_works() {
 		let e = Error::<Test>::StillValid;
 		assert_noop!(Broker::do_drop_renewal(region.core, region.begin + 3), e);
 		advance_to(12);
-		assert_eq!(AllowedRenewals::<Test>::iter().count(), 1);
+		assert_eq!(PotentialRenewals::<Test>::iter().count(), 1);
 		assert_ok!(Broker::do_drop_renewal(region.core, region.begin + 3));
-		assert_eq!(AllowedRenewals::<Test>::iter().count(), 0);
+		assert_eq!(PotentialRenewals::<Test>::iter().count(), 0);
 		let e = Error::<Test>::UnknownRenewal;
 		assert_noop!(Broker::do_drop_renewal(region.core, region.begin + 3), e);
 	});
@@ -361,22 +361,91 @@ fn migration_works() {

 #[test]
 fn renewal_works() {
-	TestExt::new().endow(1, 1000).execute_with(|| {
+	let b = 100_000;
+	TestExt::new().endow(1, b).execute_with(move || {
 		assert_ok!(Broker::do_start_sales(100, 1));
 		advance_to(2);
 		let region = Broker::do_purchase(1, u64::max_value()).unwrap();
-		assert_eq!(balance(1), 900);
+		assert_eq!(balance(1), 99_900);
 		assert_ok!(Broker::do_assign(region, None, 1001, Final));
 		// Should now be renewable.
 		advance_to(6);
 		assert_noop!(Broker::do_purchase(1, u64::max_value()), Error::<Test>::TooEarly);
 		let core = Broker::do_renew(1, region.core).unwrap();
-		assert_eq!(balance(1), 800);
+		assert_eq!(balance(1), 99_800);
 		advance_to(8);
 		assert_noop!(Broker::do_purchase(1, u64::max_value()), Error::<Test>::SoldOut);
 		advance_to(12);
 		assert_ok!(Broker::do_renew(1, core));
-		assert_eq!(balance(1), 690);
+		assert_eq!(balance(1), 99_690);
+	});
+}
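The first purchase price of 910 in the `renewals_affect_price` test below falls straight out of the leadin curve: `do_start_sales(10, 1)` sets the end price to 10, the sale opens at 100x that (1000), and the purchase happens one block into the 20-block leadin, i.e. at `when = 1/20`. A quick check of that arithmetic in fixed-point style (plain integers scaled by 1e9, mirroring `FixedU64`):

```rust
fn main() {
	// Fixed-point arithmetic scaled by 1e9, as in sp_arithmetic's FixedU64.
	const UNIT: u128 = 1_000_000_000;
	let end_price: u128 = 10;
	// One block into a 20-block leadin: when = 1/20.
	let when = UNIT / 20;
	// First phase of the leadin curve: factor = 100 - 180 * when.
	let factor = 100 * UNIT - 180 * when;
	assert_eq!(factor / UNIT, 91);
	// 91 * 10 = 910: the price paid by the first purchase in the test.
	assert_eq!(factor * end_price / UNIT, 910);
}
```
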
+
+#[test]
+/// Renewals have to affect the price as well; otherwise a market where everything is a renewal
+/// would not work. Renewals happening in the leadin or after are effectively competing with the
+/// open market, so it makes sense to adjust the price to what was paid there. Assuming all
+/// renewals were done in the interlude and only normal sales happen in the leadin, renewals will
+/// have no effect on the price. If there are no cores left for sale on the open market, renewals
+/// will affect the price even in the interlude, making sure renewal prices stay in the range of
+/// the open market.
+fn renewals_affect_price() {
+	sp_tracing::try_init_simple();
+	let b = 100_000;
+	let config = ConfigRecord {
+		advance_notice: 2,
+		interlude_length: 10,
+		leadin_length: 20,
+		ideal_bulk_proportion: Perbill::from_percent(100),
+		limit_cores_offered: None,
+		// Region length is in time slices (2 blocks):
+		region_length: 20,
+		renewal_bump: Perbill::from_percent(10),
+		contribution_timeout: 5,
+	};
+	TestExt::new_with_config(config).endow(1, b).execute_with(|| {
+		let price = 910;
+		assert_ok!(Broker::do_start_sales(10, 1));
+		advance_to(11);
+		let region = Broker::do_purchase(1, u64::max_value()).unwrap();
+		// Price is lower because we are already one block into the leadin:
+		let b = b - price;
+		assert_eq!(balance(1), b);
+		assert_ok!(Broker::do_assign(region, None, 1001, Final));
+		advance_to(40);
+		assert_noop!(Broker::do_purchase(1, u64::max_value()), Error::<Test>::TooEarly);
+		let core = Broker::do_renew(1, region.core).unwrap();
+		// The first renewal has the same price as the initial purchase.
+		let b = b - price;
+		assert_eq!(balance(1), b);
+		advance_to(51);
+		assert_noop!(Broker::do_purchase(1, u64::max_value()), Error::<Test>::SoldOut);
+		advance_to(81);
+		assert_ok!(Broker::do_renew(1, core));
+		// Renewal bump in effect:
+		let price = price + Perbill::from_percent(10) * price;
+		let b = b - price;
+		assert_eq!(balance(1), b);
+
+		// Move after interlude and leadin - should reduce the price.
+		advance_to(159);
+		Broker::do_renew(1, region.core).unwrap();
+		let price = price + Perbill::from_percent(10) * price;
+		let b = b - price;
+		assert_eq!(balance(1), b);
+
+		advance_to(161);
+		// Should have the reduced price now:
+		Broker::do_renew(1, region.core).unwrap();
+		let price = 100;
+		let b = b - price;
+		assert_eq!(balance(1), b);
+
+		// Price should be bumped normally again:
+		advance_to(201);
+		Broker::do_renew(1, region.core).unwrap();
+		let price = 110;
+		let b = b - price;
+		assert_eq!(balance(1), b);
 	});
 }

@@ -916,7 +985,8 @@ fn short_leases_are_cleaned() {

 #[test]
 fn leases_can_be_renewed() {
-	TestExt::new().endow(1, 1000).execute_with(|| {
+	let initial_balance = 100_000;
+	TestExt::new().endow(1, initial_balance).execute_with(|| {
 		// Timeslice period is 2.
 		//
 		// Sale 1 starts at block 7, Sale 2 starts at 13.
@@ -927,13 +997,13 @@ fn leases_can_be_renewed() {
 		// Start the sales with only one core for this lease.
 		assert_ok!(Broker::do_start_sales(100, 0));

-		// Advance to sale period 1, we should get an AllowedRenewal for task 2001 for the next
+		// Advance to sale period 1, we should get a PotentialRenewal for task 2001 for the next
 		// sale.
 		advance_sale_period();
 		assert_eq!(
-			AllowedRenewals::<Test>::get(AllowedRenewalId { core: 0, when: 10 }),
-			Some(AllowedRenewalRecord {
-				price: 100,
+			PotentialRenewals::<Test>::get(PotentialRenewalId { core: 0, when: 10 }),
+			Some(PotentialRenewalRecord {
+				price: 1000,
 				completion: CompletionStatus::Complete(
 					vec![ScheduleItem { mask: CoreMask::complete(), assignment: Task(2001) }]
 						.try_into()
@@ -947,8 +1017,8 @@ fn leases_can_be_renewed() {
 		// Advance to sale period 2, where we can renew.
 		advance_sale_period();
 		assert_ok!(Broker::do_renew(1, 0));
-		// We renew for the base price of the previous sale period.
-		assert_eq!(balance(1), 900);
+		// We renew for the price of the previous sale period.
+		assert_eq!(balance(1), initial_balance - 1000);

 		// We just renewed for this period.
 		advance_sale_period();
@@ -1023,14 +1093,14 @@ fn short_leases_cannot_be_renewed() {
 		// The lease is removed.
 		assert_eq!(Leases::<Test>::get().len(), 0);

-		// We should have got an entry in AllowedRenewals, but we don't because rotate_sale
+		// We should have got an entry in PotentialRenewals, but we don't because rotate_sale
 		// schedules leases a period in advance. This renewal should be in the period after next
 		// because while bootstrapping our way into the sale periods, we give everything a lease for
 		// period 1, so they can renew for period 2. So we have a core until the end of period 1,
 		// but we are not marked as able to renew because we expired before sale period 1 starts.
 		//
 		// This should be fixed.
-		assert_eq!(AllowedRenewals::<Test>::get(AllowedRenewalId { core: 0, when: 10 }), None);
+		assert_eq!(PotentialRenewals::<Test>::get(PotentialRenewalId { core: 0, when: 10 }), None);

 		// And the lease has been removed from storage.
 		assert_eq!(Leases::<Test>::get().len(), 0);
@@ -1102,7 +1172,7 @@ fn purchase_requires_valid_status_and_sale_info() {
 		let mut dummy_sale = SaleInfoRecord {
 			sale_start: 0,
 			leadin_length: 0,
-			price: 200,
+			end_price: 200,
 			sellout_price: None,
 			region_begin: 0,
 			region_end: 3,
@@ -1144,7 +1214,7 @@ fn renewal_requires_valid_status_and_sale_info() {
 		let mut dummy_sale = SaleInfoRecord {
 			sale_start: 0,
 			leadin_length: 0,
-			price: 200,
+			end_price: 200,
 			sellout_price: None,
 			region_begin: 0,
 			region_end: 3,
@@ -1163,11 +1233,11 @@ fn renewal_requires_valid_status_and_sale_info() {
 		assert_ok!(Broker::do_start_sales(200, 1));
 		assert_noop!(Broker::do_renew(1, 1), Error::<Test>::NotAllowed);

-		let record = AllowedRenewalRecord {
+		let record = PotentialRenewalRecord {
 			price: 100,
 			completion: CompletionStatus::Partial(CoreMask::from_chunk(0, 20)),
 		};
-		AllowedRenewals::<Test>::insert(AllowedRenewalId { core: 1, when: 4 }, &record);
+		PotentialRenewals::<Test>::insert(PotentialRenewalId { core: 1, when: 4 }, &record);
 		assert_noop!(Broker::do_renew(1, 1), Error::<Test>::IncompleteAssignment);
 	});
 }
@@ -1274,7 +1344,7 @@ fn config_works() {

 /// Ensure that a lease that ended before `start_sales` was called can be renewed.
 #[test]
 fn renewal_works_leases_ended_before_start_sales() {
-	TestExt::new().endow(1, 1000).execute_with(|| {
+	TestExt::new().endow(1, 100_000).execute_with(|| {
 		let config = Configuration::<Test>::get().unwrap();

 		// This lease ended before `start_sales` was called.
@@ -1304,7 +1374,7 @@ fn renewal_works_leases_ended_before_start_sales() {
 		let new_core = Broker::do_renew(1, 0).unwrap();
 		// Renewing the active lease doesn't work.
 		assert_noop!(Broker::do_renew(1, 1), Error::<Test>::SoldOut);
-		assert_eq!(balance(1), 900);
+		assert_eq!(balance(1), 99000);

 		// This initializes the third sale and the period 2.
 		advance_sale_period();
@@ -1312,7 +1382,7 @@ fn renewal_works_leases_ended_before_start_sales() {

 		// Renewing the active lease doesn't work.
 		assert_noop!(Broker::do_renew(1, 0), Error::<Test>::SoldOut);
-		assert_eq!(balance(1), 800);
+		assert_eq!(balance(1), 98900);

 		// All leases should have ended
 		assert!(Leases::<Test>::get().is_empty());
@@ -1324,7 +1394,7 @@ fn renewal_works_leases_ended_before_start_sales() {
 		assert_eq!(0, Broker::do_renew(1, new_core).unwrap());
 		// Renew the task 2.
 		assert_eq!(1, Broker::do_renew(1, 0).unwrap());
-		assert_eq!(balance(1), 600);
+		assert_eq!(balance(1), 98790);

 		// This initializes the fifth sale and the period 4.
 		advance_sale_period();
diff --git a/substrate/frame/broker/src/tick_impls.rs b/substrate/frame/broker/src/tick_impls.rs
index 04e9a65bf8f67b11ef66edf7268eda74e336e65d..20637cf7b903cf73fb5d9bfe08e70013303bcb1a 100644
--- a/substrate/frame/broker/src/tick_impls.rs
+++ b/substrate/frame/broker/src/tick_impls.rs
@@ -17,10 +17,7 @@

 use super::*;
 use frame_support::{pallet_prelude::*, weights::WeightMeter};
-use sp_arithmetic::{
-	traits::{One, SaturatedConversion, Saturating, Zero},
-	FixedPointNumber,
-};
+use sp_arithmetic::traits::{One, SaturatedConversion, Saturating, Zero};
 use sp_runtime::traits::ConvertBack;
 use sp_std::{vec, vec::Vec};
 use CompletionStatus::Complete;
@@ -163,31 +160,13 @@ impl<T: Config> Pallet<T> {
 		InstaPoolIo::<T>::mutate(old_sale.region_end, |r| r.system.saturating_reduce(old_pooled));

 		// Calculate the start price for the upcoming sale.
-		let price = {
-			let offered = old_sale.cores_offered;
-			let ideal = old_sale.ideal_cores_sold;
-			let sold = old_sale.cores_sold;
-
-			let maybe_purchase_price = if offered == 0 {
-				// No cores offered for sale - no purchase price.
-				None
-			} else if sold >= ideal {
-				// Sold more than the ideal amount. We should look for the last purchase price
-				// before the sell-out. If there was no purchase at all, then we avoid having a
-				// price here so that we make no alterations to it (since otherwise we would
-				// increase it).
-				old_sale.sellout_price
-			} else {
-				// Sold less than the ideal - we fall back to the regular price.
-				Some(old_sale.price)
-			};
-			if let Some(purchase_price) = maybe_purchase_price {
-				T::PriceAdapter::adapt_price(sold.min(offered), ideal, offered)
-					.saturating_mul_int(purchase_price)
-			} else {
-				old_sale.price
-			}
-		};
+		let new_prices = T::PriceAdapter::adapt_price(SalePerformance::from_sale(&old_sale));
+
+		log::debug!(
+			"Rotated sale, new prices: {:?}, {:?}",
+			new_prices.end_price,
+			new_prices.target_price
+		);

 		// Set workload for the reserved (system, probably) workloads.
 		let region_begin = old_sale.region_end;
@@ -220,12 +199,15 @@ impl<T: Config> Pallet<T> {
 			let expire = until < region_end;
 			if expire {
 				// last time for this one - make it renewable in the next sale.
-				let renewal_id = AllowedRenewalId { core: first_core, when: region_end };
-				let record = AllowedRenewalRecord { price, completion: Complete(schedule) };
-				AllowedRenewals::<T>::insert(renewal_id, &record);
+				let renewal_id = PotentialRenewalId { core: first_core, when: region_end };
+				let record = PotentialRenewalRecord {
+					price: new_prices.target_price,
+					completion: Complete(schedule),
+				};
+				PotentialRenewals::<T>::insert(renewal_id, &record);
 				Self::deposit_event(Event::Renewable {
 					core: first_core,
-					price,
+					price: new_prices.target_price,
 					begin: region_end,
 					workload: record.completion.drain_complete().unwrap_or_default(),
 				});
@@ -244,12 +226,19 @@ impl<T: Config> Pallet<T> {
 		let sale_start = now.saturating_add(config.interlude_length);
 		let leadin_length = config.leadin_length;
 		let ideal_cores_sold = (config.ideal_bulk_proportion * cores_offered as u32) as u16;
+		let sellout_price = if cores_offered > 0 {
+			// Pre-seed the sellout price: if no core ends up being sold, the price was too
+			// high and the next sale has to adjust downwards.
+			Some(new_prices.end_price)
+		} else {
+			None
+		};
+
 		// Update SaleInfo
 		let new_sale = SaleInfoRecord {
 			sale_start,
 			leadin_length,
-			price,
-			sellout_price: None,
+			end_price: new_prices.end_price,
+			sellout_price,
 			region_begin,
 			region_end,
 			first_core,
@@ -257,12 +246,13 @@ impl<T: Config> Pallet<T> {
 			cores_offered,
 			cores_sold: 0,
 		};
+
 		SaleInfo::<T>::put(&new_sale);
 		Self::deposit_event(Event::SaleInitialized {
 			sale_start,
 			leadin_length,
 			start_price: Self::sale_price(&new_sale, now),
-			regular_price: price,
+			end_price: new_prices.end_price,
 			region_begin,
 			region_end,
 			ideal_cores_sold,
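Taken together, `rotate_sale`, `adapt_price`, and `sale_price` form a simple feedback loop: the last sell-out price becomes the next target price (reached halfway through the leadin), and roughly a tenth of it becomes the next end price. A rough standalone sketch of one iteration of that loop on plain integers (illustrative only; the pallet uses `FixedU64` and its own types, and guards against a zero price slightly differently):

```rust
/// One step of the price feedback loop: returns (end_price, target_price).
fn next_prices(sellout_price: Option<u64>, end_price: u64) -> (u64, u64) {
	match sellout_price {
		// A sell-out price exists: target it, floor at a tenth of it
		// (never letting the floor collapse to zero).
		Some(sellout) => ((sellout / 10).max(1), sellout),
		// Nothing was ever offered: keep the floor, aim ten times higher.
		None => (end_price, end_price * 10),
	}
}

fn main() {
	// Sold out at 1000 -> the next sale opens at 100 * 100 = 10_000,
	// passes the target 1000 mid-leadin, and bottoms out at 100.
	assert_eq!(next_prices(Some(1000), 100), (100, 1000));
	// No sale data at all -> keep the end price, optimize for 10x.
	assert_eq!(next_prices(None, 100), (100, 1000));
}
```
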
diff --git a/substrate/frame/broker/src/types.rs b/substrate/frame/broker/src/types.rs
index f2cae9a41ad480b1e4608a4d556b10d166282c1d..885cac9a5c23d47add53ce4e2eff02da49d3cdc9 100644
--- a/substrate/frame/broker/src/types.rs
+++ b/substrate/frame/broker/src/types.rs
@@ -152,25 +152,28 @@ impl CompletionStatus {
 	}
 }

-/// The identity of a possible Core workload renewal.
+/// The identity of a possibly renewable Core workload.
 #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
-pub struct AllowedRenewalId {
+pub struct PotentialRenewalId {
 	/// The core whose workload at the sale ending with `when` may be renewed to begin at `when`.
 	pub core: CoreIndex,
 	/// The point in time that the renewable workload on `core` ends and a fresh renewal may begin.
 	pub when: Timeslice,
 }

-/// A record of an allowed renewal.
+/// A record of a potential renewal.
+///
+/// The renewal will only actually be allowed if `CompletionStatus` is `Complete` at the time of
+/// renewal.
 #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
-pub struct AllowedRenewalRecord<Balance> {
+pub struct PotentialRenewalRecord<Balance> {
 	/// The price for which the next renewal can be made.
 	pub price: Balance,
 	/// The workload which will be scheduled on the Core in the case a renewal is made, or if
 	/// incomplete, then the parts of the core which have been scheduled.
 	pub completion: CompletionStatus,
 }
-pub type AllowedRenewalRecordOf<T> = AllowedRenewalRecord<BalanceOf<T>>;
+pub type PotentialRenewalRecordOf<T> = PotentialRenewalRecord<BalanceOf<T>>;

 /// General status of the system.
 #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
@@ -211,7 +214,7 @@ pub struct SaleInfoRecord<Balance, BlockNumber> {
 	/// The length in blocks of the Leadin Period (where the price is decreasing).
 	pub leadin_length: BlockNumber,
 	/// The price of Bulk Coretime after the Leadin Period.
-	pub price: Balance,
+	pub end_price: Balance,
 	/// The first timeslice of the Regions which are being sold in this sale.
 	pub region_begin: Timeslice,
 	/// The timeslice on which the Regions which are being sold in the sale terminate. (i.e. One
@@ -225,8 +228,9 @@ pub struct SaleInfoRecord<Balance, BlockNumber> {
 	/// The index of the first core which is for sale. Core of Regions which are sold have
 	/// incrementing indices from this.
 	pub first_core: CoreIndex,
-	/// The latest price at which Bulk Coretime was purchased until surpassing the ideal number of
-	/// cores were sold.
+	/// The price at which cores have been sold out.
+	///
+	/// Will only be `None` if no core was offered for sale.
 	pub sellout_price: Option<Balance>,
 	/// Number of cores which have been sold; never more than cores_offered.
 	pub cores_sold: CoreIndex,
@@ -263,8 +267,11 @@ pub struct ConfigRecord {
 	pub leadin_length: BlockNumber,
 	/// The length in timeslices of Regions which are up for sale in forthcoming sales.
 	pub region_length: Timeslice,
-	/// The proportion of cores available for sale which should be sold in order for the price
-	/// to remain the same in the next sale.
+	/// The proportion of cores available for sale which should be sold.
+	///
+	/// If more cores are sold than this, then further sales will no longer be considered in
+	/// determining the sellout price. In other words, the sellout price will be the last price
+	/// paid, without going over this limit.
 	pub ideal_bulk_proportion: Perbill,
 	/// An artificial limit to the number of cores which are allowed to be sold. If `Some` then
 	/// no more cores will be sold than this.
diff --git a/substrate/frame/broker/src/utility_impls.rs b/substrate/frame/broker/src/utility_impls.rs
index 4163817a8b584bd949210aff383398ff50b95d58..9cceb7f970a9f55bf8a9420937b1f8d2c7826479 100644
--- a/substrate/frame/broker/src/utility_impls.rs
+++ b/substrate/frame/broker/src/utility_impls.rs
@@ -63,7 +63,7 @@ impl<T: Config> Pallet<T> {
 	pub fn sale_price(sale: &SaleInfoRecordOf<T>, now: BlockNumberFor<T>) -> BalanceOf<T> {
 		let num = now.saturating_sub(sale.sale_start).min(sale.leadin_length).saturated_into();
 		let through = FixedU64::from_rational(num, sale.leadin_length.saturated_into());
-		T::PriceAdapter::leadin_factor_at(through).saturating_mul_int(sale.price)
+		T::PriceAdapter::leadin_factor_at(through).saturating_mul_int(sale.end_price)
 	}

 	pub(crate) fn charge(who: &T::AccountId, amount: BalanceOf<T>) -> DispatchResult {
@@ -72,6 +72,25 @@ impl<T: Config> Pallet<T> {
 		Ok(())
 	}

+	/// Buy a core at the specified price (the price is to be determined by the caller).
+	///
+	/// Note: It is the responsibility of the caller to write the changed `SaleInfoRecordOf` back
+	/// to storage.
+	pub(crate) fn purchase_core(
+		who: &T::AccountId,
+		price: BalanceOf<T>,
+		sale: &mut SaleInfoRecordOf<T>,
+	) -> Result<CoreIndex, DispatchError> {
+		Self::charge(who, price)?;
+		log::debug!("Purchased core at: {:?}", price);
+		let core = sale.first_core.saturating_add(sale.cores_sold);
+		sale.cores_sold.saturating_inc();
+		if sale.cores_sold <= sale.ideal_cores_sold || sale.sellout_price.is_none() {
+			sale.sellout_price = Some(price);
+		}
+		Ok(core)
+	}
+
 	pub fn issue(
 		core: CoreIndex,
 		begin: Timeslice,
diff --git a/substrate/frame/broker/src/weights.rs b/substrate/frame/broker/src/weights.rs
index 2aa1c282a41d8a98e6a0a824e5eaa150922b2356..d9d9d348e47e9db76731b41fd65086c689fc86f0 100644
--- a/substrate/frame/broker/src/weights.rs
+++ b/substrate/frame/broker/src/weights.rs
@@ -18,27 +18,25 @@
 //! Autogenerated weights for `pallet_broker`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-05-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_broker -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/broker/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_broker +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/broker/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -90,8 +88,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_624_000 picoseconds. - Weight::from_parts(2_804_000, 0) + // Minimum execution time: 1_945_000 picoseconds. + Weight::from_parts(2_142_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -100,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 18_451_000 picoseconds. - Weight::from_parts(18_853_000, 7496) + // Minimum execution time: 16_274_000 picoseconds. + Weight::from_parts(16_828_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -111,8 +109,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 16_899_000 picoseconds. - Weight::from_parts(17_645_000, 7496) + // Minimum execution time: 15_080_000 picoseconds. + Weight::from_parts(15_874_000, 7496) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -122,19 +120,19 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 10_239_000 picoseconds. - Weight::from_parts(10_754_000, 1526) + // Minimum execution time: 8_761_000 picoseconds. 
+ Weight::from_parts(9_203_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: `Broker::InstaPoolIo` (r:3 w:3) - /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -146,12 +144,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 51_250_000 picoseconds. - Weight::from_parts(54_643_012, 8499) - // Standard Error: 147 - .saturating_add(Weight::from_parts(18, 0).saturating_mul(n.into())) + // Minimum execution time: 26_057_000 picoseconds. + Weight::from_parts(46_673_357, 8499) + // Standard Error: 456 + .saturating_add(Weight::from_parts(2_677, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(16_u64)) + .saturating_add(T::DbWeight::get().writes(15_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) @@ -162,13 +160,13 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Digest` (r:1 w:0) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `635` - // Estimated: `2120` - // Minimum execution time: 43_660_000 picoseconds. - Weight::from_parts(45_543_000, 2120) + // Measured: `651` + // Estimated: `2136` + // Minimum execution time: 40_907_000 picoseconds. 
+ Weight::from_parts(42_566_000, 2136) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -178,8 +176,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:2) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:2) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `Authorship::Author` (r:1 w:0) /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:0) @@ -188,43 +186,43 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `753` + // Measured: `769` // Estimated: `4698` - // Minimum execution time: 63_122_000 picoseconds. - Weight::from_parts(64_366_000, 4698) + // Minimum execution time: 65_209_000 picoseconds. + Weight::from_parts(68_604_000, 4698) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 17_552_000 picoseconds. - Weight::from_parts(18_251_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 15_860_000 picoseconds. + Weight::from_parts(16_393_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Regions` (r:1 w:2) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn partition() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 18_551_000 picoseconds. - Weight::from_parts(19_727_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 17_651_000 picoseconds. + Weight::from_parts(18_088_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Broker::Regions` (r:1 w:3) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 20_636_000 picoseconds. - Weight::from_parts(21_060_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 18_576_000 picoseconds. 
+ Weight::from_parts(19_810_000, 3551) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -233,22 +231,22 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn assign() -> Weight { // Proof Size summary in bytes: - // Measured: `740` + // Measured: `741` // Estimated: `4681` - // Minimum execution time: 32_394_000 picoseconds. - Weight::from_parts(33_324_000, 4681) + // Minimum execution time: 31_015_000 picoseconds. + Weight::from_parts(31_932_000, 4681) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolIo` (r:2 w:2) @@ -257,10 +255,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn pool() -> Weight { // Proof Size summary in bytes: - // Measured: `775` + // Measured: `776` // Estimated: `5996` - // Minimum execution time: 38_128_000 picoseconds. - Weight::from_parts(39_274_000, 5996) + // Minimum execution time: 36_473_000 picoseconds. + Weight::from_parts(37_382_000, 5996) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -275,10 +273,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 70_453_000 picoseconds. - Weight::from_parts(70_652_822, 6196) - // Standard Error: 75_524 - .saturating_add(Weight::from_parts(2_335_289, 0).saturating_mul(m.into())) + // Minimum execution time: 64_957_000 picoseconds. + Weight::from_parts(66_024_232, 6196) + // Standard Error: 50_170 + .saturating_add(Weight::from_parts(1_290_632, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -290,21 +288,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 43_945_000 picoseconds. - Weight::from_parts(45_249_000, 3593) + // Minimum execution time: 39_939_000 picoseconds. 
+ Weight::from_parts(40_788_000, 3593) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn drop_region() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `3550` - // Minimum execution time: 30_680_000 picoseconds. - Weight::from_parts(32_995_000, 3550) + // Measured: `604` + // Estimated: `3551` + // Minimum execution time: 31_709_000 picoseconds. + Weight::from_parts(37_559_000, 3551) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -318,8 +316,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 48_053_000 picoseconds. - Weight::from_parts(51_364_000, 3533) + // Minimum execution time: 42_895_000 picoseconds. + Weight::from_parts(53_945_000, 3533) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -335,21 +333,21 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `995` // Estimated: `3593` - // Minimum execution time: 57_372_000 picoseconds. - Weight::from_parts(59_466_000, 3593) + // Minimum execution time: 50_770_000 picoseconds. + Weight::from_parts(63_117_000, 3593) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:1) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:1) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 27_768_000 picoseconds. - Weight::from_parts(29_000_000, 4698) + // Minimum execution time: 33_396_000 picoseconds. + Weight::from_parts(36_247_000, 4698) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -358,20 +356,18 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(5_201_705, 0) + // Minimum execution time: 3_625_000 picoseconds. + Weight::from_parts(4_011_396, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(n: u32, ) -> Weight { + fn process_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 6_889_000 picoseconds. 
- Weight::from_parts(7_380_363, 1487) - // Standard Error: 21 - .saturating_add(Weight::from_parts(63, 0).saturating_mul(n.into())) + // Minimum execution time: 6_217_000 picoseconds. + Weight::from_parts(6_608_394, 1487) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -389,8 +385,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `972` // Estimated: `4437` - // Minimum execution time: 50_156_000 picoseconds. - Weight::from_parts(51_610_000, 4437) + // Minimum execution time: 46_853_000 picoseconds. + Weight::from_parts(47_740_000, 4437) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -405,14 +401,12 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(n: u32, ) -> Weight { + fn rotate_sale(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 38_246_000 picoseconds. - Weight::from_parts(40_008_850, 8499) - // Standard Error: 94 - .saturating_add(Weight::from_parts(964, 0).saturating_mul(n.into())) + // Minimum execution time: 34_240_000 picoseconds. + Weight::from_parts(35_910_175, 8499) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(15_u64)) } @@ -424,8 +418,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 7_962_000 picoseconds. - Weight::from_parts(8_313_000, 3493) + // Minimum execution time: 7_083_000 picoseconds. + Weight::from_parts(7_336_000, 3493) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -437,8 +431,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 17_457_000 picoseconds. - Weight::from_parts(18_387_000, 4681) + // Minimum execution time: 15_029_000 picoseconds. + Weight::from_parts(15_567_000, 4681) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -446,8 +440,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 133_000 picoseconds. - Weight::from_parts(149_000, 0) + // Minimum execution time: 123_000 picoseconds. + Weight::from_parts(136_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -455,8 +449,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_407_000 picoseconds. - Weight::from_parts(2_634_000, 0) + // Minimum execution time: 1_775_000 picoseconds. + Weight::from_parts(1_911_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -471,8 +465,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `4068` - // Minimum execution time: 13_043_000 picoseconds. - Weight::from_parts(13_541_000, 4068) + // Minimum execution time: 11_859_000 picoseconds. 
+ Weight::from_parts(12_214_000, 4068) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -482,8 +476,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 6_606_000 picoseconds. - Weight::from_parts(6_964_000, 1526) + // Minimum execution time: 5_864_000 picoseconds. + Weight::from_parts(6_231_000, 1526) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -497,8 +491,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_624_000 picoseconds. - Weight::from_parts(2_804_000, 0) + // Minimum execution time: 1_945_000 picoseconds. + Weight::from_parts(2_142_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Reservations` (r:1 w:1) @@ -507,8 +501,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `5016` // Estimated: `7496` - // Minimum execution time: 18_451_000 picoseconds. - Weight::from_parts(18_853_000, 7496) + // Minimum execution time: 16_274_000 picoseconds. + Weight::from_parts(16_828_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -518,8 +512,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6218` // Estimated: `7496` - // Minimum execution time: 16_899_000 picoseconds. - Weight::from_parts(17_645_000, 7496) + // Minimum execution time: 15_080_000 picoseconds. + Weight::from_parts(15_874_000, 7496) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -529,19 +523,19 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 10_239_000 picoseconds. - Weight::from_parts(10_754_000, 1526) + // Minimum execution time: 8_761_000 picoseconds. + Weight::from_parts(9_203_000, 1526) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Configuration` (r:1 w:0) /// Proof: `Broker::Configuration` (`max_values`: Some(1), `max_size`: Some(31), added: 526, mode: `MaxEncodedLen`) - /// Storage: `Broker::InstaPoolIo` (r:3 w:3) - /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Broker::Reservations` (r:1 w:0) - /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) /// Storage: `Broker::Leases` (r:1 w:1) /// Proof: `Broker::Leases` (`max_values`: Some(1), `max_size`: Some(41), added: 536, mode: `MaxEncodedLen`) + /// Storage: `Broker::Reservations` (r:1 w:0) + /// Proof: `Broker::Reservations` (`max_values`: Some(1), `max_size`: Some(6011), added: 6506, mode: `MaxEncodedLen`) + /// Storage: `Broker::InstaPoolIo` (r:3 w:3) + /// Proof: `Broker::InstaPoolIo` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:0 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) /// Storage: `Broker::Status` (r:0 w:1) @@ -553,12 +547,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `6330` // Estimated: `8499` - // Minimum execution time: 51_250_000 picoseconds. 
- Weight::from_parts(54_643_012, 8499) - // Standard Error: 147 - .saturating_add(Weight::from_parts(18, 0).saturating_mul(n.into())) + // Minimum execution time: 26_057_000 picoseconds. + Weight::from_parts(46_673_357, 8499) + // Standard Error: 456 + .saturating_add(Weight::from_parts(2_677, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(16_u64)) + .saturating_add(RocksDbWeight::get().writes(15_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) @@ -569,13 +563,13 @@ impl WeightInfo for () { /// Storage: `System::Digest` (r:1 w:0) /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Broker::Regions` (r:0 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn purchase() -> Weight { // Proof Size summary in bytes: - // Measured: `635` - // Estimated: `2120` - // Minimum execution time: 43_660_000 picoseconds. - Weight::from_parts(45_543_000, 2120) + // Measured: `651` + // Estimated: `2136` + // Minimum execution time: 40_907_000 picoseconds. + Weight::from_parts(42_566_000, 2136) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -585,8 +579,8 @@ impl WeightInfo for () { /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::SaleInfo` (r:1 w:1) /// Proof: `Broker::SaleInfo` (`max_values`: Some(1), `max_size`: Some(57), added: 552, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:2) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:2) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) /// Storage: `Authorship::Author` (r:1 w:0) /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `System::Digest` (r:1 w:0) @@ -595,43 +589,43 @@ impl WeightInfo for () { /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn renew() -> Weight { // Proof Size summary in bytes: - // Measured: `753` + // Measured: `769` // Estimated: `4698` - // Minimum execution time: 63_122_000 picoseconds. - Weight::from_parts(64_366_000, 4698) + // Minimum execution time: 65_209_000 picoseconds. + Weight::from_parts(68_604_000, 4698) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 17_552_000 picoseconds. - Weight::from_parts(18_251_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 15_860_000 picoseconds. 
+ Weight::from_parts(16_393_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Regions` (r:1 w:2) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn partition() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 18_551_000 picoseconds. - Weight::from_parts(19_727_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 17_651_000 picoseconds. + Weight::from_parts(18_088_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Broker::Regions` (r:1 w:3) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn interlace() -> Weight { // Proof Size summary in bytes: - // Measured: `495` - // Estimated: `3550` - // Minimum execution time: 20_636_000 picoseconds. - Weight::from_parts(21_060_000, 3550) + // Measured: `496` + // Estimated: `3551` + // Minimum execution time: 18_576_000 picoseconds. + Weight::from_parts(19_810_000, 3551) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -640,22 +634,22 @@ impl WeightInfo for () { /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) fn assign() -> Weight { // Proof Size summary in bytes: - // Measured: `740` + // Measured: `741` // Estimated: `4681` - // Minimum execution time: 32_394_000 picoseconds. - Weight::from_parts(33_324_000, 4681) + // Minimum execution time: 31_015_000 picoseconds. + Weight::from_parts(31_932_000, 4681) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) /// Storage: `Broker::Workplan` (r:1 w:1) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// Storage: `Broker::InstaPoolIo` (r:2 w:2) @@ -664,10 +658,10 @@ impl WeightInfo for () { /// Proof: `Broker::InstaPoolContribution` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) fn pool() -> Weight { // Proof Size summary in bytes: - // Measured: `775` + // Measured: `776` // Estimated: `5996` - // Minimum execution time: 38_128_000 picoseconds. 
- Weight::from_parts(39_274_000, 5996) + // Minimum execution time: 36_473_000 picoseconds. + Weight::from_parts(37_382_000, 5996) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -682,10 +676,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `859` // Estimated: `6196 + m * (2520 ±0)` - // Minimum execution time: 70_453_000 picoseconds. - Weight::from_parts(70_652_822, 6196) - // Standard Error: 75_524 - .saturating_add(Weight::from_parts(2_335_289, 0).saturating_mul(m.into())) + // Minimum execution time: 64_957_000 picoseconds. + Weight::from_parts(66_024_232, 6196) + // Standard Error: 50_170 + .saturating_add(Weight::from_parts(1_290_632, 0).saturating_mul(m.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(m.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -697,21 +691,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 43_945_000 picoseconds. - Weight::from_parts(45_249_000, 3593) + // Minimum execution time: 39_939_000 picoseconds. + Weight::from_parts(40_788_000, 3593) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) /// Storage: `Broker::Regions` (r:1 w:1) - /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Proof: `Broker::Regions` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) fn drop_region() -> Weight { // Proof Size summary in bytes: - // Measured: `603` - // Estimated: `3550` - // Minimum execution time: 30_680_000 picoseconds. - Weight::from_parts(32_995_000, 3550) + // Measured: `604` + // Estimated: `3551` + // Minimum execution time: 31_709_000 picoseconds. + Weight::from_parts(37_559_000, 3551) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -725,8 +719,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `601` // Estimated: `3533` - // Minimum execution time: 48_053_000 picoseconds. - Weight::from_parts(51_364_000, 3533) + // Minimum execution time: 42_895_000 picoseconds. + Weight::from_parts(53_945_000, 3533) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -742,21 +736,21 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `995` // Estimated: `3593` - // Minimum execution time: 57_372_000 picoseconds. - Weight::from_parts(59_466_000, 3593) + // Minimum execution time: 50_770_000 picoseconds. 
+ Weight::from_parts(63_117_000, 3593) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:0) /// Proof: `Broker::Status` (`max_values`: Some(1), `max_size`: Some(18), added: 513, mode: `MaxEncodedLen`) - /// Storage: `Broker::AllowedRenewals` (r:1 w:1) - /// Proof: `Broker::AllowedRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) + /// Storage: `Broker::PotentialRenewals` (r:1 w:1) + /// Proof: `Broker::PotentialRenewals` (`max_values`: None, `max_size`: Some(1233), added: 3708, mode: `MaxEncodedLen`) fn drop_renewal() -> Weight { // Proof Size summary in bytes: // Measured: `661` // Estimated: `4698` - // Minimum execution time: 27_768_000 picoseconds. - Weight::from_parts(29_000_000, 4698) + // Minimum execution time: 33_396_000 picoseconds. + Weight::from_parts(36_247_000, 4698) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -765,20 +759,18 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_588_000 picoseconds. - Weight::from_parts(5_201_705, 0) + // Minimum execution time: 3_625_000 picoseconds. + Weight::from_parts(4_011_396, 0) } /// Storage: `Broker::CoreCountInbox` (r:1 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn process_core_count(n: u32, ) -> Weight { + fn process_core_count(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `404` // Estimated: `1487` - // Minimum execution time: 6_889_000 picoseconds. - Weight::from_parts(7_380_363, 1487) - // Standard Error: 21 - .saturating_add(Weight::from_parts(63, 0).saturating_mul(n.into())) + // Minimum execution time: 6_217_000 picoseconds. + Weight::from_parts(6_608_394, 1487) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -796,8 +788,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `972` // Estimated: `4437` - // Minimum execution time: 50_156_000 picoseconds. - Weight::from_parts(51_610_000, 4437) + // Minimum execution time: 46_853_000 picoseconds. + Weight::from_parts(47_740_000, 4437) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -812,14 +804,12 @@ impl WeightInfo for () { /// Storage: `Broker::Workplan` (r:0 w:10) /// Proof: `Broker::Workplan` (`max_values`: None, `max_size`: Some(1216), added: 3691, mode: `MaxEncodedLen`) /// The range of component `n` is `[0, 1000]`. - fn rotate_sale(n: u32, ) -> Weight { + fn rotate_sale(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6281` // Estimated: `8499` - // Minimum execution time: 38_246_000 picoseconds. - Weight::from_parts(40_008_850, 8499) - // Standard Error: 94 - .saturating_add(Weight::from_parts(964, 0).saturating_mul(n.into())) + // Minimum execution time: 34_240_000 picoseconds. + Weight::from_parts(35_910_175, 8499) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(15_u64)) } @@ -831,8 +821,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3493` - // Minimum execution time: 7_962_000 picoseconds. - Weight::from_parts(8_313_000, 3493) + // Minimum execution time: 7_083_000 picoseconds. 
+ Weight::from_parts(7_336_000, 3493) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -844,8 +834,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1423` // Estimated: `4681` - // Minimum execution time: 17_457_000 picoseconds. - Weight::from_parts(18_387_000, 4681) + // Minimum execution time: 15_029_000 picoseconds. + Weight::from_parts(15_567_000, 4681) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -853,8 +843,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 133_000 picoseconds. - Weight::from_parts(149_000, 0) + // Minimum execution time: 123_000 picoseconds. + Weight::from_parts(136_000, 0) } /// Storage: `Broker::CoreCountInbox` (r:0 w:1) /// Proof: `Broker::CoreCountInbox` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) @@ -862,8 +852,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_407_000 picoseconds. - Weight::from_parts(2_634_000, 0) + // Minimum execution time: 1_775_000 picoseconds. + Weight::from_parts(1_911_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Broker::Status` (r:1 w:1) @@ -878,8 +868,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `603` // Estimated: `4068` - // Minimum execution time: 13_043_000 picoseconds. - Weight::from_parts(13_541_000, 4068) + // Minimum execution time: 11_859_000 picoseconds. + Weight::from_parts(12_214_000, 4068) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -889,8 +879,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `239` // Estimated: `1526` - // Minimum execution time: 6_606_000 picoseconds. - Weight::from_parts(6_964_000, 1526) + // Minimum execution time: 5_864_000 picoseconds. 
+		Weight::from_parts(6_231_000, 1526)
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml
index 14a5e25e13da6a246e3935c90839a350bf773d2e..09271632df54b74601d5d318a4749bffcbc86777 100644
--- a/substrate/frame/child-bounties/Cargo.toml
+++ b/substrate/frame/child-bounties/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 log = { workspace = true }
diff --git a/substrate/frame/child-bounties/src/benchmarking.rs b/substrate/frame/child-bounties/src/benchmarking.rs
index 1973564d0dc1dacca9d30421cda2b4b03771cef9..947cfcfaa96a2daeb88c68c9cda501c42554fbe3 100644
--- a/substrate/frame/child-bounties/src/benchmarking.rs
+++ b/substrate/frame/child-bounties/src/benchmarking.rs
@@ -109,7 +109,7 @@ fn activate_bounty(
 		child_bounty_setup.reason.clone(),
 	)?;
 
-	child_bounty_setup.bounty_id = Bounties::<T>::bounty_count() - 1;
+	child_bounty_setup.bounty_id = pallet_bounties::BountyCount::<T>::get() - 1;
 
 	let approve_origin =
 		T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
diff --git a/substrate/frame/child-bounties/src/lib.rs b/substrate/frame/child-bounties/src/lib.rs
index 1eedeaa5a1ae3c5aa6b3804117921eab63768d1e..04a1f9799cb86e8831a849620b8288fc0692a9c8 100644
--- a/substrate/frame/child-bounties/src/lib.rs
+++ b/substrate/frame/child-bounties/src/lib.rs
@@ -181,19 +181,16 @@ pub mod pallet {
 	/// Number of total child bounties.
 	#[pallet::storage]
-	#[pallet::getter(fn child_bounty_count)]
 	pub type ChildBountyCount<T: Config> = StorageValue<_, BountyIndex, ValueQuery>;
 
 	/// Number of child bounties per parent bounty.
 	/// Map of parent bounty index to number of child bounties.
 	#[pallet::storage]
-	#[pallet::getter(fn parent_child_bounties)]
 	pub type ParentChildBounties<T: Config> =
 		StorageMap<_, Twox64Concat, BountyIndex, u32, ValueQuery>;
 
 	/// Child bounties that have been added.
 	#[pallet::storage]
-	#[pallet::getter(fn child_bounties)]
 	pub type ChildBounties<T: Config> = StorageDoubleMap<
 		_,
 		Twox64Concat,
 		BountyIndex,
@@ -205,13 +202,11 @@ pub mod pallet {
 	/// The description of each child-bounty.
 	#[pallet::storage]
-	#[pallet::getter(fn child_bounty_descriptions)]
 	pub type ChildBountyDescriptions<T: Config> =
 		StorageMap<_, Twox64Concat, BountyIndex, BoundedVec<u8, T::MaximumReasonLength>>;
 
 	/// The cumulative child-bounty curator fee for each parent bounty.
 	#[pallet::storage]
-	#[pallet::getter(fn children_curator_fees)]
 	pub type ChildrenCuratorFees<T: Config> =
 		StorageMap<_, Twox64Concat, BountyIndex, BalanceOf<T>, ValueQuery>;
 
@@ -251,7 +246,7 @@
 			description.try_into().map_err(|_| BountiesError::<T>::ReasonTooBig)?;
 		ensure!(value >= T::ChildBountyValueMinimum::get(), BountiesError::<T>::InvalidValue);
 		ensure!(
-			Self::parent_child_bounties(parent_bounty_id) <=
+			ParentChildBounties::<T>::get(parent_bounty_id) <=
 				T::MaxActiveChildBountyCount::get() as u32,
 			Error::<T>::TooManyChildBounties,
 		);
@@ -276,15 +271,15 @@
 		)?;
 
 		// Get child-bounty ID.
-		let child_bounty_id = Self::child_bounty_count();
+		let child_bounty_id = ChildBountyCount::<T>::get();
 		let child_bounty_account = Self::child_bounty_account_id(child_bounty_id);
 
 		// Transfer funds from parent bounty to child-bounty.
T::Currency::transfer(&parent_bounty_account, &child_bounty_account, value, KeepAlive)?; // Increment the active child-bounty count. - >::mutate(parent_bounty_id, |count| count.saturating_inc()); - >::put(child_bounty_id.saturating_add(1)); + ParentChildBounties::::mutate(parent_bounty_id, |count| count.saturating_inc()); + ChildBountyCount::::put(child_bounty_id.saturating_add(1)); // Create child-bounty instance. Self::create_child_bounty( @@ -710,12 +705,12 @@ pub mod pallet { }); // Update the active child-bounty tracking count. - >::mutate(parent_bounty_id, |count| { + ParentChildBounties::::mutate(parent_bounty_id, |count| { count.saturating_dec() }); // Remove the child-bounty description. - >::remove(child_bounty_id); + ChildBountyDescriptions::::remove(child_bounty_id); // Remove the child-bounty instance from the state. *maybe_child_bounty = None; @@ -817,7 +812,7 @@ impl Pallet { fn ensure_bounty_active( bounty_id: BountyIndex, ) -> Result<(T::AccountId, BlockNumberFor), DispatchError> { - let parent_bounty = pallet_bounties::Pallet::::bounties(bounty_id) + let parent_bounty = pallet_bounties::Bounties::::get(bounty_id) .ok_or(BountiesError::::InvalidIndex)?; if let BountyStatus::Active { curator, update_due } = parent_bounty.get_status() { Ok((curator, update_due)) @@ -862,7 +857,7 @@ impl Pallet { ChildrenCuratorFees::::mutate(parent_bounty_id, |value| { *value = value.saturating_sub(child_bounty.fee) }); - >::mutate(parent_bounty_id, |count| { + ParentChildBounties::::mutate(parent_bounty_id, |count| { *count = count.saturating_sub(1) }); @@ -880,7 +875,7 @@ impl Pallet { debug_assert!(transfer_result.is_ok()); // Remove the child-bounty description. - >::remove(child_bounty_id); + ChildBountyDescriptions::::remove(child_bounty_id); *maybe_child_bounty = None; @@ -901,14 +896,14 @@ impl pallet_bounties::ChildBountyManager> for Pallet fn child_bounties_count( bounty_id: pallet_bounties::BountyIndex, ) -> pallet_bounties::BountyIndex { - Self::parent_child_bounties(bounty_id) + ParentChildBounties::::get(bounty_id) } fn children_curator_fees(bounty_id: pallet_bounties::BountyIndex) -> BalanceOf { // This is asked for when the parent bounty is being claimed. No use of // keeping it in state after that. Hence removing. - let children_fee_total = Self::children_curator_fees(bounty_id); - >::remove(bounty_id); + let children_fee_total = ChildrenCuratorFees::::get(bounty_id); + ChildrenCuratorFees::::remove(bounty_id); children_fee_total } } diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index 30601f821e4384827059ffc694ab0108a347bf8b..d9405d3d28977c2cac3b42a034eb5fcfe8e56e8f 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -264,7 +264,7 @@ fn add_child_bounty() { // DB check. // Check the child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -275,10 +275,13 @@ fn add_child_bounty() { ); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 1); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 1); // Check the child-bounty description status. 
- assert_eq!(ChildBounties::child_bounty_descriptions(0).unwrap(), b"12345-p1".to_vec(),); + assert_eq!( + pallet_child_bounties::ChildBountyDescriptions::::get(0).unwrap(), + b"12345-p1".to_vec(), + ); }); } @@ -340,7 +343,7 @@ fn child_bounty_assign_curator() { assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -364,7 +367,7 @@ fn child_bounty_assign_curator() { let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -441,7 +444,7 @@ fn award_claim_child_bounty() { let expected_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -479,7 +482,7 @@ fn award_claim_child_bounty() { assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 0); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); }); } @@ -528,7 +531,7 @@ fn close_child_bounty_added() { assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 0); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); // Parent-bounty account status. assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); @@ -582,7 +585,7 @@ fn close_child_bounty_active() { assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 0); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); // Ensure child-bounty curator balance is unreserved. assert_eq!(Balances::free_balance(8), 101); @@ -647,7 +650,7 @@ fn close_child_bounty_pending() { ); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 1); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 1); // Ensure no changes in child-bounty curator balance. assert_eq!(Balances::reserved_balance(8), expected_child_deposit); @@ -739,7 +742,7 @@ fn child_bounty_curator_proposed_unassign_curator() { assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, 2)); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -757,7 +760,7 @@ fn child_bounty_curator_proposed_unassign_curator() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -820,7 +823,7 @@ fn child_bounty_active_unassign_curator() { let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -838,7 +841,7 @@ fn child_bounty_active_unassign_curator() { // Verify updated child-bounty status. 
assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -859,7 +862,7 @@ fn child_bounty_active_unassign_curator() { let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -877,7 +880,7 @@ fn child_bounty_active_unassign_curator() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -896,7 +899,7 @@ fn child_bounty_active_unassign_curator() { assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(6), 0, 0)); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -914,7 +917,7 @@ fn child_bounty_active_unassign_curator() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -935,7 +938,7 @@ fn child_bounty_active_unassign_curator() { let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -963,7 +966,7 @@ fn child_bounty_active_unassign_curator() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1025,7 +1028,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1056,7 +1059,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1087,7 +1090,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { let expected_deposit = CuratorDepositMin::get(); assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1116,7 +1119,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { // Verify updated child-bounty status. assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1186,7 +1189,7 @@ fn close_parent_with_child_bounty() { assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::root(), 0, 0)); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 0); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); // Try close parent-bounty again. // Should pass this time. @@ -1235,7 +1238,7 @@ fn children_curator_fee_calculation_test() { // Propose curator for child-bounty. 
assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); // Check curator fee added to the sum. - assert_eq!(ChildBounties::children_curator_fees(0), fee); + assert_eq!(pallet_child_bounties::ChildrenCuratorFees::::get(0), fee); // Accept curator for child-bounty. assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); // Award child-bounty. @@ -1244,7 +1247,7 @@ fn children_curator_fee_calculation_test() { let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( - ChildBounties::child_bounties(0, 0).unwrap(), + pallet_child_bounties::ChildBounties::::get(0, 0).unwrap(), ChildBounty { parent_bounty: 0, value: 10, @@ -1264,7 +1267,7 @@ fn children_curator_fee_calculation_test() { assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0)); // Check the child-bounty count. - assert_eq!(ChildBounties::parent_child_bounties(0), 0); + assert_eq!(pallet_child_bounties::ParentChildBounties::::get(0), 0); // Award the parent bounty. assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 9)); diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml index 850390409abc9646afbdb058dd2392d9f35869aa..d966370238bc4f2c8597555187a1830280752732 100644 --- a/substrate/frame/collective/Cargo.toml +++ b/substrate/frame/collective/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 52c8fceb504b2aec9b7314c9cf4c5916ec9071c8..bd4ded1a1170c9356730eaef9ffbc28e0b6a356a 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] paste = { version = "1.0", default-features = false } bitflags = "1.3" -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ] } diff --git a/substrate/frame/contracts/README.md b/substrate/frame/contracts/README.md index 09dc770300ca8491af062bbde9e820deccdd1341..6440f14b9eced560971304b5f809dc0b64cde50a 100644 --- a/substrate/frame/contracts/README.md +++ b/substrate/frame/contracts/README.md @@ -34,19 +34,6 @@ calls are reverted. Assuming correct error handling by contract A, A's other cal One `ref_time` `Weight` is defined as one picosecond of execution time on the runtime's reference machine. -#### Schedule - -The `Schedule` is where, among other things, the cost of every action a contract can do is defined. These costs are derived -from the benchmarks of this pallet. Instead of looking at the raw benchmark results it is advised to look at the `Schedule` -if one wants to manually inspect the performance characteristics. 
The `Schedule` can be printed like this:
-
-```sh
-RUST_LOG=runtime::contracts=info cargo run --features runtime-benchmarks --bin substrate-node -- benchmark pallet --extra -p pallet_contracts -e print_schedule
-
-```
-
-Please note that the `Schedule` will be printed multiple times. This is because we are (ab)using a benchmark to print
-the struct.
-
 ### Revert Behaviour
 
 Contract call failures are not cascading. When failures occur in a sub-call, they do not "bubble up", and the call will
@@ -125,7 +112,7 @@ Contracts can emit messages to the client when called as RPC through the API.
 This is exposed in [ink!](https://use.ink) via
 [`ink_env::debug_message()`](https://paritytech.github.io/ink/ink_env/fn.debug_message.html).
 
-Those messages are gathered into an internal buffer and sent to the RPC client. It is up the the individual client if
+Those messages are gathered into an internal buffer and sent to the RPC client. It is up to the individual client if
 and how those messages are presented to the user. This buffer is also printed as a debug message.
 
 In order to see these messages on the node console the log level for the
diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml
index 387c3ca39d049f7674541e24a36b300683326260..a348b7308d1232109a4763dcc764fdac87deee6d 100644
--- a/substrate/frame/contracts/mock-network/Cargo.toml
+++ b/substrate/frame/contracts/mock-network/Cargo.toml
@@ -12,7 +12,7 @@ description = "A mock network for testing pallet-contracts"
 workspace = true
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] }
 
 frame-support = { path = "../../support", default-features = false }
 frame-system = { path = "../../system", default-features = false }
diff --git a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs
index cc81b6bd636e54187e3ba09500fc39b472ed1943..bfdf6dd97eaf17fc0601f78c7c179e411c3a143d 100644
--- a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs
+++ b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs
@@ -47,17 +47,15 @@ pub mod pallet {
 	pub struct Pallet<T>(_);
 
 	#[pallet::storage]
-	#[pallet::getter(fn parachain_id)]
 	pub(super) type ParachainId<T: Config> = StorageValue<_, ParaId, ValueQuery>;
 
 	#[pallet::storage]
-	#[pallet::getter(fn received_dmp)]
 	/// A queue of received DMP messages
 	pub(super) type ReceivedDmp<T: Config> = StorageValue<_, Vec<Xcm<T::RuntimeCall>>, ValueQuery>;
 
 	impl<T: Config> Get<ParaId> for Pallet<T> {
 		fn get() -> ParaId {
-			Self::parachain_id()
+			ParachainId::<T>::get()
 		}
 	}
 
@@ -89,6 +87,14 @@ pub mod pallet {
 			ParachainId::<T>::put(para_id);
 		}
 
+		pub fn parachain_id() -> ParaId {
+			ParachainId::<T>::get()
+		}
+
+		pub fn received_dmp() -> Vec<Xcm<T::RuntimeCall>> {
+			ReceivedDmp::<T>::get()
+		}
+
 		fn handle_xcmp_message(
 			sender: ParaId,
 			_sent_at: RelayBlockNumber,
@@ -169,7 +175,7 @@ pub mod pallet {
 						limit,
 						Weight::zero(),
 					);
-					<ReceivedDmp<T>>::append(x);
+					ReceivedDmp::<T>::append(x);
 					Self::deposit_event(Event::ExecutedDownward(id, outcome));
 				},
 			},
diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs
index b46d7df6c2bcf1f99a48540d5f1c5ef104d09206..f35846ba32c31e0b260ffe428f459f0a14b9256b 100644
--- a/substrate/frame/contracts/mock-network/src/parachain.rs
+++ b/substrate/frame/contracts/mock-network/src/parachain.rs
@@ -49,10 +49,6 @@ use xcm_executor::{traits::JustTry, Config, XcmExecutor};
 
 pub type SovereignAccountOf = (AccountId32Aliases, ParentIsPreset);
 
-parameter_types! {
-	pub const BlockHashCount: u64 = 250;
-}
-
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 impl frame_system::Config for Runtime {
 	type RuntimeOrigin = RuntimeOrigin;
@@ -64,7 +60,6 @@
 	type AccountId = AccountId;
 	type Lookup = IdentityLookup;
 	type RuntimeEvent = RuntimeEvent;
-	type BlockHashCount = BlockHashCount;
 	type BlockWeights = ();
 	type BlockLength = ();
 	type Version = ();
diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs
index 36a7de499ba99311889980ba2c02d8502602b945..8829fff3d0436d95b3e44d87b2258d13c167efc1 100644
--- a/substrate/frame/contracts/mock-network/src/relay_chain.rs
+++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs
@@ -43,10 +43,6 @@ use super::{
 	primitives::{AccountId, Balance},
 };
 
-parameter_types! {
-	pub const BlockHashCount: u64 = 250;
-}
-
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 impl frame_system::Config for Runtime {
 	type RuntimeOrigin = RuntimeOrigin;
@@ -58,7 +54,6 @@
 	type AccountId = AccountId;
 	type Lookup = IdentityLookup;
 	type RuntimeEvent = RuntimeEvent;
-	type BlockHashCount = BlockHashCount;
 	type BlockWeights = ();
 	type BlockLength = ();
 	type Version = ();
diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs
index 1794d09d5ad28e08bf9579bed4f311f469be832c..356b42268da6f534e99aacb60ead7111d5dd3b58 100644
--- a/substrate/frame/contracts/proc-macro/src/lib.rs
+++ b/substrate/frame/contracts/proc-macro/src/lib.rs
@@ -132,6 +132,7 @@ struct HostFn {
 	alias_to: Option<String>,
 	/// Formulating the predicate inverted makes the expression using it simpler.
 	not_deprecated: bool,
+	cfg: Option<syn::Attribute>,
 }
 
 enum HostFnReturn {
@@ -163,13 +164,13 @@ impl ToTokens for HostFn {
 impl HostFn {
 	pub fn try_from(mut item: syn::ItemFn) -> syn::Result<Self> {
 		let err = |span, msg| {
-			let msg = format!("Invalid host function definition. {}", msg);
+			let msg = format!("Invalid host function definition.\n{}", msg);
 			syn::Error::new(span, msg)
 		};
 
 		// process attributes
 		let msg =
-			"only #[version()], #[unstable], #[prefixed_alias] and #[deprecated] attributes are allowed.";
+			"Only #[version()], #[unstable], #[prefixed_alias], #[cfg] and #[deprecated] attributes are allowed.";
 		let span = item.span();
 		let mut attrs = item.attrs.clone();
 		attrs.retain(|a| !a.path().is_ident("doc"));
@@ -177,6 +178,7 @@ impl HostFn {
 		let mut is_stable = true;
 		let mut alias_to = None;
 		let mut not_deprecated = true;
+		let mut cfg = None;
 		while let Some(attr) = attrs.pop() {
 			let ident = attr.path().get_ident().ok_or(err(span, msg))?.to_string();
 			match ident.as_str() {
@@ -206,7 +208,13 @@
 					}
 					not_deprecated = false;
 				},
-				_ => return Err(err(span, msg)),
+				"cfg" => {
+					if cfg.is_some() {
+						return Err(err(span, "#[cfg] can only be specified once"))
+					}
+					cfg = Some(attr);
+				},
+				id => return Err(err(span, &format!("Unsupported attribute \"{id}\". 
{msg}"))), } } let name = item.sig.ident.to_string(); @@ -311,6 +319,7 @@ impl HostFn { is_stable, alias_to, not_deprecated, + cfg, }) }, _ => Err(err(span, &msg)), @@ -528,8 +537,9 @@ fn expand_env(def: &EnvDef, docs: bool) -> TokenStream2 { /// - real implementation, to register it in the contract execution environment; /// - dummy implementation, to be used as mocks for contract validation step. fn expand_impls(def: &EnvDef) -> TokenStream2 { - let impls = expand_functions(def, true, quote! { crate::wasm::Runtime }); - let dummy_impls = expand_functions(def, false, quote! { () }); + let impls = expand_functions(def, ExpandMode::Impl); + let dummy_impls = expand_functions(def, ExpandMode::MockImpl); + let bench_impls = expand_functions(def, ExpandMode::BenchImpl); quote! { impl<'a, E: Ext> crate::wasm::Environment> for Env @@ -545,6 +555,14 @@ fn expand_impls(def: &EnvDef) -> TokenStream2 { } } + #[cfg(feature = "runtime-benchmarks")] + pub struct BenchEnv(::core::marker::PhantomData); + + #[cfg(feature = "runtime-benchmarks")] + impl BenchEnv { + #bench_impls + } + impl crate::wasm::Environment<()> for Env { fn define( @@ -560,18 +578,38 @@ fn expand_impls(def: &EnvDef) -> TokenStream2 { } } -fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) -> TokenStream2 { +enum ExpandMode { + Impl, + BenchImpl, + MockImpl, +} + +impl ExpandMode { + fn expand_blocks(&self) -> bool { + match *self { + ExpandMode::Impl | ExpandMode::BenchImpl => true, + ExpandMode::MockImpl => false, + } + } + + fn host_state(&self) -> TokenStream2 { + match *self { + ExpandMode::Impl | ExpandMode::BenchImpl => quote! { crate::wasm::runtime::Runtime }, + ExpandMode::MockImpl => quote! { () }, + } + } +} + +fn expand_functions(def: &EnvDef, expand_mode: ExpandMode) -> TokenStream2 { let impls = def.host_funcs.iter().map(|f| { // skip the context and memory argument let params = f.item.sig.inputs.iter().skip(2); - - let (module, name, body, wasm_output, output) = ( - f.module(), - &f.name, - &f.item.block, - f.returns.to_wasm_sig(), - &f.item.sig.output - ); + let module = f.module(); + let cfg = &f.cfg; + let name = &f.name; + let body = &f.item.block; + let wasm_output = f.returns.to_wasm_sig(); + let output = &f.item.sig.output; let is_stable = f.is_stable; let not_deprecated = f.not_deprecated; @@ -608,23 +646,34 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) // - We replace any code by unreachable! // - Allow unused variables as the code that uses is not expanded // - We don't need to map the error as we simply panic if they code would ever be executed - let inner = if expand_blocks { - quote! { || #output { - let (memory, ctx) = __caller__ - .data() - .memory() - .expect("Memory must be set when setting up host data; qed") - .data_and_store_mut(&mut __caller__); - #wrapped_body_with_trace - } } - } else { - quote! { || -> #wasm_output { - // This is part of the implementation for `Environment<()>` which is not - // meant to be actually executed. It is only for validation which will - // never call host functions. - ::core::unreachable!() - } } + let expand_blocks = expand_mode.expand_blocks(); + let inner = match expand_mode { + ExpandMode::Impl => { + quote! 
{ || #output { + let (memory, ctx) = __caller__ + .data() + .memory() + .expect("Memory must be set when setting up host data; qed") + .data_and_store_mut(&mut __caller__); + #wrapped_body_with_trace + } } + }, + ExpandMode::BenchImpl => { + let body = &body.stmts; + quote!{ + #(#body)* + } + }, + ExpandMode::MockImpl => { + quote! { || -> #wasm_output { + // This is part of the implementation for `Environment<()>` which is not + // meant to be actually executed. It is only for validation which will + // never call host functions. + ::core::unreachable!() + } } + }, }; + let into_host = if expand_blocks { quote! { |reason| { @@ -655,6 +704,11 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) .map_err(TrapReason::from) .map_err(#into_host)? }; + + // Charge gas for host function execution. + __caller__.data_mut().charge_gas(crate::wasm::RuntimeCosts::HostFn) + .map_err(TrapReason::from) + .map_err(#into_host)?; } } else { quote! { } @@ -676,29 +730,51 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) quote! { } }; - quote! { - // We need to allow all interfaces when runtime benchmarks are performed because - // we generate the weights even when those interfaces are not enabled. This - // is necessary as the decision whether we allow unstable or deprecated functions - // is a decision made at runtime. Generation of the weights happens statically. - if ::core::cfg!(feature = "runtime-benchmarks") || - ((#is_stable || __allow_unstable__) && (#not_deprecated || __allow_deprecated__)) - { - #allow_unused - linker.define(#module, #name, ::wasmi::Func::wrap(&mut*store, |mut __caller__: ::wasmi::Caller<#host_state>, #( #params, )*| -> #wasm_output { - #sync_gas_before - let mut func = #inner; - let result = func().map_err(#into_host).map(::core::convert::Into::into); - #sync_gas_after - result - }))?; - } + match expand_mode { + ExpandMode::BenchImpl => { + let name = Ident::new(&format!("{module}_{name}"), Span::call_site()); + quote! { + pub fn #name(ctx: &mut crate::wasm::Runtime, memory: &mut [u8], #(#params),*) #output { + #inner + } + } + }, + _ => { + let host_state = expand_mode.host_state(); + quote! { + // We need to allow all interfaces when runtime benchmarks are performed because + // we generate the weights even when those interfaces are not enabled. This + // is necessary as the decision whether we allow unstable or deprecated functions + // is a decision made at runtime. Generation of the weights happens statically. + #cfg + if ::core::cfg!(feature = "runtime-benchmarks") || + ((#is_stable || __allow_unstable__) && (#not_deprecated || __allow_deprecated__)) + { + #allow_unused + linker.define(#module, #name, ::wasmi::Func::wrap(&mut*store, |mut __caller__: ::wasmi::Caller<#host_state>, #( #params, )*| -> #wasm_output { + #sync_gas_before + let mut func = #inner; + let result = func().map_err(#into_host).map(::core::convert::Into::into); + #sync_gas_after + result + }))?; + } + } + }, } }); - quote! { - let __allow_unstable__ = matches!(allow_unstable, AllowUnstableInterface::Yes); - let __allow_deprecated__ = matches!(allow_deprecated, AllowDeprecatedInterface::Yes); - #( #impls )* + + match expand_mode { + ExpandMode::BenchImpl => { + quote! { + #( #impls )* + } + }, + _ => quote! 
{ + let __allow_unstable__ = matches!(allow_unstable, AllowUnstableInterface::Yes); + let __allow_deprecated__ = matches!(allow_deprecated, AllowDeprecatedInterface::Yes); + #( #impls )* + }, } } diff --git a/substrate/frame/contracts/src/benchmarking/call_builder.rs b/substrate/frame/contracts/src/benchmarking/call_builder.rs index 285fe0052b4d95a200eaebd0a86fb77d729aacfd..5d73d825fca9a76d15742c7a44b32b11a95fed51 100644 --- a/substrate/frame/contracts/src/benchmarking/call_builder.rs +++ b/substrate/frame/contracts/src/benchmarking/call_builder.rs @@ -25,6 +25,7 @@ use crate::{ }; use codec::{Encode, HasCompact}; use core::fmt::Debug; +use frame_benchmarking::benchmarking; use sp_core::Get; use sp_std::prelude::*; @@ -57,6 +58,16 @@ pub struct CallSetup { data: Vec, } +impl Default for CallSetup +where + T: Config + pallet_balances::Config, + as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, +{ + fn default() -> Self { + Self::new(WasmModule::dummy()) + } +} + impl CallSetup where T: Config + pallet_balances::Config, @@ -70,6 +81,17 @@ where let storage_meter = Meter::new(&origin, None, 0u32.into()).unwrap(); + // Whitelist contract account, as it is already accounted for in the call benchmark + benchmarking::add_to_whitelist( + frame_system::Account::::hashed_key_for(&contract.account_id).into(), + ); + + // Whitelist the contract's contractInfo as it is already accounted for in the call + // benchmark + benchmarking::add_to_whitelist( + crate::ContractInfoOf::::hashed_key_for(&contract.account_id).into(), + ); + Self { contract, dest, @@ -150,21 +172,29 @@ where } #[macro_export] -macro_rules! call_builder( - ($func: ident, $module:expr) => { - $crate::call_builder!($func, _contract, $module); +macro_rules! memory( + ($($bytes:expr,)*) => { + vec![] + .into_iter() + $(.chain($bytes))* + .collect::>() }; - ($func: ident, $contract: ident, $module:expr) => { - let mut setup = CallSetup::::new($module); - $crate::call_builder!($func, $contract, setup: setup); +); + +#[macro_export] +macro_rules! build_runtime( + ($runtime:ident, $memory:ident: [$($segment:expr,)*]) => { + $crate::build_runtime!($runtime, _contract, $memory: [$($segment,)*]); }; - ($func:ident, setup: $setup: ident) => { - $crate::call_builder!($func, _contract, setup: $setup); + ($runtime:ident, $contract:ident, $memory:ident: [$($bytes:expr,)*]) => { + $crate::build_runtime!($runtime, $contract); + let mut $memory = $crate::memory!($($bytes,)*); }; - ($func:ident, $contract: ident, setup: $setup: ident) => { - let data = $setup.data(); - let $contract = $setup.contract(); - let (mut ext, module) = $setup.ext(); - let $func = CallSetup::::prepare_call(&mut ext, module, data); + ($runtime:ident, $contract:ident) => { + let mut setup = CallSetup::::default(); + let $contract = setup.contract(); + let input = setup.data(); + let (mut ext, _) = setup.ext(); + let mut $runtime = crate::wasm::Runtime::new(&mut ext, input); }; ); diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index b97cf168e26d4562e4f4a3b7f07ad243174e7a98..65bcf30683c05e83515815ae33ffe5d9c4aadb70 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -288,17 +288,15 @@ impl WasmModule { module.into() } - /// Creates a wasm module that calls the imported function named `getter_name` `repeat` - /// times. 
The imported function is expected to have the "getter signature" of - /// (out_ptr: u32, len_ptr: u32) -> (). - pub fn getter(module_name: &'static str, getter_name: &'static str, repeat: u32) -> Self { + /// Creates a wasm module that calls the imported function `noop` `repeat` times. + pub fn noop(repeat: u32) -> Self { let pages = max_pages::(); ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: module_name, - name: getter_name, - params: vec![ValueType::I32, ValueType::I32], + module: "seal0", + name: "noop", + params: vec![], return_type: None, }], // Write the output buffer size. The output size will be overwritten by the @@ -312,35 +310,7 @@ impl WasmModule { call_body: Some(body::repeated( repeat, &[ - Instruction::I32Const(4), // ptr where to store output - Instruction::I32Const(0), // ptr to length - Instruction::Call(0), // call the imported function - ], - )), - ..Default::default() - } - .into() - } - - /// Creates a wasm module that calls the imported hash function named `name` `repeat` times - /// with an input of size `data_size`. Hash functions have the signature - /// (input_ptr: u32, input_len: u32, output_ptr: u32) -> () - pub fn hasher(name: &'static str, repeat: u32, data_size: u32) -> Self { - ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name, - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], - return_type: None, - }], - call_body: Some(body::repeated( - repeat, - &[ - Instruction::I32Const(0), // input_ptr - Instruction::I32Const(data_size as i32), // input_len - Instruction::I32Const(0), // output_ptr - Instruction::Call(0), + Instruction::Call(0), // call the imported function ], )), ..Default::default() @@ -353,21 +323,6 @@ impl WasmModule { pub mod body { use super::*; - /// When generating contract code by repeating a Wasm sequence, it's sometimes necessary - /// to change those instructions on each repetition. The variants of this enum describe - /// various ways in which this can happen. - pub enum DynInstr { - /// Insert the associated instruction. - Regular(Instruction), - /// Insert a I32Const with incrementing value for each insertion. - /// (start_at, increment_by) - Counter(u32, u32), - } - - pub fn plain(instructions: Vec) -> FuncBody { - FuncBody::new(Vec::new(), Instructions::new(instructions)) - } - pub fn repeated(repetitions: u32, instructions: &[Instruction]) -> FuncBody { repeated_with_locals(&[], repetitions, instructions) } @@ -401,24 +356,6 @@ pub mod body { instructions.push(Instruction::End); FuncBody::new(locals.to_vec(), Instructions::new(instructions)) } - - pub fn repeated_dyn(repetitions: u32, mut instructions: Vec) -> FuncBody { - // We need to iterate over indices because we cannot cycle over mutable references - let body = (0..instructions.len()) - .cycle() - .take(instructions.len() * usize::try_from(repetitions).unwrap()) - .flat_map(|idx| match &mut instructions[idx] { - DynInstr::Regular(instruction) => vec![instruction.clone()], - DynInstr::Counter(offset, increment_by) => { - let current = *offset; - *offset += *increment_by; - vec![Instruction::I32Const(current as i32)] - }, - }) - .chain(sp_std::iter::once(Instruction::End)) - .collect(); - FuncBody::new(Vec::new(), Instructions::new(body)) - } } /// The maximum amount of pages any contract is allowed to have according to the current `Schedule`. 
diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs
index 952ef180be21860b4a13a26fdf07bf03251f00a2..7c993bc9a7718d7ada98198bf7c1f7df236aead1 100644
--- a/substrate/frame/contracts/src/benchmarking/mod.rs
+++ b/substrate/frame/contracts/src/benchmarking/mod.rs
@@ -23,33 +23,31 @@ mod code;
 mod sandbox;
 use self::{
 	call_builder::CallSetup,
-	code::{
-		body::{self, DynInstr::*},
-		DataSegment, ImportedFunction, ImportedMemory, Location, ModuleDefinition, WasmModule,
-	},
+	code::{body, ImportedMemory, Location, ModuleDefinition, WasmModule},
 	sandbox::Sandbox,
 };
 use crate::{
-	exec::Key,
+	exec::{Key, SeedOf},
 	migration::{
 		codegen::LATEST_MIGRATION_VERSION, v09, v10, v11, v12, v13, v14, v15, v16, MigrationStep,
 	},
+	wasm::BenchEnv,
 	Pallet as Contracts, *,
 };
 use codec::{Encode, MaxEncodedLen};
 use frame_benchmarking::v2::*;
 use frame_support::{
-	self,
+	self, assert_ok,
 	pallet_prelude::StorageVersion,
 	traits::{fungible::InspectHold, Currency},
 	weights::{Weight, WeightMeter},
 };
 use frame_system::RawOrigin;
 use pallet_balances;
-use pallet_contracts_uapi::CallFlags;
+use pallet_contracts_uapi::{CallFlags, ReturnErrorCode};
 use sp_runtime::traits::{Bounded, Hash};
 use sp_std::prelude::*;
-use wasm_instrument::parity_wasm::elements::{BlockType, Instruction, Local, ValueType};
+use wasm_instrument::parity_wasm::elements::{Instruction, Local, ValueType};
 
 /// How many runs we do per API benchmark.
 ///
@@ -442,13 +440,6 @@ mod benchmarks {
 		Ok(())
 	}
 
-	// This constructs a contract that is maximal expensive to instrument.
-	// It creates a maximum number of metering blocks per byte.
-	// The size of the salt influences the runtime because is is hashed in order to
-	// determine the contract address. All code is generated to the `call` function so that
-	// we don't benchmark the actual execution of this code but merely what it takes to load
-	// a code of that size into the sandbox.
-	//
 	// `c`: Size of the code in bytes.
 	// `i`: Size of the input in bytes.
 	// `s`: Size of the salt in bytes.
@@ -482,7 +473,6 @@ mod benchmarks {
 		assert_eq!(T::Currency::balance(&addr), value + Pallet::<T>::min_balance());
 	}
 
-	// Instantiate uses a dummy contract constructor to measure the overhead of the instantiate.
 	// `i`: Size of the input in bytes.
 	// `s`: Size of the salt in bytes.
	#[benchmark(pov_mode = Measured)]
@@ -621,507 +611,306 @@ mod benchmarks {
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_caller(r: Linear<0, API_BENCHMARK_RUNS>) {
-		call_builder!(func, WasmModule::getter("seal0", "seal_caller", r));
-
-		let res;
+	fn noop_host_fn(r: Linear<0, API_BENCHMARK_RUNS>) {
+		let mut setup = CallSetup::<T>::new(WasmModule::noop(r));
+		let (mut ext, module) = setup.ext();
+		let func = CallSetup::<T>::prepare_call(&mut ext, module, vec![]);
 		#[block]
 		{
-			res = func.call();
+			func.call();
 		}
-		assert_eq!(res.did_revert(), false);
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_is_contract(r: Linear<0, API_BENCHMARK_RUNS>) {
-		let accounts = (0..r).map(|n| account::<T::AccountId>("account", n, 0)).collect::<Vec<_>>();
-		let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0);
-		let accounts_bytes = accounts.iter().flat_map(|a| a.encode()).collect::<Vec<_>>();
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_is_contract",
-				params: vec![ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: accounts_bytes }],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, account_len as u32), // address_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, instance, code);
-		let info = instance.info().unwrap();
-		// every account would be a contract (worst case)
-		for acc in accounts.iter() {
-			<ContractInfoOf<T>>::insert(acc, info.clone());
-		}
+	fn seal_caller() {
+		let len = <T::AccountId as MaxEncodedLen>::max_encoded_len() as u32;
+		build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_caller(&mut runtime, &mut memory, 4, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_ok!(result);
+		assert_eq!(
+			&<T::AccountId>::decode(&mut &memory[4..]).unwrap(),
+			runtime.ext().caller().account_id().unwrap()
+		);
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) {
-		let accounts = (0..r).map(|n| account::<T::AccountId>("account", n, 0)).collect::<Vec<_>>();
-		let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0);
-		let accounts_bytes = accounts.iter().flat_map(|a| a.encode()).collect::<Vec<_>>();
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_code_hash",
-				params: vec![ValueType::I32, ValueType::I32, ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![
-				DataSegment {
-					offset: 0,
-					value: 32u32.to_le_bytes().to_vec(), // output length
-				},
-				DataSegment { offset: 36, value: accounts_bytes },
-			],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(36, account_len as u32),   // address_ptr
-					Regular(Instruction::I32Const(4)), // ptr to output data
-					Regular(Instruction::I32Const(0)), // ptr to output length
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, instance, code);
-		let info = instance.info().unwrap();
-		// every account would be a contract (worst case)
-		for acc in accounts.iter() {
-			<ContractInfoOf<T>>::insert(acc, info.clone());
-		}
+	fn seal_is_contract() {
+		let Contract { account_id, ..
} = + Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); - let res; + build_runtime!(runtime, memory: [account_id.encode(), ]); + + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_is_contract(&mut runtime, &mut memory, 0); } - assert_eq!(res.did_revert(), false); + + assert_eq!(result.unwrap(), 1); } #[benchmark(pov_mode = Measured)] - fn seal_own_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_own_code_hash", r)); + fn seal_code_hash() { + let contract = Contract::::with_index(1, WasmModule::dummy(), vec![]).unwrap(); + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], contract.account_id.encode(), ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_code_hash(&mut runtime, &mut memory, 4 + len, 4, 0); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert_eq!( + as Decode>::decode(&mut &memory[4..]).unwrap(), + contract.info().unwrap().code_hash + ); } #[benchmark(pov_mode = Measured)] - fn seal_caller_is_origin(r: Linear<0, API_BENCHMARK_RUNS>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_caller_is_origin", - params: vec![], - return_type: Some(ValueType::I32), - }], - call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), - ..Default::default() - }); - call_builder!(func, code); - - let res; + fn seal_own_code_hash() { + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, contract, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_own_code_hash(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert_eq!( + as Decode>::decode(&mut &memory[4..]).unwrap(), + contract.info().unwrap().code_hash + ); } #[benchmark(pov_mode = Measured)] - fn seal_caller_is_root(r: Linear<0, API_BENCHMARK_RUNS>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "caller_is_root", - params: vec![], - return_type: Some(ValueType::I32), - }], - call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); - setup.set_origin(Origin::Root); - call_builder!(func, setup: setup); + fn seal_caller_is_origin() { + build_runtime!(runtime, memory: []); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_caller_is_origin(&mut runtime, &mut memory); } - assert_eq!(res.did_revert(), false); + assert_eq!(result.unwrap(), 1u32); } #[benchmark(pov_mode = Measured)] - fn seal_address(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_address", r)); + fn seal_caller_is_root() { + let mut setup = CallSetup::::default(); + setup.set_origin(Origin::Root); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_caller_is_root(&mut runtime, &mut [0u8; 0]); } - assert_eq!(res.did_revert(), false); + assert_eq!(result.unwrap(), 1u32); } #[benchmark(pov_mode = Measured)] - fn seal_gas_left(r: Linear<0, 
API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal1", "gas_left", r)); + fn seal_address() { + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_address(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + &::decode(&mut &memory[4..]).unwrap(), + runtime.ext().address() + ); } #[benchmark(pov_mode = Measured)] - fn seal_balance(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_balance", r)); + fn seal_gas_left() { + // use correct max_encoded_len when new version of parity-scale-codec is released + let len = 18u32; + assert!(::max_encoded_len() as u32 != len); + build_runtime!(runtime, memory: [32u32.to_le_bytes(), vec![0u8; len as _], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_gas_left(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + ::decode(&mut &memory[4..]).unwrap(), + runtime.ext().gas_meter().gas_left() + ); } #[benchmark(pov_mode = Measured)] - fn seal_value_transferred(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_value_transferred", r)); - - let res; + fn seal_balance() { + let len = ::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_seal_balance(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + ::decode(&mut &memory[4..]).unwrap(), + runtime.ext().balance().into() + ); } #[benchmark(pov_mode = Measured)] - fn seal_minimum_balance(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_minimum_balance", r)); - - let res; + fn seal_value_transferred() { + let len = ::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_value_transferred(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + ::decode(&mut &memory[4..]).unwrap(), + runtime.ext().value_transferred().into() + ); } #[benchmark(pov_mode = Measured)] - fn seal_block_number(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_block_number", r)); - - let res; + fn seal_minimum_balance() { + let len = ::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_minimum_balance(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + ::decode(&mut &memory[4..]).unwrap(), + runtime.ext().minimum_balance().into() + ); } #[benchmark(pov_mode = Measured)] - fn seal_now(r: Linear<0, API_BENCHMARK_RUNS>) { - call_builder!(func, WasmModule::getter("seal0", "seal_now", r)); - - let res; + fn seal_block_number() { + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_seal_block_number(&mut runtime, &mut memory, 4, 0); } - 
assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!( + >::decode(&mut &memory[4..]).unwrap(), + runtime.ext().block_number() + ); } #[benchmark(pov_mode = Measured)] - fn seal_weight_to_fee(r: Linear<0, API_BENCHMARK_RUNS>) { - let pages = code::max_pages::(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "weight_to_fee", - params: vec![ValueType::I64, ValueType::I64, ValueType::I32, ValueType::I32], - return_type: None, - }], - data_segments: vec![DataSegment { - offset: 0, - value: (pages * 64 * 1024 - 4).to_le_bytes().to_vec(), - }], - call_body: Some(body::repeated( - r, - &[ - Instruction::I64Const(500_000), - Instruction::I64Const(300_000), - Instruction::I32Const(4), - Instruction::I32Const(0), - Instruction::Call(0), - ], - )), - ..Default::default() - }); - call_builder!(func, code); - - let res; + fn seal_now() { + let len = as MaxEncodedLen>::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_seal_now(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!(>::decode(&mut &memory[4..]).unwrap(), *runtime.ext().now()); } #[benchmark(pov_mode = Measured)] - fn seal_input(r: Linear<0, API_BENCHMARK_RUNS>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_input", - params: vec![ValueType::I32, ValueType::I32], - return_type: None, - }], - data_segments: vec![DataSegment { offset: 0, value: 0u32.to_le_bytes().to_vec() }], - call_body: Some(body::repeated( - r, - &[ - Instruction::I32Const(4), // ptr where to store output - Instruction::I32Const(0), // ptr to length - Instruction::Call(0), - ], - )), - ..Default::default() - }); - - call_builder!(func, code); - - let res; + fn seal_weight_to_fee() { + let len = ::max_encoded_len() as u32; + build_runtime!(runtime, memory: [len.to_le_bytes(), vec![0u8; len as _], ]); + let weight = Weight::from_parts(500_000, 300_000); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_weight_to_fee( + &mut runtime, + &mut memory, + weight.ref_time(), + weight.proof_size(), + 4, + 0, + ); } - assert_eq!(res.did_revert(), false); - } - - #[benchmark(pov_mode = Measured)] - fn seal_input_per_byte( - n: Linear<0, { code::max_pages::() * 64 * 1024 }>, - ) -> Result<(), BenchmarkError> { - let buffer_size = code::max_pages::() * 64 * 1024 - 4; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_input", - params: vec![ValueType::I32, ValueType::I32], - return_type: None, - }], - data_segments: vec![DataSegment { - offset: 0, - value: buffer_size.to_le_bytes().to_vec(), - }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(4), // ptr where to store output - Instruction::I32Const(0), // ptr to length - Instruction::Call(0), - Instruction::End, - ])), - ..Default::default() - }); - let instance = Contract::::new(code, vec![])?; - let data = vec![42u8; n.min(buffer_size) as usize]; - let origin = RawOrigin::Signed(instance.caller.clone()); - #[extrinsic_call] - call(origin, instance.addr, 0u32.into(), Weight::MAX, None, data); - Ok(()) + assert_ok!(result); + assert_eq!( + 
>::decode(&mut &memory[4..]).unwrap(), + runtime.ext().get_weight_price(weight) + ); } - // We cannot call `seal_return` multiple times. Therefore our weight determination is not - // as precise as with other APIs. Because this function can only be called once per - // contract it cannot be used as an attack vector. #[benchmark(pov_mode = Measured)] - fn seal_return(r: Linear<0, 1>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_return", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], - return_type: None, - }], - call_body: Some(body::repeated( - r, - &[ - Instruction::I32Const(0), // flags - Instruction::I32Const(0), // data_ptr - Instruction::I32Const(0), // data_len - Instruction::Call(0), - ], - )), - ..Default::default() - }); - call_builder!(func, code); - - let res; + fn seal_input(n: Linear<0, { code::max_pages::() * 64 * 1024 - 4 }>) { + let mut setup = CallSetup::::default(); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![42u8; n as usize]); + let mut memory = memory!(n.to_le_bytes(), vec![0u8; n as usize],); + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_input(&mut runtime, &mut memory, 4, 0); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!(&memory[4..], &vec![42u8; n as usize]); } #[benchmark(pov_mode = Measured)] - fn seal_return_per_byte(n: Linear<0, { code::max_pages::() * 64 * 1024 }>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_return", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32], - return_type: None, - }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // flags - Instruction::I32Const(0), // data_ptr - Instruction::I32Const(n as i32), // data_len - Instruction::Call(0), - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, code); + fn seal_return(n: Linear<0, { code::max_pages::() * 64 * 1024 - 4 }>) { + build_runtime!(runtime, memory: [n.to_le_bytes(), vec![42u8; n as usize], ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_seal_return(&mut runtime, &mut memory, 0, 0, n); } - assert_eq!(res.did_revert(), false); + + assert!(matches!( + result, + Err(crate::wasm::TrapReason::Return(crate::wasm::ReturnData { .. })) + )); } - // The same argument as for `seal_return` is true here. #[benchmark(pov_mode = Measured)] - fn seal_terminate(r: Linear<0, 1>) -> Result<(), BenchmarkError> { + fn seal_terminate( + n: Linear<0, { T::MaxDelegateDependencies::get() }>, + ) -> Result<(), BenchmarkError> { let beneficiary = account::("beneficiary", 0, 0); - let beneficiary_bytes = beneficiary.encode(); - let beneficiary_len = beneficiary_bytes.len(); let caller = whitelisted_caller(); + build_runtime!(runtime, memory: [beneficiary.encode(),]); + T::Currency::set_balance(&caller, caller_funding::()); - // Maximize the delegate_dependencies to account for the worst-case scenario. 
- let code_hashes = (0..T::MaxDelegateDependencies::get()) - .map(|i| { - let new_code = WasmModule::::dummy_with_bytes(65 + i); - Contracts::::store_code_raw(new_code.code, caller.clone())?; - Ok(new_code.hash) - }) - .collect::, &'static str>>()?; - let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); - let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::>(); - - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ - ImportedFunction { - module: "seal0", - name: "seal_terminate", - params: vec![ValueType::I32, ValueType::I32], - return_type: None, - }, - ImportedFunction { - module: "seal0", - name: "lock_delegate_dependency", - params: vec![ValueType::I32], - return_type: None, - }, - ], - data_segments: vec![ - DataSegment { offset: 0, value: beneficiary_bytes }, - DataSegment { offset: beneficiary_len as u32, value: code_hashes_bytes }, - ], - deploy_body: Some(body::repeated_dyn( - T::MaxDelegateDependencies::get(), - vec![ - Counter(beneficiary_len as u32, code_hash_len as u32), // code_hash_ptr - Regular(Instruction::Call(1)), - ], - )), - call_body: Some(body::repeated( - r, - &[ - Instruction::I32Const(0), // beneficiary_ptr - Instruction::I32Const(beneficiary_len as i32), // beneficiary_len - Instruction::Call(0), - ], - )), - ..Default::default() + (0..n).for_each(|i| { + let new_code = WasmModule::::dummy_with_bytes(65 + i); + Contracts::::store_code_raw(new_code.code, caller.clone()).unwrap(); + runtime.ext().lock_delegate_dependency(new_code.hash).unwrap(); }); - let instance = Contract::::new(code, vec![])?; - let origin = RawOrigin::Signed(instance.caller.clone()); - assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); - assert_eq!( - T::Currency::balance(&instance.account_id), - Pallet::::min_balance() * 2u32.into() - ); - assert_ne!( - T::Currency::balance_on_hold( - &HoldReason::StorageDepositReserve.into(), - &instance.account_id - ), - 0u32.into() - ); - assert_eq!( - ContractInfoOf::::get(&instance.account_id) - .unwrap() - .delegate_dependencies_count() as u32, - T::MaxDelegateDependencies::get() - ); - #[extrinsic_call] - call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]); - - if r > 0 { - assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); - assert_eq!( - T::Currency::balance_on_hold( - &HoldReason::StorageDepositReserve.into(), - &instance.account_id - ), - 0u32.into() - ); - assert_eq!( - T::Currency::total_balance(&beneficiary), - Pallet::::min_balance() * 2u32.into() - ); + + let result; + #[block] + { + result = BenchEnv::seal1_terminate(&mut runtime, &mut memory, 0); } + + assert!(matches!(result, Err(crate::wasm::TrapReason::Termination))); + Ok(()) } @@ -1129,161 +918,77 @@ mod benchmarks { // number (< 1 KB). Therefore we are not overcharging too much in case a smaller subject is // used. 
#[benchmark(pov_mode = Measured)] - fn seal_random(r: Linear<0, API_BENCHMARK_RUNS>) { - let pages = code::max_pages::(); + fn seal_random() { let subject_len = T::Schedule::get().limits.subject_len; assert!(subject_len < 1024); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_random", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: None, - }], - data_segments: vec![DataSegment { - offset: 0, - value: (pages * 64 * 1024 - subject_len - 4).to_le_bytes().to_vec(), - }], - call_body: Some(body::repeated( - r, - &[ - Instruction::I32Const(4), // subject_ptr - Instruction::I32Const(subject_len as i32), // subject_len - Instruction::I32Const((subject_len + 4) as i32), // out_ptr - Instruction::I32Const(0), // out_len_ptr - Instruction::Call(0), - ], - )), - ..Default::default() - }); - call_builder!(func, code); + let output_len = + <(SeedOf, BlockNumberFor) as MaxEncodedLen>::max_encoded_len() as u32; - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - } + build_runtime!(runtime, memory: [ + output_len.to_le_bytes(), + vec![42u8; subject_len as _], + vec![0u8; output_len as _], + ]); - // Overhead of calling the function without any topic. - // We benchmark for the worst case (largest event). - #[benchmark(pov_mode = Measured)] - fn seal_deposit_event(r: Linear<0, API_BENCHMARK_RUNS>) { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_deposit_event", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: None, - }], - call_body: Some(body::repeated( - r, - &[ - Instruction::I32Const(0), // topics_ptr - Instruction::I32Const(0), // topics_len - Instruction::I32Const(0), // data_ptr - Instruction::I32Const(0), // data_len - Instruction::Call(0), - ], - )), - ..Default::default() - }); - - call_builder!(func, code); - - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_random( + &mut runtime, + &mut memory, + 4, // subject_ptr + subject_len, // subject_len + subject_len + 4, // output_ptr + 0, // output_len_ptr + ); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert_ok!(<(SeedOf, BlockNumberFor)>::decode(&mut &memory[subject_len as _..])); } // Benchmark the overhead that topics generate. 
// `t`: Number of topics // `n`: Size of event payload in bytes #[benchmark(pov_mode = Measured)] - fn seal_deposit_event_per_topic_and_byte( + fn seal_deposit_event( t: Linear<0, { T::Schedule::get().limits.event_topics }>, n: Linear<0, { T::Schedule::get().limits.payload_len }>, ) { let topics = (0..t).map(|i| T::Hashing::hash_of(&i)).collect::>().encode(); - let topics_len = topics.len(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_deposit_event", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: None, - }], - data_segments: vec![DataSegment { offset: 0, value: topics }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // topics_ptr - Instruction::I32Const(topics_len as i32), // topics_len - Instruction::I32Const(0), // data_ptr - Instruction::I32Const(n as i32), // data_len - Instruction::Call(0), - Instruction::End, - ])), - ..Default::default() - }); + let topics_len = topics.len() as u32; - call_builder!(func, code); + build_runtime!(runtime, memory: [ + n.to_le_bytes(), + topics, + vec![0u8; n as _], + ]); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_deposit_event( + &mut runtime, + &mut memory, + 4, // topics_ptr + topics_len, // topics_len + 4 + topics_len, // data_ptr + 0, // data_len + ); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); } - // Benchmark debug_message call with zero input data. + // Benchmark debug_message call // Whereas this function is used in RPC mode only, it still should be secured // against an excessive use. - #[benchmark(pov_mode = Measured)] - fn seal_debug_message(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory { min_pages: 1, max_pages: 1 }), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_debug_message", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - call_body: Some(body::repeated( - r, - &[ - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(0), // value_len - Instruction::Call(0), - Instruction::Drop, - ], - )), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); - setup.enable_debug_message(); - call_builder!(func, setup: setup); - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } - - // Vary size of input in bytes up to maximum allowed contract memory - // or maximum allowed debug buffer size, whichever is less. + // + // i: size of input in bytes up to maximum allowed contract memory or maximum allowed debug + // buffer size, whichever is less. #[benchmark] - fn seal_debug_message_per_byte( + fn seal_debug_message( i: Linear< 0, { @@ -1291,1619 +996,586 @@ mod benchmarks { .min(T::MaxDebugBufferLen::get()) }, >, - ) -> Result<(), BenchmarkError> { - // We benchmark versus messages containing printable ASCII codes. - // About 1Kb goes to the contract code instructions, - // whereas all the space left we use for the initialization of the debug messages data. 
- let message = (0..T::MaxCodeLen::get() - 1024) - .zip((32..127).cycle()) - .map(|i| i.1) - .collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory { - min_pages: T::Schedule::get().limits.memory_pages, - max_pages: T::Schedule::get().limits.memory_pages, - }), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_debug_message", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: message }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(i as i32), // value_len - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); + ) { + let mut setup = CallSetup::::default(); setup.enable_debug_message(); - call_builder!(func, setup: setup); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); + // Fill memory with printable ASCII bytes. + let mut memory = (0..i).zip((32..127).cycle()).map(|i| i.1).collect::>(); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_debug_message(&mut runtime, &mut memory, 0, i); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); assert_eq!(setup.debug_message().unwrap().len() as u32, i); - Ok(()) } - // Only the overhead of calling the function itself with minimal arguments. - // The contract is a bit more complex because it needs to use different keys in order - // to generate unique storage accesses. However, it is still dominated by the storage - // accesses. We store something at all the keys that we are about to write to - // because re-writing at an existing key is always more expensive than writing - // to an key with no data behind it. - // - // # Note - // - // We need to use a smaller `r` because the keys are big and writing them all into the wasm - // might exceed the code size. 
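The rewrite that follows also folds the three old storage-write benchmarks (`seal_set_storage`, `seal_set_storage_per_new_byte`, `seal_set_storage_per_old_byte`) into a single benchmark with two linear components: `n` sizes the value being written and `o` sizes the pre-existing value it overwrites, since overwriting an entry is priced differently from a fresh write. For orientation, a sketch of the guest memory layout the new benchmark hands to the host call (an illustration of the `memory!` segments used below, not new code):

```rust
// Sketch: memory layout built by
//     build_runtime!(runtime, instance, memory: [ key.to_vec(), value.clone(), ]);
//
//   [0 .. max_key_len)               -> storage key (zero-filled, maximum length)
//   [max_key_len .. max_key_len + n) -> new value (`n` bytes of 1u8)
//
// The "old" value (`o` bytes of 42u8) lives in contract storage, written during
// setup via `info.write`. Hence the pointer arguments passed below:
//   key_ptr = 0, key_len = max_key_len, value_ptr = max_key_len, value_len = n.
```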
+ // n: new byte size + // o: old byte size #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_set_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let keys_bytes = keys.iter().flatten().cloned().collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal2", - name: "set_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: keys_bytes }], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // value_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - - call_builder!(func, instance, code); - let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - } - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } - - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_set_storage_per_new_byte( + fn seal_set_storage( n: Linear<0, { T::Schedule::get().limits.payload_len }>, + o: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal2", - name: "set_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key.clone() }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(n as i32), // value_len - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + let value = vec![1u8; n as usize]; - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_set_storage_per_old_byte( - n: Linear<0, { T::Schedule::get().limits.payload_len }>, - ) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: 
"seal2", - name: "set_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key.clone() }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const(0), // value_ptr - Instruction::I32Const(0), /* value_len is 0 as testing vs - * pre-existing value len */ - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); + build_runtime!(runtime, instance, memory: [ key.to_vec(), value.clone(), ]); let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } - - // Similar to seal_set_storage. We store all the keys that we are about to - // delete beforehand in order to prevent any optimizations that could occur when - // deleting a non existing key. We generate keys of a maximum length, and have to - // the amount of runs in order to make resulting contract code size less than MaxCodeLen. - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_clear_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "clear_storage", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key_bytes }], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) + info.write(&key, Some(vec![42u8; o as usize]), None, false) .map_err(|_| "Failed to write to storage during setup.")?; - } - >::insert(&instance.account_id, info); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal2_set_storage( + &mut runtime, + &mut memory, + 0, // key_ptr + max_key_len, // key_len + max_key_len, // value_ptr + n, // value_len + ); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert_eq!(info.read(&key).unwrap(), value); Ok(()) } #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_clear_storage_per_byte( + fn seal_clear_storage( n: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - 
module: "seal1", - name: "clear_storage", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key.clone() }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, instance, memory: [ key.to_vec(), ]); let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } - // We make sure that all storage accesses are to unique keys. - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_get_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let key_bytes_len = key_bytes.len(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "get_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: key_bytes }, - DataSegment { - offset: key_bytes_len as u32, - value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), - }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr - Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) + info.write(&key, Some(vec![42u8; n as usize]), None, false) .map_err(|_| "Failed to write to storage during setup.")?; - } - >::insert(&instance.account_id, info); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_clear_storage(&mut runtime, &mut memory, 0, max_key_len); } - assert_eq!(res.did_revert(), false); + + assert_ok!(result); + assert!(info.read(&key).is_none()); Ok(()) } #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_get_storage_per_byte( + fn seal_get_storage( n: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "get_storage", - params: vec![ValueType::I32, ValueType::I32, 
ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: key.clone() }, - DataSegment { - offset: max_key_len, - value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), - }, - ], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const((max_key_len + 4) as i32), // out_ptr - Instruction::I32Const(max_key_len as i32), // out_len_ptr - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, instance, memory: [ key.to_vec(), n.to_le_bytes(), vec![0u8; n as _], ]); let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - >::insert(&instance.account_id, info); - let res; + info.write(&key, Some(vec![42u8; n as usize]), None, false) + .map_err(|_| "Failed to write to storage during setup.")?; + + let out_ptr = max_key_len + 4; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_get_storage( + &mut runtime, + &mut memory, + 0, // key_ptr + max_key_len, // key_len + out_ptr, // out_ptr + max_key_len, // out_len_ptr + ); } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert_eq!(&info.read(&key).unwrap(), &memory[out_ptr as usize..]); Ok(()) } - // We make sure that all storage accesses are to unique keys. #[benchmark(skip_meta, pov_mode = Measured)] fn seal_contains_storage( - r: Linear<0, { API_BENCHMARK_RUNS / 2 }>, + n: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "contains_storage", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key_bytes }], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, instance, memory: [ key.to_vec(), ]); let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) + + info.write(&key, Some(vec![42u8; n as usize]), None, false) .map_err(|_| "Failed to write to storage during setup.")?; - } - >::insert(&instance.account_id, info); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal1_contains_storage(&mut runtime, &mut memory, 0, max_key_len); } - assert_eq!(res.did_revert(), false); 
+ + assert_eq!(result.unwrap(), n); Ok(()) } #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_contains_storage_per_byte( + fn seal_take_storage( n: Linear<0, { T::Schedule::get().limits.payload_len }>, ) -> Result<(), BenchmarkError> { let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal1", - name: "contains_storage", - params: vec![ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: 0, value: key.clone() }], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); + let key = Key::::try_from_var(vec![0u8; max_key_len as usize]) + .map_err(|_| "Key has wrong length")?; + build_runtime!(runtime, instance, memory: [ key.to_vec(), n.to_le_bytes(), vec![0u8; n as _], ]); let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - >::insert(&instance.account_id, info); - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) - } - - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_take_storage(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let keys = (0..r) - .map(|n| { - let mut h = T::Hashing::hash_of(&n).as_ref().to_vec(); - h.resize(max_key_len.try_into().unwrap(), n.to_le_bytes()[0]); - h - }) - .collect::>(); - let key_bytes = keys.iter().flatten().cloned().collect::>(); - let key_bytes_len = key_bytes.len(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "take_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: key_bytes }, - DataSegment { - offset: key_bytes_len as u32, - value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), - }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(0, max_key_len as u32), // key_ptr - Regular(Instruction::I32Const(max_key_len as i32)), // key_len - Regular(Instruction::I32Const((key_bytes_len + 4) as i32)), // out_ptr - Regular(Instruction::I32Const(key_bytes_len as i32)), // out_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - for key in keys { - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![]), - None, - false, - ) + let value = vec![42u8; n as usize]; + info.write(&key, Some(value.clone()), None, false) .map_err(|_| "Failed to write to storage during setup.")?; - } - >::insert(&instance.account_id, info); - let res; + let out_ptr = max_key_len + 4; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_take_storage( + &mut runtime, + &mut memory, + 0, // key_ptr + max_key_len, // key_len + out_ptr, // out_ptr + max_key_len, // 
out_len_ptr + ); } - assert_eq!(res.did_revert(), false); - Ok(()) - } - #[benchmark(skip_meta, pov_mode = Measured)] - fn seal_take_storage_per_byte( - n: Linear<0, { T::Schedule::get().limits.payload_len }>, - ) -> Result<(), BenchmarkError> { - let max_key_len = T::MaxStorageKeyLen::get(); - let key = vec![0u8; max_key_len as usize]; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "take_storage", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: key.clone() }, - DataSegment { - offset: max_key_len, - value: T::Schedule::get().limits.payload_len.to_le_bytes().into(), - }, - ], - call_body: Some(body::plain(vec![ - Instruction::I32Const(0), // key_ptr - Instruction::I32Const(max_key_len as i32), // key_len - Instruction::I32Const((max_key_len + 4) as i32), // out_ptr - Instruction::I32Const(max_key_len as i32), // out_len_ptr - Instruction::Call(0), - Instruction::Drop, - Instruction::End, - ])), - ..Default::default() - }); - call_builder!(func, instance, code); - let info = instance.info()?; - info.write( - &Key::::try_from_var(key).map_err(|_| "Key has wrong length")?, - Some(vec![42u8; n as usize]), - None, - false, - ) - .map_err(|_| "Failed to write to storage during setup.")?; - >::insert(&instance.account_id, info); - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); + assert_ok!(result); + assert!(&info.read(&key).is_none()); + assert_eq!(&value, &memory[out_ptr as usize..]); Ok(()) } // We transfer to unique accounts. #[benchmark(pov_mode = Measured)] - fn seal_transfer(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> { - let accounts = - (0..r).map(|i| account::("receiver", i, 0)).collect::>(); - let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); - let account_bytes = accounts.iter().flat_map(|x| x.encode()).collect(); + fn seal_transfer() { + let account = account::("receiver", 0, 0); let value = Pallet::::min_balance(); assert!(value > 0u32.into()); - let value_bytes = value.encode(); - let value_len = value_bytes.len(); - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_transfer", - params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: value_bytes }, - DataSegment { offset: value_len as u32, value: account_bytes }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Counter(value_len as u32, account_len as u32), // account_ptr - Regular(Instruction::I32Const(account_len as i32)), // account_len - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(value_len as i32)), // value_len - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); - setup.set_balance(value * (r + 1).into()); - call_builder!(func, setup: setup); - for account in &accounts { - assert_eq!(T::Currency::total_balance(account), 0u32.into()); - } + let mut setup = CallSetup::::default(); + setup.set_balance(value); + let (mut ext, _) = setup.ext(); + let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]); - let res; - 
#[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - - for account in &accounts { - assert_eq!(T::Currency::total_balance(account), value); - } - Ok(()) - } - - // We call unique accounts. - // - // This is a slow call: We reduce the number of runs. - #[benchmark(pov_mode = Measured)] - fn seal_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let dummy_code = WasmModule::::dummy_with_bytes(0); - let callees = (0..r) - .map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![])) - .collect::, _>>()?; - let callee_len = callees.get(0).map(|i| i.account_id.encode().len()).unwrap_or(0); - let callee_bytes = callees.iter().flat_map(|x| x.account_id.encode()).collect(); - let value: BalanceOf = 0u32.into(); + let account_bytes = account.encode(); + let account_len = account_bytes.len() as u32; let value_bytes = value.encode(); - let value_len = BalanceOf::::max_encoded_len() as u32; - // Set an own limit every 2nd call - let own_limit = (u32::MAX - 100).into(); - let deposits = (0..r) - .map(|i| if i % 2 == 0 { 0u32.into() } else { own_limit }) - .collect::>>(); - let deposits_bytes: Vec = deposits.iter().flat_map(|i| i.encode()).collect(); - let deposits_len = deposits_bytes.len() as u32; - let deposit_len = value_len; - let callee_offset = value_len + deposits_len; - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal2", - name: "call", - params: vec![ - ValueType::I32, - ValueType::I32, - ValueType::I64, - ValueType::I64, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ], - return_type: Some(ValueType::I32), - }], - data_segments: vec![ - DataSegment { offset: 0, value: value_bytes }, - DataSegment { offset: value_len, value: deposits_bytes }, - DataSegment { offset: callee_offset, value: callee_bytes }, - ], - call_body: Some(body::repeated_dyn( - r, - vec![ - Regular(Instruction::I32Const(0)), // flags - Counter(callee_offset, callee_len as u32), // callee_ptr - Regular(Instruction::I64Const(0)), // ref_time weight - Regular(Instruction::I64Const(0)), // proof_size weight - Counter(value_len, deposit_len as u32), // deposit_limit_ptr - Regular(Instruction::I32Const(0)), // value_ptr - Regular(Instruction::I32Const(0)), // input_data_ptr - Regular(Instruction::I32Const(0)), // input_data_len - Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr - Regular(Instruction::I32Const(0)), // output_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - let mut setup = CallSetup::::new(code); - setup.set_storage_deposit_limit(BalanceOf::::from(u32::MAX.into())); - call_builder!(func, setup: setup); + let value_len = value_bytes.len() as u32; + let mut memory = memory!(account_bytes, value_bytes,); - let res; + let result; #[block] { - res = func.call(); + result = BenchEnv::seal0_transfer( + &mut runtime, + &mut memory, + 0, // account_ptr + account_len, + account_len, + value_len, + ); } - assert_eq!(res.did_revert(), false); - Ok(()) - } - // This is a slow call: We reduce the number of runs. 
- #[benchmark(pov_mode = Measured)] - fn seal_delegate_call(r: Linear<0, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> { - let hashes = (0..r) - .map(|i| { - let code = WasmModule::::dummy_with_bytes(i); - let caller = whitelisted_caller(); - T::Currency::set_balance(&caller, caller_funding::()); - Contracts::::store_code_raw(code.code, caller)?; - Ok(code.hash) - }) - .collect::, &'static str>>()?; - let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); - let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); - let hashes_offset = 0; - - let code = WasmModule::::from(ModuleDefinition { - memory: Some(ImportedMemory::max::()), - imported_functions: vec![ImportedFunction { - module: "seal0", - name: "seal_delegate_call", - params: vec![ - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ValueType::I32, - ], - return_type: Some(ValueType::I32), - }], - data_segments: vec![DataSegment { offset: hashes_offset as u32, value: hashes_bytes }], - call_body: Some(body::repeated_dyn( - r, - vec![ - Regular(Instruction::I32Const(0)), // flags - Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr - Regular(Instruction::I32Const(0)), // input_data_ptr - Regular(Instruction::I32Const(0)), // input_data_len - Regular(Instruction::I32Const(u32::max_value() as i32)), // output_ptr - Regular(Instruction::I32Const(0)), // output_len_ptr - Regular(Instruction::Call(0)), - Regular(Instruction::Drop), - ], - )), - ..Default::default() - }); - call_builder!(func, code); - - let res; - #[block] - { - res = func.call(); - } - assert_eq!(res.did_revert(), false); - Ok(()) + assert_ok!(result); } + // t: with or without some value to transfer + // i: size of the input data #[benchmark(pov_mode = Measured)] - fn seal_call_per_transfer_clone_byte( - t: Linear<0, { 1 }>, - c: Linear<0, { code::max_pages::() * 64 * 1024 }>, - ) -> Result<(), BenchmarkError> { - let callee = Contract::with_index(5, >::dummy(), vec![])?; + fn seal_call(t: Linear<0, 1>, i: Linear<0, { code::max_pages::() * 64 * 1024 }>) { + let Contract { account_id: callee, .. 
+	// t: with or without some value to transfer
+	// i: size of the input data
 	#[benchmark(pov_mode = Measured)]
-	fn seal_call_per_transfer_clone_byte(
-		t: Linear<0, { 1 }>,
-		c: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>,
-	) -> Result<(), BenchmarkError> {
-		let callee = Contract::with_index(5, <WasmModule<T>>::dummy(), vec![])?;
+	fn seal_call(t: Linear<0, 1>, i: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
+		let Contract { account_id: callee, .. } =
+			Contract::<T>::with_index(1, WasmModule::dummy(), vec![]).unwrap();
+		let callee_bytes = callee.encode();
+		let callee_len = callee_bytes.len() as u32;
+
+		let value: BalanceOf<T> = t.into();
 		let value_bytes = value.encode();
-		let value_len = value_bytes.len();
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal1",
-				name: "seal_call",
-				params: vec![
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I64,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-				],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![
-				DataSegment { offset: 0, value: value_bytes },
-				DataSegment { offset: value_len as u32, value: callee.account_id.encode() },
-			],
-			call_body: Some(body::plain(vec![
-				Instruction::I32Const(CallFlags::CLONE_INPUT.bits() as i32), // flags
-				Instruction::I32Const(value_len as i32), // callee_ptr
-				Instruction::I64Const(0), // gas
-				Instruction::I32Const(0), // value_ptr
-				Instruction::I32Const(0), // input_data_ptr
-				Instruction::I32Const(0), // input_data_len
-				Instruction::I32Const(SENTINEL as i32), // output_ptr
-				Instruction::I32Const(0), // output_len_ptr
-				Instruction::Call(0),
-				Instruction::Drop,
-				Instruction::End,
-			])),
-			..Default::default()
-		});
-		let mut setup = CallSetup::<T>::new(code);
-		setup.set_data(vec![42; c as usize]);
-		call_builder!(func, setup: setup);
-		let res;
-		#[block]
-		{
-			res = func.call();
+		let deposit: BalanceOf<T> = (u32::MAX - 100).into();
+		let deposit_bytes = deposit.encode();
+		let deposit_len = deposit_bytes.len() as u32;
+
+		let mut setup = CallSetup::<T>::default();
+		setup.set_storage_deposit_limit(deposit);
+		setup.set_data(vec![42; i as usize]);
+		setup.set_origin(Origin::from_account_id(setup.contract().account_id.clone()));
+
+		let (mut ext, _) = setup.ext();
+		let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]);
+		let mut memory = memory!(callee_bytes, deposit_bytes, value_bytes,);
+
+		let result;
+		#[block]
+		{
+			result = BenchEnv::seal2_call(
+				&mut runtime,
+				&mut memory,
+				CallFlags::CLONE_INPUT.bits(), // flags
+				0, // callee_ptr
+				0, // ref_time_limit
+				0, // proof_size_limit
+				callee_len, // deposit_ptr
+				callee_len + deposit_len, // value_ptr
+				0, // input_data_ptr
+				0, // input_data_len
+				SENTINEL, // output_ptr
+				0, // output_len_ptr
+			);
 		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
+
+		assert_ok!(result);
 	}
 
-	// We assume that every instantiate sends at least the minimum balance.
-	// This is a slow call: we reduce the number of runs.
 	#[benchmark(pov_mode = Measured)]
-	fn seal_instantiate(r: Linear<1, { API_BENCHMARK_RUNS / 2 }>) -> Result<(), BenchmarkError> {
-		let hashes = (0..r)
-			.map(|i| {
-				let code = WasmModule::<T>::from(ModuleDefinition {
-					memory: Some(ImportedMemory::max::<T>()),
-					call_body: Some(body::plain(vec![
-						// We need to add this in order to make contracts unique,
-						// so that they can be deployed from the same sender.
-						Instruction::I32Const(i as i32),
-						Instruction::Drop,
-						Instruction::End,
-					])),
-					..Default::default()
-				});
-				let caller = whitelisted_caller();
-				T::Currency::set_balance(&caller, caller_funding::<T>());
-				Contracts::<T>::store_code_raw(code.code, caller)?;
-				Ok(code.hash)
-			})
-			.collect::<Result<Vec<_>, &'static str>>()?;
-		let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0);
-		let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::<Vec<_>>();
-		let hashes_len = &hashes_bytes.len();
-		let value = Pallet::<T>::min_balance();
-		assert!(value > 0u32.into());
-		let value_bytes = value.encode();
-		let value_len = BalanceOf::<T>::max_encoded_len();
-		let addr_len = T::AccountId::max_encoded_len();
-		// Offsets where to place static data in contract memory.
-		let hashes_offset = value_len;
-		let addr_len_offset = hashes_offset + hashes_len;
-		let addr_offset = addr_len_offset + addr_len;
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal2",
-				name: "instantiate",
-				params: vec![
-					ValueType::I32,
-					ValueType::I64,
-					ValueType::I64,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-				],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![
-				DataSegment { offset: 0, value: value_bytes },
-				DataSegment { offset: hashes_offset as u32, value: hashes_bytes },
-				DataSegment {
-					offset: addr_len_offset as u32,
-					value: addr_len.to_le_bytes().into(),
-				},
-			],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(hashes_offset as u32, hash_len as u32), // code_hash_ptr
-					Regular(Instruction::I64Const(0)), // ref_time weight
-					Regular(Instruction::I64Const(0)), // proof_size weight
-					Regular(Instruction::I32Const(SENTINEL as i32)), /* deposit limit ptr: use
-					                                                  * parent's limit */
-					Regular(Instruction::I32Const(0)), // value_ptr
-					Regular(Instruction::I32Const(0)), // input_data_ptr
-					Regular(Instruction::I32Const(0)), // input_data_len
-					Regular(Instruction::I32Const(addr_offset as i32)), // address_ptr
-					Regular(Instruction::I32Const(addr_len_offset as i32)), // address_len_ptr
-					Regular(Instruction::I32Const(SENTINEL as i32)), // output_ptr
-					Regular(Instruction::I32Const(0)), // output_len_ptr
-					Regular(Instruction::I32Const(0)), // salt_ptr
-					Regular(Instruction::I32Const(0)), // salt_len_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		let mut setup = CallSetup::<T>::new(code);
-		setup.set_balance((value + Pallet::<T>::min_balance()) * (r + 1).into());
-		call_builder!(func, instance, setup: setup);
-		let addresses = hashes
-			.iter()
-			.map(|hash| Contracts::<T>::contract_address(&instance.account_id, hash, &[], &[]))
-			.collect::<Vec<_>>();
-
-		for addr in &addresses {
-			if ContractInfoOf::<T>::get(&addr).is_some() {
-				return Err("Expected that contract does not exist at this point.".into());
-			}
-		}
+	fn seal_delegate_call() -> Result<(), BenchmarkError> {
+		let hash = Contract::<T>::with_index(1, WasmModule::dummy(), vec![])?.info()?.code_hash;
+
+		let mut setup = CallSetup::<T>::default();
+		setup.set_origin(Origin::from_account_id(setup.contract().account_id.clone()));
+
+		let (mut ext, _) = setup.ext();
+		let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]);
+		let mut memory = memory!(hash.encode(),);
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
-		}
-		assert_eq!(res.did_revert(), false);
-
-		for addr in &addresses {
-			ContractInfoOf::<T>::get(&addr).ok_or("Contract should have been instantiated")?;
+			result = BenchEnv::seal0_delegate_call(
+				&mut runtime,
+				&mut memory,
+				0, // flags
+				0, // code_hash_ptr
+				0, // input_data_ptr
+				0, // input_data_len
+				SENTINEL, // output_ptr
+				0,
+			);
 		}
+
+		assert_ok!(result);
 		Ok(())
 	}
 
+	// t: value to transfer
+	// i: size of input in bytes
+	// s: size of salt in bytes
 	#[benchmark(pov_mode = Measured)]
-	fn seal_instantiate_per_transfer_input_salt_byte(
+	fn seal_instantiate(
 		t: Linear<0, 1>,
 		i: Linear<0, { (code::max_pages::<T>() - 1) * 64 * 1024 }>,
 		s: Linear<0, { (code::max_pages::<T>() - 1) * 64 * 1024 }>,
 	) -> Result<(), BenchmarkError> {
-		let callee_code = WasmModule::<T>::dummy();
-		let hash_bytes = callee_code.hash.encode();
-		let hash_len = hash_bytes.len();
-		let caller = whitelisted_caller();
-		T::Currency::set_balance(&caller, caller_funding::<T>());
-		Contracts::<T>::store_code_raw(callee_code.code, caller)?;
+		let hash = Contract::<T>::with_index(1, WasmModule::dummy(), vec![])?.info()?.code_hash;
+		let hash_bytes = hash.encode();
+		let hash_len = hash_bytes.len() as u32;
+
+		let value: BalanceOf<T> = t.into();
 		let value_bytes = value.encode();
+		let value_len = value_bytes.len() as u32;
 
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal1",
-				name: "seal_instantiate",
-				params: vec![
-					ValueType::I32,
-					ValueType::I64,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-					ValueType::I32,
-				],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![
-				DataSegment { offset: 0, value: hash_bytes },
-				DataSegment { offset: hash_len as u32, value: value_bytes },
-			],
-			call_body: Some(body::plain(vec![
-				Instruction::I32Const(0 as i32), // code_hash_ptr
-				Instruction::I64Const(0), // gas
-				Instruction::I32Const(hash_len as i32), // value_ptr
-				Instruction::I32Const(0 as i32), // input_data_ptr
-				Instruction::I32Const(i as i32), // input_data_len
-				Instruction::I32Const(SENTINEL as i32), // address_ptr
-				Instruction::I32Const(0), // address_len_ptr
-				Instruction::I32Const(SENTINEL as i32), // output_ptr
-				Instruction::I32Const(0), // output_len_ptr
-				Instruction::I32Const(0 as i32), // salt_ptr
-				Instruction::I32Const(s as i32), // salt_len
-				Instruction::Call(0),
-				Instruction::I32Eqz,
-				Instruction::If(BlockType::NoResult),
-				Instruction::Nop,
-				Instruction::Else,
-				Instruction::Unreachable,
-				Instruction::End,
-				Instruction::End,
-			])),
-			..Default::default()
-		});
-		let mut setup = CallSetup::<T>::new(code);
+		let deposit: BalanceOf<T> = 0u32.into();
+		let deposit_bytes = deposit.encode();
+		let deposit_len = deposit_bytes.len() as u32;
+
+		let mut setup = CallSetup::<T>::default();
+		setup.set_origin(Origin::from_account_id(setup.contract().account_id.clone()));
 		setup.set_balance(value + (Pallet::<T>::min_balance() * 2u32.into()));
-		call_builder!(func, setup: setup);
-		let res;
-		#[block]
-		{
-			res = func.call();
-		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
-	}
+		let account_id = &setup.contract().account_id.clone();
+		let (mut ext, _) = setup.ext();
+		let mut runtime = crate::wasm::Runtime::new(&mut ext, vec![]);
 
-	// Only the overhead of calling the function itself with minimal arguments.
-	#[benchmark(pov_mode = Measured)]
-	fn seal_hash_sha2_256(r: Linear<0, API_BENCHMARK_RUNS>) {
-		call_builder!(func, WasmModule::hasher("seal_hash_sha2_256", r, 0));
+		let input = vec![42u8; i as _];
+		let salt = vec![42u8; s as _];
+		let addr = Contracts::<T>::contract_address(&account_id, &hash, &input, &salt);
+		let mut memory = memory!(hash_bytes, deposit_bytes, value_bytes, input, salt,);
 
-		let res;
-		#[block]
-		{
-			res = func.call();
-		}
-		assert_eq!(res.did_revert(), false);
-	}
-
-	// `n`: Input to hash in bytes
-	#[benchmark(pov_mode = Measured)]
-	fn seal_hash_sha2_256_per_byte(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
-		call_builder!(func, WasmModule::hasher("seal_hash_sha2_256", 1, n));
-
-		let res;
-		#[block]
-		{
-			res = func.call();
+		let mut offset = {
+			let mut current = 0u32;
+			move |after: u32| {
+				current += after;
+				current
+			}
+		};
+
+		assert!(ContractInfoOf::<T>::get(&addr).is_none());
+
+		let result;
+		#[block]
+		{
+			result = BenchEnv::seal2_instantiate(
+				&mut runtime,
+				&mut memory,
+				0, // code_hash_ptr
+				0, // ref_time_limit
+				0, // proof_size_limit
+				offset(hash_len), // deposit_ptr
+				offset(deposit_len), // value_ptr
+				offset(value_len), // input_data_ptr
+				i, // input_data_len
+				SENTINEL, // address_ptr
+				0, // address_len_ptr
+				SENTINEL, // output_ptr
+				0, // output_len_ptr
+				offset(i), // salt_ptr
+				s, // salt_len
+			);
 		}
-		assert_eq!(res.did_revert(), false);
-	}
 
-	// Only the overhead of calling the function itself with minimal arguments.
-	#[benchmark(pov_mode = Measured)]
-	fn seal_hash_keccak_256(r: Linear<0, API_BENCHMARK_RUNS>) {
-		call_builder!(func, WasmModule::hasher("seal_hash_keccak_256", r, 0));
-
-		let res;
-		#[block]
-		{
-			res = func.call();
-		}
-		assert_eq!(res.did_revert(), false);
+		assert_ok!(result);
+		assert!(ContractInfoOf::<T>::get(&addr).is_some());
+		Ok(())
 	}
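Editor's note: the new `seal_instantiate` threads a running cursor through the buffer with a small stateful closure. A self-contained sketch of that pattern (illustrative field lengths; the real code uses the encoded sizes computed above):

    // Each call advances the cursor by the size of the *previous* field and
    // returns the new position, so consecutive calls yield consecutive offsets.
    fn main() {
        let (hash_len, deposit_len, value_len) = (32u32, 16u32, 16u32);
        let mut offset = {
            let mut current = 0u32;
            move |after: u32| {
                current += after;
                current
            }
        };
        assert_eq!(offset(hash_len), 32); // deposit_ptr: right after the code hash
        assert_eq!(offset(deposit_len), 48); // value_ptr: right after the deposit
        assert_eq!(offset(value_len), 64); // input_data_ptr: right after the value
    }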
 
 	// `n`: Input to hash in bytes
 	#[benchmark(pov_mode = Measured)]
-	fn seal_hash_keccak_256_per_byte(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
-		call_builder!(func, WasmModule::hasher("seal_hash_keccak_256", 1, n));
-
-		let res;
-		#[block]
-		{
-			res = func.call();
-		}
-		assert_eq!(res.did_revert(), false);
-	}
-
-	// Only the overhead of calling the function itself with minimal arguments.
-	#[benchmark(pov_mode = Measured)]
-	fn seal_hash_blake2_256(r: Linear<0, API_BENCHMARK_RUNS>) {
-		call_builder!(func, WasmModule::hasher("seal_hash_blake2_256", r, 0));
+	fn seal_hash_sha2_256(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
+		build_runtime!(runtime, memory: [[0u8; 32], vec![0u8; n as usize], ]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_hash_sha2_256(&mut runtime, &mut memory, 32, n, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+		assert_eq!(sp_io::hashing::sha2_256(&memory[32..]), &memory[0..32]);
+		assert_ok!(result);
 	}
 
 	// `n`: Input to hash in bytes
 	#[benchmark(pov_mode = Measured)]
-	fn seal_hash_blake2_256_per_byte(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
-		call_builder!(func, WasmModule::hasher("seal_hash_blake2_256", 1, n));
+	fn seal_hash_keccak_256(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
+		build_runtime!(runtime, memory: [[0u8; 32], vec![0u8; n as usize], ]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_hash_keccak_256(&mut runtime, &mut memory, 32, n, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+		assert_eq!(sp_io::hashing::keccak_256(&memory[32..]), &memory[0..32]);
+		assert_ok!(result);
 	}
 
-	// Only the overhead of calling the function itself with minimal arguments.
+	// `n`: Input to hash in bytes
 	#[benchmark(pov_mode = Measured)]
-	fn seal_hash_blake2_128(r: Linear<0, API_BENCHMARK_RUNS>) {
-		call_builder!(func, WasmModule::hasher("seal_hash_blake2_128", r, 0));
+	fn seal_hash_blake2_256(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
+		build_runtime!(runtime, memory: [[0u8; 32], vec![0u8; n as usize], ]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_hash_blake2_256(&mut runtime, &mut memory, 32, n, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+		assert_eq!(sp_io::hashing::blake2_256(&memory[32..]), &memory[0..32]);
+		assert_ok!(result);
 	}
 
 	// `n`: Input to hash in bytes
 	#[benchmark(pov_mode = Measured)]
-	fn seal_hash_blake2_128_per_byte(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
-		call_builder!(func, WasmModule::hasher("seal_hash_blake2_128", 1, n));
+	fn seal_hash_blake2_128(n: Linear<0, { code::max_pages::<T>() * 64 * 1024 }>) {
+		build_runtime!(runtime, memory: [[0u8; 16], vec![0u8; n as usize], ]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_hash_blake2_128(&mut runtime, &mut memory, 16, n, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+		assert_eq!(sp_io::hashing::blake2_128(&memory[16..]), &memory[0..16]);
+		assert_ok!(result);
 	}
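Editor's note: each hash benchmark above reserves the leading bytes of the sandbox memory for the digest and appends the preimage after it, then re-hashes the input to verify the host function wrote the digest in place. A sketch of that `[digest | preimage]` layout, assuming the `sp-io` crate as a dependency (as the benchmarks themselves use):

    fn main() {
        let n = 4usize;
        let mut memory = vec![0u8; 32 + n]; // [32-byte digest slot | preimage]
        memory[32..].copy_from_slice(b"abcd");

        // output_ptr = 0 and input_ptr = 32, mirroring the benchmark arguments.
        let digest = sp_io::hashing::sha2_256(&memory[32..]);
        memory[..32].copy_from_slice(&digest);

        assert_eq!(sp_io::hashing::sha2_256(&memory[32..]), &memory[..32]);
    }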
 
 	// `n`: Message input length to verify in bytes.
 	// need some buffer so the code size does not exceed the max code size.
 	#[benchmark(pov_mode = Measured)]
-	fn seal_sr25519_verify_per_byte(
-		n: Linear<0, { T::MaxCodeLen::get() - 255 }>,
-	) -> Result<(), BenchmarkError> {
+	fn seal_sr25519_verify(n: Linear<0, { T::MaxCodeLen::get() - 255 }>) {
 		let message = (0..n).zip((32u8..127u8).cycle()).map(|(_, c)| c).collect::<Vec<_>>();
-		let message_len = message.len() as i32;
+		let message_len = message.len() as u32;
 
 		let key_type = sp_core::crypto::KeyTypeId(*b"code");
 		let pub_key = sp_io::crypto::sr25519_generate(key_type, None);
 		let sig =
 			sp_io::crypto::sr25519_sign(key_type, &pub_key, &message).expect("Generates signature");
 		let sig = AsRef::<[u8; 64]>::as_ref(&sig).to_vec();
+		let sig_len = sig.len() as u32;
 
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "sr25519_verify",
-				params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![
-				DataSegment { offset: 0, value: sig },
-				DataSegment { offset: 64, value: pub_key.to_vec() },
-				DataSegment { offset: 96, value: message },
-			],
-			call_body: Some(body::plain(vec![
-				Instruction::I32Const(0), // signature_ptr
-				Instruction::I32Const(64), // pub_key_ptr
-				Instruction::I32Const(message_len), // message_len
-				Instruction::I32Const(96), // message_ptr
-				Instruction::Call(0),
-				Instruction::Drop,
-				Instruction::End,
-			])),
-			..Default::default()
-		});
-
-		call_builder!(func, code);
+		build_runtime!(runtime, memory: [sig, pub_key.to_vec(), message, ]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_sr25519_verify(
+				&mut runtime,
+				&mut memory,
+				0, // signature_ptr
+				sig_len, // pub_key_ptr
+				message_len, // message_len
+				sig_len + pub_key.len() as u32, // message_ptr
+			);
 		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
-	}
-
-	// Only calling the function itself with valid arguments.
-	// It generates different private keys and signatures for the message "Hello world".
-	// This is a slow call: We reduce the number of runs.
-	#[benchmark(pov_mode = Measured)]
-	fn seal_sr25519_verify(
-		r: Linear<0, { API_BENCHMARK_RUNS / 10 }>,
-	) -> Result<(), BenchmarkError> {
-		let message = b"Hello world".to_vec();
-		let message_len = message.len() as i32;
-		let key_type = sp_core::crypto::KeyTypeId(*b"code");
-		let sig_params = (0..r)
-			.flat_map(|_| {
-				let pub_key = sp_io::crypto::sr25519_generate(key_type, None);
-				let sig = sp_io::crypto::sr25519_sign(key_type, &pub_key, &message)
-					.expect("Generates signature");
-				let data: [u8; 96] = [AsRef::<[u8]>::as_ref(&sig), AsRef::<[u8]>::as_ref(&pub_key)]
-					.concat()
-					.try_into()
-					.unwrap();
-				data
-			})
-			.collect::<Vec<_>>();
-		let sig_params_len = sig_params.len() as i32;
-
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "sr25519_verify",
-				params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![
-				DataSegment { offset: 0, value: sig_params },
-				DataSegment { offset: sig_params_len as u32, value: message },
-			],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, 96), // signature_ptr
-					Counter(64, 96), // pub_key_ptr
-					Regular(Instruction::I32Const(message_len)), // message_len
-					Regular(Instruction::I32Const(sig_params_len)), // message_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
-
-		let res;
-		#[block]
-		{
-			res = func.call();
-		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
+		assert_eq!(result.unwrap(), ReturnErrorCode::Success);
 	}
 
-	// Only calling the function itself with valid arguments.
-	// It generates different private keys and signatures for the message "Hello world".
-	// This is a slow call: We reduce the number of runs.
 	#[benchmark(pov_mode = Measured)]
-	fn seal_ecdsa_recover(r: Linear<0, { API_BENCHMARK_RUNS / 10 }>) -> Result<(), BenchmarkError> {
+	fn seal_ecdsa_recover() {
 		let message_hash = sp_io::hashing::blake2_256("Hello world".as_bytes());
 		let key_type = sp_core::crypto::KeyTypeId(*b"code");
-		let signatures = (0..r)
-			.map(|_| {
-				let pub_key = sp_io::crypto::ecdsa_generate(key_type, None);
-				let sig = sp_io::crypto::ecdsa_sign_prehashed(key_type, &pub_key, &message_hash)
-					.expect("Generates signature");
-				AsRef::<[u8; 65]>::as_ref(&sig).to_vec()
-			})
-			.collect::<Vec<_>>();
-		let signatures = signatures.iter().flatten().cloned().collect::<Vec<_>>();
-		let signatures_bytes_len = signatures.len() as i32;
-
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_ecdsa_recover",
-				params: vec![ValueType::I32, ValueType::I32, ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![
-				DataSegment { offset: 0, value: message_hash[..].to_vec() },
-				DataSegment { offset: 32, value: signatures },
-			],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(32, 65), // signature_ptr
-					Regular(Instruction::I32Const(0)), // message_hash_ptr
-					Regular(Instruction::I32Const(signatures_bytes_len + 32)), // output_len_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+		let signature = {
+			let pub_key = sp_io::crypto::ecdsa_generate(key_type, None);
+			let sig = sp_io::crypto::ecdsa_sign_prehashed(key_type, &pub_key, &message_hash)
+				.expect("Generates signature");
+			AsRef::<[u8; 65]>::as_ref(&sig).to_vec()
+		};
 
-		let res;
+		build_runtime!(runtime, memory: [signature, message_hash, [0u8; 33], ]);
+
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_ecdsa_recover(
+				&mut runtime,
+				&mut memory,
+				0, // signature_ptr
+				65, // message_hash_ptr
+				65 + 32, // output_ptr
+			);
 		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
+
+		assert_eq!(result.unwrap(), ReturnErrorCode::Success);
 	}
 
 	// Only calling the function itself for the list of
 	// generated different ECDSA keys.
 	// This is a slow call: We reduce the number of runs.
 	#[benchmark(pov_mode = Measured)]
-	fn seal_ecdsa_to_eth_address(
-		r: Linear<0, { API_BENCHMARK_RUNS / 10 }>,
-	) -> Result<(), BenchmarkError> {
+	fn seal_ecdsa_to_eth_address() {
 		let key_type = sp_core::crypto::KeyTypeId(*b"code");
-		let pub_keys_bytes = (0..r)
-			.flat_map(|_| sp_io::crypto::ecdsa_generate(key_type, None).0)
-			.collect::<Vec<_>>();
-		let pub_keys_bytes_len = pub_keys_bytes.len() as i32;
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_ecdsa_to_eth_address",
-				params: vec![ValueType::I32, ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: pub_keys_bytes }],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, 33), // pub_key_ptr
-					Regular(Instruction::I32Const(pub_keys_bytes_len)), // out_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+		let pub_key_bytes = sp_io::crypto::ecdsa_generate(key_type, None).0;
+		build_runtime!(runtime, memory: [[0u8; 20], pub_key_bytes,]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_ecdsa_to_eth_address(
+				&mut runtime,
+				&mut memory,
+				20, // key_ptr
+				0, // output_ptr
+			);
 		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
+
+		assert_ok!(result);
+		assert_eq!(&memory[..20], runtime.ext().ecdsa_to_eth_address(&pub_key_bytes).unwrap());
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_set_code_hash(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> {
-		let code_hashes = (0..r)
-			.map(|i| {
-				let new_code = WasmModule::<T>::dummy_with_bytes(i);
-				let caller = whitelisted_caller();
-				T::Currency::set_balance(&caller, caller_funding::<T>());
-				Contracts::<T>::store_code_raw(new_code.code, caller)?;
-				Ok(new_code.hash)
-			})
-			.collect::<Result<Vec<_>, &'static str>>()?;
-		let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0);
-		let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::<Vec<_>>();
-
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "seal_set_code_hash",
-				params: vec![ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: code_hashes_bytes }],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, code_hash_len as u32), // code_hash_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+	fn seal_set_code_hash() -> Result<(), BenchmarkError> {
+		let code_hash =
+			Contract::<T>::with_index(1, WasmModule::dummy(), vec![])?.info()?.code_hash;
+
+		build_runtime!(runtime, memory: [ code_hash.encode(),]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_set_code_hash(&mut runtime, &mut memory, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_ok!(result);
 		Ok(())
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn lock_delegate_dependency(
-		r: Linear<0, { T::MaxDelegateDependencies::get() }>,
-	) -> Result<(), BenchmarkError> {
-		let code_hashes = (0..r)
-			.map(|i| {
-				let new_code = WasmModule::<T>::dummy_with_bytes(65 + i);
-				let caller = whitelisted_caller();
-				T::Currency::set_balance(&caller, caller_funding::<T>());
-				Contracts::<T>::store_code_raw(new_code.code, caller)?;
-				Ok(new_code.hash)
-			})
-			.collect::<Result<Vec<_>, &'static str>>()?;
-		let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0);
-		let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::<Vec<_>>();
-
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "lock_delegate_dependency",
-				params: vec![ValueType::I32],
-				return_type: None,
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: code_hashes_bytes }],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, code_hash_len as u32), // code_hash_ptr
-					Regular(Instruction::Call(0)),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+	fn lock_delegate_dependency() -> Result<(), BenchmarkError> {
+		let code_hash = Contract::<T>::with_index(1, WasmModule::dummy_with_bytes(1), vec![])?
+			.info()?
+			.code_hash;
 
-		let res;
+		build_runtime!(runtime, memory: [ code_hash.encode(),]);
+
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_lock_delegate_dependency(&mut runtime, &mut memory, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_ok!(result);
 		Ok(())
 	}
 
 	#[benchmark]
-	fn unlock_delegate_dependency(
-		r: Linear<0, { T::MaxDelegateDependencies::get() }>,
-	) -> Result<(), BenchmarkError> {
-		let code_hashes = (0..r)
-			.map(|i| {
-				let new_code = WasmModule::<T>::dummy_with_bytes(65 + i);
-				let caller = whitelisted_caller();
-				T::Currency::set_balance(&caller, caller_funding::<T>());
-				Contracts::<T>::store_code_raw(new_code.code, caller)?;
-				Ok(new_code.hash)
-			})
-			.collect::<Result<Vec<_>, &'static str>>()?;
+	fn unlock_delegate_dependency() -> Result<(), BenchmarkError> {
+		let code_hash = Contract::<T>::with_index(1, WasmModule::dummy_with_bytes(1), vec![])?
+			.info()?
+			.code_hash;
 
-		let code_hash_len = code_hashes.get(0).map(|x| x.encode().len()).unwrap_or(0);
-		let code_hashes_bytes = code_hashes.iter().flat_map(|x| x.encode()).collect::<Vec<_>>();
-
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![
-				ImportedFunction {
-					module: "seal0",
-					name: "unlock_delegate_dependency",
-					params: vec![ValueType::I32],
-					return_type: None,
-				},
-				ImportedFunction {
-					module: "seal0",
-					name: "lock_delegate_dependency",
-					params: vec![ValueType::I32],
-					return_type: None,
-				},
-			],
-			data_segments: vec![DataSegment { offset: 0, value: code_hashes_bytes }],
-			deploy_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, code_hash_len as u32), // code_hash_ptr
-					Regular(Instruction::Call(1)),
-				],
-			)),
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, code_hash_len as u32), // code_hash_ptr
-					Regular(Instruction::Call(0)),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+		build_runtime!(runtime, memory: [ code_hash.encode(),]);
+		BenchEnv::seal0_lock_delegate_dependency(&mut runtime, &mut memory, 0).unwrap();
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_unlock_delegate_dependency(&mut runtime, &mut memory, 0);
 		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_ok!(result);
 		Ok(())
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_reentrance_count(r: Linear<0, API_BENCHMARK_RUNS>) -> Result<(), BenchmarkError> {
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "reentrance_count",
-				params: vec![],
-				return_type: Some(ValueType::I32),
-			}],
-			call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])),
-			..Default::default()
-		});
-		let instance = Contract::<T>::new(code, vec![])?;
-		let origin = RawOrigin::Signed(instance.caller.clone());
-		#[extrinsic_call]
-		call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]);
-		Ok(())
+	fn seal_reentrance_count() {
+		build_runtime!(runtime, memory: []);
+		let result;
+		#[block]
+		{
+			result = BenchEnv::seal0_reentrance_count(&mut runtime, &mut memory)
+		}
+
+		assert_eq!(result.unwrap(), 0);
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_account_reentrance_count(
-		r: Linear<0, API_BENCHMARK_RUNS>,
-	) -> Result<(), BenchmarkError> {
-		let dummy_code = WasmModule::<T>::dummy_with_bytes(0);
-		let accounts = (0..r)
-			.map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![]))
-			.collect::<Result<Vec<_>, _>>()?;
-		let account_id_len = accounts.get(0).map(|i| i.account_id.encode().len()).unwrap_or(0);
-		let account_id_bytes = accounts.iter().flat_map(|x| x.account_id.encode()).collect();
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "account_reentrance_count",
-				params: vec![ValueType::I32],
-				return_type: Some(ValueType::I32),
-			}],
-			data_segments: vec![DataSegment { offset: 0, value: account_id_bytes }],
-			call_body: Some(body::repeated_dyn(
-				r,
-				vec![
-					Counter(0, account_id_len as u32), // account_ptr
-					Regular(Instruction::Call(0)),
-					Regular(Instruction::Drop),
-				],
-			)),
-			..Default::default()
-		});
-		call_builder!(func, code);
+	fn seal_account_reentrance_count() {
+		let Contract { account_id, .. } =
+			Contract::<T>::with_index(1, WasmModule::dummy(), vec![]).unwrap();
+		build_runtime!(runtime, memory: [account_id.encode(),]);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_account_reentrance_count(&mut runtime, &mut memory, 0);
 		}
-		assert_eq!(res.did_revert(), false);
-		Ok(())
+
+		assert_eq!(result.unwrap(), 0);
 	}
 
 	#[benchmark(pov_mode = Measured)]
-	fn seal_instantiation_nonce(r: Linear<0, API_BENCHMARK_RUNS>) {
-		let code = WasmModule::<T>::from(ModuleDefinition {
-			memory: Some(ImportedMemory::max::<T>()),
-			imported_functions: vec![ImportedFunction {
-				module: "seal0",
-				name: "instantiation_nonce",
-				params: vec![],
-				return_type: Some(ValueType::I64),
-			}],
-			call_body: Some(body::repeated(r, &[Instruction::Call(0), Instruction::Drop])),
-			..Default::default()
-		});
-		call_builder!(func, code);
+	fn seal_instantiation_nonce() {
+		build_runtime!(runtime, memory: []);
 
-		let res;
+		let result;
 		#[block]
 		{
-			res = func.call();
+			result = BenchEnv::seal0_instantiation_nonce(&mut runtime, &mut memory);
 		}
-		assert_eq!(res.did_revert(), false);
+
+		assert_eq!(result.unwrap(), 1);
 	}
 
 	// We load `i64` values from random linear memory locations and store the loaded
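Editor's note on why the `r: Linear<..>` components could be dropped throughout this file: the old scheme recovered a per-call cost as the slope of a linear fit over `r` repeated Wasm-driven calls, while the new scheme measures one direct host call inside `#[block]`. An illustrative, self-contained sketch of the arithmetic (made-up numbers, not benchmark output):

    fn main() {
        // Old: total(r) = fixed_overhead + r * per_call; the per-call cost is the slope.
        let total = |r: u64| 500 + r * 120;
        let slope = (total(100) - total(0)) / 100;
        assert_eq!(slope, 120);

        // New: one direct measurement of the host call yields the same quantity
        // without running the interpreter `r` times.
        let direct = 120;
        assert_eq!(direct, slope);
    }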
diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs
index 31cdadb4bb4373baee420b48cecfdd2b82862c42..992f7aaace312b69d21f40ff754124912eeba290 100644
--- a/substrate/frame/contracts/src/exec.rs
+++ b/substrate/frame/contracts/src/exec.rs
@@ -46,7 +46,7 @@ use sp_core::{
 };
 use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256};
 use sp_runtime::{
-	traits::{Convert, Dispatchable, Hash, Zero},
+	traits::{Convert, Dispatchable, Zero},
 	DispatchError,
 };
 use sp_std::{fmt::Debug, marker::PhantomData, mem, prelude::*, vec::Vec};
@@ -303,7 +303,7 @@ pub trait Ext: sealing::Sealed {
 	fn ecdsa_to_eth_address(&self, pk: &[u8; 33]) -> Result<[u8; 20], ()>;
 
 	/// Tests sometimes need to modify and inspect the contract info directly.
-	#[cfg(test)]
+	#[cfg(any(test, feature = "runtime-benchmarks"))]
 	fn contract_info(&mut self) -> &mut ContractInfo<Self::T>;
 
 	/// Sets new code hash for existing contract.
@@ -365,6 +365,11 @@ pub trait Ext: sealing::Sealed {
 		&mut self,
 		code_hash: &CodeHash<Self::T>,
 	) -> Result<(), DispatchError>;
+
+	/// Returns the number of locked delegate dependencies.
+	///
+	/// Note: Requires &mut self to access the contract info.
+	fn locked_delegate_dependencies_count(&mut self) -> usize;
 }
 
 /// Describes the different functions that can be exported by an [`Executable`].
@@ -983,16 +988,16 @@ where
 				let caller = self.caller().account_id()?.clone();
 
 				// Deposit an instantiation event.
-				Contracts::<T>::deposit_event(
-					vec![T::Hashing::hash_of(&caller), T::Hashing::hash_of(account_id)],
-					Event::Instantiated { deployer: caller, contract: account_id.clone() },
-				);
+				Contracts::<T>::deposit_event(Event::Instantiated {
+					deployer: caller,
+					contract: account_id.clone(),
+				});
 			},
 			(ExportedFunction::Call, Some(code_hash)) => {
-				Contracts::<T>::deposit_event(
-					vec![T::Hashing::hash_of(account_id), T::Hashing::hash_of(&code_hash)],
-					Event::DelegateCalled { contract: account_id.clone(), code_hash },
-				);
+				Contracts::<T>::deposit_event(Event::DelegateCalled {
+					contract: account_id.clone(),
+					code_hash,
+				});
 			},
 			(ExportedFunction::Call, None) => {
 				// If a special limit was set for the sub-call, we enforce it here.
@@ -1002,10 +1007,10 @@ where
 				frame.nested_storage.enforce_subcall_limit(contract)?;
 
 				let caller = self.caller();
-				Contracts::<T>::deposit_event(
-					vec![T::Hashing::hash_of(&caller), T::Hashing::hash_of(&account_id)],
-					Event::Called { caller: caller.clone(), contract: account_id.clone() },
-				);
+				Contracts::<T>::deposit_event(Event::Called {
+					caller: caller.clone(),
+					contract: account_id.clone(),
+				});
 			},
 		}
 
@@ -1324,13 +1329,10 @@ where
 				.charge_deposit(frame.account_id.clone(), StorageDeposit::Refund(*deposit));
 		}
 
-		Contracts::<T>::deposit_event(
-			vec![T::Hashing::hash_of(&frame.account_id), T::Hashing::hash_of(&beneficiary)],
-			Event::Terminated {
-				contract: frame.account_id.clone(),
-				beneficiary: beneficiary.clone(),
-			},
-		);
+		Contracts::<T>::deposit_event(Event::Terminated {
+			contract: frame.account_id.clone(),
+			beneficiary: beneficiary.clone(),
+		});
 		Ok(())
 	}
 
@@ -1422,7 +1424,7 @@ where
 	}
 
 	fn deposit_event(&mut self, topics: Vec<TopicOf<Self::T>>, data: Vec<u8>) {
-		Contracts::<Self::T>::deposit_event(
+		Contracts::<Self::T>::deposit_indexed_event(
 			topics,
 			Event::ContractEmitted { contract: self.top_frame().account_id.clone(), data },
 		);
@@ -1500,7 +1502,7 @@ where
 		ECDSAPublic::from(*pk).to_eth_address()
 	}
 
-	#[cfg(test)]
+	#[cfg(any(test, feature = "runtime-benchmarks"))]
 	fn contract_info(&mut self) -> &mut ContractInfo<Self::T> {
 		self.top_frame_mut().contract_info()
 	}
@@ -1527,14 +1529,11 @@ where
 		Self::increment_refcount(hash)?;
 		Self::decrement_refcount(prev_hash);
-		Contracts::<Self::T>::deposit_event(
-			vec![T::Hashing::hash_of(&frame.account_id), hash, prev_hash],
-			Event::ContractCodeUpdated {
-				contract: frame.account_id.clone(),
-				new_code_hash: hash,
-				old_code_hash: prev_hash,
-			},
-		);
+		Contracts::<Self::T>::deposit_event(Event::ContractCodeUpdated {
+			contract: frame.account_id.clone(),
+			new_code_hash: hash,
+			old_code_hash: prev_hash,
+		});
 		Ok(())
 	}
 
@@ -1611,6 +1610,10 @@ where
 			.charge_deposit(frame.account_id.clone(), StorageDeposit::Refund(deposit));
 		Ok(())
 	}
+
+	fn locked_delegate_dependencies_count(&mut self) -> usize {
+		self.top_frame_mut().contract_info().delegate_dependencies_count()
+	}
 }
 
 mod sealing {
@@ -1639,7 +1642,7 @@ mod tests {
 		exec::ExportedFunction::*,
 		gas::GasMeter,
 		tests::{
-			test_utils::{get_balance, hash, place_contract, set_balance},
+			test_utils::{get_balance, place_contract, set_balance},
 			ExtBuilder, RuntimeCall, RuntimeEvent as MetaEvent, Test, TestFilter, ALICE, BOB,
 			CHARLIE, GAS_LIMIT,
 		},
@@ -3164,7 +3167,7 @@ mod tests {
 					caller: Origin::from_account_id(ALICE),
 					contract: BOB,
 				}),
-				topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&BOB)],
+				topics: vec![],
 			},
 		]
 	);
@@ -3264,7 +3267,7 @@ mod tests {
 					caller: Origin::from_account_id(ALICE),
 					contract: BOB,
 				}),
-				topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&BOB)],
+				topics: vec![],
 			},
 		]
 	);
diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs
index 3e87eb9f37ea7fc023a76d8ac7dc048f7bed9f7d..6fab1a44ecb9c499f9d627810b75523041fdd096 100644
--- a/substrate/frame/contracts/src/lib.rs
+++ b/substrate/frame/contracts/src/lib.rs
@@ -135,7 +135,7 @@ use frame_system::{
 use scale_info::TypeInfo;
 use smallvec::Array;
 use sp_runtime::{
-	traits::{Convert, Dispatchable, Hash, Saturating, StaticLookup, Zero},
+	traits::{Convert, Dispatchable, Saturating, StaticLookup, Zero},
 	DispatchError, RuntimeDebug,
 };
 use sp_std::{fmt::Debug, prelude::*};
@@ -146,7 +146,7 @@ pub use crate::{
 	exec::Frame,
 	migration::{MigrateSequence, Migration, NoopMigration},
 	pallet::*,
-	schedule::{HostFnWeights, InstructionWeights, Limits, Schedule},
+	schedule::{InstructionWeights, Limits, Schedule},
 	wasm::Determinism,
 };
 pub use weights::WeightInfo;
@@ -833,14 +833,11 @@ pub mod pallet {
 				};
 				<WasmBlob<T>>::increment_refcount(code_hash)?;
 				<WasmBlob<T>>::decrement_refcount(contract.code_hash);
-				Self::deposit_event(
-					vec![T::Hashing::hash_of(&dest), code_hash, contract.code_hash],
-					Event::ContractCodeUpdated {
-						contract: dest.clone(),
-						new_code_hash: code_hash,
-						old_code_hash: contract.code_hash,
-					},
-				);
+				Self::deposit_event(Event::ContractCodeUpdated {
+					contract: dest.clone(),
+					new_code_hash: code_hash,
+					old_code_hash: contract.code_hash,
+				});
 				contract.code_hash = code_hash;
 				Ok(())
 			})
@@ -1827,8 +1824,13 @@ impl<T: Config> Pallet<T> {
 		Ok(())
 	}
 
-	/// Deposit a pallet contracts event. Handles the conversion to the overarching event type.
-	fn deposit_event(topics: Vec<T::Hash>, event: Event<T>) {
+	/// Deposit a pallet contracts event.
+	fn deposit_event(event: Event<T>) {
+		<frame_system::Pallet<T>>::deposit_event(<T as Config>::RuntimeEvent::from(event))
+	}
+
+	/// Deposit a pallet contracts indexed event.
+	fn deposit_indexed_event(topics: Vec<T::Hash>, event: Event<T>) {
 		<frame_system::Pallet<T>>::deposit_event_indexed(
 			&topics,
 			<T as Config>::RuntimeEvent::from(event).into(),
diff --git a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs
index 06a7c2005aa5e46e814d6eff192775ca63881fae..a1fbdea4228bfe3238c6458db4f5d5425563fc56 100644
--- a/substrate/frame/contracts/src/schedule.rs
+++ b/substrate/frame/contracts/src/schedule.rs
@@ -22,7 +22,7 @@
 use crate::{weights::WeightInfo, Config};
 use codec::{Decode, Encode};
 use core::marker::PhantomData;
-use frame_support::{weights::Weight, DefaultNoBound};
+use frame_support::DefaultNoBound;
 use scale_info::TypeInfo;
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
@@ -60,9 +60,6 @@ pub struct Schedule<T: Config> {
 
 	/// The weights for individual wasm instructions.
 	pub instruction_weights: InstructionWeights<T>,
-
-	/// The weights for each imported function a contract is allowed to call.
-	pub host_fn_weights: HostFnWeights<T>,
 }
 
 /// Describes the upper limits on various metrics.
@@ -109,230 +106,6 @@ pub struct InstructionWeights<T: Config> {
 	pub _phantom: PhantomData<T>,
 }
 
-/// Describes the weight for each imported function that a contract is allowed to call.
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[cfg_attr(feature = "runtime-benchmarks", derive(pallet_contracts_proc_macro::WeightDebug))]
-#[derive(Clone, Encode, Decode, PartialEq, Eq, TypeInfo)]
-#[scale_info(skip_type_params(T))]
-pub struct HostFnWeights<T: Config> {
-	/// Weight of calling `seal_caller`.
-	pub caller: Weight,
-
-	/// Weight of calling `seal_is_contract`.
-	pub is_contract: Weight,
-
-	/// Weight of calling `seal_code_hash`.
-	pub code_hash: Weight,
-
-	/// Weight of calling `seal_own_code_hash`.
-	pub own_code_hash: Weight,
-
-	/// Weight of calling `seal_caller_is_origin`.
-	pub caller_is_origin: Weight,
-
-	/// Weight of calling `seal_caller_is_root`.
-	pub caller_is_root: Weight,
-
-	/// Weight of calling `seal_address`.
-	pub address: Weight,
-
-	/// Weight of calling `seal_gas_left`.
-	pub gas_left: Weight,
-
-	/// Weight of calling `seal_balance`.
-	pub balance: Weight,
-
-	/// Weight of calling `seal_value_transferred`.
-	pub value_transferred: Weight,
-
-	/// Weight of calling `seal_minimum_balance`.
-	pub minimum_balance: Weight,
-
-	/// Weight of calling `seal_block_number`.
-	pub block_number: Weight,
-
-	/// Weight of calling `seal_now`.
-	pub now: Weight,
-
-	/// Weight of calling `seal_weight_to_fee`.
-	pub weight_to_fee: Weight,
-
-	/// Weight of calling `seal_input`.
-	pub input: Weight,
-
-	/// Weight per input byte copied to contract memory by `seal_input`.
-	pub input_per_byte: Weight,
-
-	/// Weight of calling `seal_return`.
-	pub r#return: Weight,
-
-	/// Weight per byte returned through `seal_return`.
-	pub return_per_byte: Weight,
-
-	/// Weight of calling `seal_terminate`.
-	pub terminate: Weight,
-
-	/// Weight of calling `seal_random`.
-	pub random: Weight,
-
-	/// Weight of calling `seal_reposit_event`.
-	pub deposit_event: Weight,
-
-	/// Weight per topic supplied to `seal_deposit_event`.
-	pub deposit_event_per_topic: Weight,
-
-	/// Weight per byte of an event deposited through `seal_deposit_event`.
-	pub deposit_event_per_byte: Weight,
-
-	/// Weight of calling `seal_debug_message`.
-	pub debug_message: Weight,
-
-	/// Weight of calling `seal_debug_message` per byte of the message.
-	pub debug_message_per_byte: Weight,
-
-	/// Weight of calling `seal_set_storage`.
-	pub set_storage: Weight,
-
-	/// Weight per written byte of an item stored with `seal_set_storage`.
-	pub set_storage_per_new_byte: Weight,
-
-	/// Weight per overwritten byte of an item stored with `seal_set_storage`.
-	pub set_storage_per_old_byte: Weight,
-
-	/// Weight of calling `seal_set_code_hash`.
-	pub set_code_hash: Weight,
-
-	/// Weight of calling `seal_clear_storage`.
-	pub clear_storage: Weight,
-
-	/// Weight of calling `seal_clear_storage` per byte of the stored item.
-	pub clear_storage_per_byte: Weight,
-
-	/// Weight of calling `seal_contains_storage`.
-	pub contains_storage: Weight,
-
-	/// Weight of calling `seal_contains_storage` per byte of the stored item.
-	pub contains_storage_per_byte: Weight,
-
-	/// Weight of calling `seal_get_storage`.
-	pub get_storage: Weight,
-
-	/// Weight per byte of an item received via `seal_get_storage`.
-	pub get_storage_per_byte: Weight,
-
-	/// Weight of calling `seal_take_storage`.
-	pub take_storage: Weight,
-
-	/// Weight per byte of an item received via `seal_take_storage`.
-	pub take_storage_per_byte: Weight,
-
-	/// Weight of calling `seal_transfer`.
-	pub transfer: Weight,
-
-	/// Weight of calling `seal_call`.
-	pub call: Weight,
-
-	/// Weight of calling `seal_delegate_call`.
-	pub delegate_call: Weight,
-
-	/// Weight surcharge that is claimed if `seal_call` does a balance transfer.
-	pub call_transfer_surcharge: Weight,
-
-	/// Weight per byte that is cloned by supplying the `CLONE_INPUT` flag.
-	pub call_per_cloned_byte: Weight,
-
-	/// Weight of calling `seal_instantiate`.
-	pub instantiate: Weight,
-
-	/// Weight surcharge that is claimed if `seal_instantiate` does a balance transfer.
-	pub instantiate_transfer_surcharge: Weight,
-
-	/// Weight per input byte supplied to `seal_instantiate`.
-	pub instantiate_per_input_byte: Weight,
-
-	/// Weight per salt byte supplied to `seal_instantiate`.
-	pub instantiate_per_salt_byte: Weight,
-
-	/// Weight of calling `seal_hash_sha_256`.
-	pub hash_sha2_256: Weight,
-
-	/// Weight per byte hashed by `seal_hash_sha_256`.
-	pub hash_sha2_256_per_byte: Weight,
-
-	/// Weight of calling `seal_hash_keccak_256`.
-	pub hash_keccak_256: Weight,
-
-	/// Weight per byte hashed by `seal_hash_keccak_256`.
-	pub hash_keccak_256_per_byte: Weight,
-
-	/// Weight of calling `seal_hash_blake2_256`.
-	pub hash_blake2_256: Weight,
-
-	/// Weight per byte hashed by `seal_hash_blake2_256`.
-	pub hash_blake2_256_per_byte: Weight,
-
-	/// Weight of calling `seal_hash_blake2_128`.
-	pub hash_blake2_128: Weight,
-
-	/// Weight per byte hashed by `seal_hash_blake2_128`.
-	pub hash_blake2_128_per_byte: Weight,
-
-	/// Weight of calling `seal_ecdsa_recover`.
-	pub ecdsa_recover: Weight,
-
-	/// Weight of calling `seal_ecdsa_to_eth_address`.
-	pub ecdsa_to_eth_address: Weight,
-
-	/// Weight of calling `sr25519_verify`.
-	pub sr25519_verify: Weight,
-
-	/// Weight per byte of calling `sr25519_verify`.
-	pub sr25519_verify_per_byte: Weight,
-
-	/// Weight of calling `reentrance_count`.
-	pub reentrance_count: Weight,
-
-	/// Weight of calling `account_reentrance_count`.
-	pub account_reentrance_count: Weight,
-
-	/// Weight of calling `instantiation_nonce`.
-	pub instantiation_nonce: Weight,
-
-	/// Weight of calling `lock_delegate_dependency`.
-	pub lock_delegate_dependency: Weight,
-
-	/// Weight of calling `unlock_delegate_dependency`.
-	pub unlock_delegate_dependency: Weight,
-
-	/// The type parameter is used in the default implementation.
-	#[codec(skip)]
-	pub _phantom: PhantomData<T>,
-}
-
-macro_rules! replace_token {
-	($_in:tt $replacement:tt) => {
-		$replacement
-	};
-}
-
-macro_rules! call_zero {
-	($name:ident, $( $arg:expr ),*) => {
-		T::WeightInfo::$name($( replace_token!($arg 0) ),*)
-	};
-}
-
-macro_rules! cost_args {
-	($name:ident, $( $arg: expr ),+) => {
-		(T::WeightInfo::$name($( $arg ),+).saturating_sub(call_zero!($name, $( $arg ),+)))
-	}
-}
-
-macro_rules! cost {
-	($name:ident) => {
-		cost_args!($name, 1)
-	};
-}
-
 impl Default for Limits {
 	fn default() -> Self {
 		Self {
@@ -350,94 +123,10 @@ impl<T: Config> Default for InstructionWeights<T> {
 	/// computed gas costs by 6 to have a rough estimate as to how expensive each
 	/// single executed instruction is going to be.
 	fn default() -> Self {
-		let instr_cost = cost!(instr_i64_load_store).ref_time() as u32;
+		let instr_cost = T::WeightInfo::instr_i64_load_store(1)
+			.saturating_sub(T::WeightInfo::instr_i64_load_store(0))
+			.ref_time() as u32;
 		let base = instr_cost / 6;
 		Self { base, _phantom: PhantomData }
 	}
 }
-
-impl<T: Config> Default for HostFnWeights<T> {
-	fn default() -> Self {
-		Self {
-			caller: cost!(seal_caller),
-			is_contract: cost!(seal_is_contract),
-			code_hash: cost!(seal_code_hash),
-			own_code_hash: cost!(seal_own_code_hash),
-			caller_is_origin: cost!(seal_caller_is_origin),
-			caller_is_root: cost!(seal_caller_is_root),
-			address: cost!(seal_address),
-			gas_left: cost!(seal_gas_left),
-			balance: cost!(seal_balance),
-			value_transferred: cost!(seal_value_transferred),
-			minimum_balance: cost!(seal_minimum_balance),
-			block_number: cost!(seal_block_number),
-			now: cost!(seal_now),
-			weight_to_fee: cost!(seal_weight_to_fee),
-			input: cost!(seal_input),
-			input_per_byte: cost!(seal_input_per_byte),
-			r#return: cost!(seal_return),
-			return_per_byte: cost!(seal_return_per_byte),
-			terminate: cost!(seal_terminate),
-			random: cost!(seal_random),
-			deposit_event: cost!(seal_deposit_event),
-			deposit_event_per_topic: cost_args!(seal_deposit_event_per_topic_and_byte, 1, 0),
-			deposit_event_per_byte: cost_args!(seal_deposit_event_per_topic_and_byte, 0, 1),
-			debug_message: cost!(seal_debug_message),
-			debug_message_per_byte: cost!(seal_debug_message_per_byte),
-			set_storage: cost!(seal_set_storage),
-			set_code_hash: cost!(seal_set_code_hash),
-			set_storage_per_new_byte: cost!(seal_set_storage_per_new_byte),
-			set_storage_per_old_byte: cost!(seal_set_storage_per_old_byte),
-			clear_storage: cost!(seal_clear_storage),
-			clear_storage_per_byte: cost!(seal_clear_storage_per_byte),
-			contains_storage: cost!(seal_contains_storage),
-			contains_storage_per_byte: cost!(seal_contains_storage_per_byte),
-			get_storage: cost!(seal_get_storage),
-			get_storage_per_byte: cost!(seal_get_storage_per_byte),
-			take_storage: cost!(seal_take_storage),
-			take_storage_per_byte: cost!(seal_take_storage_per_byte),
-			transfer: cost!(seal_transfer),
-			call: cost!(seal_call),
-			delegate_call: cost!(seal_delegate_call),
-			call_transfer_surcharge: cost_args!(seal_call_per_transfer_clone_byte, 1, 0),
-			call_per_cloned_byte: cost_args!(seal_call_per_transfer_clone_byte, 0, 1),
-			instantiate: cost!(seal_instantiate),
-			instantiate_transfer_surcharge: cost_args!(
-				seal_instantiate_per_transfer_input_salt_byte,
-				1,
-				0,
-				0
-			),
-			instantiate_per_input_byte: cost_args!(
-				seal_instantiate_per_transfer_input_salt_byte,
-				0,
-				1,
-				0
-			),
-			instantiate_per_salt_byte: cost_args!(
-				seal_instantiate_per_transfer_input_salt_byte,
-				0,
-				0,
-				1
-			),
-			hash_sha2_256: cost!(seal_hash_sha2_256),
-			hash_sha2_256_per_byte: cost!(seal_hash_sha2_256_per_byte),
-			hash_keccak_256: cost!(seal_hash_keccak_256),
-			hash_keccak_256_per_byte: cost!(seal_hash_keccak_256_per_byte),
-			hash_blake2_256: cost!(seal_hash_blake2_256),
-			hash_blake2_256_per_byte: cost!(seal_hash_blake2_256_per_byte),
-			hash_blake2_128: cost!(seal_hash_blake2_128),
-			hash_blake2_128_per_byte: cost!(seal_hash_blake2_128_per_byte),
-			ecdsa_recover: cost!(seal_ecdsa_recover),
-			sr25519_verify: cost!(seal_sr25519_verify),
-			sr25519_verify_per_byte: cost!(seal_sr25519_verify_per_byte),
-			ecdsa_to_eth_address: cost!(seal_ecdsa_to_eth_address),
-			reentrance_count: cost!(seal_reentrance_count),
-			account_reentrance_count: cost!(seal_account_reentrance_count),
-			instantiation_nonce: cost!(seal_instantiation_nonce),
-			lock_delegate_dependency: cost!(lock_delegate_dependency),
-			unlock_delegate_dependency: cost!(unlock_delegate_dependency),
-			_phantom: PhantomData,
-		}
-	}
-}
diff --git a/substrate/frame/contracts/src/storage/meter.rs b/substrate/frame/contracts/src/storage/meter.rs
index 5db9a772ad82155348383153de04ef31bdcfe4cf..7c55ce5d3f0c4acda7af2d35741a7b155933b11f 100644
--- a/substrate/frame/contracts/src/storage/meter.rs
+++ b/substrate/frame/contracts/src/storage/meter.rs
@@ -34,10 +34,10 @@ use frame_support::{
 	DefaultNoBound, RuntimeDebugNoBound,
 };
 use sp_runtime::{
-	traits::{Hash as HashT, Saturating, Zero},
+	traits::{Saturating, Zero},
 	DispatchError, FixedPointNumber, FixedU128,
 };
-use sp_std::{fmt::Debug, marker::PhantomData, vec, vec::Vec};
+use sp_std::{fmt::Debug, marker::PhantomData, vec::Vec};
 
 /// Deposit that uses the native fungible's balance type.
 pub type DepositOf<T> = Deposit<BalanceOf<T>>;
@@ -551,14 +551,11 @@ impl<T: Config> Ext<T> for ReservingExt {
 					Fortitude::Polite,
 				)?;
 
-				Pallet::<T>::deposit_event(
-					vec![T::Hashing::hash_of(&origin), T::Hashing::hash_of(&contract)],
-					Event::StorageDepositTransferredAndHeld {
-						from: origin.clone(),
-						to: contract.clone(),
-						amount: *amount,
-					},
-				);
+				Pallet::<T>::deposit_event(Event::StorageDepositTransferredAndHeld {
+					from: origin.clone(),
+					to: contract.clone(),
+					amount: *amount,
+				});
 			},
 			Deposit::Refund(amount) => {
 				let transferred = T::Currency::transfer_on_hold(
@@ -571,14 +568,11 @@ impl<T: Config> Ext<T> for ReservingExt {
 					Fortitude::Polite,
 				)?;
 
-				Pallet::<T>::deposit_event(
-					vec![T::Hashing::hash_of(&contract), T::Hashing::hash_of(&origin)],
-					Event::StorageDepositTransferredAndReleased {
-						from: contract.clone(),
-						to: origin.clone(),
-						amount: transferred,
-					},
-				);
+				Pallet::<T>::deposit_event(Event::StorageDepositTransferredAndReleased {
+					from: contract.clone(),
+					to: origin.clone(),
+					amount: transferred,
+				});
 
 				if transferred < *amount {
 					// This should never happen, if it does it means that there is a bug in the
diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs
index 8fe845fcf0f83e8658e7638ea2536bee2794d73f..899b0144b072e56265f690881f8377678b102bc4 100644
--- a/substrate/frame/contracts/src/tests.rs
+++ b/substrate/frame/contracts/src/tests.rs
@@ -20,13 +20,13 @@
 mod test_debug;
 
 use self::{
 	test_debug::TestDebug,
-	test_utils::{ensure_stored, expected_deposit, hash},
+	test_utils::{ensure_stored, expected_deposit},
 };
 use crate::{
 	self as pallet_contracts,
 	chain_extension::{
 		ChainExtension, Environment, Ext, InitState, RegisteredChainExtension,
-		Result as ExtensionResult, RetVal, ReturnFlags, SysConfig,
+		Result as ExtensionResult, RetVal, ReturnFlags,
 	},
 	exec::{Frame, Key},
 	migration::codegen::LATEST_MIGRATION_VERSION,
@@ -63,7 +63,7 @@ use sp_io::hashing::blake2_256;
 use sp_keystore::{testing::MemoryKeystore, KeystoreExt};
 use sp_runtime::{
 	testing::H256,
-	traits::{BlakeTwo256, Convert, Hash, IdentityLookup},
+	traits::{BlakeTwo256, Convert, IdentityLookup},
 	AccountId32, BuildStorage, DispatchError, Perbill, TokenError,
 };
 
@@ -97,7 +97,7 @@ macro_rules! assert_refcount {
 }
 
 pub mod test_utils {
-	use super::{Contracts, DepositPerByte, DepositPerItem, Hash, SysConfig, Test};
+	use super::{Contracts, DepositPerByte, DepositPerItem, Test};
 	use crate::{
 		exec::AccountIdOf, BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo,
 		ContractInfoOf, Nonce, PristineCode,
@@ -145,9 +145,6 @@ pub mod test_utils {
 			.saturating_mul(info_size)
 			.saturating_add(DepositPerItem::get())
 	}
-	pub fn hash<S: Encode>(s: &S) -> <<Test as SysConfig>::Hashing as Hash>::Output {
-		<<Test as SysConfig>::Hashing as Hash>::hash_of(s)
-	}
 	pub fn expected_deposit(code_len: usize) -> u64 {
 		// For code_info, the deposit for max_encoded_len is taken.
 		let code_info_len = CodeInfo::<Test>::max_encoded_len() as u64;
@@ -768,7 +765,7 @@ fn instantiate_and_call_and_deposit_event() {
 					deployer: ALICE,
 					contract: addr.clone()
 				}),
-				topics: vec![hash(&ALICE), hash(&addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -779,7 +776,7 @@ fn instantiate_and_call_and_deposit_event() {
 						amount: test_utils::contract_info_storage_deposit(&addr),
 					}
 				),
-				topics: vec![hash(&ALICE), hash(&addr)],
+				topics: vec![],
 			},
 		]
 	);
@@ -874,8 +871,7 @@ fn gas_syncs_work() {
 		let result = builder::bare_call(addr.clone()).data(1u32.encode()).build();
 		assert_ok!(result.result);
 		let gas_consumed_once = result.gas_consumed.ref_time();
-		let host_consumed_once =
-			<Test as Config>::Schedule::get().host_fn_weights.caller_is_origin.ref_time();
+		let host_consumed_once = <Test as Config>::WeightInfo::seal_caller_is_origin().ref_time();
 		let engine_consumed_once = gas_consumed_once - host_consumed_once - engine_consumed_noop;
 
 		let result = builder::bare_call(addr).data(2u32.encode()).build();
@@ -1039,7 +1035,7 @@ fn deploy_and_call_other_contract() {
 					deployer: caller_addr.clone(),
 					contract: callee_addr.clone(),
 				}),
-				topics: vec![hash(&caller_addr), hash(&callee_addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -1056,10 +1052,7 @@ fn deploy_and_call_other_contract() {
 					caller: Origin::from_account_id(caller_addr.clone()),
 					contract: callee_addr.clone(),
 				}),
-				topics: vec![
-					hash(&Origin::<Test>::from_account_id(caller_addr.clone())),
-					hash(&callee_addr)
-				],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -1067,7 +1060,7 @@ fn deploy_and_call_other_contract() {
 					caller: Origin::from_account_id(ALICE),
 					contract: caller_addr.clone(),
 				}),
-				topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&caller_addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -1078,7 +1071,7 @@ fn deploy_and_call_other_contract() {
 						amount: test_utils::contract_info_storage_deposit(&callee_addr),
 					}
 				),
-				topics: vec![hash(&ALICE), hash(&callee_addr)],
+				topics: vec![],
 			},
 		]
 	);
@@ -1304,7 +1297,7 @@ fn self_destruct_works() {
 					contract: addr.clone(),
 					beneficiary: DJANGO
 				}),
-				topics: vec![hash(&addr), hash(&DJANGO)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -1312,7 +1305,7 @@ fn self_destruct_works() {
 					caller: Origin::from_account_id(ALICE),
 					contract: addr.clone(),
 				}),
-				topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -1323,7 +1316,7 @@ fn self_destruct_works() {
 						amount: info_deposit,
 					}
 				),
-				topics: vec![hash(&addr), hash(&ALICE)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2511,7 +2504,7 @@ fn upload_code_works() {
 					deposit_held: deposit_expected,
 					uploader: ALICE
 				}),
-				topics: vec![code_hash],
+				topics: vec![],
 			},]
 		);
 	});
@@ -2599,7 +2592,7 @@ fn remove_code_works() {
 					deposit_held: deposit_expected,
 					uploader: ALICE
 				}),
-				topics: vec![code_hash],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2608,7 +2601,7 @@ fn remove_code_works() {
 					deposit_released: deposit_expected,
 					remover: ALICE
 				}),
-				topics: vec![code_hash],
+				topics: vec![],
 			},
 		]
 	);
@@ -2648,7 +2641,7 @@ fn remove_code_wrong_origin() {
 					deposit_held: deposit_expected,
 					uploader: ALICE
 				}),
-				topics: vec![code_hash],
+				topics: vec![],
 			},]
 		);
 	});
@@ -2727,7 +2720,7 @@ fn instantiate_with_zero_balance_works() {
 					deposit_held: deposit_expected,
 					uploader: ALICE
 				}),
-				topics: vec![code_hash],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2759,7 +2752,7 @@ fn instantiate_with_zero_balance_works() {
 					deployer: ALICE,
 					contract: addr.clone(),
 				}),
-				topics: vec![hash(&ALICE), hash(&addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2770,7 +2763,7 @@ fn instantiate_with_zero_balance_works() {
 						amount: test_utils::contract_info_storage_deposit(&addr),
 					}
 				),
-				topics: vec![hash(&ALICE), hash(&addr)],
+				topics: vec![],
 			},
 		]
 	);
@@ -2812,7 +2805,7 @@ fn instantiate_with_below_existential_deposit_works() {
 					deposit_held: deposit_expected,
 					uploader: ALICE
 				}),
-				topics: vec![code_hash],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2853,7 +2846,7 @@ fn instantiate_with_below_existential_deposit_works() {
 					deployer: ALICE,
 					contract: addr.clone(),
 				}),
-				topics: vec![hash(&ALICE), hash(&addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2864,7 +2857,7 @@ fn instantiate_with_below_existential_deposit_works() {
 						amount: test_utils::contract_info_storage_deposit(&addr),
 					}
 				),
-				topics: vec![hash(&ALICE), hash(&addr)],
+				topics: vec![],
 			},
 		]
 	);
@@ -2925,7 +2918,7 @@ fn storage_deposit_works() {
 					caller: Origin::from_account_id(ALICE),
 					contract: addr.clone(),
 				}),
-				topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2936,7 +2929,7 @@ fn storage_deposit_works() {
 						amount: charged0,
 					}
 				),
-				topics: vec![hash(&ALICE), hash(&addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2944,7 +2937,7 @@ fn storage_deposit_works() {
 					caller: Origin::from_account_id(ALICE),
 					contract: addr.clone(),
 				}),
-				topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2955,7 +2948,7 @@ fn storage_deposit_works() {
 						amount: charged1,
 					}
 				),
-				topics: vec![hash(&ALICE), hash(&addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2963,7 +2956,7 @@ fn storage_deposit_works() {
 					caller: Origin::from_account_id(ALICE),
 					contract: addr.clone(),
 				}),
-				topics: vec![hash(&Origin::<Test>::from_account_id(ALICE)), hash(&addr)],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -2974,7 +2967,7 @@ fn storage_deposit_works() {
 						amount: refunded0,
 					}
 				),
-				topics: vec![hash(&addr.clone()), hash(&ALICE)],
+				topics: vec![],
 			},
 		]
 	);
@@ -3078,7 +3071,7 @@ fn set_code_extrinsic() {
 				new_code_hash,
 				old_code_hash: code_hash,
 			}),
-			topics: vec![hash(&addr), new_code_hash, code_hash],
+			topics: vec![],
 		},]
 	);
 	});
@@ -3230,7 +3223,7 @@ fn set_code_hash() {
 					new_code_hash,
 					old_code_hash: code_hash,
 				}),
-				topics: vec![hash(&contract_addr), new_code_hash, code_hash],
+				topics: vec![],
 			},
 			EventRecord {
 				phase: Phase::Initialization,
@@ -3238,10 +3231,7 @@ fn set_code_hash() {
 					caller: Origin::from_account_id(ALICE),
 					contract: contract_addr.clone(),
 				}),
-				topics: vec![
-                    hash(&Origin::<Test>::from_account_id(ALICE)),
-                    hash(&contract_addr)
-                ],
+                topics: vec![],
            },
            EventRecord {
                phase: Phase::Initialization,
@@ -3249,10 +3239,7 @@
                    caller: Origin::from_account_id(ALICE),
                    contract: contract_addr.clone(),
                }),
-                topics: vec![
-                    hash(&Origin::<Test>::from_account_id(ALICE)),
-                    hash(&contract_addr)
-                ],
+                topics: vec![],
            },
        ],
    );
diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs
index 8d7f928dba33d3cb3f221820d2b825e32997db75..e5497b143b8b44073493db391d60bc670ade45a5 100644
--- a/substrate/frame/contracts/src/wasm/mod.rs
+++ b/substrate/frame/contracts/src/wasm/mod.rs
@@ -31,6 +31,9 @@ pub use {
     tests::MockExt,
 };
 
+#[cfg(feature = "runtime-benchmarks")]
+pub use crate::wasm::runtime::{BenchEnv, ReturnData, TrapReason};
+
 pub use crate::wasm::{
     prepare::{LoadedModule, LoadingMode},
     runtime::{
@@ -184,10 +187,11 @@ impl<T: Config> WasmBlob<T> {
                *existing = None;
                <PristineCode<T>>::remove(&code_hash);
-                <Pallet<T>>::deposit_event(
-                    vec![code_hash],
-                    Event::CodeRemoved { code_hash, deposit_released, remover },
-                );
+                <Pallet<T>>::deposit_event(Event::CodeRemoved {
+                    code_hash,
+                    deposit_released,
+                    remover,
+                });
                Ok(())
            } else {
                Err(<Error<T>>::CodeNotFound.into())
@@ -271,14 +275,11 @@ impl<T: Config> WasmBlob<T> {
                    self.code_info.refcount = 0;
                    <PristineCode<T>>::insert(code_hash, &self.code);
                    *stored_code_info = Some(self.code_info.clone());
-                    <Pallet<T>>::deposit_event(
-                        vec![code_hash],
-                        Event::CodeStored {
-                            code_hash,
-                            deposit_held: deposit,
-                            uploader: self.code_info.owner.clone(),
-                        },
-                    );
+                    <Pallet<T>>::deposit_event(Event::CodeStored {
+                        code_hash,
+                        deposit_held: deposit,
+                        uploader: self.code_info.owner.clone(),
+                    });
                    Ok(deposit)
                },
            }
@@ -804,6 +805,9 @@ mod tests {
            self.delegate_dependencies.borrow_mut().remove(code);
            Ok(())
        }
+        fn locked_delegate_dependencies_count(&mut self) -> usize {
+            self.delegate_dependencies.borrow().len()
+        }
    }
 
    /// Execute the supplied code.
diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs
index 3212aff31269a91d66ce0043f60c8130b2d4bc9a..39b15c867c6ab4d8c0287535974565c92f1b0b92 100644
--- a/substrate/frame/contracts/src/wasm/runtime.rs
+++ b/substrate/frame/contracts/src/wasm/runtime.rs
@@ -21,6 +21,7 @@ use crate::{
     exec::{ExecError, ExecResult, Ext, Key, TopicOf},
     gas::{ChargedAmount, Token},
     primitives::ExecReturnValue,
+    weights::WeightInfo,
     BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL,
 };
 use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen};
@@ -145,6 +146,8 @@ impl HostError for TrapReason {}
 #[cfg_attr(test, derive(Debug, PartialEq, Eq))]
 #[derive(Copy, Clone)]
 pub enum RuntimeCosts {
+    /// Base Weight of calling a host function.
+    HostFn,
     /// Weight charged for copying data from the sandbox.
     CopyFromContract(u32),
     /// Weight charged for copying data to the sandbox.
@@ -177,12 +180,8 @@ pub enum RuntimeCosts {
     Now,
     /// Weight of calling `seal_weight_to_fee`.
     WeightToFee,
-    /// Weight of calling `seal_input` without the weight of copying the input.
-    InputBase,
-    /// Weight of calling `seal_return` for the given output size.
-    Return(u32),
-    /// Weight of calling `seal_terminate`.
-    Terminate,
+    /// Weight of calling `seal_terminate`, passing the number of locked dependencies.
+    Terminate(u32),
     /// Weight of calling `seal_random`. It includes the weight for copying the subject.
     Random,
     /// Weight of calling `seal_deposit_event` with the given number of topics and event size.
@@ -206,13 +205,13 @@
     /// Weight of calling `seal_delegate_call` for the given input size.
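An aside on the event change threaded through the test and `WasmBlob` hunks above: `Pallet::<T>::deposit_event` no longer takes a topics vector, so the pallet stops hashing callers, contracts, and code hashes into indexed topics, and every expected `EventRecord` in the tests now carries `topics: vec![]`. A minimal sketch of the new expectation, with a hand-rolled stand-in for frame_system's `EventRecord` (illustrative types, not the pallet's own):

    // Stand-in for frame_system's EventRecord, reduced to the fields that matter here.
    #[derive(Debug, PartialEq)]
    struct EventRecord<E> {
        event: E,
        topics: Vec<[u8; 32]>, // H256 topics on a real chain
    }

    fn main() {
        // The pallet now deposits plain events; no caller/contract hashes are
        // published as indexed topics, so the topic list is always empty.
        let record = EventRecord { event: "Instantiated", topics: vec![] };
        assert!(record.topics.is_empty());
    }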
DelegateCallBase, /// Weight of the transfer performed during a call. - CallSurchargeTransfer, + CallTransferSurcharge, /// Weight per byte that is cloned by supplying the `CLONE_INPUT` flag. CallInputCloned(u32), /// Weight of calling `seal_instantiate` for the given input length and salt. InstantiateBase { input_data_len: u32, salt_len: u32 }, /// Weight of the transfer performed during an instantiate. - InstantiateSurchargeTransfer, + InstantiateTransferSurcharge, /// Weight of calling `seal_hash_sha_256` for the given input size. HashSha256(u32), /// Weight of calling `seal_hash_keccak_256` for the given input size. @@ -236,9 +235,9 @@ pub enum RuntimeCosts { /// Weight of calling `ecdsa_to_eth_address` EcdsaToEthAddress, /// Weight of calling `reentrance_count` - ReentrantCount, + ReentranceCount, /// Weight of calling `account_reentrance_count` - AccountEntranceCount, + AccountReentranceCount, /// Weight of calling `instantiation_nonce` InstantiationNonce, /// Weight of calling `lock_delegate_dependency` @@ -247,6 +246,19 @@ pub enum RuntimeCosts { UnlockDelegateDependency, } +macro_rules! cost_args { + // cost_args!(name, a, b, c) -> T::WeightInfo::name(a, b, c).saturating_sub(T::WeightInfo::name(0, 0, 0)) + ($name:ident, $( $arg: expr ),+) => { + (T::WeightInfo::$name($( $arg ),+).saturating_sub(cost_args!(@call_zero $name, $( $arg ),+))) + }; + // Transform T::WeightInfo::name(a, b, c) into T::WeightInfo::name(0, 0, 0) + (@call_zero $name:ident, $( $arg:expr ),*) => { + T::WeightInfo::$name($( cost_args!(@replace_token $arg) ),*) + }; + // Replace the token with 0. + (@replace_token $_in:tt) => { 0 }; +} + impl Token for RuntimeCosts { fn influence_lowest_gas_limit(&self) -> bool { match self { @@ -256,85 +268,57 @@ impl Token for RuntimeCosts { } fn weight(&self) -> Weight { - let s = T::Schedule::get().host_fn_weights; use self::RuntimeCosts::*; match *self { - CopyFromContract(len) => s.return_per_byte.saturating_mul(len.into()), - CopyToContract(len) => s.input_per_byte.saturating_mul(len.into()), - Caller => s.caller, - IsContract => s.is_contract, - CodeHash => s.code_hash, - OwnCodeHash => s.own_code_hash, - CallerIsOrigin => s.caller_is_origin, - CallerIsRoot => s.caller_is_root, - Address => s.address, - GasLeft => s.gas_left, - Balance => s.balance, - ValueTransferred => s.value_transferred, - MinimumBalance => s.minimum_balance, - BlockNumber => s.block_number, - Now => s.now, - WeightToFee => s.weight_to_fee, - InputBase => s.input, - Return(len) => s.r#return.saturating_add(s.return_per_byte.saturating_mul(len.into())), - Terminate => s.terminate, - Random => s.random, - DepositEvent { num_topic, len } => s - .deposit_event - .saturating_add(s.deposit_event_per_topic.saturating_mul(num_topic.into())) - .saturating_add(s.deposit_event_per_byte.saturating_mul(len.into())), - DebugMessage(len) => s - .debug_message - .saturating_add(s.deposit_event_per_byte.saturating_mul(len.into())), - SetStorage { new_bytes, old_bytes } => s - .set_storage - .saturating_add(s.set_storage_per_new_byte.saturating_mul(new_bytes.into())) - .saturating_add(s.set_storage_per_old_byte.saturating_mul(old_bytes.into())), - ClearStorage(len) => s - .clear_storage - .saturating_add(s.clear_storage_per_byte.saturating_mul(len.into())), - ContainsStorage(len) => s - .contains_storage - .saturating_add(s.contains_storage_per_byte.saturating_mul(len.into())), - GetStorage(len) => - s.get_storage.saturating_add(s.get_storage_per_byte.saturating_mul(len.into())), - TakeStorage(len) => s - 
.take_storage - .saturating_add(s.take_storage_per_byte.saturating_mul(len.into())), - Transfer => s.transfer, - CallBase => s.call, - DelegateCallBase => s.delegate_call, - CallSurchargeTransfer => s.call_transfer_surcharge, - CallInputCloned(len) => s.call_per_cloned_byte.saturating_mul(len.into()), - InstantiateBase { input_data_len, salt_len } => s - .instantiate - .saturating_add(s.instantiate_per_input_byte.saturating_mul(input_data_len.into())) - .saturating_add(s.instantiate_per_salt_byte.saturating_mul(salt_len.into())), - InstantiateSurchargeTransfer => s.instantiate_transfer_surcharge, - HashSha256(len) => s - .hash_sha2_256 - .saturating_add(s.hash_sha2_256_per_byte.saturating_mul(len.into())), - HashKeccak256(len) => s - .hash_keccak_256 - .saturating_add(s.hash_keccak_256_per_byte.saturating_mul(len.into())), - HashBlake256(len) => s - .hash_blake2_256 - .saturating_add(s.hash_blake2_256_per_byte.saturating_mul(len.into())), - HashBlake128(len) => s - .hash_blake2_128 - .saturating_add(s.hash_blake2_128_per_byte.saturating_mul(len.into())), - EcdsaRecovery => s.ecdsa_recover, - Sr25519Verify(len) => s - .sr25519_verify - .saturating_add(s.sr25519_verify_per_byte.saturating_mul(len.into())), + HostFn => cost_args!(noop_host_fn, 1), + CopyToContract(len) => T::WeightInfo::seal_input(len), + CopyFromContract(len) => T::WeightInfo::seal_return(len), + Caller => T::WeightInfo::seal_caller(), + IsContract => T::WeightInfo::seal_is_contract(), + CodeHash => T::WeightInfo::seal_code_hash(), + OwnCodeHash => T::WeightInfo::seal_own_code_hash(), + CallerIsOrigin => T::WeightInfo::seal_caller_is_origin(), + CallerIsRoot => T::WeightInfo::seal_caller_is_root(), + Address => T::WeightInfo::seal_address(), + GasLeft => T::WeightInfo::seal_gas_left(), + Balance => T::WeightInfo::seal_balance(), + ValueTransferred => T::WeightInfo::seal_value_transferred(), + MinimumBalance => T::WeightInfo::seal_minimum_balance(), + BlockNumber => T::WeightInfo::seal_block_number(), + Now => T::WeightInfo::seal_now(), + WeightToFee => T::WeightInfo::seal_weight_to_fee(), + Terminate(locked_dependencies) => T::WeightInfo::seal_terminate(locked_dependencies), + Random => T::WeightInfo::seal_random(), + DepositEvent { num_topic, len } => T::WeightInfo::seal_deposit_event(num_topic, len), + DebugMessage(len) => T::WeightInfo::seal_debug_message(len), + SetStorage { new_bytes, old_bytes } => + T::WeightInfo::seal_set_storage(new_bytes, old_bytes), + ClearStorage(len) => T::WeightInfo::seal_clear_storage(len), + ContainsStorage(len) => T::WeightInfo::seal_contains_storage(len), + GetStorage(len) => T::WeightInfo::seal_get_storage(len), + TakeStorage(len) => T::WeightInfo::seal_take_storage(len), + Transfer => T::WeightInfo::seal_transfer(), + CallBase => T::WeightInfo::seal_call(0, 0), + DelegateCallBase => T::WeightInfo::seal_delegate_call(), + CallTransferSurcharge => cost_args!(seal_call, 1, 0), + CallInputCloned(len) => cost_args!(seal_call, 0, len), + InstantiateBase { input_data_len, salt_len } => + T::WeightInfo::seal_instantiate(0, input_data_len, salt_len), + InstantiateTransferSurcharge => cost_args!(seal_instantiate, 1, 0, 0), + HashSha256(len) => T::WeightInfo::seal_hash_sha2_256(len), + HashKeccak256(len) => T::WeightInfo::seal_hash_keccak_256(len), + HashBlake256(len) => T::WeightInfo::seal_hash_blake2_256(len), + HashBlake128(len) => T::WeightInfo::seal_hash_blake2_128(len), + EcdsaRecovery => T::WeightInfo::seal_ecdsa_recover(), + Sr25519Verify(len) => T::WeightInfo::seal_sr25519_verify(len), 
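The `cost_args!` macro above is the workhorse of the new scheme: it prices one component of a benchmarked host function by evaluating the generated weight function at the given arguments and subtracting its value with all arguments zeroed. Written out by hand for a `seal_call(t, i)`-shaped function (the linear fit below is invented; only the subtraction pattern comes from the diff):

    // Hypothetical fit for a benchmarked host function: a base cost plus a
    // transfer surcharge component (t) and a per-cloned-input-byte component (i).
    fn seal_call(t: u64, i: u64) -> u64 {
        1_000_000 + t * 250_000 + i * 500 // picoseconds, made up for the example
    }

    // cost_args!(seal_call, 1, 0) expands to seal_call(1, 0) - seal_call(0, 0):
    // the marginal cost of the transfer alone, with the shared base stripped out.
    fn call_transfer_surcharge() -> u64 {
        seal_call(1, 0).saturating_sub(seal_call(0, 0))
    }

    fn main() {
        assert_eq!(call_transfer_surcharge(), 250_000);
        // Likewise CallInputCloned(len) is the marginal per-byte cost:
        assert_eq!(seal_call(0, 3).saturating_sub(seal_call(0, 0)), 1_500);
    }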
ChainExtension(weight) | CallRuntime(weight) | CallXcmExecute(weight) => weight, - SetCodeHash => s.set_code_hash, - EcdsaToEthAddress => s.ecdsa_to_eth_address, - ReentrantCount => s.reentrance_count, - AccountEntranceCount => s.account_reentrance_count, - InstantiationNonce => s.instantiation_nonce, - LockDelegateDependency => s.lock_delegate_dependency, - UnlockDelegateDependency => s.unlock_delegate_dependency, + SetCodeHash => T::WeightInfo::seal_set_code_hash(), + EcdsaToEthAddress => T::WeightInfo::seal_ecdsa_to_eth_address(), + ReentranceCount => T::WeightInfo::seal_reentrance_count(), + AccountReentranceCount => T::WeightInfo::seal_account_reentrance_count(), + InstantiationNonce => T::WeightInfo::seal_instantiation_nonce(), + LockDelegateDependency => T::WeightInfo::lock_delegate_dependency(), + UnlockDelegateDependency => T::WeightInfo::unlock_delegate_dependency(), } } } @@ -819,6 +803,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { output_len_ptr: u32, ) -> Result { self.charge_gas(call_type.cost())?; + let input_data = if flags.contains(CallFlags::CLONE_INPUT) { let input = self.input_data.as_ref().ok_or(Error::::InputForwarded)?; charge_gas!(self, RuntimeCosts::CallInputCloned(input.len() as u32))?; @@ -842,7 +827,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { let value: BalanceOf<::T> = self.read_sandbox_memory_as(memory, value_ptr)?; if value > 0u32.into() { - self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; + self.charge_gas(RuntimeCosts::CallTransferSurcharge)?; } self.ext.call( weight, @@ -910,7 +895,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { }; let value: BalanceOf<::T> = self.read_sandbox_memory_as(memory, value_ptr)?; if value > 0u32.into() { - self.charge_gas(RuntimeCosts::InstantiateSurchargeTransfer)?; + self.charge_gas(RuntimeCosts::InstantiateTransferSurcharge)?; } let code_hash: CodeHash<::T> = self.read_sandbox_memory_as(memory, code_hash_ptr)?; @@ -942,7 +927,9 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { } fn terminate(&mut self, memory: &[u8], beneficiary_ptr: u32) -> Result<(), TrapReason> { - self.charge_gas(RuntimeCosts::Terminate)?; + let count = self.ext.locked_delegate_dependencies_count() as _; + self.charge_gas(RuntimeCosts::Terminate(count))?; + let beneficiary: <::T as frame_system::Config>::AccountId = self.read_sandbox_memory_as(memory, beneficiary_ptr)?; self.ext.terminate(&beneficiary)?; @@ -959,6 +946,14 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { // for every function. #[define_env(doc)] pub mod env { + + /// Noop function used to benchmark the time it takes to execute an empty function. + #[cfg(feature = "runtime-benchmarks")] + #[unstable] + fn noop(ctx: _, memory: _) -> Result<(), TrapReason> { + Ok(()) + } + /// Set the value at the given key in the contract storage. /// See [`pallet_contracts_uapi::HostFn::set_storage`] #[prefixed_alias] @@ -1387,7 +1382,6 @@ pub mod env { /// See [`pallet_contracts_uapi::HostFn::input`]. 
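The benchmark-only `noop` host function above is what makes the `HostFn` token chargeable: `noop_host_fn(r)` measures `r` calls of a function that does nothing, so its slope is the fixed cost of crossing the host boundary, and `HostFn => cost_args!(noop_host_fn, 1)` charges exactly that slope. With the figures from the regenerated weight table further down:

    // noop_host_fn(r) from the new table: Weight::from_parts(9_318_986, 0)
    // plus 72_994 ps per repetition r.
    fn noop_host_fn(r: u64) -> u64 {
        9_318_986 + 72_994 * r
    }

    // cost_args!(noop_host_fn, 1) = w(1) - w(0): the per-call host overhead,
    // with the benchmark harness setup cost subtracted away.
    fn host_fn_base_cost() -> u64 {
        noop_host_fn(1).saturating_sub(noop_host_fn(0))
    }

    fn main() {
        assert_eq!(host_fn_base_cost(), 72_994);
    }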
#[prefixed_alias] fn input(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { - ctx.charge_gas(RuntimeCosts::InputBase)?; if let Some(input) = ctx.input_data.take() { ctx.write_sandbox_output(memory, out_ptr, out_len_ptr, &input, false, |len| { Some(RuntimeCosts::CopyToContract(len)) @@ -1408,7 +1402,7 @@ pub mod env { data_ptr: u32, data_len: u32, ) -> Result<(), TrapReason> { - ctx.charge_gas(RuntimeCosts::Return(data_len))?; + ctx.charge_gas(RuntimeCosts::CopyFromContract(data_len))?; Err(TrapReason::Return(ReturnData { flags, data: ctx.read_sandbox_memory(memory, data_ptr, data_len)?, @@ -2249,7 +2243,7 @@ pub mod env { /// See [`pallet_contracts_uapi::HostFn::reentrance_count`]. #[unstable] fn reentrance_count(ctx: _, memory: _) -> Result { - ctx.charge_gas(RuntimeCosts::ReentrantCount)?; + ctx.charge_gas(RuntimeCosts::ReentranceCount)?; Ok(ctx.ext.reentrance_count()) } @@ -2258,7 +2252,7 @@ pub mod env { /// See [`pallet_contracts_uapi::HostFn::account_reentrance_count`]. #[unstable] fn account_reentrance_count(ctx: _, memory: _, account_ptr: u32) -> Result { - ctx.charge_gas(RuntimeCosts::AccountEntranceCount)?; + ctx.charge_gas(RuntimeCosts::AccountReentranceCount)?; let account_id: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(memory, account_ptr)?; Ok(ctx.ext.account_reentrance_count(&account_id)) diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index b95b1d1a9a2e5ae47ca0d9fe1715031f47e43986..2e9c2cd15af8408baabccfaadf42ce1406d92f17 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-05-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-vicqj8em-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -72,65 +72,49 @@ pub trait WeightInfo { fn upload_code_determinism_relaxed(c: u32, ) -> Weight; fn remove_code() -> Weight; fn set_code() -> Weight; - fn seal_caller(r: u32, ) -> Weight; - fn seal_is_contract(r: u32, ) -> Weight; - fn seal_code_hash(r: u32, ) -> Weight; - fn seal_own_code_hash(r: u32, ) -> Weight; - fn seal_caller_is_origin(r: u32, ) -> Weight; - fn seal_caller_is_root(r: u32, ) -> Weight; - fn seal_address(r: u32, ) -> Weight; - fn seal_gas_left(r: u32, ) -> Weight; - fn seal_balance(r: u32, ) -> Weight; - fn seal_value_transferred(r: u32, ) -> Weight; - fn seal_minimum_balance(r: u32, ) -> Weight; - fn seal_block_number(r: u32, ) -> Weight; - fn seal_now(r: u32, ) -> Weight; - fn seal_weight_to_fee(r: u32, ) -> Weight; - fn seal_input(r: u32, ) -> Weight; - fn seal_input_per_byte(n: u32, ) -> Weight; - fn seal_return(r: u32, ) -> Weight; - fn seal_return_per_byte(n: u32, ) -> Weight; - fn seal_terminate(r: u32, ) -> Weight; - fn seal_random(r: u32, ) -> Weight; - fn seal_deposit_event(r: u32, ) -> Weight; - fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight; - fn seal_debug_message(r: u32, ) -> Weight; - fn seal_debug_message_per_byte(i: u32, ) -> Weight; - fn seal_set_storage(r: u32, ) -> Weight; - fn seal_set_storage_per_new_byte(n: u32, ) -> Weight; - fn seal_set_storage_per_old_byte(n: u32, ) -> Weight; - fn seal_clear_storage(r: u32, ) -> Weight; - fn seal_clear_storage_per_byte(n: u32, ) -> Weight; - fn seal_get_storage(r: u32, ) -> Weight; - fn seal_get_storage_per_byte(n: u32, ) -> Weight; - fn seal_contains_storage(r: u32, ) -> Weight; - fn seal_contains_storage_per_byte(n: u32, ) -> Weight; - fn seal_take_storage(r: u32, ) -> Weight; - fn seal_take_storage_per_byte(n: u32, ) -> Weight; - fn seal_transfer(r: u32, ) -> Weight; - fn seal_call(r: u32, ) -> Weight; - fn seal_delegate_call(r: u32, ) -> Weight; - fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight; - fn seal_instantiate(r: u32, ) -> Weight; - fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight; - fn seal_hash_sha2_256(r: u32, ) -> Weight; - fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight; - fn seal_hash_keccak_256(r: u32, ) -> Weight; - fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight; - fn seal_hash_blake2_256(r: u32, ) -> Weight; - fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight; - fn seal_hash_blake2_128(r: u32, ) -> Weight; - fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight; - fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight; - fn seal_sr25519_verify(r: u32, ) -> Weight; - fn seal_ecdsa_recover(r: u32, ) -> Weight; - fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight; - fn seal_set_code_hash(r: u32, ) -> Weight; - fn lock_delegate_dependency(r: u32, ) -> Weight; - fn unlock_delegate_dependency(r: u32, ) -> Weight; - fn seal_reentrance_count(r: u32, ) -> Weight; - fn seal_account_reentrance_count(r: u32, ) -> Weight; - fn seal_instantiation_nonce(r: u32, ) -> Weight; + fn noop_host_fn(r: u32, ) -> Weight; + fn seal_caller() -> Weight; + fn seal_is_contract() -> Weight; + fn seal_code_hash() -> Weight; + fn seal_own_code_hash() -> Weight; + fn seal_caller_is_origin() -> Weight; + fn seal_caller_is_root() -> Weight; + fn seal_address() -> Weight; + fn seal_gas_left() -> Weight; + fn seal_balance() -> Weight; + fn seal_value_transferred() -> Weight; + fn seal_minimum_balance() -> Weight; + fn seal_block_number() -> Weight; + fn 
seal_now() -> Weight; + fn seal_weight_to_fee() -> Weight; + fn seal_input(n: u32, ) -> Weight; + fn seal_return(n: u32, ) -> Weight; + fn seal_terminate(n: u32, ) -> Weight; + fn seal_random() -> Weight; + fn seal_deposit_event(t: u32, n: u32, ) -> Weight; + fn seal_debug_message(i: u32, ) -> Weight; + fn seal_set_storage(n: u32, o: u32, ) -> Weight; + fn seal_clear_storage(n: u32, ) -> Weight; + fn seal_get_storage(n: u32, ) -> Weight; + fn seal_contains_storage(n: u32, ) -> Weight; + fn seal_take_storage(n: u32, ) -> Weight; + fn seal_transfer() -> Weight; + fn seal_call(t: u32, i: u32, ) -> Weight; + fn seal_delegate_call() -> Weight; + fn seal_instantiate(t: u32, i: u32, s: u32, ) -> Weight; + fn seal_hash_sha2_256(n: u32, ) -> Weight; + fn seal_hash_keccak_256(n: u32, ) -> Weight; + fn seal_hash_blake2_256(n: u32, ) -> Weight; + fn seal_hash_blake2_128(n: u32, ) -> Weight; + fn seal_sr25519_verify(n: u32, ) -> Weight; + fn seal_ecdsa_recover() -> Weight; + fn seal_ecdsa_to_eth_address() -> Weight; + fn seal_set_code_hash() -> Weight; + fn lock_delegate_dependency() -> Weight; + fn unlock_delegate_dependency() -> Weight; + fn seal_reentrance_count() -> Weight; + fn seal_account_reentrance_count() -> Weight; + fn seal_instantiation_nonce() -> Weight; fn instr_i64_load_store(r: u32, ) -> Weight; } @@ -143,8 +127,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_149_000 picoseconds. - Weight::from_parts(2_274_000, 1627) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_142_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -154,10 +138,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_863_000 picoseconds. - Weight::from_parts(13_188_000, 442) - // Standard Error: 1_053 - .saturating_add(Weight::from_parts(1_105_325, 0).saturating_mul(k.into())) + // Minimum execution time: 12_095_000 picoseconds. + Weight::from_parts(12_699_000, 442) + // Standard Error: 891 + .saturating_add(Weight::from_parts(1_114_063, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -171,10 +155,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_432_000 picoseconds. - Weight::from_parts(9_203_290, 6149) + // Minimum execution time: 8_433_000 picoseconds. + Weight::from_parts(8_992_328, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_186, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -187,8 +171,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 17_177_000 picoseconds. - Weight::from_parts(17_663_000, 6450) + // Minimum execution time: 16_415_000 picoseconds. 
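This trait change is the crux of the weights rewrite: the old functions took a repetition count `r` and amortized the host-call overhead into every measurement, while the new ones return a flat per-call cost, with the shared overhead isolated in `noop_host_fn`. As I read the diff, a single host call is then priced as the `HostFn` base plus the function's own flat weight; a back-of-the-envelope composition with numbers from this table (ref_time only):

    // Per-call figures from the regenerated table, in picoseconds of ref_time.
    fn host_fn_base() -> u64 { 72_994 }  // slope of noop_host_fn(r)
    fn seal_caller() -> u64 { 687_000 }  // flat weight of the host function body

    fn main() {
        // Old scheme: seal_caller(r) baked both costs into one r-based fit.
        // New scheme: the shared base and the per-function cost compose at runtime.
        let charged = host_fn_base().saturating_add(seal_caller());
        println!("one seal_caller call is charged ~{charged} ps of ref_time");
    }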
+ Weight::from_parts(17_348_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -201,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_636_000 picoseconds. - Weight::from_parts(3_774_000, 3635) - // Standard Error: 542 - .saturating_add(Weight::from_parts(1_260_058, 0).saturating_mul(k.into())) + // Minimum execution time: 3_433_000 picoseconds. + Weight::from_parts(3_490_000, 3635) + // Standard Error: 1_043 + .saturating_add(Weight::from_parts(1_225_953, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -214,8 +198,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -223,13 +205,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `328 + c * (1 ±0)` - // Estimated: `6266 + c * (1 ±0)` - // Minimum execution time: 21_585_000 picoseconds. - Weight::from_parts(22_069_944, 6266) - // Standard Error: 1 - .saturating_add(Weight::from_parts(404, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `325 + c * (1 ±0)` + // Estimated: `6263 + c * (1 ±0)` + // Minimum execution time: 16_421_000 picoseconds. + Weight::from_parts(16_822_963, 6263) + // Standard Error: 0 + .saturating_add(Weight::from_parts(456, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -239,8 +221,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 13_283_000 picoseconds. - Weight::from_parts(14_015_000, 6380) + // Minimum execution time: 12_569_000 picoseconds. + Weight::from_parts(13_277_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -254,8 +236,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 48_022_000 picoseconds. - Weight::from_parts(49_627_000, 6292) + // Minimum execution time: 46_777_000 picoseconds. + Weight::from_parts(47_690_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -267,8 +249,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 58_374_000 picoseconds. 
- Weight::from_parts(59_615_000, 6534) + // Minimum execution time: 55_280_000 picoseconds. + Weight::from_parts(57_081_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -278,8 +260,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 12_559_000 picoseconds. - Weight::from_parts(12_947_000, 6349) + // Minimum execution time: 12_077_000 picoseconds. + Weight::from_parts(12_647_000, 6349) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -289,8 +271,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_480_000 picoseconds. - Weight::from_parts(2_680_000, 1627) + // Minimum execution time: 2_559_000 picoseconds. + Weight::from_parts(2_711_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -302,8 +284,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 12_625_000 picoseconds. - Weight::from_parts(13_094_000, 3631) + // Minimum execution time: 12_238_000 picoseconds. + Weight::from_parts(12_627_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -314,7 +296,7 @@ impl WeightInfo for SubstrateWeight { // Measured: `142` // Estimated: `3607` // Minimum execution time: 4_836_000 picoseconds. - Weight::from_parts(5_182_000, 3607) + Weight::from_parts(5_086_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -325,8 +307,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_319_000 picoseconds. - Weight::from_parts(6_582_000, 3632) + // Minimum execution time: 6_147_000 picoseconds. + Weight::from_parts(6_380_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -337,15 +319,13 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_532_000 picoseconds. - Weight::from_parts(6_909_000, 3607) + // Minimum execution time: 6_140_000 picoseconds. 
+ Weight::from_parts(6_670_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -356,31 +336,25 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `804 + c * (1 ±0)` - // Estimated: `9217 + c * (1 ±0)` - // Minimum execution time: 305_778_000 picoseconds. - Weight::from_parts(282_321_249, 9217) - // Standard Error: 72 - .saturating_add(Weight::from_parts(33_456, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `801 + c * (1 ±0)` + // Estimated: `4264 + c * (1 ±0)` + // Minimum execution time: 354_459_000 picoseconds. + Weight::from_parts(332_397_871, 4264) + // Standard Error: 70 + .saturating_add(Weight::from_parts(33_775, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:3 w:3) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -396,18 +370,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `8740` - // Minimum execution time: 3_810_809_000 picoseconds. 
- Weight::from_parts(739_511_598, 8740) - // Standard Error: 140 - .saturating_add(Weight::from_parts(67_574, 0).saturating_mul(c.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_488, 0).saturating_mul(i.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_537, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(14_u64)) - .saturating_add(T::DbWeight::get().writes(10_u64)) + // Measured: `323` + // Estimated: `6262` + // Minimum execution time: 4_239_452_000 picoseconds. + Weight::from_parts(800_849_282, 6262) + // Standard Error: 117 + .saturating_add(Weight::from_parts(68_435, 0).saturating_mul(c.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_653, 0).saturating_mul(i.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_668, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -415,8 +389,6 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -425,29 +397,25 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` - // Estimated: `8982` - // Minimum execution time: 1_986_789_000 picoseconds. - Weight::from_parts(2_017_466_000, 8982) + // Measured: `560` + // Estimated: `4029` + // Minimum execution time: 2_085_570_000 picoseconds. 
+ Weight::from_parts(2_112_501_000, 4029) // Standard Error: 26 - .saturating_add(Weight::from_parts(827, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(888, 0).saturating_mul(i.into())) // Standard Error: 26 - .saturating_add(Weight::from_parts(781, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(13_u64)) - .saturating_add(T::DbWeight::get().writes(7_u64)) + .saturating_add(Weight::from_parts(795, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -458,64 +426,54 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `829` - // Estimated: `9244` - // Minimum execution time: 210_724_000 picoseconds. - Weight::from_parts(218_608_000, 9244) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `826` + // Estimated: `4291` + // Minimum execution time: 201_900_000 picoseconds. + Weight::from_parts(206_738_000, 4291) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. fn upload_code_determinism_enforced(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 271_259_000 picoseconds. 
- Weight::from_parts(298_852_854, 6085) - // Standard Error: 65 - .saturating_add(Weight::from_parts(33_547, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 330_704_000 picoseconds. + Weight::from_parts(345_129_342, 3607) + // Standard Error: 51 + .saturating_add(Weight::from_parts(33_126, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. fn upload_code_determinism_relaxed(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 278_167_000 picoseconds. - Weight::from_parts(311_888_941, 6085) - // Standard Error: 58 - .saturating_add(Weight::from_parts(33_595, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 343_339_000 picoseconds. + Weight::from_parts(356_479_729, 3607) + // Standard Error: 49 + .saturating_add(Weight::from_parts(33_404, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -523,18 +481,16 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 47_403_000 picoseconds. - Weight::from_parts(48_707_000, 3780) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(4_u64)) + // Minimum execution time: 42_241_000 picoseconds. 
+ Weight::from_parts(43_365_000, 3780) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -542,610 +498,331 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `System::EventTopics` (r:3 w:3) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `552` - // Estimated: `8967` - // Minimum execution time: 35_361_000 picoseconds. - Weight::from_parts(36_714_000, 8967) - .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) + // Estimated: `6492` + // Minimum execution time: 26_318_000 picoseconds. + Weight::from_parts(27_840_000, 6492) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// The range of component `r` is `[0, 1600]`. - fn seal_caller(r: u32, ) -> Weight { + fn noop_host_fn(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_397_000 picoseconds. + Weight::from_parts(9_318_986, 0) + // Standard Error: 72 + .saturating_add(Weight::from_parts(72_994, 0).saturating_mul(r.into())) + } + fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_340_000 picoseconds. - Weight::from_parts(9_360_237, 0) - // Standard Error: 269 - .saturating_add(Weight::from_parts(249_611, 0).saturating_mul(r.into())) + // Minimum execution time: 644_000 picoseconds. + Weight::from_parts(687_000, 0) } - /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_is_contract(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `509 + r * (77 ±0)` - // Estimated: `1467 + r * (2552 ±0)` - // Minimum execution time: 9_059_000 picoseconds. - Weight::from_parts(9_201_000, 1467) - // Standard Error: 5_643 - .saturating_add(Weight::from_parts(3_343_859, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) + fn seal_is_contract() -> Weight { + // Proof Size summary in bytes: + // Measured: `354` + // Estimated: `3819` + // Minimum execution time: 6_465_000 picoseconds. + Weight::from_parts(6_850_000, 3819) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. 
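Note how the generated bodies compose a two-dimensional `Weight` (ref_time plus proof_size) with the runtime's database access weights: `seal_is_contract` above is `Weight::from_parts(6_850_000, 3819)` plus one storage read of `ContractInfoOf`. A sketch of that composition with a hand-rolled `Weight` pair and a RocksDb-ballpark read cost (both stand-ins, not the real `sp_weights` types):

    // Minimal two-component weight, mirroring (ref_time, proof_size).
    #[derive(Debug, Clone, Copy)]
    struct Weight { ref_time: u64, proof_size: u64 }

    impl Weight {
        const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
            Self { ref_time, proof_size }
        }
        fn saturating_add(self, o: Self) -> Self {
            Self {
                ref_time: self.ref_time.saturating_add(o.ref_time),
                proof_size: self.proof_size.saturating_add(o.proof_size),
            }
        }
    }

    // Assumed per-read DB cost, roughly RocksDbWeight (~25 µs, no proof bytes).
    fn db_read() -> Weight { Weight::from_parts(25_000_000, 0) }

    fn main() {
        // from_parts(6_850_000, 3819) + T::DbWeight::get().reads(1), spelled out.
        let w = Weight::from_parts(6_850_000, 3819).saturating_add(db_read());
        println!("{w:?}");
    }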
- fn seal_code_hash(r: u32, ) -> Weight { + fn seal_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `517 + r * (170 ±0)` - // Estimated: `1468 + r * (2645 ±0)` - // Minimum execution time: 9_220_000 picoseconds. - Weight::from_parts(9_399_000, 1468) - // Standard Error: 6_194 - .saturating_add(Weight::from_parts(4_172_011, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into())) + // Measured: `447` + // Estimated: `3912` + // Minimum execution time: 7_735_000 picoseconds. + Weight::from_parts(8_115_000, 3912) + .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// The range of component `r` is `[0, 1600]`. - fn seal_own_code_hash(r: u32, ) -> Weight { + fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_707_000 picoseconds. - Weight::from_parts(10_100_456, 0) - // Standard Error: 234 - .saturating_add(Weight::from_parts(338_464, 0).saturating_mul(r.into())) + // Minimum execution time: 717_000 picoseconds. + Weight::from_parts(791_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_caller_is_origin(r: u32, ) -> Weight { + fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_524_000 picoseconds. - Weight::from_parts(10_813_389, 0) - // Standard Error: 76 - .saturating_add(Weight::from_parts(102_535, 0).saturating_mul(r.into())) + // Minimum execution time: 365_000 picoseconds. + Weight::from_parts(427_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_caller_is_root(r: u32, ) -> Weight { + fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_799_000 picoseconds. - Weight::from_parts(10_886_744, 0) - // Standard Error: 75 - .saturating_add(Weight::from_parts(80_901, 0).saturating_mul(r.into())) + // Minimum execution time: 331_000 picoseconds. + Weight::from_parts(363_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_address(r: u32, ) -> Weight { + fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_895_000 picoseconds. - Weight::from_parts(10_658_338, 0) - // Standard Error: 189 - .saturating_add(Weight::from_parts(249_694, 0).saturating_mul(r.into())) + // Minimum execution time: 586_000 picoseconds. + Weight::from_parts(625_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_gas_left(r: u32, ) -> Weight { + fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_643_000 picoseconds. - Weight::from_parts(10_932_126, 0) - // Standard Error: 153 - .saturating_add(Weight::from_parts(280_924, 0).saturating_mul(r.into())) + // Minimum execution time: 680_000 picoseconds. + Weight::from_parts(734_000, 0) } - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_balance(r: u32, ) -> Weight { + fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` - // Estimated: `3599` - // Minimum execution time: 9_548_000 picoseconds. 
- Weight::from_parts(9_737_000, 3599) - // Standard Error: 971 - .saturating_add(Weight::from_parts(1_704_134, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Estimated: `0` + // Minimum execution time: 4_732_000 picoseconds. + Weight::from_parts(5_008_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_value_transferred(r: u32, ) -> Weight { + fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_172_000 picoseconds. - Weight::from_parts(18_255_933, 0) - // Standard Error: 540 - .saturating_add(Weight::from_parts(230_929, 0).saturating_mul(r.into())) + // Minimum execution time: 608_000 picoseconds. + Weight::from_parts(635_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_minimum_balance(r: u32, ) -> Weight { + fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_232_000 picoseconds. - Weight::from_parts(9_796_584, 0) - // Standard Error: 208 - .saturating_add(Weight::from_parts(239_962, 0).saturating_mul(r.into())) + // Minimum execution time: 571_000 picoseconds. + Weight::from_parts(606_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_block_number(r: u32, ) -> Weight { + fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_747_000 picoseconds. - Weight::from_parts(8_733_230, 0) - // Standard Error: 377 - .saturating_add(Weight::from_parts(253_801, 0).saturating_mul(r.into())) + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(584_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_now(r: u32, ) -> Weight { + fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_214_000 picoseconds. - Weight::from_parts(10_194_153, 0) - // Standard Error: 516 - .saturating_add(Weight::from_parts(247_621, 0).saturating_mul(r.into())) + // Minimum execution time: 552_000 picoseconds. + Weight::from_parts(612_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_weight_to_fee(r: u32, ) -> Weight { + fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 9_022_000 picoseconds. - Weight::from_parts(22_051_160, 1552) - // Standard Error: 697 - .saturating_add(Weight::from_parts(709_612, 0).saturating_mul(r.into())) + // Minimum execution time: 4_396_000 picoseconds. + Weight::from_parts(4_630_000, 1552) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// The range of component `r` is `[0, 1600]`. - fn seal_input(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_135_000 picoseconds. 
- Weight::from_parts(10_646_215, 0) - // Standard Error: 161 - .saturating_add(Weight::from_parts(170_336, 0).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 1048576]`. - fn seal_input_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `9287` - // Minimum execution time: 273_896_000 picoseconds. - Weight::from_parts(148_309_654, 9287) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// The range of component `r` is `[0, 1]`. - fn seal_return(r: u32, ) -> Weight { + /// The range of component `n` is `[0, 1048572]`. + fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_906_000 picoseconds. - Weight::from_parts(9_264_446, 0) - // Standard Error: 19_760 - .saturating_add(Weight::from_parts(1_256_053, 0).saturating_mul(r.into())) + // Minimum execution time: 494_000 picoseconds. + Weight::from_parts(510_000, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(303, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. - fn seal_return_per_byte(n: u32, ) -> Weight { + /// The range of component `n` is `[0, 1048572]`. + fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_266_000 picoseconds. - Weight::from_parts(10_602_261, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) + // Minimum execution time: 311_000 picoseconds. 
+ Weight::from_parts(346_000, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(480, 0).saturating_mul(n.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:3 w:3) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:1 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:4 w:4) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::DeletionQueue` (r:0 w:1) /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) - /// The range of component `r` is `[0, 1]`. - fn seal_terminate(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `4805 + r * (2121 ±0)` - // Estimated: `13220 + r * (81321 ±0)` - // Minimum execution time: 295_922_000 picoseconds. - Weight::from_parts(322_472_877, 13220) - // Standard Error: 993_812 - .saturating_add(Weight::from_parts(259_075_422, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().reads((36_u64).saturating_mul(r.into()))) + /// The range of component `n` is `[0, 32]`. + fn seal_terminate(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `319 + n * (78 ±0)` + // Estimated: `3784 + n * (2553 ±0)` + // Minimum execution time: 14_403_000 picoseconds. 
+ Weight::from_parts(16_478_113, 3784) + // Standard Error: 6_667 + .saturating_add(Weight::from_parts(3_641_603, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(T::DbWeight::get().writes((41_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2553).saturating_mul(n.into())) } /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_random(r: u32, ) -> Weight { + fn seal_random() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 9_427_000 picoseconds. - Weight::from_parts(12_996_213, 1561) - // Standard Error: 845 - .saturating_add(Weight::from_parts(1_182_642, 0).saturating_mul(r.into())) + // Minimum execution time: 3_639_000 picoseconds. + Weight::from_parts(3_801_000, 1561) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// The range of component `r` is `[0, 1600]`. - fn seal_deposit_event(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_304_000 picoseconds. - Weight::from_parts(25_678_842, 0) - // Standard Error: 1_855 - .saturating_add(Weight::from_parts(1_814_511, 0).saturating_mul(r.into())) - } /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16384]`. - fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { + fn seal_deposit_event(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 23_425_000 picoseconds. - Weight::from_parts(15_229_010, 990) - // Standard Error: 14_380 - .saturating_add(Weight::from_parts(2_545_653, 0).saturating_mul(t.into())) - // Standard Error: 4 - .saturating_add(Weight::from_parts(594, 0).saturating_mul(n.into())) + // Minimum execution time: 4_102_000 picoseconds. + Weight::from_parts(4_256_984, 990) + // Standard Error: 6_777 + .saturating_add(Weight::from_parts(2_331_893, 0).saturating_mul(t.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) } - /// The range of component `r` is `[0, 1600]`. - fn seal_debug_message(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 11_117_000 picoseconds. - Weight::from_parts(12_887_533, 0) - // Standard Error: 83 - .saturating_add(Weight::from_parts(99_373, 0).saturating_mul(r.into())) - } /// The range of component `i` is `[0, 1048576]`. 
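// --- Editor's sketch (not part of the generated diff) ----------------------
// The fits in this file are plain linear models, so a charged weight can be
// reproduced by hand. A minimal sketch, assuming `frame-support` is available
// and using `RocksDbWeight` as a stand-in for the runtime's `T::DbWeight`:
// the new `seal_deposit_event` fit for `t` topics and `n` bytes of event
// data, with every constant copied from the figures above.
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

fn deposit_event_weight(t: u32, n: u32) -> Weight {
    Weight::from_parts(4_256_984, 990)
        // one slope per benchmarked component
        .saturating_add(Weight::from_parts(2_331_893, 0).saturating_mul(t.into()))
        .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into()))
        // one DB read and write per topic, plus its proof-size contribution
        .saturating_add(RocksDbWeight::get().reads(t.into()))
        .saturating_add(RocksDbWeight::get().writes(t.into()))
        .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into()))
}
// E.g. t = 1, n = 16_384 gives a ref_time of 4_256_984 + 2_331_893 + 31 * 16_384 ps.
// ----------------------------------------------------------------------------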
- fn seal_debug_message_per_byte(i: u32, ) -> Weight { + fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_982_000 picoseconds. - Weight::from_parts(11_176_000, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(983, 0).saturating_mul(i.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_set_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_150_000 picoseconds. - Weight::from_parts(9_269_000, 105) - // Standard Error: 8_147 - .saturating_add(Weight::from_parts(5_339_554, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. - fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `245` - // Estimated: `245` - // Minimum execution time: 19_085_000 picoseconds. - Weight::from_parts(20_007_323, 245) - // Standard Error: 3 - .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) + // Minimum execution time: 385_000 picoseconds. + Weight::from_parts(427_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_272, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { + /// The range of component `o` is `[0, 16384]`. + fn seal_set_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 19_127_000 picoseconds. - Weight::from_parts(21_152_987, 248) - // Standard Error: 3 - .saturating_add(Weight::from_parts(42, 0).saturating_mul(n.into())) + // Measured: `250 + o * (1 ±0)` + // Estimated: `249 + o * (1 ±0)` + // Minimum execution time: 10_128_000 picoseconds. + Weight::from_parts(9_963_519, 249) + // Standard Error: 1 + .saturating_add(Weight::from_parts(327, 0).saturating_mul(n.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(58, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_clear_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_264_000 picoseconds. 
- Weight::from_parts(9_449_000, 105) - // Standard Error: 8_196 - .saturating_add(Weight::from_parts(5_325_578, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_clear_storage_per_byte(n: u32, ) -> Weight { + fn seal_clear_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_489_000 picoseconds. - Weight::from_parts(19_916_153, 248) + // Minimum execution time: 7_921_000 picoseconds. + Weight::from_parts(9_290_526, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(97, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_get_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_299_000 picoseconds. - Weight::from_parts(9_464_000, 105) - // Standard Error: 6_827 - .saturating_add(Weight::from_parts(4_720_699, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_get_storage_per_byte(n: u32, ) -> Weight { + fn seal_get_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 17_981_000 picoseconds. - Weight::from_parts(19_802_353, 248) + // Minimum execution time: 7_403_000 picoseconds. + Weight::from_parts(8_815_037, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(617, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(701, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_contains_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_891_000 picoseconds. 
- Weight::from_parts(10_046_000, 105) - // Standard Error: 6_993 - .saturating_add(Weight::from_parts(4_601_167, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_contains_storage_per_byte(n: u32, ) -> Weight { + fn seal_contains_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 17_229_000 picoseconds. - Weight::from_parts(18_302_733, 248) + // Minimum execution time: 6_590_000 picoseconds. + Weight::from_parts(7_949_861, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(112, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(76, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_take_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_323_000 picoseconds. - Weight::from_parts(9_462_000, 105) - // Standard Error: 8_031 - .saturating_add(Weight::from_parts(5_433_981, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_take_storage_per_byte(n: u32, ) -> Weight { + fn seal_take_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_711_000 picoseconds. - Weight::from_parts(20_495_670, 248) + // Minimum execution time: 7_900_000 picoseconds. + Weight::from_parts(9_988_151, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(640, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(703, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: `System::Account` (r:1601 w:1601) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_transfer(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `4221 + r * (2475 ±0)` - // Minimum execution time: 9_226_000 picoseconds. 
- Weight::from_parts(9_394_000, 4221) - // Standard Error: 14_741 - .saturating_add(Weight::from_parts(34_179_316, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2475).saturating_mul(r.into())) + fn seal_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `0` + // Minimum execution time: 9_023_000 picoseconds. + Weight::from_parts(9_375_000, 0) } - /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `System::EventTopics` (r:801 w:801) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_call(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `520 + r * (170 ±0)` - // Estimated: `6463 + r * (2646 ±0)` - // Minimum execution time: 9_455_000 picoseconds. - Weight::from_parts(9_671_000, 6463) - // Standard Error: 126_080 - .saturating_add(Weight::from_parts(244_204_040, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(2_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2646).saturating_mul(r.into())) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// The range of component `t` is `[0, 1]`. + /// The range of component `i` is `[0, 1048576]`. + fn seal_call(t: u32, i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `620 + t * (280 ±0)` + // Estimated: `4085 + t * (2182 ±0)` + // Minimum execution time: 157_109_000 picoseconds. 
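// --- Editor's sketch (not part of the generated diff) ----------------------
// `Weight` is two-dimensional: ref_time in picoseconds plus proof_size in
// bytes (the "Estimated" figure), and both dimensions saturate independently.
// The reworked `seal_transfer` above is now a flat cost with neither storage
// access nor proof size, which the assertions below spell out.
use frame_support::weights::Weight;

fn transfer_fit_sanity() {
    let w = Weight::from_parts(9_375_000, 0);
    assert_eq!(w.ref_time(), 9_375_000); // picoseconds
    assert_eq!(w.proof_size(), 0); // bytes of PoV
}
// ----------------------------------------------------------------------------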
+ Weight::from_parts(159_458_069, 4085) + // Standard Error: 339_702 + .saturating_add(Weight::from_parts(44_066_869, 0).saturating_mul(t.into())) + // Standard Error: 0 + .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 2182).saturating_mul(t.into())) } - /// Storage: `Contracts::CodeInfoOf` (r:735 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:735 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `System::EventTopics` (r:736 w:736) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:0 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_delegate_call(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + r * (527 ±0)` - // Estimated: `6447 + r * (2583 ±10)` - // Minimum execution time: 9_274_000 picoseconds. - Weight::from_parts(9_437_000, 6447) - // Standard Error: 150_832 - .saturating_add(Weight::from_parts(244_196_269, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// The range of component `t` is `[0, 1]`. - /// The range of component `c` is `[0, 1048576]`. - fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `699 + t * (277 ±0)` - // Estimated: `6639 + t * (3458 ±0)` - // Minimum execution time: 214_483_000 picoseconds. 
- Weight::from_parts(122_634_366, 6639) - // Standard Error: 2_499_235 - .saturating_add(Weight::from_parts(41_326_008, 0).saturating_mul(t.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(7_u64)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) - .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 3458).saturating_mul(t.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:800 w:800) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:800 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:0) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:802 w:802) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `System::EventTopics` (r:801 w:801) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[1, 800]`. - fn seal_instantiate(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1097 + r * (188 ±0)` - // Estimated: `6990 + r * (2664 ±0)` - // Minimum execution time: 341_569_000 picoseconds. - Weight::from_parts(360_574_000, 6990) - // Standard Error: 259_746 - .saturating_add(Weight::from_parts(337_944_674, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4_u64)) - .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2664).saturating_mul(r.into())) + fn seal_delegate_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `430` + // Estimated: `3895` + // Minimum execution time: 143_384_000 picoseconds. 
+ Weight::from_parts(147_554_000, 3895) + .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -1153,256 +830,149 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:3 w:3) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. - fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `760 + t * (104 ±0)` - // Estimated: `6719 + t * (2549 ±1)` - // Minimum execution time: 1_863_119_000 picoseconds. - Weight::from_parts(900_189_174, 6719) - // Standard Error: 13_040_979 - .saturating_add(Weight::from_parts(4_056_063, 0).saturating_mul(t.into())) - // Standard Error: 20 - .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(i.into())) - // Standard Error: 20 - .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) - .saturating_add(T::DbWeight::get().writes(7_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2549).saturating_mul(t.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_sha2_256(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_211_000 picoseconds. - Weight::from_parts(11_696_412, 0) - // Standard Error: 388 - .saturating_add(Weight::from_parts(265_538, 0).saturating_mul(r.into())) + fn seal_instantiate(t: u32, i: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `676` + // Estimated: `4138` + // Minimum execution time: 1_798_243_000 picoseconds. + Weight::from_parts(82_642_573, 4138) + // Standard Error: 6_831_260 + .saturating_add(Weight::from_parts(159_867_027, 0).saturating_mul(t.into())) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_534, 0).saturating_mul(i.into())) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_809, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// The range of component `n` is `[0, 1048576]`. 
- fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_296_000 picoseconds. - Weight::from_parts(572_494, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_067, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_keccak_256(r: u32, ) -> Weight { + fn seal_hash_sha2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_177_000 picoseconds. - Weight::from_parts(8_620_481, 0) - // Standard Error: 249 - .saturating_add(Weight::from_parts(674_502, 0).saturating_mul(r.into())) + // Minimum execution time: 875_000 picoseconds. + Weight::from_parts(904_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { + fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_240_000 picoseconds. - Weight::from_parts(8_696_186, 0) + // Minimum execution time: 1_475_000 picoseconds. + Weight::from_parts(1_551_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_328, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_blake2_256(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_889_000 picoseconds. - Weight::from_parts(16_103_170, 0) - // Standard Error: 343 - .saturating_add(Weight::from_parts(328_939, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(3_410, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { + fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_405_000 picoseconds. - Weight::from_parts(2_264_024, 0) + // Minimum execution time: 821_000 picoseconds. + Weight::from_parts(850_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_196, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_blake2_128(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_215_000 picoseconds. - Weight::from_parts(10_505_632, 0) - // Standard Error: 240 - .saturating_add(Weight::from_parts(324_854, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(1_279, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { + fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_440_000 picoseconds. - Weight::from_parts(2_575_889, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + // Minimum execution time: 747_000 picoseconds. + Weight::from_parts(773_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_276, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. 
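// --- Editor's sketch (not part of the generated diff) ----------------------
// With the repetition component `r` gone, the hashing host functions reduce
// to pure per-byte fits that can be compared directly: under the figures
// above, Keccak-256 costs 3_410 ps per byte versus 1_145 ps for SHA2-256 and
// roughly 1_280 ps for the BLAKE2 variants — about a 3x spread. One of the
// fits, evaluated as a standalone function:
use frame_support::weights::Weight;

fn keccak_256_weight(n: u32) -> Weight {
    Weight::from_parts(1_551_000, 0)
        .saturating_add(Weight::from_parts(3_410, 0).saturating_mul(n.into()))
}
// ----------------------------------------------------------------------------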
- fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 55_119_000 picoseconds. - Weight::from_parts(56_732_248, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(4_639, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 160]`. - fn seal_sr25519_verify(r: u32, ) -> Weight { + fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_176_000 picoseconds. - Weight::from_parts(9_861_102, 0) - // Standard Error: 6_029 - .saturating_add(Weight::from_parts(45_948_571, 0).saturating_mul(r.into())) + // Minimum execution time: 43_154_000 picoseconds. + Weight::from_parts(45_087_558, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(4_628, 0).saturating_mul(n.into())) } - /// The range of component `r` is `[0, 160]`. - fn seal_ecdsa_recover(r: u32, ) -> Weight { + fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_293_000 picoseconds. - Weight::from_parts(28_785_765, 0) - // Standard Error: 9_160 - .saturating_add(Weight::from_parts(45_566_150, 0).saturating_mul(r.into())) + // Minimum execution time: 47_193_000 picoseconds. + Weight::from_parts(48_514_000, 0) } - /// The range of component `r` is `[0, 160]`. - fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { + fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_206_000 picoseconds. - Weight::from_parts(12_420_664, 0) - // Standard Error: 3_489 - .saturating_add(Weight::from_parts(11_628_989, 0).saturating_mul(r.into())) + // Minimum execution time: 13_083_000 picoseconds. + Weight::from_parts(13_218_000, 0) } - /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1535 w:0) + /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1537 w:1537) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_set_code_hash(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + r * (926 ±0)` - // Estimated: `8969 + r * (3047 ±7)` - // Minimum execution time: 9_219_000 picoseconds. - Weight::from_parts(9_385_000, 8969) - // Standard Error: 45_562 - .saturating_add(Weight::from_parts(26_360_661, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:32 w:32) + fn seal_set_code_hash() -> Weight { + // Proof Size summary in bytes: + // Measured: `430` + // Estimated: `3895` + // Minimum execution time: 19_308_000 picoseconds. 
+ Weight::from_parts(20_116_000, 3895) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// The range of component `r` is `[0, 32]`. - fn lock_delegate_dependency(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `274 + r * (78 ±0)` - // Estimated: `1265 + r * (2553 ±0)` - // Minimum execution time: 9_355_000 picoseconds. - Weight::from_parts(15_071_309, 1265) - // Standard Error: 9_722 - .saturating_add(Weight::from_parts(5_328_717, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:32 w:32) + fn lock_delegate_dependency() -> Weight { + // Proof Size summary in bytes: + // Measured: `355` + // Estimated: `3820` + // Minimum execution time: 9_271_000 picoseconds. + Weight::from_parts(9_640_000, 3820) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) - /// The range of component `r` is `[0, 32]`. - fn unlock_delegate_dependency(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `275 + r * (78 ±0)` - // Estimated: `990 + r * (2568 ±0)` - // Minimum execution time: 8_979_000 picoseconds. - Weight::from_parts(14_362_224, 990) - // Standard Error: 9_137 - .saturating_add(Weight::from_parts(4_488_748, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into())) + fn unlock_delegate_dependency() -> Weight { + // Proof Size summary in bytes: + // Measured: `355` + // Estimated: `3558` + // Minimum execution time: 8_182_000 picoseconds. 
+ Weight::from_parts(8_343_000, 3558) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_reentrance_count(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `861 + r * (3 ±0)` - // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 269_704_000 picoseconds. - Weight::from_parts(289_916_035, 9282) - // Standard Error: 408 - .saturating_add(Weight::from_parts(166_040, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(11_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) + fn seal_reentrance_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 320_000 picoseconds. + Weight::from_parts(347_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_account_reentrance_count(r: u32, ) -> Weight { + fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_361_000 picoseconds. - Weight::from_parts(11_633_836, 0) - // Standard Error: 86 - .saturating_add(Weight::from_parts(83_083, 0).saturating_mul(r.into())) + // Minimum execution time: 345_000 picoseconds. + Weight::from_parts(370_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_instantiation_nonce(r: u32, ) -> Weight { + fn seal_instantiation_nonce() -> Weight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 9_133_000 picoseconds. - Weight::from_parts(13_259_836, 1704) - // Standard Error: 121 - .saturating_add(Weight::from_parts(76_878, 0).saturating_mul(r.into())) + // Minimum execution time: 2_998_000 picoseconds. + Weight::from_parts(3_221_000, 1704) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. 
@@ -1410,10 +980,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 851_000 picoseconds. - Weight::from_parts(587_883, 0) - // Standard Error: 16 - .saturating_add(Weight::from_parts(14_912, 0).saturating_mul(r.into())) + // Minimum execution time: 1_002_000 picoseconds. + Weight::from_parts(1_094_958, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(14_531, 0).saturating_mul(r.into())) } } @@ -1425,8 +995,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_149_000 picoseconds. - Weight::from_parts(2_274_000, 1627) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_142_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1436,10 +1006,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 12_863_000 picoseconds. - Weight::from_parts(13_188_000, 442) - // Standard Error: 1_053 - .saturating_add(Weight::from_parts(1_105_325, 0).saturating_mul(k.into())) + // Minimum execution time: 12_095_000 picoseconds. + Weight::from_parts(12_699_000, 442) + // Standard Error: 891 + .saturating_add(Weight::from_parts(1_114_063, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1453,10 +1023,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 8_432_000 picoseconds. - Weight::from_parts(9_203_290, 6149) + // Minimum execution time: 8_433_000 picoseconds. + Weight::from_parts(8_992_328, 6149) // Standard Error: 1 - .saturating_add(Weight::from_parts(1_186, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1469,8 +1039,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 17_177_000 picoseconds. - Weight::from_parts(17_663_000, 6450) + // Minimum execution time: 16_415_000 picoseconds. + Weight::from_parts(17_348_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1483,10 +1053,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_636_000 picoseconds. - Weight::from_parts(3_774_000, 3635) - // Standard Error: 542 - .saturating_add(Weight::from_parts(1_260_058, 0).saturating_mul(k.into())) + // Minimum execution time: 3_433_000 picoseconds. 
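// --- Editor's sketch (not part of the generated diff) ----------------------
// The final `SubstrateWeight` entry above (the benchmark with `r` in
// `[0, 5000]`) measures bare instruction execution: no storage access, no
// proof size, just a base cost plus ~14_531 ps per extra repetition. A
// minimal evaluation of that fit:
use frame_support::weights::Weight;

fn instr_weight(r: u32) -> Weight {
    Weight::from_parts(1_094_958, 0)
        .saturating_add(Weight::from_parts(14_531, 0).saturating_mul(r.into()))
}
// ----------------------------------------------------------------------------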
+ Weight::from_parts(3_490_000, 3635) + // Standard Error: 1_043 + .saturating_add(Weight::from_parts(1_225_953, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1496,8 +1066,6 @@ impl WeightInfo for () { /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553053f13fd319a03c211337c76e0fe776df` (r:2 w:0) /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) /// Proof: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc553022fca90611ba8b7942f8bdb3b97f6580` (r:1 w:1) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:0 w:1) @@ -1505,13 +1073,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 125952]`. fn v12_migration_step(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `328 + c * (1 ±0)` - // Estimated: `6266 + c * (1 ±0)` - // Minimum execution time: 21_585_000 picoseconds. - Weight::from_parts(22_069_944, 6266) - // Standard Error: 1 - .saturating_add(Weight::from_parts(404, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `325 + c * (1 ±0)` + // Estimated: `6263 + c * (1 ±0)` + // Minimum execution time: 16_421_000 picoseconds. + Weight::from_parts(16_822_963, 6263) + // Standard Error: 0 + .saturating_add(Weight::from_parts(456, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } @@ -1521,8 +1089,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 13_283_000 picoseconds. - Weight::from_parts(14_015_000, 6380) + // Minimum execution time: 12_569_000 picoseconds. + Weight::from_parts(13_277_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1536,8 +1104,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 48_022_000 picoseconds. - Weight::from_parts(49_627_000, 6292) + // Minimum execution time: 46_777_000 picoseconds. + Weight::from_parts(47_690_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1549,8 +1117,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 58_374_000 picoseconds. - Weight::from_parts(59_615_000, 6534) + // Minimum execution time: 55_280_000 picoseconds. + Weight::from_parts(57_081_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1560,8 +1128,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 12_559_000 picoseconds. - Weight::from_parts(12_947_000, 6349) + // Minimum execution time: 12_077_000 picoseconds. 
+ Weight::from_parts(12_647_000, 6349) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1571,8 +1139,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_480_000 picoseconds. - Weight::from_parts(2_680_000, 1627) + // Minimum execution time: 2_559_000 picoseconds. + Weight::from_parts(2_711_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1584,8 +1152,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 12_625_000 picoseconds. - Weight::from_parts(13_094_000, 3631) + // Minimum execution time: 12_238_000 picoseconds. + Weight::from_parts(12_627_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1596,7 +1164,7 @@ impl WeightInfo for () { // Measured: `142` // Estimated: `3607` // Minimum execution time: 4_836_000 picoseconds. - Weight::from_parts(5_182_000, 3607) + Weight::from_parts(5_086_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1607,8 +1175,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 6_319_000 picoseconds. - Weight::from_parts(6_582_000, 3632) + // Minimum execution time: 6_147_000 picoseconds. + Weight::from_parts(6_380_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1619,15 +1187,13 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 6_532_000 picoseconds. - Weight::from_parts(6_909_000, 3607) + // Minimum execution time: 6_140_000 picoseconds. + Weight::from_parts(6_670_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1638,31 +1204,25 @@ impl WeightInfo for () { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `804 + c * (1 ±0)` - // Estimated: `9217 + c * (1 ±0)` - // Minimum execution time: 305_778_000 picoseconds. 
- Weight::from_parts(282_321_249, 9217) - // Standard Error: 72 - .saturating_add(Weight::from_parts(33_456, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `801 + c * (1 ±0)` + // Estimated: `4264 + c * (1 ±0)` + // Minimum execution time: 354_459_000 picoseconds. + Weight::from_parts(332_397_871, 4264) + // Standard Error: 70 + .saturating_add(Weight::from_parts(33_775, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:3 w:3) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -1678,18 +1238,18 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `8740` - // Minimum execution time: 3_810_809_000 picoseconds. - Weight::from_parts(739_511_598, 8740) - // Standard Error: 140 - .saturating_add(Weight::from_parts(67_574, 0).saturating_mul(c.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_488, 0).saturating_mul(i.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_537, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(14_u64)) - .saturating_add(RocksDbWeight::get().writes(10_u64)) + // Measured: `323` + // Estimated: `6262` + // Minimum execution time: 4_239_452_000 picoseconds. 
+ Weight::from_parts(800_849_282, 6262) + // Standard Error: 117 + .saturating_add(Weight::from_parts(68_435, 0).saturating_mul(c.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_653, 0).saturating_mul(i.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_668, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -1697,8 +1257,6 @@ impl WeightInfo for () { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:1) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) @@ -1707,29 +1265,25 @@ impl WeightInfo for () { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) /// The range of component `i` is `[0, 1048576]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate(i: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `563` - // Estimated: `8982` - // Minimum execution time: 1_986_789_000 picoseconds. - Weight::from_parts(2_017_466_000, 8982) + // Measured: `560` + // Estimated: `4029` + // Minimum execution time: 2_085_570_000 picoseconds. 
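// --- Editor's sketch (not part of the generated diff) ----------------------
// Multi-component fits compose the same way. The new RocksDb-based figures
// for `instantiate_with_code` above combine one slope each for code size
// `c`, input `i` and salt `s`; `RocksDbWeight` again stands in for the
// runtime's configured DbWeight.
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

fn instantiate_with_code_weight(c: u32, i: u32, s: u32) -> Weight {
    Weight::from_parts(800_849_282, 6262)
        .saturating_add(Weight::from_parts(68_435, 0).saturating_mul(c.into()))
        .saturating_add(Weight::from_parts(1_653, 0).saturating_mul(i.into()))
        .saturating_add(Weight::from_parts(1_668, 0).saturating_mul(s.into()))
        .saturating_add(RocksDbWeight::get().reads(8))
        .saturating_add(RocksDbWeight::get().writes(7))
}
// ----------------------------------------------------------------------------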
+ Weight::from_parts(2_112_501_000, 4029) // Standard Error: 26 - .saturating_add(Weight::from_parts(827, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(888, 0).saturating_mul(i.into())) // Standard Error: 26 - .saturating_add(Weight::from_parts(781, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(13_u64)) - .saturating_add(RocksDbWeight::get().writes(7_u64)) + .saturating_add(Weight::from_parts(795, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) @@ -1740,64 +1294,54 @@ impl WeightInfo for () { /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `829` - // Estimated: `9244` - // Minimum execution time: 210_724_000 picoseconds. - Weight::from_parts(218_608_000, 9244) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `826` + // Estimated: `4291` + // Minimum execution time: 201_900_000 picoseconds. + Weight::from_parts(206_738_000, 4291) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. fn upload_code_determinism_enforced(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 271_259_000 picoseconds. 
- Weight::from_parts(298_852_854, 6085) - // Standard Error: 65 - .saturating_add(Weight::from_parts(33_547, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 330_704_000 picoseconds. + Weight::from_parts(345_129_342, 3607) + // Standard Error: 51 + .saturating_add(Weight::from_parts(33_126, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// The range of component `c` is `[0, 125952]`. fn upload_code_determinism_relaxed(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `6085` - // Minimum execution time: 278_167_000 picoseconds. - Weight::from_parts(311_888_941, 6085) - // Standard Error: 58 - .saturating_add(Weight::from_parts(33_595, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 343_339_000 picoseconds. + Weight::from_parts(356_479_729, 3607) + // Standard Error: 49 + .saturating_add(Weight::from_parts(33_404, 0).saturating_mul(c.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -1805,18 +1349,16 @@ impl WeightInfo for () { /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1 w:1) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:0 w:1) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 47_403_000 picoseconds. - Weight::from_parts(48_707_000, 3780) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(4_u64)) + // Minimum execution time: 42_241_000 picoseconds. 
+ Weight::from_parts(43_365_000, 3780) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) @@ -1824,610 +1366,331 @@ impl WeightInfo for () { /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:2 w:2) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `System::EventTopics` (r:3 w:3) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `552` - // Estimated: `8967` - // Minimum execution time: 35_361_000 picoseconds. - Weight::from_parts(36_714_000, 8967) - .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) + // Estimated: `6492` + // Minimum execution time: 26_318_000 picoseconds. + Weight::from_parts(27_840_000, 6492) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// The range of component `r` is `[0, 1600]`. - fn seal_caller(r: u32, ) -> Weight { + fn noop_host_fn(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 9_397_000 picoseconds. + Weight::from_parts(9_318_986, 0) + // Standard Error: 72 + .saturating_add(Weight::from_parts(72_994, 0).saturating_mul(r.into())) + } + fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_340_000 picoseconds. - Weight::from_parts(9_360_237, 0) - // Standard Error: 269 - .saturating_add(Weight::from_parts(249_611, 0).saturating_mul(r.into())) + // Minimum execution time: 644_000 picoseconds. + Weight::from_parts(687_000, 0) } - /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_is_contract(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `509 + r * (77 ±0)` - // Estimated: `1467 + r * (2552 ±0)` - // Minimum execution time: 9_059_000 picoseconds. - Weight::from_parts(9_201_000, 1467) - // Standard Error: 5_643 - .saturating_add(Weight::from_parts(3_343_859, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2552).saturating_mul(r.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:1600 w:0) + fn seal_is_contract() -> Weight { + // Proof Size summary in bytes: + // Measured: `354` + // Estimated: `3819` + // Minimum execution time: 6_465_000 picoseconds. + Weight::from_parts(6_850_000, 3819) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. 
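// A minimal sketch (assuming only the standard `frame_support::weights` API)
// of how the regenerated formulas above are evaluated: components enter
// linearly, so `upload_code_determinism_enforced(c)` charges its base weight
// plus a per-byte cost plus the benchmarked DB operations. The function name
// `upload_code_weight` is illustrative; the constants are the new table values.
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn upload_code_weight(c: u32) -> Weight {
    // Base ref_time (picoseconds) and PoV proof size (bytes).
    Weight::from_parts(345_129_342, 3607)
        // Per-byte ref_time for the uploaded code blob.
        .saturating_add(Weight::from_parts(33_126, 0).saturating_mul(c.into()))
        // Benchmarked storage accesses: 3 reads and 3 writes.
        .saturating_add(RocksDbWeight::get().reads(3_u64))
        .saturating_add(RocksDbWeight::get().writes(3_u64))
}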
- fn seal_code_hash(r: u32, ) -> Weight { + fn seal_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `517 + r * (170 ±0)` - // Estimated: `1468 + r * (2645 ±0)` - // Minimum execution time: 9_220_000 picoseconds. - Weight::from_parts(9_399_000, 1468) - // Standard Error: 6_194 - .saturating_add(Weight::from_parts(4_172_011, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2645).saturating_mul(r.into())) + // Measured: `447` + // Estimated: `3912` + // Minimum execution time: 7_735_000 picoseconds. + Weight::from_parts(8_115_000, 3912) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// The range of component `r` is `[0, 1600]`. - fn seal_own_code_hash(r: u32, ) -> Weight { + fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_707_000 picoseconds. - Weight::from_parts(10_100_456, 0) - // Standard Error: 234 - .saturating_add(Weight::from_parts(338_464, 0).saturating_mul(r.into())) + // Minimum execution time: 717_000 picoseconds. + Weight::from_parts(791_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_caller_is_origin(r: u32, ) -> Weight { + fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_524_000 picoseconds. - Weight::from_parts(10_813_389, 0) - // Standard Error: 76 - .saturating_add(Weight::from_parts(102_535, 0).saturating_mul(r.into())) + // Minimum execution time: 365_000 picoseconds. + Weight::from_parts(427_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_caller_is_root(r: u32, ) -> Weight { + fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_799_000 picoseconds. - Weight::from_parts(10_886_744, 0) - // Standard Error: 75 - .saturating_add(Weight::from_parts(80_901, 0).saturating_mul(r.into())) + // Minimum execution time: 331_000 picoseconds. + Weight::from_parts(363_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_address(r: u32, ) -> Weight { + fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_895_000 picoseconds. - Weight::from_parts(10_658_338, 0) - // Standard Error: 189 - .saturating_add(Weight::from_parts(249_694, 0).saturating_mul(r.into())) + // Minimum execution time: 586_000 picoseconds. + Weight::from_parts(625_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_gas_left(r: u32, ) -> Weight { + fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_643_000 picoseconds. - Weight::from_parts(10_932_126, 0) - // Standard Error: 153 - .saturating_add(Weight::from_parts(280_924, 0).saturating_mul(r.into())) + // Minimum execution time: 680_000 picoseconds. + Weight::from_parts(734_000, 0) } - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_balance(r: u32, ) -> Weight { + fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` - // Estimated: `3599` - // Minimum execution time: 9_548_000 picoseconds. 
- Weight::from_parts(9_737_000, 3599) - // Standard Error: 971 - .saturating_add(Weight::from_parts(1_704_134, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Estimated: `0` + // Minimum execution time: 4_732_000 picoseconds. + Weight::from_parts(5_008_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_value_transferred(r: u32, ) -> Weight { + fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_172_000 picoseconds. - Weight::from_parts(18_255_933, 0) - // Standard Error: 540 - .saturating_add(Weight::from_parts(230_929, 0).saturating_mul(r.into())) + // Minimum execution time: 608_000 picoseconds. + Weight::from_parts(635_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_minimum_balance(r: u32, ) -> Weight { + fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_232_000 picoseconds. - Weight::from_parts(9_796_584, 0) - // Standard Error: 208 - .saturating_add(Weight::from_parts(239_962, 0).saturating_mul(r.into())) + // Minimum execution time: 571_000 picoseconds. + Weight::from_parts(606_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_block_number(r: u32, ) -> Weight { + fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_747_000 picoseconds. - Weight::from_parts(8_733_230, 0) - // Standard Error: 377 - .saturating_add(Weight::from_parts(253_801, 0).saturating_mul(r.into())) + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(584_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_now(r: u32, ) -> Weight { + fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_214_000 picoseconds. - Weight::from_parts(10_194_153, 0) - // Standard Error: 516 - .saturating_add(Weight::from_parts(247_621, 0).saturating_mul(r.into())) + // Minimum execution time: 552_000 picoseconds. + Weight::from_parts(612_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_weight_to_fee(r: u32, ) -> Weight { + fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 9_022_000 picoseconds. - Weight::from_parts(22_051_160, 1552) - // Standard Error: 697 - .saturating_add(Weight::from_parts(709_612, 0).saturating_mul(r.into())) + // Minimum execution time: 4_396_000 picoseconds. + Weight::from_parts(4_630_000, 1552) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// The range of component `r` is `[0, 1600]`. - fn seal_input(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_135_000 picoseconds. 
- Weight::from_parts(10_646_215, 0) - // Standard Error: 161 - .saturating_add(Weight::from_parts(170_336, 0).saturating_mul(r.into())) - } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 1048576]`. - fn seal_input_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `872` - // Estimated: `9287` - // Minimum execution time: 273_896_000 picoseconds. - Weight::from_parts(148_309_654, 9287) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// The range of component `r` is `[0, 1]`. - fn seal_return(r: u32, ) -> Weight { + /// The range of component `n` is `[0, 1048572]`. + fn seal_input(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_906_000 picoseconds. - Weight::from_parts(9_264_446, 0) - // Standard Error: 19_760 - .saturating_add(Weight::from_parts(1_256_053, 0).saturating_mul(r.into())) + // Minimum execution time: 494_000 picoseconds. + Weight::from_parts(510_000, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(303, 0).saturating_mul(n.into())) } - /// The range of component `n` is `[0, 1048576]`. - fn seal_return_per_byte(n: u32, ) -> Weight { + /// The range of component `n` is `[0, 1048572]`. + fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_266_000 picoseconds. - Weight::from_parts(10_602_261, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(318, 0).saturating_mul(n.into())) + // Minimum execution time: 311_000 picoseconds. 
+ Weight::from_parts(346_000, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(480, 0).saturating_mul(n.into())) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:3 w:3) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:1 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:4 w:4) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `Contracts::CodeInfoOf` (r:33 w:33) + /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::DeletionQueue` (r:0 w:1) /// Proof: `Contracts::DeletionQueue` (`max_values`: None, `max_size`: Some(142), added: 2617, mode: `Measured`) - /// The range of component `r` is `[0, 1]`. - fn seal_terminate(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `4805 + r * (2121 ±0)` - // Estimated: `13220 + r * (81321 ±0)` - // Minimum execution time: 295_922_000 picoseconds. - Weight::from_parts(322_472_877, 13220) - // Standard Error: 993_812 - .saturating_add(Weight::from_parts(259_075_422, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().reads((36_u64).saturating_mul(r.into()))) + /// The range of component `n` is `[0, 32]`. + fn seal_terminate(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `319 + n * (78 ±0)` + // Estimated: `3784 + n * (2553 ±0)` + // Minimum execution time: 14_403_000 picoseconds. 
+ Weight::from_parts(16_478_113, 3784) + // Standard Error: 6_667 + .saturating_add(Weight::from_parts(3_641_603, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(RocksDbWeight::get().writes((41_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 81321).saturating_mul(r.into())) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2553).saturating_mul(n.into())) } /// Storage: `RandomnessCollectiveFlip::RandomMaterial` (r:1 w:0) /// Proof: `RandomnessCollectiveFlip::RandomMaterial` (`max_values`: Some(1), `max_size`: Some(2594), added: 3089, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_random(r: u32, ) -> Weight { + fn seal_random() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 9_427_000 picoseconds. - Weight::from_parts(12_996_213, 1561) - // Standard Error: 845 - .saturating_add(Weight::from_parts(1_182_642, 0).saturating_mul(r.into())) + // Minimum execution time: 3_639_000 picoseconds. + Weight::from_parts(3_801_000, 1561) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// The range of component `r` is `[0, 1600]`. - fn seal_deposit_event(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_304_000 picoseconds. - Weight::from_parts(25_678_842, 0) - // Standard Error: 1_855 - .saturating_add(Weight::from_parts(1_814_511, 0).saturating_mul(r.into())) - } /// Storage: `System::EventTopics` (r:4 w:4) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16384]`. - fn seal_deposit_event_per_topic_and_byte(t: u32, n: u32, ) -> Weight { + fn seal_deposit_event(t: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 23_425_000 picoseconds. - Weight::from_parts(15_229_010, 990) - // Standard Error: 14_380 - .saturating_add(Weight::from_parts(2_545_653, 0).saturating_mul(t.into())) - // Standard Error: 4 - .saturating_add(Weight::from_parts(594, 0).saturating_mul(n.into())) + // Minimum execution time: 4_102_000 picoseconds. + Weight::from_parts(4_256_984, 990) + // Standard Error: 6_777 + .saturating_add(Weight::from_parts(2_331_893, 0).saturating_mul(t.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) } - /// The range of component `r` is `[0, 1600]`. - fn seal_debug_message(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 11_117_000 picoseconds. - Weight::from_parts(12_887_533, 0) - // Standard Error: 83 - .saturating_add(Weight::from_parts(99_373, 0).saturating_mul(r.into())) - } /// The range of component `i` is `[0, 1048576]`. 
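// A sketch of the two-component `seal_deposit_event(t, n)` formula above
// (the function name is illustrative; constants are the new table values):
// each of the `t` topics costs its own ref_time, one `EventTopics` read and
// write, and 2475 bytes of proof size, while the `n` payload bytes add
// ref_time only.
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn deposit_event_weight(t: u32, n: u32) -> Weight {
    Weight::from_parts(4_256_984, 990)
        // Per-topic ref_time.
        .saturating_add(Weight::from_parts(2_331_893, 0).saturating_mul(t.into()))
        // Per-byte ref_time of the event payload.
        .saturating_add(Weight::from_parts(31, 0).saturating_mul(n.into()))
        // One read and one write of `System::EventTopics` per topic.
        .saturating_add(RocksDbWeight::get().reads(t.into()))
        .saturating_add(RocksDbWeight::get().writes(t.into()))
        // Per-topic proof size contribution.
        .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into()))
}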
- fn seal_debug_message_per_byte(i: u32, ) -> Weight { + fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_982_000 picoseconds. - Weight::from_parts(11_176_000, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(983, 0).saturating_mul(i.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_set_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_150_000 picoseconds. - Weight::from_parts(9_269_000, 105) - // Standard Error: 8_147 - .saturating_add(Weight::from_parts(5_339_554, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `n` is `[0, 16384]`. - fn seal_set_storage_per_new_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `245` - // Estimated: `245` - // Minimum execution time: 19_085_000 picoseconds. - Weight::from_parts(20_007_323, 245) - // Standard Error: 3 - .saturating_add(Weight::from_parts(291, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) + // Minimum execution time: 385_000 picoseconds. + Weight::from_parts(427_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_272, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_set_storage_per_old_byte(n: u32, ) -> Weight { + /// The range of component `o` is `[0, 16384]`. + fn seal_set_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 19_127_000 picoseconds. - Weight::from_parts(21_152_987, 248) - // Standard Error: 3 - .saturating_add(Weight::from_parts(42, 0).saturating_mul(n.into())) + // Measured: `250 + o * (1 ±0)` + // Estimated: `249 + o * (1 ±0)` + // Minimum execution time: 10_128_000 picoseconds. + Weight::from_parts(9_963_519, 249) + // Standard Error: 1 + .saturating_add(Weight::from_parts(327, 0).saturating_mul(n.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(58, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_clear_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_264_000 picoseconds. 
- Weight::from_parts(9_449_000, 105) - // Standard Error: 8_196 - .saturating_add(Weight::from_parts(5_325_578, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_clear_storage_per_byte(n: u32, ) -> Weight { + fn seal_clear_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_489_000 picoseconds. - Weight::from_parts(19_916_153, 248) + // Minimum execution time: 7_921_000 picoseconds. + Weight::from_parts(9_290_526, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(97, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_get_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_299_000 picoseconds. - Weight::from_parts(9_464_000, 105) - // Standard Error: 6_827 - .saturating_add(Weight::from_parts(4_720_699, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_get_storage_per_byte(n: u32, ) -> Weight { + fn seal_get_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 17_981_000 picoseconds. - Weight::from_parts(19_802_353, 248) + // Minimum execution time: 7_403_000 picoseconds. + Weight::from_parts(8_815_037, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(617, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(701, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_contains_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_891_000 picoseconds. 
- Weight::from_parts(10_046_000, 105) - // Standard Error: 6_993 - .saturating_add(Weight::from_parts(4_601_167, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_contains_storage_per_byte(n: u32, ) -> Weight { + fn seal_contains_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 17_229_000 picoseconds. - Weight::from_parts(18_302_733, 248) + // Minimum execution time: 6_590_000 picoseconds. + Weight::from_parts(7_949_861, 248) // Standard Error: 2 - .saturating_add(Weight::from_parts(112, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(76, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_take_storage(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `108 + r * (150 ±0)` - // Estimated: `105 + r * (151 ±0)` - // Minimum execution time: 9_323_000 picoseconds. - Weight::from_parts(9_462_000, 105) - // Standard Error: 8_031 - .saturating_add(Weight::from_parts(5_433_981, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 151).saturating_mul(r.into())) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `n` is `[0, 16384]`. - fn seal_take_storage_per_byte(n: u32, ) -> Weight { + fn seal_take_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 18_711_000 picoseconds. - Weight::from_parts(20_495_670, 248) + // Minimum execution time: 7_900_000 picoseconds. + Weight::from_parts(9_988_151, 248) // Standard Error: 3 - .saturating_add(Weight::from_parts(640, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(703, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } - /// Storage: `System::Account` (r:1601 w:1601) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_transfer(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `4221 + r * (2475 ±0)` - // Minimum execution time: 9_226_000 picoseconds. 
- Weight::from_parts(9_394_000, 4221) - // Standard Error: 14_741 - .saturating_add(Weight::from_parts(34_179_316, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2475).saturating_mul(r.into())) + fn seal_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `0` + // Minimum execution time: 9_023_000 picoseconds. + Weight::from_parts(9_375_000, 0) } - /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `System::EventTopics` (r:801 w:801) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_call(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `520 + r * (170 ±0)` - // Estimated: `6463 + r * (2646 ±0)` - // Minimum execution time: 9_455_000 picoseconds. - Weight::from_parts(9_671_000, 6463) - // Standard Error: 126_080 - .saturating_add(Weight::from_parts(244_204_040, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2646).saturating_mul(r.into())) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) + /// The range of component `t` is `[0, 1]`. + /// The range of component `i` is `[0, 1048576]`. + fn seal_call(t: u32, i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `620 + t * (280 ±0)` + // Estimated: `4085 + t * (2182 ±0)` + // Minimum execution time: 157_109_000 picoseconds. 
+ Weight::from_parts(159_458_069, 4085) + // Standard Error: 339_702 + .saturating_add(Weight::from_parts(44_066_869, 0).saturating_mul(t.into())) + // Standard Error: 0 + .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 2182).saturating_mul(t.into())) } - /// Storage: `Contracts::CodeInfoOf` (r:735 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:735 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `System::EventTopics` (r:736 w:736) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:0 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// The range of component `r` is `[0, 800]`. - fn seal_delegate_call(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + r * (527 ±0)` - // Estimated: `6447 + r * (2583 ±10)` - // Minimum execution time: 9_274_000 picoseconds. - Weight::from_parts(9_437_000, 6447) - // Standard Error: 150_832 - .saturating_add(Weight::from_parts(244_196_269, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2583).saturating_mul(r.into())) - } - /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// The range of component `t` is `[0, 1]`. - /// The range of component `c` is `[0, 1048576]`. - fn seal_call_per_transfer_clone_byte(t: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `699 + t * (277 ±0)` - // Estimated: `6639 + t * (3458 ±0)` - // Minimum execution time: 214_483_000 picoseconds. 
- Weight::from_parts(122_634_366, 6639) - // Standard Error: 2_499_235 - .saturating_add(Weight::from_parts(41_326_008, 0).saturating_mul(t.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(422, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(7_u64)) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(t.into()))) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 3458).saturating_mul(t.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:800 w:800) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:800 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Contracts::Nonce` (r:1 w:0) - /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:800 w:801) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:802 w:802) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `System::EventTopics` (r:801 w:801) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[1, 800]`. - fn seal_instantiate(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `1097 + r * (188 ±0)` - // Estimated: `6990 + r * (2664 ±0)` - // Minimum execution time: 341_569_000 picoseconds. - Weight::from_parts(360_574_000, 6990) - // Standard Error: 259_746 - .saturating_add(Weight::from_parts(337_944_674, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes(4_u64)) - .saturating_add(RocksDbWeight::get().writes((4_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2664).saturating_mul(r.into())) + fn seal_delegate_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `430` + // Estimated: `3895` + // Minimum execution time: 143_384_000 picoseconds. 
+ Weight::from_parts(147_554_000, 3895) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -2435,256 +1698,149 @@ impl WeightInfo for () { /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:2) + /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `System::Account` (r:3 w:3) + /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `t` is `[0, 1]`. /// The range of component `i` is `[0, 983040]`. /// The range of component `s` is `[0, 983040]`. - fn seal_instantiate_per_transfer_input_salt_byte(t: u32, i: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `760 + t * (104 ±0)` - // Estimated: `6719 + t * (2549 ±1)` - // Minimum execution time: 1_863_119_000 picoseconds. - Weight::from_parts(900_189_174, 6719) - // Standard Error: 13_040_979 - .saturating_add(Weight::from_parts(4_056_063, 0).saturating_mul(t.into())) - // Standard Error: 20 - .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(i.into())) - // Standard Error: 20 - .saturating_add(Weight::from_parts(1_173, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) - .saturating_add(RocksDbWeight::get().writes(7_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2549).saturating_mul(t.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_sha2_256(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_211_000 picoseconds. - Weight::from_parts(11_696_412, 0) - // Standard Error: 388 - .saturating_add(Weight::from_parts(265_538, 0).saturating_mul(r.into())) + fn seal_instantiate(t: u32, i: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `676` + // Estimated: `4138` + // Minimum execution time: 1_798_243_000 picoseconds. + Weight::from_parts(82_642_573, 4138) + // Standard Error: 6_831_260 + .saturating_add(Weight::from_parts(159_867_027, 0).saturating_mul(t.into())) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_534, 0).saturating_mul(i.into())) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_809, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// The range of component `n` is `[0, 1048576]`. 
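// A sketch of the three-component `seal_instantiate(t, i, s)` formula above,
// combining the value-transfer flag `t`, input length `i` and salt length `s`
// (illustrative function name; constants are the new table values):
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn instantiate_weight(t: u32, i: u32, s: u32) -> Weight {
    Weight::from_parts(82_642_573, 4138)
        // Extra cost when a balance transfer accompanies the instantiation.
        .saturating_add(Weight::from_parts(159_867_027, 0).saturating_mul(t.into()))
        // Per-byte costs for the constructor input and the salt.
        .saturating_add(Weight::from_parts(1_534, 0).saturating_mul(i.into()))
        .saturating_add(Weight::from_parts(1_809, 0).saturating_mul(s.into()))
        // Benchmarked storage accesses: 5 reads and 3 writes.
        .saturating_add(RocksDbWeight::get().reads(5_u64))
        .saturating_add(RocksDbWeight::get().writes(3_u64))
}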
- fn seal_hash_sha2_256_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_296_000 picoseconds. - Weight::from_parts(572_494, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_067, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_keccak_256(r: u32, ) -> Weight { + fn seal_hash_sha2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_177_000 picoseconds. - Weight::from_parts(8_620_481, 0) - // Standard Error: 249 - .saturating_add(Weight::from_parts(674_502, 0).saturating_mul(r.into())) + // Minimum execution time: 875_000 picoseconds. + Weight::from_parts(904_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_145, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_keccak_256_per_byte(n: u32, ) -> Weight { + fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_240_000 picoseconds. - Weight::from_parts(8_696_186, 0) + // Minimum execution time: 1_475_000 picoseconds. + Weight::from_parts(1_551_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(3_328, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_blake2_256(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_889_000 picoseconds. - Weight::from_parts(16_103_170, 0) - // Standard Error: 343 - .saturating_add(Weight::from_parts(328_939, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(3_410, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_blake2_256_per_byte(n: u32, ) -> Weight { + fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_405_000 picoseconds. - Weight::from_parts(2_264_024, 0) + // Minimum execution time: 821_000 picoseconds. + Weight::from_parts(850_000, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_196, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 1600]`. - fn seal_hash_blake2_128(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_215_000 picoseconds. - Weight::from_parts(10_505_632, 0) - // Standard Error: 240 - .saturating_add(Weight::from_parts(324_854, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(1_279, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. - fn seal_hash_blake2_128_per_byte(n: u32, ) -> Weight { + fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 10_440_000 picoseconds. - Weight::from_parts(2_575_889, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + // Minimum execution time: 747_000 picoseconds. + Weight::from_parts(773_000, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_276, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. 
- fn seal_sr25519_verify_per_byte(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 55_119_000 picoseconds. - Weight::from_parts(56_732_248, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(4_639, 0).saturating_mul(n.into())) - } - /// The range of component `r` is `[0, 160]`. - fn seal_sr25519_verify(r: u32, ) -> Weight { + fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_176_000 picoseconds. - Weight::from_parts(9_861_102, 0) - // Standard Error: 6_029 - .saturating_add(Weight::from_parts(45_948_571, 0).saturating_mul(r.into())) + // Minimum execution time: 43_154_000 picoseconds. + Weight::from_parts(45_087_558, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(4_628, 0).saturating_mul(n.into())) } - /// The range of component `r` is `[0, 160]`. - fn seal_ecdsa_recover(r: u32, ) -> Weight { + fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_293_000 picoseconds. - Weight::from_parts(28_785_765, 0) - // Standard Error: 9_160 - .saturating_add(Weight::from_parts(45_566_150, 0).saturating_mul(r.into())) + // Minimum execution time: 47_193_000 picoseconds. + Weight::from_parts(48_514_000, 0) } - /// The range of component `r` is `[0, 160]`. - fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { + fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_206_000 picoseconds. - Weight::from_parts(12_420_664, 0) - // Standard Error: 3_489 - .saturating_add(Weight::from_parts(11_628_989, 0).saturating_mul(r.into())) + // Minimum execution time: 13_083_000 picoseconds. + Weight::from_parts(13_218_000, 0) } - /// Storage: `Contracts::CodeInfoOf` (r:1536 w:1536) + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1535 w:0) + /// Storage: `Contracts::PristineCode` (r:1 w:0) /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:2 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `System::EventTopics` (r:1537 w:1537) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_set_code_hash(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + r * (926 ±0)` - // Estimated: `8969 + r * (3047 ±7)` - // Minimum execution time: 9_219_000 picoseconds. - Weight::from_parts(9_385_000, 8969) - // Standard Error: 45_562 - .saturating_add(Weight::from_parts(26_360_661, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 3047).saturating_mul(r.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:32 w:32) + fn seal_set_code_hash() -> Weight { + // Proof Size summary in bytes: + // Measured: `430` + // Estimated: `3895` + // Minimum execution time: 19_308_000 picoseconds. 
+ Weight::from_parts(20_116_000, 3895) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// The range of component `r` is `[0, 32]`. - fn lock_delegate_dependency(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `274 + r * (78 ±0)` - // Estimated: `1265 + r * (2553 ±0)` - // Minimum execution time: 9_355_000 picoseconds. - Weight::from_parts(15_071_309, 1265) - // Standard Error: 9_722 - .saturating_add(Weight::from_parts(5_328_717, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2553).saturating_mul(r.into())) - } - /// Storage: `Contracts::CodeInfoOf` (r:32 w:32) + fn lock_delegate_dependency() -> Weight { + // Proof Size summary in bytes: + // Measured: `355` + // Estimated: `3820` + // Minimum execution time: 9_271_000 picoseconds. + Weight::from_parts(9_640_000, 3820) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `MaxEncodedLen`) - /// The range of component `r` is `[0, 32]`. - fn unlock_delegate_dependency(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `275 + r * (78 ±0)` - // Estimated: `990 + r * (2568 ±0)` - // Minimum execution time: 8_979_000 picoseconds. - Weight::from_parts(14_362_224, 990) - // Standard Error: 9_137 - .saturating_add(Weight::from_parts(4_488_748, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2568).saturating_mul(r.into())) + fn unlock_delegate_dependency() -> Weight { + // Proof Size summary in bytes: + // Measured: `355` + // Estimated: `3558` + // Minimum execution time: 8_182_000 picoseconds. 
+ Weight::from_parts(8_343_000, 3558) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Contracts::MigrationInProgress` (r:1 w:0) - /// Proof: `Contracts::MigrationInProgress` (`max_values`: Some(1), `max_size`: Some(1026), added: 1521, mode: `Measured`) - /// Storage: `System::Account` (r:1 w:0) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) - /// Storage: `Parameters::Parameters` (r:3 w:0) - /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(36), added: 2511, mode: `Measured`) - /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) - /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) - /// Storage: `Contracts::CodeInfoOf` (r:1 w:0) - /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) - /// Storage: `Contracts::PristineCode` (r:1 w:0) - /// Proof: `Contracts::PristineCode` (`max_values`: None, `max_size`: Some(125988), added: 128463, mode: `Measured`) - /// Storage: `Timestamp::Now` (r:1 w:0) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// Storage: `System::EventTopics` (r:2 w:2) - /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_reentrance_count(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `861 + r * (3 ±0)` - // Estimated: `9282 + r * (3 ±0)` - // Minimum execution time: 269_704_000 picoseconds. - Weight::from_parts(289_916_035, 9282) - // Standard Error: 408 - .saturating_add(Weight::from_parts(166_040, 0).saturating_mul(r.into())) - .saturating_add(RocksDbWeight::get().reads(11_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - .saturating_add(Weight::from_parts(0, 3).saturating_mul(r.into())) + fn seal_reentrance_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 320_000 picoseconds. + Weight::from_parts(347_000, 0) } - /// The range of component `r` is `[0, 1600]`. - fn seal_account_reentrance_count(r: u32, ) -> Weight { + fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_361_000 picoseconds. - Weight::from_parts(11_633_836, 0) - // Standard Error: 86 - .saturating_add(Weight::from_parts(83_083, 0).saturating_mul(r.into())) + // Minimum execution time: 345_000 picoseconds. + Weight::from_parts(370_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) - /// The range of component `r` is `[0, 1600]`. - fn seal_instantiation_nonce(r: u32, ) -> Weight { + fn seal_instantiation_nonce() -> Weight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 9_133_000 picoseconds. - Weight::from_parts(13_259_836, 1704) - // Standard Error: 121 - .saturating_add(Weight::from_parts(76_878, 0).saturating_mul(r.into())) + // Minimum execution time: 2_998_000 picoseconds. + Weight::from_parts(3_221_000, 1704) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. 
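// The `r`-parameterised instruction benchmark regenerated in the final hunk
// below charges a flat base plus roughly 14.5 ns of ref_time per iteration
// and touches no storage. A sketch of that formula (illustrative function
// name; constants are the new table values):
use frame_support::weights::Weight;

fn instr_weight(r: u32) -> Weight {
    Weight::from_parts(1_094_958, 0)
        .saturating_add(Weight::from_parts(14_531, 0).saturating_mul(r.into()))
}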
@@ -2692,9 +1848,9 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 851_000 picoseconds.
-		Weight::from_parts(587_883, 0)
-			// Standard Error: 16
-			.saturating_add(Weight::from_parts(14_912, 0).saturating_mul(r.into()))
+		// Minimum execution time: 1_002_000 picoseconds.
+		Weight::from_parts(1_094_958, 0)
+			// Standard Error: 12
+			.saturating_add(Weight::from_parts(14_531, 0).saturating_mul(r.into()))
 	}
 }
diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml
index d9a5ee14f0545a7481fcfd133887c74921659589..80de7a1d5d69077d91ec9e0b845128027aad55ca 100644
--- a/substrate/frame/contracts/uapi/Cargo.toml
+++ b/substrate/frame/contracts/uapi/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 paste = { version = "1.0", default-features = false }
 bitflags = "1.0"
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"], optional = true }
-scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+scale = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 	"max-encoded-len",
 ], optional = true }
diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml
index ffb5122ed7f980a547e2d299f0a687641f0d9161..20de4d858ad62e4e499e5c4ee6786b1f5ee2f6c4 100644
--- a/substrate/frame/conviction-voting/Cargo.toml
+++ b/substrate/frame/conviction-voting/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
 assert_matches = "1.3.0"
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 	"max-encoded-len",
 ] }
diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml
index b4258281b7010855bb1f81aae4bce1cd64fb774e..8773a124cd02accb450daa2218c161563a68d8bf 100644
--- a/substrate/frame/core-fellowship/Cargo.toml
+++ b/substrate/frame/core-fellowship/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs
index fd5453310be51a65dac808ee2f07737ca8915203..b3ee3ab7d165fd87710b207ab4668bba450e2189 100644
--- a/substrate/frame/core-fellowship/src/benchmarking.rs
+++ b/substrate/frame/core-fellowship/src/benchmarking.rs
@@ -54,11 +54,12 @@ mod benchmarks {
 	}

 	fn set_benchmark_params<T: Config<I>, I: 'static>() -> Result<(), BenchmarkError> {
+		let max_rank = T::MaxRank::get().try_into().unwrap();
 		let params = ParamsType {
-			active_salary: [100u32.into(); 9],
-			passive_salary: [10u32.into(); 9],
-			demotion_period: [100u32.into(); 9],
-			min_promotion_period: [100u32.into(); 9],
+			active_salary: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(),
+			passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(),
+			demotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(),
BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), offboard_timeout: 1u32.into(), }; @@ -68,11 +69,12 @@ mod benchmarks { #[benchmark] fn set_params() -> Result<(), BenchmarkError> { + let max_rank = T::MaxRank::get().try_into().unwrap(); let params = ParamsType { - active_salary: [100u32.into(); 9], - passive_salary: [10u32.into(); 9], - demotion_period: [100u32.into(); 9], - min_promotion_period: [100u32.into(); 9], + active_salary: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + passive_salary: BoundedVec::try_from(vec![10u32.into(); max_rank]).unwrap(), + demotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), + min_promotion_period: BoundedVec::try_from(vec![100u32.into(); max_rank]).unwrap(), offboard_timeout: 1u32.into(), }; @@ -151,10 +153,14 @@ mod benchmarks { fn promote() -> Result<(), BenchmarkError> { // Ensure that the `min_promotion_period` wont get in our way. let mut params = Params::::get(); - params.min_promotion_period = [Zero::zero(); RANK_COUNT]; + let max_rank = T::MaxRank::get().try_into().unwrap(); + params.min_promotion_period = BoundedVec::try_from(vec![Zero::zero(); max_rank]).unwrap(); Params::::put(¶ms); let member = make_member::(1)?; + + // Set it to the max value to ensure that any possible auto-demotion period has passed. + frame_system::Pallet::::set_block_number(BlockNumberFor::::max_value()); ensure_evidence::(&member)?; #[extrinsic_call] diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs index afb188261fd448c2617189e0db71050d3d45234e..94339b85d0524a297241248312e92b31e761149c 100644 --- a/substrate/frame/core-fellowship/src/lib.rs +++ b/substrate/frame/core-fellowship/src/lib.rs @@ -61,7 +61,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_arithmetic::traits::{Saturating, Zero}; use sp_runtime::RuntimeDebug; -use sp_std::{marker::PhantomData, prelude::*}; +use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; use frame_support::{ defensive, @@ -71,7 +71,7 @@ use frame_support::{ tokens::Balance as BalanceTrait, EnsureOrigin, EnsureOriginWithArg, Get, RankedMembers, RankedMembersSwapHandler, }, - BoundedVec, + BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; #[cfg(test)] @@ -79,10 +79,11 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; pub mod weights; pub use pallet::*; -pub use weights::WeightInfo; +pub use weights::*; /// The desired outcome for which evidence is presented. #[derive(Encode, Decode, Eq, PartialEq, Copy, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)] @@ -100,29 +101,46 @@ pub enum Wish { pub type Evidence = BoundedVec>::EvidenceSize>; /// The status of the pallet instance. -#[derive(Encode, Decode, Eq, PartialEq, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)] -pub struct ParamsType { +#[derive( + Encode, + Decode, + CloneNoBound, + EqNoBound, + PartialEqNoBound, + RuntimeDebugNoBound, + TypeInfo, + MaxEncodedLen, +)] +#[scale_info(skip_type_params(Ranks))] +pub struct ParamsType< + Balance: Clone + Eq + PartialEq + Debug, + BlockNumber: Clone + Eq + PartialEq + Debug, + Ranks: Get, +> { /// The amounts to be paid when a member of a given rank (-1) is active. - active_salary: [Balance; RANKS], + pub active_salary: BoundedVec, /// The amounts to be paid when a member of a given rank (-1) is passive. 
- passive_salary: [Balance; RANKS], + pub passive_salary: BoundedVec, /// The period between which unproven members become demoted. - demotion_period: [BlockNumber; RANKS], + pub demotion_period: BoundedVec, /// The period between which members must wait before they may proceed to this rank. - min_promotion_period: [BlockNumber; RANKS], + pub min_promotion_period: BoundedVec, /// Amount by which an account can remain at rank 0 (candidate before being offboard entirely). - offboard_timeout: BlockNumber, + pub offboard_timeout: BlockNumber, } -impl Default - for ParamsType +impl< + Balance: Default + Copy + Eq + Debug, + BlockNumber: Default + Copy + Eq + Debug, + Ranks: Get, + > Default for ParamsType { fn default() -> Self { Self { - active_salary: [Balance::default(); RANKS], - passive_salary: [Balance::default(); RANKS], - demotion_period: [BlockNumber::default(); RANKS], - min_promotion_period: [BlockNumber::default(); RANKS], + active_salary: Default::default(), + passive_salary: Default::default(), + demotion_period: Default::default(), + min_promotion_period: Default::default(), offboard_timeout: BlockNumber::default(), } } @@ -148,11 +166,11 @@ pub mod pallet { traits::{tokens::GetSalary, EnsureOrigin}, }; use frame_system::{ensure_root, pallet_prelude::*}; - - /// Number of available ranks. - pub(crate) const RANK_COUNT: usize = 9; + /// The in-code storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(PhantomData<(T, I)>); #[pallet::config] @@ -194,9 +212,16 @@ pub mod pallet { /// The maximum size in bytes submitted evidence is allowed to be. #[pallet::constant] type EvidenceSize: Get; + + /// Represents the highest possible rank in this pallet. + /// + /// Increasing this value is supported, but decreasing it may lead to a broken state. + #[pallet::constant] + type MaxRank: Get; } - pub type ParamsOf = ParamsType<>::Balance, BlockNumberFor, RANK_COUNT>; + pub type ParamsOf = + ParamsType<>::Balance, BlockNumberFor, >::MaxRank>; pub type MemberStatusOf = MemberStatus>; pub type RankOf = <>::Members as RankedMembers>::Rank; @@ -338,8 +363,10 @@ pub mod pallet { #[pallet::call_index(1)] pub fn set_params(origin: OriginFor, params: Box>) -> DispatchResult { T::ParamsOrigin::ensure_origin_or_root(origin)?; + Params::::put(params.as_ref()); Self::deposit_event(Event::::ParamsChanged { params: *params }); + Ok(()) } @@ -540,7 +567,7 @@ pub mod pallet { /// in the range `1..=RANK_COUNT` is `None`. pub(crate) fn rank_to_index(rank: RankOf) -> Option { match TryInto::::try_into(rank) { - Ok(r) if r <= RANK_COUNT && r > 0 => Some(r - 1), + Ok(r) if r as u32 <= >::MaxRank::get() && r > 0 => Some(r - 1), _ => return None, } } diff --git a/substrate/frame/core-fellowship/src/migration.rs b/substrate/frame/core-fellowship/src/migration.rs new file mode 100644 index 0000000000000000000000000000000000000000..b8b5540a4b475be0a5a40ec2324b94f71d0da9c8 --- /dev/null +++ b/substrate/frame/core-fellowship/src/migration.rs @@ -0,0 +1,111 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Storage migrations for the core-fellowship pallet.
+use super::*;
+use frame_support::{
+	pallet_prelude::*,
+	storage_alias,
+	traits::{DefensiveTruncateFrom, UncheckedOnRuntimeUpgrade},
+	BoundedVec,
+};
+
+#[cfg(feature = "try-runtime")]
+use sp_runtime::TryRuntimeError;
+
+mod v0 {
+	use frame_system::pallet_prelude::BlockNumberFor;
+
+	use super::*;
+
+	#[derive(Encode, Decode, Eq, PartialEq, Clone, TypeInfo, MaxEncodedLen, RuntimeDebug)]
+	pub struct ParamsType<Balance, BlockNumber, const RANKS: usize> {
+		pub active_salary: [Balance; RANKS],
+		pub passive_salary: [Balance; RANKS],
+		pub demotion_period: [BlockNumber; RANKS],
+		pub min_promotion_period: [BlockNumber; RANKS],
+		pub offboard_timeout: BlockNumber,
+	}
+
+	impl<Balance: Default + Copy, BlockNumber: Default + Copy, const RANKS: usize> Default
+		for ParamsType<Balance, BlockNumber, RANKS>
+	{
+		fn default() -> Self {
+			Self {
+				active_salary: [Balance::default(); RANKS],
+				passive_salary: [Balance::default(); RANKS],
+				demotion_period: [BlockNumber::default(); RANKS],
+				min_promotion_period: [BlockNumber::default(); RANKS],
+				offboard_timeout: BlockNumber::default(),
+			}
+		}
+	}
+
+	/// Number of available ranks from old version.
+	pub(crate) const RANK_COUNT: usize = 9;
+
+	pub type ParamsOf<T, I> =
+		ParamsType<<T as Config<I>>::Balance, BlockNumberFor<T>, RANK_COUNT>;
+
+	/// V0 type for [`crate::Params`].
+	#[storage_alias]
+	pub type Params<T: Config<I>, I: 'static> =
+		StorageValue<Pallet<T, I>, ParamsOf<T, I>, ValueQuery>;
+}
+
+pub struct MigrateToV1<T, I>(PhantomData<(T, I)>);
+impl<T: Config<I>, I: 'static> UncheckedOnRuntimeUpgrade for MigrateToV1<T, I> {
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
+		ensure!(
+			T::MaxRank::get() >= v0::RANK_COUNT as u32,
+			"pallet-core-fellowship: new bound should not truncate"
+		);
+		Ok(Default::default())
+	}
+
+	fn on_runtime_upgrade() -> frame_support::weights::Weight {
+		// Read the old value from storage
+		let old_value = v0::Params::<T, I>::take();
+		// Write the new value to storage
+		let new = crate::ParamsType {
+			active_salary: BoundedVec::defensive_truncate_from(old_value.active_salary.to_vec()),
+			passive_salary: BoundedVec::defensive_truncate_from(old_value.passive_salary.to_vec()),
+			demotion_period: BoundedVec::defensive_truncate_from(
+				old_value.demotion_period.to_vec(),
+			),
+			min_promotion_period: BoundedVec::defensive_truncate_from(
+				old_value.min_promotion_period.to_vec(),
+			),
+			offboard_timeout: old_value.offboard_timeout,
+		};
+		crate::Params::<T, I>::put(new);
+		T::DbWeight::get().reads_writes(1, 1)
+	}
+}
+
+/// [`UncheckedOnRuntimeUpgrade`] implementation [`MigrateToV1`] wrapped in a
+/// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that:
+/// - The migration only runs once when the on-chain storage version is 0
+/// - The on-chain storage version is updated to `1` after the migration executes
+/// - Reads/Writes from checking/setting the on-chain storage version are accounted for
+pub type MigrateV0ToV1<T, I> = frame_support::migrations::VersionedMigration<
+	0, // The migration will only execute when the on-chain storage version is 0
+	1, // The on-chain storage version will be set to 1 after the migration is complete
+	MigrateToV1<T, I>,
+	crate::pallet::Pallet<T, I>,
+	<T as frame_system::Config>::DbWeight,
+>;
diff --git 
a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs index d3bbac158056e7edbbf1e08b255d63450fa1c5c0..f3137316658576d91616428ee32d51b916189963 100644 --- a/substrate/frame/core-fellowship/src/tests/integration.rs +++ b/substrate/frame/core-fellowship/src/tests/integration.rs @@ -25,8 +25,9 @@ use frame_support::{ }; use frame_system::EnsureSignedBy; use pallet_ranked_collective::{EnsureRanked, Geometric, Rank, TallyOf, Votes}; -use sp_core::Get; +use sp_core::{ConstU32, Get}; use sp_runtime::{ + bounded_vec, traits::{Convert, ReduceBy, ReplaceWithDefault, TryMorphInto}, BuildStorage, DispatchError, }; @@ -78,6 +79,7 @@ impl Config for Test { type ApproveOrigin = TryMapSuccess, u64>, TryMorphInto>; type PromoteOrigin = TryMapSuccess, u64>, TryMorphInto>; type EvidenceSize = EvidenceSize; + type MaxRank = ConstU32<9>; } pub struct TestPolls; @@ -163,11 +165,13 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| { + assert_ok!(Club::add_member(RuntimeOrigin::root(), 100)); + promote_n_times(100, 9); let params = ParamsType { - active_salary: [10, 20, 30, 40, 50, 60, 70, 80, 90], - passive_salary: [1, 2, 3, 4, 5, 6, 7, 8, 9], - demotion_period: [2, 4, 6, 8, 10, 12, 14, 16, 18], - min_promotion_period: [3, 6, 9, 12, 15, 18, 21, 24, 27], + active_salary: bounded_vec![10, 20, 30, 40, 50, 60, 70, 80, 90], + passive_salary: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + demotion_period: bounded_vec![2, 4, 6, 8, 10, 12, 14, 16, 18], + min_promotion_period: bounded_vec![3, 6, 9, 12, 15, 18, 21, 24, 27], offboard_timeout: 1, }; assert_ok!(CoreFellowship::set_params(signed(1), Box::new(params))); diff --git a/substrate/frame/core-fellowship/src/tests/unit.rs b/substrate/frame/core-fellowship/src/tests/unit.rs index 669517d61a4a963d3e021191ed3f9e561021b908..9245e5159a901d47842339836ae3f0767a2b1b1f 100644 --- a/substrate/frame/core-fellowship/src/tests/unit.rs +++ b/substrate/frame/core-fellowship/src/tests/unit.rs @@ -27,7 +27,7 @@ use frame_support::{ traits::{tokens::GetSalary, ConstU32, IsInVec, TryMapSuccess}, }; use frame_system::EnsureSignedBy; -use sp_runtime::{traits::TryMorphInto, BuildStorage, DispatchError, DispatchResult}; +use sp_runtime::{bounded_vec, traits::TryMorphInto, BuildStorage, DispatchError, DispatchResult}; use crate as pallet_core_fellowship; use crate::*; @@ -116,19 +116,22 @@ impl Config for Test { type ApproveOrigin = TryMapSuccess, u64>, TryMorphInto>; type PromoteOrigin = TryMapSuccess, u64>, TryMorphInto>; type EvidenceSize = ConstU32<1024>; + type MaxRank = ConstU32<9>; } pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| { + set_rank(100, 9); let params = ParamsType { - active_salary: [10, 20, 30, 40, 50, 60, 70, 80, 90], - passive_salary: [1, 2, 3, 4, 5, 6, 7, 8, 9], - demotion_period: [2, 4, 6, 8, 10, 12, 14, 16, 18], - min_promotion_period: [3, 6, 9, 12, 15, 18, 21, 24, 27], + active_salary: bounded_vec![10, 20, 30, 40, 50, 60, 70, 80, 90], + passive_salary: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + demotion_period: bounded_vec![2, 4, 6, 8, 10, 12, 14, 16, 18], + min_promotion_period: bounded_vec![3, 6, 9, 12, 15, 18, 21, 24, 27], offboard_timeout: 1, }; + assert_ok!(CoreFellowship::set_params(signed(1), 
Box::new(params))); System::set_block_number(1); }); @@ -170,10 +173,10 @@ fn basic_stuff() { fn set_params_works() { new_test_ext().execute_with(|| { let params = ParamsType { - active_salary: [10, 20, 30, 40, 50, 60, 70, 80, 90], - passive_salary: [1, 2, 3, 4, 5, 6, 7, 8, 9], - demotion_period: [1, 2, 3, 4, 5, 6, 7, 8, 9], - min_promotion_period: [1, 2, 3, 4, 5, 10, 15, 20, 30], + active_salary: bounded_vec![10, 20, 30, 40, 50, 60, 70, 80, 90], + passive_salary: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + demotion_period: bounded_vec![1, 2, 3, 4, 5, 6, 7, 8, 9], + min_promotion_period: bounded_vec![1, 2, 3, 4, 5, 10, 15, 20, 30], offboard_timeout: 1, }; assert_noop!( @@ -284,10 +287,10 @@ fn offboard_works() { fn infinite_demotion_period_works() { new_test_ext().execute_with(|| { let params = ParamsType { - active_salary: [10; 9], - passive_salary: [10; 9], - min_promotion_period: [10; 9], - demotion_period: [0; 9], + active_salary: bounded_vec![10, 10, 10, 10, 10, 10, 10, 10, 10], + passive_salary: bounded_vec![10, 10, 10, 10, 10, 10, 10, 10, 10], + min_promotion_period: bounded_vec![10, 10, 10, 10, 10, 10, 10, 10, 10], + demotion_period: bounded_vec![0, 0, 0, 0, 0, 0, 0, 0, 0], offboard_timeout: 0, }; assert_ok!(CoreFellowship::set_params(signed(1), Box::new(params))); diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs index 1e42335067a4a802942753940b2f5d6bbc5aef4f..8fad6f585c1129e9a6c4160ed396cea28158e990 100644 --- a/substrate/frame/core-fellowship/src/weights.rs +++ b/substrate/frame/core-fellowship/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_core_fellowship` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_core_fellowship -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/core-fellowship/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_core_fellowship +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/core-fellowship/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -67,13 +65,13 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `CoreFellowship::Params` (r:0 w:1) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) fn set_params() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_836_000 picoseconds. - Weight::from_parts(7_057_000, 0) + // Minimum execution time: 7_633_000 picoseconds. 
+ Weight::from_parts(8_018_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -81,7 +79,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `RankedCollective::Members` (r:1 w:1) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) @@ -92,10 +90,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `17274` + // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 55_535_000 picoseconds. - Weight::from_parts(57_104_000, 19894) + // Minimum execution time: 57_597_000 picoseconds. + Weight::from_parts(58_825_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -104,7 +102,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `RankedCollective::Members` (r:1 w:1) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) @@ -115,10 +113,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `17384` + // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 59_111_000 picoseconds. - Weight::from_parts(61_394_000, 19894) + // Minimum execution time: 61_387_000 picoseconds. + Weight::from_parts(63_408_000, 19894) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -130,8 +128,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 16_166_000 picoseconds. - Weight::from_parts(16_773_000, 3514) + // Minimum execution time: 15_941_000 picoseconds. + Weight::from_parts(16_547_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -149,8 +147,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 25_508_000 picoseconds. - Weight::from_parts(25_952_000, 3514) + // Minimum execution time: 24_963_000 picoseconds. 
+ Weight::from_parts(25_873_000, 3514) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -159,7 +157,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `CoreFellowship::Member` (r:1 w:1) /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) @@ -170,10 +168,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn promote() -> Weight { // Proof Size summary in bytes: - // Measured: `17252` + // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 51_102_000 picoseconds. - Weight::from_parts(53_302_000, 19894) + // Minimum execution time: 55_062_000 picoseconds. + Weight::from_parts(58_422_000, 19894) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -187,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 16_035_000 picoseconds. - Weight::from_parts(16_529_000, 3514) + // Minimum execution time: 15_901_000 picoseconds. + Weight::from_parts(16_746_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -200,8 +198,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 14_966_000 picoseconds. - Weight::from_parts(15_340_000, 3514) + // Minimum execution time: 14_768_000 picoseconds. + Weight::from_parts(15_421_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -215,8 +213,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 35_137_000 picoseconds. - Weight::from_parts(36_285_000, 19894) + // Minimum execution time: 36_925_000 picoseconds. + Weight::from_parts(38_330_000, 19894) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -228,8 +226,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 24_307_000 picoseconds. - Weight::from_parts(25_426_000, 19894) + // Minimum execution time: 25_210_000 picoseconds. + Weight::from_parts(26_247_000, 19894) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -238,13 +236,13 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests. 
impl WeightInfo for () { /// Storage: `CoreFellowship::Params` (r:0 w:1) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) fn set_params() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_836_000 picoseconds. - Weight::from_parts(7_057_000, 0) + // Minimum execution time: 7_633_000 picoseconds. + Weight::from_parts(8_018_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `CoreFellowship::Member` (r:1 w:1) @@ -252,7 +250,7 @@ impl WeightInfo for () { /// Storage: `RankedCollective::Members` (r:1 w:1) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) @@ -263,10 +261,10 @@ impl WeightInfo for () { /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_offboard() -> Weight { // Proof Size summary in bytes: - // Measured: `17274` + // Measured: `17278` // Estimated: `19894` - // Minimum execution time: 55_535_000 picoseconds. - Weight::from_parts(57_104_000, 19894) + // Minimum execution time: 57_597_000 picoseconds. + Weight::from_parts(58_825_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -275,7 +273,7 @@ impl WeightInfo for () { /// Storage: `RankedCollective::Members` (r:1 w:1) /// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::IdToIndex` (r:1 w:1) @@ -286,10 +284,10 @@ impl WeightInfo for () { /// Proof: `RankedCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn bump_demote() -> Weight { // Proof Size summary in bytes: - // Measured: `17384` + // Measured: `17388` // Estimated: `19894` - // Minimum execution time: 59_111_000 picoseconds. - Weight::from_parts(61_394_000, 19894) + // Minimum execution time: 61_387_000 picoseconds. + Weight::from_parts(63_408_000, 19894) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -301,8 +299,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `388` // Estimated: `3514` - // Minimum execution time: 16_166_000 picoseconds. 
- Weight::from_parts(16_773_000, 3514) + // Minimum execution time: 15_941_000 picoseconds. + Weight::from_parts(16_547_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -320,8 +318,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `146` // Estimated: `3514` - // Minimum execution time: 25_508_000 picoseconds. - Weight::from_parts(25_952_000, 3514) + // Minimum execution time: 24_963_000 picoseconds. + Weight::from_parts(25_873_000, 3514) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -330,7 +328,7 @@ impl WeightInfo for () { /// Storage: `CoreFellowship::Member` (r:1 w:1) /// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::Params` (r:1 w:0) - /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`) /// Storage: `RankedCollective::MemberCount` (r:1 w:1) /// Proof: `RankedCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `CoreFellowship::MemberEvidence` (r:1 w:1) @@ -341,10 +339,10 @@ impl WeightInfo for () { /// Proof: `RankedCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) fn promote() -> Weight { // Proof Size summary in bytes: - // Measured: `17252` + // Measured: `16931` // Estimated: `19894` - // Minimum execution time: 51_102_000 picoseconds. - Weight::from_parts(53_302_000, 19894) + // Minimum execution time: 55_062_000 picoseconds. + Weight::from_parts(58_422_000, 19894) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -358,8 +356,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `293` // Estimated: `3514` - // Minimum execution time: 16_035_000 picoseconds. - Weight::from_parts(16_529_000, 3514) + // Minimum execution time: 15_901_000 picoseconds. + Weight::from_parts(16_746_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -371,8 +369,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `313` // Estimated: `3514` - // Minimum execution time: 14_966_000 picoseconds. - Weight::from_parts(15_340_000, 3514) + // Minimum execution time: 14_768_000 picoseconds. + Weight::from_parts(15_421_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -386,8 +384,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `16843` // Estimated: `19894` - // Minimum execution time: 35_137_000 picoseconds. - Weight::from_parts(36_285_000, 19894) + // Minimum execution time: 36_925_000 picoseconds. + Weight::from_parts(38_330_000, 19894) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -399,8 +397,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `79` // Estimated: `19894` - // Minimum execution time: 24_307_000 picoseconds. - Weight::from_parts(25_426_000, 19894) + // Minimum execution time: 25_210_000 picoseconds. 
+ Weight::from_parts(26_247_000, 19894) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/delegated-staking/Cargo.toml b/substrate/frame/delegated-staking/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..3b122dc2e26c3c9d0f56f07427b037bd1cd388c0 --- /dev/null +++ b/substrate/frame/delegated-staking/Cargo.toml @@ -0,0 +1,73 @@ +[package] +name = "pallet-delegated-staking" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME delegated staking pallet" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +sp-std = { path = "../../primitives/std", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } + +[dev-dependencies] +sp-core = { path = "../../primitives/core" } +sp-io = { path = "../../primitives/io" } +substrate-test-utils = { path = "../../test-utils" } +sp-tracing = { path = "../../primitives/tracing" } +pallet-staking = { path = "../staking" } +pallet-nomination-pools = { path = "../nomination-pools" } +pallet-balances = { path = "../balances" } +pallet-timestamp = { path = "../timestamp" } +pallet-staking-reward-curve = { path = "../staking/reward-curve" } +frame-election-provider-support = { path = "../election-provider-support", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-election-provider-support/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "pallet-nomination-pools/std", + "pallet-staking/std", + "pallet-timestamp/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-staking/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-nomination-pools/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks", +] +try-runtime = [ + "frame-election-provider-support/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-nomination-pools/try-runtime", + "pallet-staking/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/delegated-staking/src/impls.rs b/substrate/frame/delegated-staking/src/impls.rs new file mode 100644 index 0000000000000000000000000000000000000000..9f5649672d70e9e55ad84350d0be53bd4abb2b0f --- /dev/null +++ b/substrate/frame/delegated-staking/src/impls.rs @@ -0,0 +1,161 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Implementations of public traits, namely [`DelegationInterface`] and [`OnStakingUpdate`].
+
+use super::*;
+use sp_staking::{Agent, DelegationInterface, DelegationMigrator, Delegator, OnStakingUpdate};
+
+impl<T: Config> DelegationInterface for Pallet<T> {
+	type Balance = BalanceOf<T>;
+	type AccountId = T::AccountId;
+
+	/// Effective balance of the `Agent` account.
+	fn agent_balance(agent: Agent<Self::AccountId>) -> Option<Self::Balance> {
+		AgentLedgerOuter::<T>::get(&agent.get())
+			.map(|a| a.ledger.effective_balance())
+			.ok()
+	}
+
+	fn delegator_balance(delegator: Delegator<Self::AccountId>) -> Option<Self::Balance> {
+		Delegation::<T>::get(&delegator.get()).map(|d| d.amount)
+	}
+
+	/// Delegate funds to an `Agent`.
+	fn delegate(
+		who: Delegator<Self::AccountId>,
+		agent: Agent<Self::AccountId>,
+		reward_account: &Self::AccountId,
+		amount: Self::Balance,
+	) -> DispatchResult {
+		Pallet::<T>::register_agent(
+			RawOrigin::Signed(agent.clone().get()).into(),
+			reward_account.clone(),
+		)?;
+
+		// Delegate the funds from `who` to the `Agent` account.
+		Pallet::<T>::delegate_to_agent(RawOrigin::Signed(who.get()).into(), agent.get(), amount)
+	}
+
+	/// Add more delegation to the `Agent` account.
+	fn delegate_extra(
+		who: Delegator<Self::AccountId>,
+		agent: Agent<Self::AccountId>,
+		amount: Self::Balance,
+	) -> DispatchResult {
+		Pallet::<T>::delegate_to_agent(RawOrigin::Signed(who.get()).into(), agent.get(), amount)
+	}
+
+	/// Withdraw delegation of `delegator` to the `Agent`.
+	///
+	/// If there are funds in the `Agent` account that can be withdrawn, those funds are
+	/// unlocked/released in the delegator's account.
+	fn withdraw_delegation(
+		delegator: Delegator<Self::AccountId>,
+		agent: Agent<Self::AccountId>,
+		amount: Self::Balance,
+		num_slashing_spans: u32,
+	) -> DispatchResult {
+		Pallet::<T>::release_delegation(
+			RawOrigin::Signed(agent.get()).into(),
+			delegator.get(),
+			amount,
+			num_slashing_spans,
+		)
+	}
+
+	/// Returns the pending slash of the `agent`.
+	fn pending_slash(agent: Agent<Self::AccountId>) -> Option<Self::Balance> {
+		AgentLedgerOuter::<T>::get(&agent.get()).map(|d| d.ledger.pending_slash).ok()
+	}
+
+	fn delegator_slash(
+		agent: Agent<Self::AccountId>,
+		delegator: Delegator<Self::AccountId>,
+		value: Self::Balance,
+		maybe_reporter: Option<Self::AccountId>,
+	) -> sp_runtime::DispatchResult {
+		Pallet::<T>::do_slash(agent, delegator, value, maybe_reporter)
+	}
+}
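+
+// Illustrative note (not part of the pallet API; `member`, `pool_acc` and `amount` below are
+// placeholder names): other pallets are expected to interact with delegated staking through the
+// traits implemented in this file rather than through the inherent functions in `lib.rs`. For
+// example, a pooled staking pallet could top up a member's delegation via:
+//
+// `<Pallet<T> as DelegationInterface>::delegate_extra(
+// 	Delegator::from(member), Agent::from(pool_acc), amount,
+// )?;`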
+
+impl<T: Config> DelegationMigrator for Pallet<T> {
+	type Balance = BalanceOf<T>;
+	type AccountId = T::AccountId;
+
+	fn migrate_nominator_to_agent(
+		agent: Agent<Self::AccountId>,
+		reward_account: &Self::AccountId,
+	) -> DispatchResult {
+		Pallet::<T>::migrate_to_agent(RawOrigin::Signed(agent.get()).into(), reward_account.clone())
+	}
+	fn migrate_delegation(
+		agent: Agent<Self::AccountId>,
+		delegator: Delegator<Self::AccountId>,
+		value: Self::Balance,
+	) -> DispatchResult {
+		Pallet::<T>::migrate_delegation(
+			RawOrigin::Signed(agent.get()).into(),
+			delegator.get(),
+			value,
+		)
+	}
+
+	/// Only used for testing.
+	#[cfg(feature = "runtime-benchmarks")]
+	fn drop_agent(agent: Agent<Self::AccountId>) {
+		<Agents<T>>::remove(agent.clone().get());
+		<Delegators<T>>::iter()
+			.filter(|(_, delegation)| delegation.agent == agent.clone().get())
+			.for_each(|(delegator, _)| {
+				let _ = T::Currency::release_all(
+					&HoldReason::StakingDelegation.into(),
+					&delegator,
+					Precision::BestEffort,
+				);
+				<Delegators<T>>::remove(&delegator);
+			});
+
+		T::CoreStaking::migrate_to_direct_staker(&agent.get());
+	}
+}
+
+impl<T: Config> OnStakingUpdate<T::AccountId, BalanceOf<T>> for Pallet<T> {
+	fn on_slash(
+		who: &T::AccountId,
+		_slashed_active: BalanceOf<T>,
+		_slashed_unlocking: &sp_std::collections::btree_map::BTreeMap<EraIndex, BalanceOf<T>>,
+		slashed_total: BalanceOf<T>,
+	) {
+		<Agents<T>>::mutate(who, |maybe_register| match maybe_register {
+			// if an agent exists, register the slashed amount as a pending slash.
+			Some(register) => register.pending_slash.saturating_accrue(slashed_total),
+			None => {
+				// nothing to do
+			},
+		});
+	}
+
+	fn on_withdraw(stash: &T::AccountId, amount: BalanceOf<T>) {
+		// if there is a withdrawal by the agent, add it to the unclaimed withdrawals.
+		let _ = AgentLedgerOuter::<T>::get(stash)
+			// can't do anything if there is an overflow error. Just raise a defensive error.
+			.and_then(|agent| agent.add_unclaimed_withdraw(amount).defensive())
+			.map(|agent| agent.save());
+	}
+}
diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4b924bce3a579ace305d998cc1db84dee82f78bf
--- /dev/null
+++ b/substrate/frame/delegated-staking/src/lib.rs
@@ -0,0 +1,850 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Delegated Staking Pallet
+//!
+//! This pallet implements [`sp_staking::DelegationInterface`], which provides delegation
+//! functionality to `delegators` and `agents`. It is designed to be used in conjunction with
+//! [`StakingInterface`] and relies on [`Config::CoreStaking`] to provide primitive staking
+//! functions.
+//!
+//! Currently, it does not expose any dispatchable calls but is written with a vision to expose
+//! them in the future such that it can be utilised by any external account, off-chain entity or
+//! xcm `MultiLocation` such as a parachain or a smart contract.
+//!
+//! ## Key Terminologies
+//! - **Agent**: An account that accepts delegations from other accounts and acts as an agent on
+//! their behalf for staking these delegated funds. Also sometimes referred to as a `Delegatee`.
+//! - **Delegator**: An account that delegates its funds to an `agent` and authorises the agent to
+//! use them for staking.
+//! - **AgentLedger**: A data structure that holds important information about the `agent`, such as
+//! the total delegations they have received, any slashes posted to them, etc.
+//! - **Delegation**: A data structure that stores the amount of funds delegated to an `agent` by a
+//! `delegator`.
+//!
+//! ## Goals
+//!
+//! Direct nomination on the Staking pallet does not scale well. Nomination pools were created to
+//! address this by pooling delegator funds into one account and then staking it. This, however,
+//! had a critical limitation: the funds were moved from the delegator's account to the pool
+//! account, so the delegator lost control over their funds and could no longer use them for other
+//! purposes such as governance. This pallet aims to solve this by extending the staking pallet to
+//! support a new primitive function: delegation of funds to an `agent` with the intent of staking.
+//! The agent can then stake the delegated funds to [`Config::CoreStaking`] on behalf of the
+//! delegators.
+//!
+//! ### Withdrawal Management
+//! Agent unbonding does not regulate the ordering of subsequent withdrawals by delegators. It is
+//! up to the consumer of this pallet to implement in what order unbondable funds from
+//! [`Config::CoreStaking`] can be withdrawn by the delegators.
+//!
+//! ### Reward and Slashing
+//! This pallet does not enforce any specific strategy for how rewards or slashes are applied. It
+//! is up to the `agent` account to decide how to apply the rewards and slashes.
+//!
+//! This importantly allows clients of this pallet to build their own strategies for rewards and
+//! slashes. For example, an `agent` account can choose to first slash the reward pot before
+//! slashing the delegators. Or part of the reward can go to an insurance fund that can be used to
+//! cover any potential future slashes. The goal is to eventually allow foreign MultiLocations
+//! (smart contracts or pallets on another chain) to build their own pooled staking solutions
+//! similar to `NominationPools`.
+//!
+//! ## Core functions
+//!
+//! - Allow an account to receive delegations. See [`Pallet::register_agent`].
+//! - Delegate funds to an `agent` account. See [`Pallet::delegate_to_agent`].
+//! - Release delegated funds from an `agent` account to the `delegator`. See
+//! [`Pallet::release_delegation`].
+//! - Migrate a `Nominator` account to an `agent` account. See [`Pallet::migrate_to_agent`].
+//! Explained in more detail in the `Migration` section.
+//! - Migrate unclaimed delegated funds from an `agent` to a delegator. When a nominator migrates
+//! to an agent, the funds are held in a proxy account. This function allows the delegator to claim
+//! their share of the funds from the proxy account. See [`Pallet::migrate_delegation`].
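+//!
+//! As a rough usage sketch of the functions above (illustrative only: `DelegatedStaking` stands
+//! for whatever name this pallet is given in the runtime, and `agent`, `reward_acc` and `alice`
+//! are placeholder accounts):
+//!
+//! ```ignore
+//! // `agent` opts in to receiving delegations, naming a separate reward account.
+//! DelegatedStaking::register_agent(RuntimeOrigin::signed(agent), reward_acc)?;
+//!
+//! // `alice` delegates 100 units to `agent`; the funds stay held in her own account.
+//! DelegatedStaking::delegate_to_agent(RuntimeOrigin::signed(alice), agent, 100)?;
+//!
+//! // Later, once unbonded funds are withdrawable, `agent` releases the delegation to `alice`.
+//! DelegatedStaking::release_delegation(RuntimeOrigin::signed(agent), alice, 100, 0)?;
+//! ```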
+//!
+//! ## Lazy Slashing
+//! One of the reasons why direct nominators on the staking pallet cannot scale well is that all
+//! nominators are slashed at the same time. This is expensive and needs to be a bounded operation.
+//!
+//! This pallet implements a lazy slashing mechanism. Any slashes to the `agent` are posted in its
+//! `AgentLedger` as a pending slash. Since the actual amount is held in the multiple
+//! `delegator` accounts, this pallet has no way to know how to apply the slash. It is the
+//! `agent`'s responsibility to apply slashes for each delegator, one at a time. The staking pallet
+//! ensures the pending slash never exceeds the staked amount and freezes further withdrawals until
+//! all pending slashes are cleared.
+//!
+//! The user of this pallet can apply a slash using
+//! [DelegationInterface::delegator_slash](sp_staking::DelegationInterface::delegator_slash).
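+//!
+//! As an illustrative sketch (not a prescribed flow; `DelegatedStaking`, `agent`, `delegator`,
+//! `amount` and `reporter` are placeholders), applying a pending slash to one delegator could look
+//! like:
+//!
+//! ```ignore
+//! use sp_staking::{Agent, DelegationInterface, Delegator};
+//!
+//! // Apply (part of) the agent's pending slash to a single delegator. The optional reporter
+//! // is rewarded with a fraction of the slash (see `Config::SlashRewardFraction`).
+//! <DelegatedStaking as DelegationInterface>::delegator_slash(
+//! 	Agent::from(agent),
+//! 	Delegator::from(delegator),
+//! 	amount,
+//! 	Some(reporter),
+//! )?;
+//! ```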
+//!
+//! ## Migration from Nominator to Agent
+//! More details [here](https://hackmd.io/@ak0n/454-np-governance).
+//!
+//! ## Nomination Pool vs Delegation Staking
+//! This pallet is not a replacement for Nomination Pools but adds a new primitive, on top of the
+//! staking pallet, that can be used by Nomination Pools to support delegation-based staking. It
+//! can be thought of as an extension to the Staking Pallet in relation to Nomination Pools.
+//! Technically, these changes could have been made in one of those pallets as well, but that would
+//! have meant significant refactoring and a high chance of introducing a regression. With this
+//! approach, we can keep the existing pallets with minimal changes and introduce a new pallet that
+//! can be optionally used by Nomination Pools. The vision is to build this in a configurable way
+//! such that a runtime can choose whether to use this pallet or not.
+//!
+//! With that said, the following is the main difference between the two approaches.
+//!
+//! #### Nomination Pool without delegation support
+//! 1) transfer funds from the delegator to the pool account, and
+//! 2) stake from the pool account as a direct nominator.
+//!
+//! #### Nomination Pool with delegation support
+//! 1) delegate funds from the delegator to the pool account, and
+//! 2) stake from the pool account as an `Agent` account on the staking pallet.
+//!
+//! The difference is that, in the second approach, the delegated funds are locked in place in the
+//! user's account, enabling the user to participate in use cases that allow the use of `held`
+//! funds, such as voting in governance.
+//!
+//! Nomination Pools still do all the heavy lifting around pool administration, reward
+//! distribution and lazy slashing and, as such, are not meant to be replaced by this pallet.
+//!
+//! ## Limitations
+//! - Rewards cannot be auto-compounded.
+//! - Slashes are lazy, and hence there could be a period of time when an account can use funds for
+//! operations such as voting in governance even though they should have been slashed.
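+//!
+//! For orientation, wiring this pallet into a runtime could look roughly like the following
+//! sketch (illustrative only; `Runtime`, `Balances`, `Staking` and the parameter types are
+//! placeholders, not part of this pallet):
+//!
+//! ```ignore
+//! parameter_types! {
+//! 	pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk");
+//! 	pub const SlashRewardFraction: Perbill = Perbill::from_percent(1);
+//! }
+//!
+//! impl pallet_delegated_staking::Config for Runtime {
+//! 	type RuntimeEvent = RuntimeEvent;
+//! 	type PalletId = DelegatedStakingPalletId;
+//! 	type Currency = Balances;
+//! 	type OnSlash = ();
+//! 	type SlashRewardFraction = SlashRewardFraction;
+//! 	type RuntimeHoldReason = RuntimeHoldReason;
+//! 	type CoreStaking = Staking;
+//! }
+//! ```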
+
+#![cfg_attr(not(feature = "std"), no_std)]
+#![deny(rustdoc::broken_intra_doc_links)]
+
+mod impls;
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+mod types;
+
+pub use pallet::*;
+
+use types::*;
+
+use frame_support::{
+	pallet_prelude::*,
+	traits::{
+		fungible::{
+			hold::{
+				Balanced as FunHoldBalanced, Inspect as FunHoldInspect, Mutate as FunHoldMutate,
+			},
+			Balanced, Inspect as FunInspect, Mutate as FunMutate,
+		},
+		tokens::{fungible::Credit, Fortitude, Precision, Preservation},
+		Defensive, DefensiveOption, Imbalance, OnUnbalanced,
+	},
+};
+use sp_runtime::{
+	traits::{AccountIdConversion, CheckedAdd, CheckedSub, Zero},
+	ArithmeticError, DispatchResult, Perbill, RuntimeDebug, Saturating,
+};
+use sp_staking::{Agent, Delegator, EraIndex, StakingInterface, StakingUnchecked};
+use sp_std::{convert::TryInto, prelude::*};
+
+pub type BalanceOf<T> =
+	<<T as Config>::Currency as FunInspect<<T as frame_system::Config>::AccountId>>::Balance;
+
+use frame_system::{ensure_signed, pallet_prelude::*, RawOrigin};
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+
+	/// The in-code storage version.
+	const STORAGE_VERSION: StorageVersion = StorageVersion::new(0);
+	#[pallet::pallet]
+	#[pallet::storage_version(STORAGE_VERSION)]
+	pub struct Pallet<T>(PhantomData<T>);
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// The overarching event type.
+		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+
+		/// Injected identifier for the pallet.
+		#[pallet::constant]
+		type PalletId: Get<frame_support::PalletId>;
+
+		/// Currency type.
+		type Currency: FunHoldMutate<Self::AccountId, Reason = Self::RuntimeHoldReason>
+			+ FunMutate<Self::AccountId>
+			+ FunHoldBalanced<Self::AccountId>;
+
+		/// Handler for the unbalanced reduction when slashing a delegator.
+		type OnSlash: OnUnbalanced<Credit<Self::AccountId, Self::Currency>>;
+
+		/// Fraction of the slash that is rewarded to the caller who applies a pending slash to
+		/// the agent.
+		#[pallet::constant]
+		type SlashRewardFraction: Get<Perbill>;
+
+		/// Overarching hold reason.
+		type RuntimeHoldReason: From<HoldReason>;
+
+		/// Core staking implementation.
+		type CoreStaking: StakingUnchecked<Balance = BalanceOf<Self>, AccountId = Self::AccountId>;
+	}
+
+	#[pallet::error]
+	pub enum Error<T> {
+		/// The account cannot perform this operation.
+		NotAllowed,
+		/// An existing staker cannot perform this action.
+		AlreadyStaking,
+		/// Reward destination cannot be the same as the `Agent` account.
+		InvalidRewardDestination,
+		/// Delegation conditions are not met.
+		///
+		/// Possible issues are
+		/// 1) Cannot delegate to self,
+		/// 2) Cannot delegate to multiple agents.
+		InvalidDelegation,
+		/// The account does not have enough funds to perform the operation.
+		NotEnoughFunds,
+		/// Not an existing `Agent` account.
+		NotAgent,
+		/// Not a Delegator account.
+		NotDelegator,
+		/// Some corruption in internal state.
+		BadState,
+		/// Unapplied pending slash restricts operation on `Agent`.
+		UnappliedSlash,
+		/// `Agent` has no pending slash to be applied.
+		NothingToSlash,
+		/// Failed to withdraw amount from Core Staking.
+		WithdrawFailed,
+		/// Operation not supported by this pallet.
+		NotSupported,
+	}
+
+	/// A reason for placing a hold on funds.
+	#[pallet::composite_enum]
+	pub enum HoldReason {
+		/// Funds held for stake delegation to another account.
+		#[codec(index = 0)]
+		StakingDelegation,
+	}
+
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config> {
+		/// Funds delegated by a delegator.
+		Delegated { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf<T> },
+		/// Funds released to a delegator.
+		Released { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf<T> },
+		/// Funds slashed from a delegator.
+		Slashed { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf<T> },
+		/// Unclaimed delegation funds migrated to delegator.
+		MigratedDelegation { agent: T::AccountId, delegator: T::AccountId, amount: BalanceOf<T> },
+	}
+
+	/// Map of Delegators to their `Delegation`.
+	///
+	/// Implementation note: We are not using a double map with `delegator` and `agent` account
+	/// as keys since we want to restrict delegators to delegate only to one account at a time.
+	#[pallet::storage]
+	pub(crate) type Delegators<T: Config> =
+		CountedStorageMap<_, Twox64Concat, T::AccountId, Delegation<T>, OptionQuery>;
+
+	/// Map of `Agent` to their `Ledger`.
+	#[pallet::storage]
+	pub(crate) type Agents<T: Config> =
+		CountedStorageMap<_, Twox64Concat, T::AccountId, AgentLedger<T>, OptionQuery>;
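+
+	// Implementation note: the two maps above are kept consistent by the functions below; in
+	// particular, the sum of all `Delegation` amounts pointing at an agent is tracked as that
+	// agent's `total_delegated` in its `AgentLedger`.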
+
+	// This pallet is not currently written with the intention of exposing any calls. But the
+	// functions defined in the following impl block should act as a good reference for what the
+	// exposed calls would look like.
+	impl<T: Config> Pallet<T> {
+		/// Register an account to become a stake `Agent`. Sometimes also called a `Delegatee`.
+		///
+		/// Delegators can authorize `Agent`s to stake on their behalf by delegating their funds to
+		/// them. The `Agent` can then use the delegated funds to stake to [`Config::CoreStaking`].
+		///
+		/// An account that is directly staked to [`Config::CoreStaking`] cannot become an `Agent`.
+		/// However, they can migrate to become an agent using [`Self::migrate_to_agent`].
+		///
+		/// Implementation note: This function allows any account to become an agent. It is
+		/// important though that accounts that call [`StakingUnchecked::virtual_bond`] are keyless
+		/// accounts. This is not a problem for now since this is only used by other pallets in the
+		/// runtime which use keyless accounts as agents. If we later want to expose this as a
+		/// dispatchable call, we should derive a sub-account from the caller and use that as the
+		/// agent account.
+		pub fn register_agent(
+			origin: OriginFor<T>,
+			reward_account: T::AccountId,
+		) -> DispatchResult {
+			let who = ensure_signed(origin)?;
+
+			// Existing `agent` cannot register again and a delegator cannot become an `agent`.
+			ensure!(!Self::is_agent(&who) && !Self::is_delegator(&who), Error::<T>::NotAllowed);
+
+			// They cannot already be a direct staker in the staking pallet.
+			ensure!(!Self::is_direct_staker(&who), Error::<T>::AlreadyStaking);
+
+			// Reward account cannot be the same as the `agent` account.
+			ensure!(reward_account != who, Error::<T>::InvalidRewardDestination);
+
+			Self::do_register_agent(&who, &reward_account);
+			Ok(())
+		}
+
+		/// Migrate from a `Nominator` account to an `Agent` account.
+		///
+		/// The origin needs to
+		/// - be a `Nominator` with [`Config::CoreStaking`],
+		/// - not already be an `Agent`.
+		///
+		/// This function will create a proxy account to the agent called `proxy_delegator` and
+		/// transfer the amount directly staked by the agent to it. The `proxy_delegator` delegates
+		/// the funds to the origin, making the origin an `Agent` account. The real `delegator`
+		/// accounts of the origin can later migrate their funds using [`Self::migrate_delegation`]
+		/// to claim back their share of delegated funds from `proxy_delegator` to self.
+		///
+		/// Any free funds in the agent's account will be marked as an unclaimed withdrawal.
+		pub fn migrate_to_agent(
+			origin: OriginFor<T>,
+			reward_account: T::AccountId,
+		) -> DispatchResult {
+			let who = ensure_signed(origin)?;
+			// ensure who is a staker in `CoreStaking` but not already an agent or a delegator.
+			ensure!(
+				Self::is_direct_staker(&who) && !Self::is_agent(&who) && !Self::is_delegator(&who),
+				Error::<T>::NotAllowed
+			);
+
+			// Reward account cannot be the same as the `agent` account.
+			ensure!(reward_account != who, Error::<T>::InvalidRewardDestination);
+
+			Self::do_migrate_to_agent(&who, &reward_account)
+		}
+
+		/// Release funds previously delegated by a delegator to the origin.
+		///
+		/// Only agents can call this.
+		///
+		/// Tries to withdraw unbonded funds from `CoreStaking` if needed and releases the amount
+		/// to the `delegator`.
+		pub fn release_delegation(
+			origin: OriginFor<T>,
+			delegator: T::AccountId,
+			amount: BalanceOf<T>,
+			num_slashing_spans: u32,
+		) -> DispatchResult {
+			let who = ensure_signed(origin)?;
+			Self::do_release(
+				Agent::from(who),
+				Delegator::from(delegator),
+				amount,
+				num_slashing_spans,
+			)
+		}
+
+		/// Migrate delegated funds that are held in `proxy_delegator` to the claiming
+		/// `delegator`'s account. If successful, the specified funds will be moved and delegated
+		/// from the `delegator` account to the agent.
+		///
+		/// This can be called by `agent` accounts that were previously a direct `Nominator` with
+		/// [`Config::CoreStaking`] and have some remaining unclaimed delegations.
+		///
+		/// Internally, it moves some delegations from the `proxy_delegator` account to the
+		/// `delegator` account and reapplies the holds.
+		pub fn migrate_delegation(
+			origin: OriginFor<T>,
+			delegator: T::AccountId,
+			amount: BalanceOf<T>,
+		) -> DispatchResult {
+			let agent = ensure_signed(origin)?;
+
+			// Ensure they have minimum delegation.
+			ensure!(amount >= T::Currency::minimum_balance(), Error::<T>::NotEnoughFunds);
+
+			// Ensure delegator is sane.
+			ensure!(!Self::is_agent(&delegator), Error::<T>::NotAllowed);
+			ensure!(!Self::is_delegator(&delegator), Error::<T>::NotAllowed);
+			ensure!(!Self::is_direct_staker(&delegator), Error::<T>::AlreadyStaking);
+
+			// ensure agent is sane.
+			ensure!(Self::is_agent(&agent), Error::<T>::NotAgent);
+
+			// and has enough delegated balance to migrate.
+			let proxy_delegator = Self::generate_proxy_delegator(Agent::from(agent));
+			let balance_remaining = Self::held_balance_of(proxy_delegator.clone());
+			ensure!(balance_remaining >= amount, Error::<T>::NotEnoughFunds);
+
+			Self::do_migrate_delegation(proxy_delegator, Delegator::from(delegator), amount)
+		}
+
+		/// Delegate a given `amount` of tokens to an `Agent` account.
+		///
+		/// If `origin` is a first-time delegator, we add them to state. If they are already
+		/// delegating, we increase the delegation.
+		///
+		/// Conditions:
+		/// - Delegators cannot delegate to more than one agent.
+		/// - The `agent` account should already be registered as such. See
+		///   [`Self::register_agent`].
+		pub fn delegate_to_agent(
+			origin: OriginFor<T>,
+			agent: T::AccountId,
+			amount: BalanceOf<T>,
+		) -> DispatchResult {
+			let delegator = ensure_signed(origin)?;
+
+			// ensure delegator is sane.
+			ensure!(
+				Delegation::<T>::can_delegate(&delegator, &agent),
+				Error::<T>::InvalidDelegation
+			);
+			ensure!(!Self::is_direct_staker(&delegator), Error::<T>::AlreadyStaking);
+
+			// ensure agent is sane.
+			ensure!(Self::is_agent(&agent), Error::<T>::NotAgent);
+
+			// add to delegation.
+			Self::do_delegate(Delegator::from(delegator), Agent::from(agent.clone()), amount)?;
+
+			// bond the newly delegated amount to `CoreStaking`.
+			Self::do_bond(Agent::from(agent), amount)
+		}
+	}
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		#[cfg(feature = "try-runtime")]
+		fn try_state(_n: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+			Self::do_try_state()
+		}
+	}
+}
+
+impl<T: Config> Pallet<T> {
+	/// Derive an account from the migrating agent account where the unclaimed delegation funds
+	/// are held.
+	pub fn generate_proxy_delegator(agent: Agent<T::AccountId>) -> Delegator<T::AccountId> {
+		Delegator::from(Self::sub_account(AccountType::ProxyDelegator, agent.get()))
+	}
+
+	/// Derive a (keyless) pot account from the given agent account and account type.
+	fn sub_account(account_type: AccountType, acc: T::AccountId) -> T::AccountId {
+		T::PalletId::get().into_sub_account_truncating((account_type, acc.clone()))
+	}
+
+	/// Held balance of a delegator.
+	pub(crate) fn held_balance_of(who: Delegator<T::AccountId>) -> BalanceOf<T> {
+		T::Currency::balance_on_hold(&HoldReason::StakingDelegation.into(), &who.get())
+	}
+
+	/// Returns true if `who` is registered as an `Agent`.
+	fn is_agent(who: &T::AccountId) -> bool {
+		<Agents<T>>::contains_key(who)
+	}
+
+	/// Returns true if `who` is delegating to an `Agent` account.
+	fn is_delegator(who: &T::AccountId) -> bool {
+		<Delegators<T>>::contains_key(who)
+	}
+
+	/// Returns true if `who` is already staking on [`Config::CoreStaking`].
+	fn is_direct_staker(who: &T::AccountId) -> bool {
+		T::CoreStaking::status(who).is_ok()
+	}
+
+	/// Registers a new agent in the system.
+	fn do_register_agent(who: &T::AccountId, reward_account: &T::AccountId) {
+		AgentLedger::<T>::new(reward_account).update(who);
+
+		// An agent does not hold a balance of its own, but this pallet provides for its account
+		// to exist. This is expected to be a keyless account that is not created by any user
+		// directly, so this is safe.
+ // TODO: Someday if we allow anyone to be an agent, we should take a deposit for
+ // being a delegator.
+ frame_system::Pallet::<T>::inc_providers(who);
+ }
+
+ /// Migrate the existing staker account `who` to an `Agent` account.
+ fn do_migrate_to_agent(who: &T::AccountId, reward_account: &T::AccountId) -> DispatchResult {
+ Self::do_register_agent(who, reward_account);
+
+ // We create a proxy delegator that will keep all the delegation funds until they are
+ // transferred to the actual delegators.
+ let proxy_delegator = Self::generate_proxy_delegator(Agent::from(who.clone()));
+
+ // Keep the proxy delegator alive until all funds are migrated.
+ frame_system::Pallet::<T>::inc_providers(&proxy_delegator.clone().get());
+
+ // Get the current stake.
+ let stake = T::CoreStaking::stake(who)?;
+
+ // release funds from core staking.
+ T::CoreStaking::migrate_to_virtual_staker(who);
+
+ // transfer the just released staked amount plus any free amount.
+ let amount_to_transfer =
+ T::Currency::reducible_balance(who, Preservation::Expendable, Fortitude::Polite);
+
+ // This should never fail, but if it does, it indicates bad state and we abort.
+ T::Currency::transfer(
+ who,
+ &proxy_delegator.clone().get(),
+ amount_to_transfer,
+ Preservation::Expendable,
+ )?;
+
+ T::CoreStaking::update_payee(who, reward_account)?;
+ // delegate all transferred funds back to the agent.
+ Self::do_delegate(proxy_delegator, Agent::from(who.clone()), amount_to_transfer)?;
+
+ // if the transferred/delegated amount was greater than the stake, mark the extra as an
+ // unclaimed withdrawal.
+ let unclaimed_withdraws = amount_to_transfer
+ .checked_sub(&stake.total)
+ .defensive_ok_or(ArithmeticError::Underflow)?;
+
+ if !unclaimed_withdraws.is_zero() {
+ let mut ledger = AgentLedger::<T>::get(who).ok_or(Error::<T>::NotAgent)?;
+ ledger.unclaimed_withdrawals = ledger
+ .unclaimed_withdrawals
+ .checked_add(&unclaimed_withdraws)
+ .defensive_ok_or(ArithmeticError::Overflow)?;
+ ledger.update(who);
+ }
+
+ Ok(())
+ }
+
+ /// Bond `amount` to `agent_acc` in [`Config::CoreStaking`].
+ fn do_bond(agent_acc: Agent<T::AccountId>, amount: BalanceOf<T>) -> DispatchResult {
+ let agent_ledger = AgentLedgerOuter::<T>::get(&agent_acc.get())?;
+
+ let available_to_bond = agent_ledger.available_to_bond();
+ defensive_assert!(amount == available_to_bond, "unexpected amount to bond");
+
+ if agent_ledger.is_bonded() {
+ T::CoreStaking::bond_extra(&agent_ledger.key, amount)
+ } else {
+ T::CoreStaking::virtual_bond(&agent_ledger.key, amount, agent_ledger.reward_account())
+ }
+ }
+
+ /// Delegate `amount` from `delegator` to `agent`.
+ fn do_delegate(
+ delegator: Delegator<T::AccountId>,
+ agent: Agent<T::AccountId>,
+ amount: BalanceOf<T>,
+ ) -> DispatchResult {
+ // get the inner types.
+ let agent = agent.get();
+ let delegator = delegator.get();
+
+ let mut ledger = AgentLedger::<T>::get(&agent).ok_or(Error::<T>::NotAgent)?;
+ // try to hold the funds.
+ T::Currency::hold(&HoldReason::StakingDelegation.into(), &delegator, amount)?;
+
+ let new_delegation_amount =
+ if let Some(existing_delegation) = Delegation::<T>::get(&delegator) {
+ ensure!(existing_delegation.agent == agent, Error::<T>::InvalidDelegation);
+ existing_delegation
+ .amount
+ .checked_add(&amount)
+ .ok_or(ArithmeticError::Overflow)?
+ } else {
+ amount
+ };
+
+ Delegation::<T>::new(&agent, new_delegation_amount).update_or_kill(&delegator);
+ ledger.total_delegated =
+ ledger.total_delegated.checked_add(&amount).ok_or(ArithmeticError::Overflow)?;
+ ledger.update(&agent);
+
+ Self::deposit_event(Event::<T>::Delegated { agent, delegator, amount });
+
+ Ok(())
+ }
+
+ /// Release `amount` of delegated funds from `agent` to `delegator`.
+ fn do_release(
+ who: Agent<T::AccountId>,
+ delegator: Delegator<T::AccountId>,
+ amount: BalanceOf<T>,
+ num_slashing_spans: u32,
+ ) -> DispatchResult {
+ // get the inner types.
+ let agent = who.get();
+ let delegator = delegator.get();
+
+ let mut agent_ledger = AgentLedgerOuter::<T>::get(&agent)?;
+ let mut delegation = Delegation::<T>::get(&delegator).ok_or(Error::<T>::NotDelegator)?;
+
+ // make sure the delegation to be released is sound.
+ ensure!(delegation.agent == agent, Error::<T>::NotAgent);
+ ensure!(delegation.amount >= amount, Error::<T>::NotEnoughFunds);
+
+ // if we do not already have enough funds to be claimed, try to withdraw some more.
+ // keep track of whether we killed the staker in the process.
+ let stash_killed = if agent_ledger.ledger.unclaimed_withdrawals < amount {
+ // withdraw from the account.
+ let killed = T::CoreStaking::withdraw_unbonded(agent.clone(), num_slashing_spans)
+ .map_err(|_| Error::<T>::WithdrawFailed)?;
+ // reload the agent from storage since the withdrawal might have changed the state.
+ agent_ledger = agent_ledger.reload()?;
+ Some(killed)
+ } else {
+ None
+ };
+
+ // if we still do not have enough funds to release, abort.
+ ensure!(agent_ledger.ledger.unclaimed_withdrawals >= amount, Error::<T>::NotEnoughFunds);
+
+ // Claim the withdrawal from the agent. Kill the agent if no delegation is left.
+ // TODO: Ideally if there is a register, there should be an unregister that should
+ // clean up the agent. Can be improved in the future.
+ if agent_ledger.remove_unclaimed_withdraw(amount)?.update_or_kill()? {
+ match stash_killed {
+ Some(killed) => {
+ // this implies we did a `CoreStaking::withdraw` before the release. Ensure
+ // we killed the staker as well.
+ ensure!(killed, Error::<T>::BadState);
+ },
+ None => {
+ // We did not do a `CoreStaking::withdraw` before the release. Ensure the
+ // staker is already killed in `CoreStaking`.
+ ensure!(T::CoreStaking::status(&agent).is_err(), Error::<T>::BadState);
+ },
+ }
+
+ // Remove the provider reference for `who`.
+ let _ = frame_system::Pallet::<T>::dec_providers(&agent).defensive();
+ }
+
+ // bookkeep the delegation.
+ delegation.amount = delegation
+ .amount
+ .checked_sub(&amount)
+ .defensive_ok_or(ArithmeticError::Underflow)?;
+
+ // remove the delegator if nothing is delegated anymore.
+ delegation.update_or_kill(&delegator);
+
+ let released = T::Currency::release(
+ &HoldReason::StakingDelegation.into(),
+ &delegator,
+ amount,
+ Precision::BestEffort,
+ )?;
+
+ defensive_assert!(released == amount, "hold should have been released fully");
+
+ Self::deposit_event(Event::<T>::Released { agent, delegator, amount });
+
+ Ok(())
+ }
+
+ /// Migrates delegation of `amount` from the `source` account to the `destination` account.
+ fn do_migrate_delegation(
+ source_delegator: Delegator<T::AccountId>,
+ destination_delegator: Delegator<T::AccountId>,
+ amount: BalanceOf<T>,
+ ) -> DispatchResult {
+ // get the inner types.
+ let source_delegator = source_delegator.get();
+ let destination_delegator = destination_delegator.get();
+
+ let mut source_delegation =
+ Delegators::<T>::get(&source_delegator).defensive_ok_or(Error::<T>::BadState)?;
+
+ // Some conditions that should already have been verified by the caller.
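+ // We re-verify them defensively here rather than trusting the call site.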
+ ensure!(source_delegation.amount >= amount, Error::<T>::NotEnoughFunds);
+ debug_assert!(
+ !Self::is_delegator(&destination_delegator) && !Self::is_agent(&destination_delegator)
+ );
+
+ let agent = source_delegation.agent.clone();
+ // update the delegations.
+ Delegation::<T>::new(&agent, amount).update_or_kill(&destination_delegator);
+
+ source_delegation.amount = source_delegation
+ .amount
+ .checked_sub(&amount)
+ .defensive_ok_or(Error::<T>::BadState)?;
+
+ source_delegation.update_or_kill(&source_delegator);
+
+ // release funds from the source delegator.
+ let released = T::Currency::release(
+ &HoldReason::StakingDelegation.into(),
+ &source_delegator,
+ amount,
+ Precision::BestEffort,
+ )?;
+
+ defensive_assert!(released == amount, "hold should have been released fully");
+
+ // transfer the released amount to `destination_delegator`.
+ let post_balance = T::Currency::transfer(
+ &source_delegator,
+ &destination_delegator,
+ amount,
+ Preservation::Expendable,
+ )
+ .map_err(|_| Error::<T>::BadState)?;
+
+ // if the balance is zero, clear the provider reference for the source (proxy) delegator.
+ if post_balance == Zero::zero() {
+ let _ = frame_system::Pallet::<T>::dec_providers(&source_delegator).defensive();
+ }
+
+ // hold the funds again in the new delegator account.
+ T::Currency::hold(&HoldReason::StakingDelegation.into(), &destination_delegator, amount)?;
+
+ Self::deposit_event(Event::<T>::MigratedDelegation {
+ agent,
+ delegator: destination_delegator,
+ amount,
+ });
+
+ Ok(())
+ }
+
+ /// Take slash `amount` from the agent's `pending_slash` counter and apply it to the
+ /// `delegator` account.
+ pub fn do_slash(
+ agent: Agent<T::AccountId>,
+ delegator: Delegator<T::AccountId>,
+ amount: BalanceOf<T>,
+ maybe_reporter: Option<T::AccountId>,
+ ) -> DispatchResult {
+ // get the inner types.
+ let agent = agent.get();
+ let delegator = delegator.get();
+
+ let agent_ledger = AgentLedgerOuter::<T>::get(&agent)?;
+ // ensure there is something to slash.
+ ensure!(agent_ledger.ledger.pending_slash > Zero::zero(), Error::<T>::NothingToSlash);
+
+ let mut delegation = <Delegators<T>>::get(&delegator).ok_or(Error::<T>::NotDelegator)?;
+ ensure!(delegation.agent == agent, Error::<T>::NotAgent);
+ ensure!(delegation.amount >= amount, Error::<T>::NotEnoughFunds);
+
+ // slash the delegator.
+ let (mut credit, missing) =
+ T::Currency::slash(&HoldReason::StakingDelegation.into(), &delegator, amount);
+
+ defensive_assert!(missing.is_zero(), "slash should have been fully applied");
+
+ let actual_slash = credit.peek();
+
+ // remove the applied slash amount from the agent.
+ agent_ledger.remove_slash(actual_slash).save();
+ delegation.amount =
+ delegation.amount.checked_sub(&actual_slash).ok_or(ArithmeticError::Underflow)?;
+ delegation.update_or_kill(&delegator);
+
+ if let Some(reporter) = maybe_reporter {
+ let reward_payout: BalanceOf<T> = T::SlashRewardFraction::get() * actual_slash;
+ let (reporter_reward, rest) = credit.split(reward_payout);
+
+ // `credit` is the amount that we provide to `T::OnSlash`.
+ credit = rest;
+
+ // reward the reporter or drop the reward.
+ let _ = T::Currency::resolve(&reporter, reporter_reward);
+ }
+
+ T::OnSlash::on_unbalanced(credit);
+
+ Self::deposit_event(Event::<T>::Slashed { agent, delegator, amount });
+
+ Ok(())
+ }
+
+ /// Total balance that is available for stake. Includes the already staked amount.
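+ /// Returns zero if `who` is not registered as an agent. Compiled only for tests.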
+ #[cfg(test)]
+ pub(crate) fn stakeable_balance(who: Agent<T::AccountId>) -> BalanceOf<T> {
+ AgentLedgerOuter::<T>::get(&who.get())
+ .map(|agent| agent.ledger.stakeable_balance())
+ .unwrap_or_default()
+ }
+}
+
+#[cfg(any(test, feature = "try-runtime"))]
+use sp_std::collections::btree_map::BTreeMap;
+
+#[cfg(any(test, feature = "try-runtime"))]
+impl<T: Config> Pallet<T> {
+ pub(crate) fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> {
+ // build maps to avoid reading storage multiple times.
+ let delegation_map = Delegators::<T>::iter().collect::<BTreeMap<_, _>>();
+ let ledger_map = Agents::<T>::iter().collect::<BTreeMap<_, _>>();
+
+ Self::check_delegates(ledger_map.clone())?;
+ Self::check_delegators(delegation_map, ledger_map)?;
+
+ Ok(())
+ }
+
+ fn check_delegates(
+ ledgers: BTreeMap<T::AccountId, AgentLedger<T>>,
+ ) -> Result<(), sp_runtime::TryRuntimeError> {
+ for (agent, ledger) in ledgers {
+ ensure!(
+ matches!(
+ T::CoreStaking::status(&agent).expect("agent should be bonded"),
+ sp_staking::StakerStatus::Nominator(_) | sp_staking::StakerStatus::Idle
+ ),
+ "agent should be bonded and not a validator"
+ );
+
+ ensure!(
+ ledger.stakeable_balance() >=
+ T::CoreStaking::total_stake(&agent)
+ .expect("agent should exist as a nominator"),
+ "cannot stake more than balance"
+ );
+ }
+
+ Ok(())
+ }
+
+ fn check_delegators(
+ delegations: BTreeMap<T::AccountId, Delegation<T>>,
+ ledger: BTreeMap<T::AccountId, AgentLedger<T>>,
+ ) -> Result<(), sp_runtime::TryRuntimeError> {
+ let mut delegation_aggregation = BTreeMap::<T::AccountId, BalanceOf<T>>::new();
+ for (delegator, delegation) in delegations.iter() {
+ ensure!(
+ T::CoreStaking::status(delegator).is_err(),
+ "delegator should not be directly staked"
+ );
+ ensure!(!Self::is_agent(delegator), "delegator cannot be an agent");
+
+ delegation_aggregation
+ .entry(delegation.agent.clone())
+ .and_modify(|e| *e += delegation.amount)
+ .or_insert(delegation.amount);
+ }
+
+ for (agent, total_delegated) in delegation_aggregation {
+ ensure!(!Self::is_delegator(&agent), "agent cannot be a delegator");
+
+ let ledger = ledger.get(&agent).expect("ledger should exist");
+ ensure!(
+ ledger.total_delegated == total_delegated,
+ "ledger total delegated should match delegations"
+ );
+ }
+
+ Ok(())
+ }
+}
diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c1875055f2fec78ea80c908bb11379291efad4a8
--- /dev/null
+++ b/substrate/frame/delegated-staking/src/mock.rs
@@ -0,0 +1,349 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
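+//! Test environment for the delegated-staking pallet: wires together `pallet-staking`
+//! (as `CoreStaking`), `pallet-balances` and `pallet-nomination-pools` (through the
+//! `DelegateStake` adapter).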
+ +use crate::{self as delegated_staking, types::AgentLedgerOuter}; +use frame_support::{ + assert_ok, derive_impl, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, Currency}, + PalletId, +}; + +use sp_runtime::{traits::IdentityLookup, BuildStorage, Perbill}; + +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; +use frame_support::dispatch::RawOrigin; +use pallet_staking::{ActiveEra, ActiveEraInfo, CurrentEra}; +use sp_core::U256; +use sp_runtime::traits::Convert; +use sp_staking::{Agent, Stake, StakingInterface}; + +pub type T = Runtime; +type Block = frame_system::mocking::MockBlock; +pub type AccountId = u128; + +pub const GENESIS_VALIDATOR: AccountId = 1; +pub const GENESIS_NOMINATOR_ONE: AccountId = 101; +pub const GENESIS_NOMINATOR_TWO: AccountId = 102; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; + type AccountData = pallet_balances::AccountData; + type AccountId = AccountId; + type Lookup = IdentityLookup; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<5>; + type WeightInfo = (); +} + +pub type Balance = u128; + +parameter_types! { + pub static ExistentialDeposit: Balance = 1; +} +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<128>; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = ConstU32<1>; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; +} + +pallet_staking_reward_curve::build! { + const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} + +parameter_types! 
{ + pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; + pub static BondingDuration: u32 = 3; + pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); +} +pub struct OnChainSeqPhragmen; +impl onchain::Config for OnChainSeqPhragmen { + type System = Runtime; + type Solver = SequentialPhragmen; + type DataProvider = Staking; + type WeightInfo = (); + type MaxWinners = ConstU32<100>; + type Bounds = ElectionsBoundsOnChain; +} + +impl pallet_staking::Config for Runtime { + type Currency = Balances; + type CurrencyBalance = Balance; + type UnixTime = pallet_timestamp::Pallet; + type CurrencyToVote = (); + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = ConstU32<1>; + type SlashDeferDuration = (); + type AdminOrigin = frame_system::EnsureRoot; + type BondingDuration = BondingDuration; + type SessionInterface = (); + type EraPayout = pallet_staking::ConvertCurve; + type NextNewSession = (); + type HistoryDepth = ConstU32<84>; + type MaxExposurePageSize = ConstU32<64>; + type ElectionProvider = onchain::OnChainExecution; + type GenesisElectionProvider = Self::ElectionProvider; + type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; + type MaxUnlockingChunks = ConstU32<10>; + type MaxControllersInDeprecationBatch = ConstU32<100>; + type EventListeners = (Pools, DelegatedStaking); + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; +} + +parameter_types! { + pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk"); + pub const SlashRewardFraction: Perbill = Perbill::from_percent(10); +} +impl delegated_staking::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = DelegatedStakingPalletId; + type Currency = Balances; + type OnSlash = (); + type SlashRewardFraction = SlashRewardFraction; + type RuntimeHoldReason = RuntimeHoldReason; + type CoreStaking = Staking; +} + +pub struct BalanceToU256; +impl Convert for BalanceToU256 { + fn convert(n: Balance) -> U256 { + n.into() + } +} +pub struct U256ToBalance; +impl Convert for U256ToBalance { + fn convert(n: U256) -> Balance { + n.try_into().unwrap() + } +} + +parameter_types! 
{ + pub static MaxUnbonding: u32 = 8; + pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); +} +impl pallet_nomination_pools::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type Currency = Balances; + type RuntimeFreezeReason = RuntimeFreezeReason; + type RewardCounter = sp_runtime::FixedU128; + type BalanceToU256 = BalanceToU256; + type U256ToBalance = U256ToBalance; + type PostUnbondingPoolsWindow = ConstU32<2>; + type PalletId = PoolsPalletId; + type MaxMetadataLen = ConstU32<256>; + type MaxUnbonding = MaxUnbonding; + type MaxPointsToBalance = frame_support::traits::ConstU8<10>; + type StakeAdapter = + pallet_nomination_pools::adapter::DelegateStake; + type AdminOrigin = frame_system::EnsureRoot; +} + +frame_support::construct_runtime!( + pub enum Runtime { + System: frame_system, + Timestamp: pallet_timestamp, + Balances: pallet_balances, + Staking: pallet_staking, + Pools: pallet_nomination_pools, + DelegatedStaking: delegated_staking, + } +); + +#[derive(Default)] +pub struct ExtBuilder {} + +impl ExtBuilder { + fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut storage = + frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let _ = pallet_balances::GenesisConfig:: { + balances: vec![ + (GENESIS_VALIDATOR, 10000), + (GENESIS_NOMINATOR_ONE, 1000), + (GENESIS_NOMINATOR_TWO, 2000), + ], + } + .assimilate_storage(&mut storage); + + let stakers = vec![ + ( + GENESIS_VALIDATOR, + GENESIS_VALIDATOR, + 1000, + sp_staking::StakerStatus::::Validator, + ), + ( + GENESIS_NOMINATOR_ONE, + GENESIS_NOMINATOR_ONE, + 100, + sp_staking::StakerStatus::::Nominator(vec![1]), + ), + ( + GENESIS_NOMINATOR_TWO, + GENESIS_NOMINATOR_TWO, + 200, + sp_staking::StakerStatus::::Nominator(vec![1]), + ), + ]; + + let _ = pallet_staking::GenesisConfig:: { + stakers: stakers.clone(), + // ideal validator count + validator_count: 2, + minimum_validator_count: 1, + invulnerables: vec![], + slash_reward_fraction: Perbill::from_percent(10), + min_nominator_bond: ExistentialDeposit::get(), + min_validator_bond: ExistentialDeposit::get(), + ..Default::default() + } + .assimilate_storage(&mut storage); + + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + // for events to be deposited. + frame_system::Pallet::::set_block_number(1); + // set era for staking. + start_era(0); + }); + + ext + } + pub fn build_and_execute(self, test: impl FnOnce()) { + sp_tracing::try_init_simple(); + let mut ext = self.build(); + ext.execute_with(test); + ext.execute_with(|| { + #[cfg(feature = "try-runtime")] + >::try_state( + frame_system::Pallet::::block_number(), + frame_support::traits::TryStateSelect::All, + ) + .unwrap(); + #[cfg(not(feature = "try-runtime"))] + DelegatedStaking::do_try_state().unwrap(); + }); + } +} + +/// fund and return who. +pub(crate) fn fund(who: &AccountId, amount: Balance) { + let _ = Balances::deposit_creating(who, amount); +} + +/// Sets up delegation for passed delegators, returns total delegated amount. +/// +/// `delegate_amount` is incremented by the amount `increment` starting with `base_delegate_amount` +/// from lower index to higher index of delegators. 
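+///
+/// A hypothetical invocation (account ids and amounts are illustrative): with three
+/// delegators, `base_delegate_amount = 10` and `increment = 10`, the delegators delegate
+/// 10, 20 and 30 tokens respectively and the total, 60, is returned.
+/// ```ignore
+/// let total = setup_delegation_stake(agent, reward_acc, vec![301, 302, 303], 10, 10);
+/// assert_eq!(total, 10 + 20 + 30);
+/// ```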
+pub(crate) fn setup_delegation_stake( + agent: AccountId, + reward_acc: AccountId, + delegators: Vec, + base_delegate_amount: Balance, + increment: Balance, +) -> Balance { + fund(&agent, 100); + assert_ok!(DelegatedStaking::register_agent(RawOrigin::Signed(agent).into(), reward_acc)); + let mut delegated_amount: Balance = 0; + for (index, delegator) in delegators.iter().enumerate() { + let amount_to_delegate = base_delegate_amount + increment * index as Balance; + delegated_amount += amount_to_delegate; + + fund(delegator, amount_to_delegate + ExistentialDeposit::get()); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(*delegator).into(), + agent, + amount_to_delegate + )); + } + + // sanity checks + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), delegated_amount); + assert_eq!(AgentLedgerOuter::::get(&agent).unwrap().available_to_bond(), 0); + + delegated_amount +} + +pub(crate) fn start_era(era: sp_staking::EraIndex) { + CurrentEra::::set(Some(era)); + ActiveEra::::set(Some(ActiveEraInfo { index: era, start: None })); +} + +pub(crate) fn eq_stake(who: AccountId, total: Balance, active: Balance) -> bool { + Staking::stake(&who).unwrap() == Stake { total, active } && + get_agent_ledger(&who).ledger.stakeable_balance() == total +} + +pub(crate) fn get_agent_ledger(agent: &AccountId) -> AgentLedgerOuter { + AgentLedgerOuter::::get(agent).expect("delegate should exist") +} + +parameter_types! { + static ObservedEventsDelegatedStaking: usize = 0; + static ObservedEventsPools: usize = 0; +} + +pub(crate) fn pool_events_since_last_call() -> Vec> { + let events = System::read_events_for_pallet::>(); + let already_seen = ObservedEventsPools::get(); + ObservedEventsPools::set(events.len()); + events.into_iter().skip(already_seen).collect() +} + +pub(crate) fn events_since_last_call() -> Vec> { + let events = System::read_events_for_pallet::>(); + let already_seen = ObservedEventsDelegatedStaking::get(); + ObservedEventsDelegatedStaking::set(events.len()); + events.into_iter().skip(already_seen).collect() +} diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..d40539d40dddac6c517691bef58a41509949f421 --- /dev/null +++ b/substrate/frame/delegated-staking/src/tests.rs @@ -0,0 +1,1243 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-delegated-staking. 
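+//!
+//! Covers agent registration, delegation and release, slash application, and the
+//! integration of delegated staking with `pallet-staking` (`staking_integration`) and
+//! `pallet-nomination-pools` (`pool_integration`).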
+ +use super::*; +use crate::mock::*; +use frame_support::{assert_noop, assert_ok, traits::fungible::InspectHold}; +use pallet_nomination_pools::{Error as PoolsError, Event as PoolsEvent}; +use pallet_staking::Error as StakingError; +use sp_staking::{Agent, DelegationInterface, Delegator, StakerStatus}; + +#[test] +fn create_an_agent_with_first_delegator() { + ExtBuilder::default().build_and_execute(|| { + let agent: AccountId = 200; + let reward_account: AccountId = 201; + let delegator: AccountId = 202; + + // set intention to accept delegation. + fund(&agent, 1000); + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent).into(), + reward_account + )); + + // delegate to this account + fund(&delegator, 1000); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator).into(), + agent, + 100 + )); + + // verify + assert!(DelegatedStaking::is_agent(&agent)); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), 100); + assert_eq!( + Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), + 100 + ); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(delegator)), 100); + }); +} + +#[test] +fn cannot_become_agent() { + ExtBuilder::default().build_and_execute(|| { + // cannot set reward account same as agent account + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(100).into(), 100), + Error::::InvalidRewardDestination + ); + + // an existing validator cannot become agent + assert_noop!( + DelegatedStaking::register_agent( + RawOrigin::Signed(mock::GENESIS_VALIDATOR).into(), + 100 + ), + Error::::AlreadyStaking + ); + + // an existing direct staker to `CoreStaking` cannot become an agent. + assert_noop!( + DelegatedStaking::register_agent( + RawOrigin::Signed(mock::GENESIS_NOMINATOR_ONE).into(), + 100 + ), + Error::::AlreadyStaking + ); + assert_noop!( + DelegatedStaking::register_agent( + RawOrigin::Signed(mock::GENESIS_NOMINATOR_TWO).into(), + 100 + ), + Error::::AlreadyStaking + ); + }); +} + +#[test] +fn create_multiple_delegators() { + ExtBuilder::default().build_and_execute(|| { + let agent: AccountId = 200; + let reward_account: AccountId = 201; + + // stakeable balance is 0 for non agent + fund(&agent, 1000); + assert!(!DelegatedStaking::is_agent(&agent)); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), 0); + + // set intention to accept delegation. + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent).into(), + reward_account + )); + + // create 100 delegators + for i in 202..302 { + fund(&i, 100 + ExistentialDeposit::get()); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(i).into(), + agent, + 100 + )); + // Balance of 100 held on delegator account for delegating to the agent. 
+ assert_eq!(Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &i), 100); + } + + // verify + assert!(DelegatedStaking::is_agent(&agent)); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), 100 * 100); + }); +} + +#[test] +fn agent_restrictions() { + // Similar to creating a nomination pool + ExtBuilder::default().build_and_execute(|| { + let agent_one = 200; + let delegator_one = 210; + fund(&agent_one, 100); + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent_one).into(), + agent_one + 1 + )); + fund(&delegator_one, 200); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + agent_one, + 100 + )); + + let agent_two = 300; + let delegator_two = 310; + fund(&agent_two, 100); + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent_two).into(), + agent_two + 1 + )); + fund(&delegator_two, 200); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_two).into(), + agent_two, + 100 + )); + + // agent one tries to delegate to agent 2 + assert_noop!( + DelegatedStaking::delegate_to_agent(RawOrigin::Signed(agent_one).into(), agent_two, 10), + Error::::InvalidDelegation + ); + + // agent one tries to delegate to a delegator + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(agent_one).into(), + delegator_one, + 10 + ), + Error::::InvalidDelegation + ); + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(agent_one).into(), + delegator_two, + 10 + ), + Error::::InvalidDelegation + ); + + // delegator one tries to delegate to agent 2 as well (it already delegates to agent + // 1) + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + agent_two, + 10 + ), + Error::::InvalidDelegation + ); + + // cannot delegate to non agents. + let non_agent = 201; + // give it some funds + fund(&non_agent, 200); + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + non_agent, + 10 + ), + Error::::InvalidDelegation + ); + + // cannot delegate to a delegator + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + delegator_two, + 10 + ), + Error::::InvalidDelegation + ); + + // delegator cannot delegate to self + assert_noop!( + DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator_one).into(), + delegator_one, + 10 + ), + Error::::InvalidDelegation + ); + + // agent cannot delegate to self + assert_noop!( + DelegatedStaking::delegate_to_agent(RawOrigin::Signed(agent_one).into(), agent_one, 10), + Error::::InvalidDelegation + ); + }); +} + +#[test] +fn apply_pending_slash() { + ExtBuilder::default().build_and_execute(|| { + start_era(1); + let agent: AccountId = 200; + let reward_acc: AccountId = 201; + let delegators: Vec = (301..=350).collect(); + let reporter: AccountId = 400; + + let total_staked = setup_delegation_stake(agent, reward_acc, delegators.clone(), 10, 10); + + start_era(4); + // slash half of the stake + pallet_staking::slashing::do_slash::( + &agent, + total_staked / 2, + &mut Default::default(), + &mut Default::default(), + 3, + ); + + // agent cannot slash an account that is not its delegator. 
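+ // 351 and 352 delegate to a different agent (210), so slashing them through `agent`
+ // must fail with `NotAgent`.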
+ setup_delegation_stake(210, 211, (351..=352).collect(), 100, 0); + assert_noop!( + ::delegator_slash( + Agent::from(agent), + Delegator::from(351), + 1, + Some(400) + ), + Error::::NotAgent + ); + // or a non delegator account + fund(&353, 100); + assert_noop!( + ::delegator_slash( + Agent::from(agent), + Delegator::from(353), + 1, + Some(400) + ), + Error::::NotDelegator + ); + + // ensure bookkept pending slash is correct. + assert_eq!(get_agent_ledger(&agent).ledger.pending_slash, total_staked / 2); + let mut old_reporter_balance = Balances::free_balance(reporter); + + // lets apply the pending slash on delegators. + for i in delegators { + // balance before slash + let initial_pending_slash = get_agent_ledger(&agent).ledger.pending_slash; + assert!(initial_pending_slash > 0); + let unslashed_balance = DelegatedStaking::held_balance_of(Delegator::from(i)); + let slash = unslashed_balance / 2; + // slash half of delegator's delegation. + assert_ok!(::delegator_slash( + Agent::from(agent), + Delegator::from(i), + slash, + Some(400) + )); + + // balance after slash. + assert_eq!( + DelegatedStaking::held_balance_of(Delegator::from(i)), + unslashed_balance - slash + ); + // pending slash is reduced by the amount slashed. + assert_eq!( + get_agent_ledger(&agent).ledger.pending_slash, + initial_pending_slash - slash + ); + // reporter get 10% of the slash amount. + assert_eq!( + Balances::free_balance(reporter) - old_reporter_balance, + ::slash_reward_fraction() * slash, + ); + // update old balance + old_reporter_balance = Balances::free_balance(reporter); + } + + // nothing to slash anymore + assert_eq!(get_agent_ledger(&agent).ledger.pending_slash, 0); + + // cannot slash anymore + assert_noop!( + ::delegator_slash( + Agent::from(agent), + Delegator::from(350), + 1, + None + ), + Error::::NothingToSlash + ); + }); +} + +/// Integration tests with pallet-staking. 
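+///
+/// These exercise bonding and unbonding of delegated funds through `CoreStaking`,
+/// withdrawal ordering, reward destination restrictions and the migration of an existing
+/// nominator into an `Agent`.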
+mod staking_integration { + use super::*; + use pallet_staking::RewardDestination; + use sp_staking::Stake; + + #[test] + fn bond() { + ExtBuilder::default().build_and_execute(|| { + let agent: AccountId = 99; + let reward_acc: AccountId = 100; + assert_eq!(Staking::status(&agent), Err(StakingError::::NotStash.into())); + + // set intention to become an agent + fund(&agent, 100); + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent).into(), + reward_acc + )); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), 0); + + let mut delegated_balance: Balance = 0; + + // set some delegations + for delegator in 200..250 { + fund(&delegator, 200); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator).into(), + agent, + 100 + )); + delegated_balance += 100; + assert_eq!( + Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), + 100 + ); + assert_eq!( + DelegatedStaking::delegator_balance(Delegator::from(delegator)).unwrap(), + 100 + ); + + let agent_obj = get_agent_ledger(&agent); + assert_eq!(agent_obj.ledger.stakeable_balance(), delegated_balance); + assert_eq!(agent_obj.available_to_bond(), 0); + assert_eq!(agent_obj.bonded_stake(), delegated_balance); + } + + assert_eq!(Staking::stake(&agent).unwrap(), Stake { total: 50 * 100, active: 50 * 100 }) + }); + } + + #[test] + fn withdraw_test() { + ExtBuilder::default().build_and_execute(|| { + // initial era + start_era(1); + let agent: AccountId = 200; + let reward_acc: AccountId = 201; + let delegators: Vec = (301..=350).collect(); + let total_staked = + setup_delegation_stake(agent, reward_acc, delegators.clone(), 10, 10); + + // lets go to a new era + start_era(2); + + assert!(eq_stake(agent, total_staked, total_staked)); + // Withdrawing without unbonding would fail. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 301, 50, 0), + Error::::NotEnoughFunds + ); + + // 305 wants to unbond 50 in era 2, withdrawable in era 5. + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 50)); + + // 310 wants to unbond 100 in era 3, withdrawable in era 6. + start_era(3); + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 100)); + + // 320 wants to unbond 200 in era 4, withdrawable in era 7. + start_era(4); + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 200)); + + // active stake is now reduced.. + let expected_active = total_staked - (50 + 100 + 200); + assert!(eq_stake(agent, total_staked, expected_active)); + + // nothing to withdraw at era 4 + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 305, 50, 0), + Error::::NotEnoughFunds + ); + + assert_eq!(get_agent_ledger(&agent).available_to_bond(), 0); + // full amount is still delegated + assert_eq!(get_agent_ledger(&agent).ledger.effective_balance(), total_staked); + + start_era(5); + // at era 5, 50 tokens are withdrawable, cannot withdraw more. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 305, 51, 0), + Error::::NotEnoughFunds + ); + // less is possible + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 305, + 30, + 0 + )); + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 305, + 20, + 0 + )); + + // Lets go to future era where everything is unbonded. Withdrawable amount: 100 + 200 + start_era(7); + // 305 has no more amount delegated so it cannot withdraw. 
+ assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 305, 5, 0), + Error::::NotDelegator + ); + // 309 is an active delegator but has total delegation of 90, so it cannot withdraw more + // than that. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 309, 91, 0), + Error::::NotEnoughFunds + ); + // 310 cannot withdraw more than delegated funds. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 310, 101, 0), + Error::::NotEnoughFunds + ); + // but can withdraw all its delegation amount. + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 310, + 100, + 0 + )); + // 320 can withdraw all its delegation amount. + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 320, + 200, + 0 + )); + + // cannot withdraw anything more.. + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 301, 1, 0), + Error::::NotEnoughFunds + ); + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 350, 1, 0), + Error::::NotEnoughFunds + ); + }); + } + + #[test] + fn withdraw_happens_with_unbonded_balance_first() { + ExtBuilder::default().build_and_execute(|| { + start_era(1); + let agent = 200; + setup_delegation_stake(agent, 201, (300..350).collect(), 100, 0); + + // verify withdraw not possible yet + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 100, 0), + Error::::NotEnoughFunds + ); + + // fill up unlocking chunks in core staking. + // 10 is the max chunks + for i in 2..=11 { + start_era(i); + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); + // no withdrawals from core staking yet. + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 0); + } + + // another unbond would trigger withdrawal + start_era(12); + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 10)); + + // 8 previous unbonds would be withdrawn as they were already unlocked. Unlocking period + // is 3 eras. + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 8 * 10); + + // release some delegation now. + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 300, + 40, + 0 + )); + assert_eq!(get_agent_ledger(&agent).ledger.unclaimed_withdrawals, 80 - 40); + + // cannot release more than available + assert_noop!( + DelegatedStaking::release_delegation(RawOrigin::Signed(agent).into(), 300, 50, 0), + Error::::NotEnoughFunds + ); + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + 300, + 40, + 0 + )); + + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(300)), 100 - 80); + }); + } + + #[test] + fn reward_destination_restrictions() { + ExtBuilder::default().build_and_execute(|| { + // give some funds to 200 + fund(&200, 1000); + let balance_200 = Balances::free_balance(200); + + // `Agent` account cannot be reward destination + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(200).into(), 200), + Error::::InvalidRewardDestination + ); + + // different reward account works + assert_ok!(DelegatedStaking::register_agent(RawOrigin::Signed(200).into(), 201)); + // add some delegations to it + fund(&300, 1000); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(300).into(), + 200, + 100 + )); + + // update_payee to self fails. 
+ assert_noop!( + ::update_payee(&200, &200), + StakingError::::RewardDestinationRestricted + ); + + // passing correct reward destination works + assert_ok!(::update_payee(&200, &201)); + + // amount is staked correctly + assert!(eq_stake(200, 100, 100)); + assert_eq!(get_agent_ledger(&200).available_to_bond(), 0); + assert_eq!(get_agent_ledger(&200).ledger.effective_balance(), 100); + + // free balance of delegate is untouched + assert_eq!(Balances::free_balance(200), balance_200); + }); + } + + #[test] + fn agent_restrictions() { + ExtBuilder::default().build_and_execute(|| { + setup_delegation_stake(200, 201, (202..203).collect(), 100, 0); + + // Registering again is noop + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(200).into(), 201), + Error::::NotAllowed + ); + // a delegator cannot become delegate + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(202).into(), 203), + Error::::NotAllowed + ); + // existing staker cannot become a delegate + assert_noop!( + DelegatedStaking::register_agent( + RawOrigin::Signed(GENESIS_NOMINATOR_ONE).into(), + 201 + ), + Error::::AlreadyStaking + ); + assert_noop!( + DelegatedStaking::register_agent(RawOrigin::Signed(GENESIS_VALIDATOR).into(), 201), + Error::::AlreadyStaking + ); + }); + } + + #[test] + fn migration_works() { + ExtBuilder::default().build_and_execute(|| { + // add a nominator + let staked_amount = 4000; + let agent_amount = 5000; + fund(&200, agent_amount); + + assert_ok!(Staking::bond( + RuntimeOrigin::signed(200), + staked_amount, + RewardDestination::Account(201) + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(200), vec![GENESIS_VALIDATOR],)); + let init_stake = Staking::stake(&200).unwrap(); + + // scenario: 200 is a pool account, and the stake comes from its 4 delegators (300..304) + // in equal parts. lets try to migrate this nominator into delegate based stake. + + // all balance currently is in 200 + assert_eq!(Balances::free_balance(200), agent_amount); + + // to migrate, nominator needs to set an account as a proxy delegator where staked funds + // will be moved and delegated back to this old nominator account. This should be funded + // with at least ED. + let proxy_delegator = + DelegatedStaking::generate_proxy_delegator(Agent::from(200)).get(); + + assert_ok!(DelegatedStaking::migrate_to_agent(RawOrigin::Signed(200).into(), 201)); + + // verify all went well + let mut expected_proxy_delegated_amount = agent_amount; + assert_eq!( + Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &proxy_delegator), + expected_proxy_delegated_amount + ); + // stake amount is transferred from delegate to proxy delegator account. 
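+ // The agent account itself keeps no free balance; its bonded stake is now backed by
+ // the funds held on `proxy_delegator`.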
+ assert_eq!(Balances::free_balance(200), 0); + assert_eq!(Staking::stake(&200).unwrap(), init_stake); + assert_eq!(get_agent_ledger(&200).ledger.effective_balance(), agent_amount); + assert_eq!(get_agent_ledger(&200).available_to_bond(), 0); + assert_eq!( + get_agent_ledger(&200).ledger.unclaimed_withdrawals, + agent_amount - staked_amount + ); + + // now lets migrate the delegators + let delegator_share = agent_amount / 4; + for delegator in 300..304 { + assert_eq!(Balances::free_balance(delegator), 0); + // fund them with ED + fund(&delegator, ExistentialDeposit::get()); + // migrate 1/4th amount into each delegator + assert_ok!(DelegatedStaking::migrate_delegation( + RawOrigin::Signed(200).into(), + delegator, + delegator_share + )); + assert_eq!( + Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), + delegator_share + ); + expected_proxy_delegated_amount -= delegator_share; + assert_eq!( + Balances::balance_on_hold( + &HoldReason::StakingDelegation.into(), + &proxy_delegator + ), + expected_proxy_delegated_amount + ); + + // delegate stake is unchanged. + assert_eq!(Staking::stake(&200).unwrap(), init_stake); + assert_eq!(get_agent_ledger(&200).ledger.effective_balance(), agent_amount); + assert_eq!(get_agent_ledger(&200).available_to_bond(), 0); + assert_eq!( + get_agent_ledger(&200).ledger.unclaimed_withdrawals, + agent_amount - staked_amount + ); + } + + // cannot use migrate delegator anymore + assert_noop!( + DelegatedStaking::migrate_delegation(RawOrigin::Signed(200).into(), 305, 1), + Error::::NotEnoughFunds + ); + }); + } +} + +mod pool_integration { + use super::*; + use pallet_nomination_pools::{BondExtra, BondedPools, PoolState}; + + #[test] + fn create_pool_test() { + ExtBuilder::default().build_and_execute(|| { + let creator: AccountId = 100; + fund(&creator, 500); + let delegate_amount = 200; + + // nothing held initially + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(creator)), 0); + + // create pool + assert_ok!(Pools::create( + RawOrigin::Signed(creator).into(), + delegate_amount, + creator, + creator, + creator + )); + + // correct amount is locked in depositor's account. + assert_eq!( + DelegatedStaking::held_balance_of(Delegator::from(creator)), + delegate_amount + ); + + let pool_account = Pools::generate_bonded_account(1); + let agent = get_agent_ledger(&pool_account); + + // verify state + assert_eq!(agent.ledger.effective_balance(), delegate_amount); + assert_eq!(agent.available_to_bond(), 0); + assert_eq!(agent.total_unbonded(), 0); + }); + } + + #[test] + fn join_pool() { + ExtBuilder::default().build_and_execute(|| { + // create a pool + let pool_id = create_pool(100, 200); + // keep track of staked amount. + let mut staked_amount: Balance = 200; + + // fund delegator + let delegator: AccountId = 300; + fund(&delegator, 500); + // nothing held initially + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(delegator)), 0); + + // delegator joins pool + assert_ok!(Pools::join(RawOrigin::Signed(delegator).into(), 100, pool_id)); + staked_amount += 100; + + // correct amount is locked in depositor's account. + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(delegator)), 100); + + // delegator is not actively exposed to core staking. 
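+ // Only the pool's bonded (agent) account is a staker in `pallet-staking`; members
+ // interact with it purely through delegation.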
+ assert_eq!(Staking::status(&delegator), Err(StakingError::::NotStash.into())); + + let pool_agent = get_agent_ledger(&Pools::generate_bonded_account(1)); + // verify state + assert_eq!(pool_agent.ledger.effective_balance(), staked_amount); + assert_eq!(pool_agent.bonded_stake(), staked_amount); + assert_eq!(pool_agent.available_to_bond(), 0); + assert_eq!(pool_agent.total_unbonded(), 0); + + // cannot reap agent in staking. + assert_noop!( + Staking::reap_stash(RuntimeOrigin::signed(100), pool_agent.key, 0), + StakingError::::VirtualStakerNotAllowed + ); + + // let a bunch of delegators join this pool + for i in 301..350 { + fund(&i, 500); + assert_ok!(Pools::join(RawOrigin::Signed(i).into(), 100 + i, pool_id)); + staked_amount += 100 + i; + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(i)), 100 + i); + } + + let pool_agent = pool_agent.reload().unwrap(); + assert_eq!(pool_agent.ledger.effective_balance(), staked_amount); + assert_eq!(pool_agent.bonded_stake(), staked_amount); + assert_eq!(pool_agent.available_to_bond(), 0); + assert_eq!(pool_agent.total_unbonded(), 0); + }); + } + + #[test] + fn bond_extra_to_pool() { + ExtBuilder::default().build_and_execute(|| { + let pool_id = create_pool(100, 200); + add_delegators_to_pool(pool_id, (300..310).collect(), 100); + let mut staked_amount = 200 + 100 * 10; + assert_eq!(get_pool_agent(pool_id).bonded_stake(), staked_amount); + + // bond extra to pool + for i in 300..310 { + assert_ok!(Pools::bond_extra( + RawOrigin::Signed(i).into(), + BondExtra::FreeBalance(50) + )); + staked_amount += 50; + assert_eq!(get_pool_agent(pool_id).bonded_stake(), staked_amount); + } + }); + } + + #[test] + fn claim_pool_rewards() { + ExtBuilder::default().build_and_execute(|| { + let creator = 100; + let creator_stake = 1000; + let pool_id = create_pool(creator, creator_stake); + add_delegators_to_pool(pool_id, (300..310).collect(), 100); + add_delegators_to_pool(pool_id, (310..320).collect(), 200); + let total_staked = creator_stake + 100 * 10 + 200 * 10; + + // give some rewards + let reward_acc = Pools::generate_reward_account(pool_id); + let reward_amount = 1000; + fund(&reward_acc, reward_amount); + + // claim rewards + for i in 300..320 { + let pre_balance = Balances::free_balance(i); + let delegator_staked_balance = + DelegatedStaking::held_balance_of(Delegator::from(i)); + // payout reward + assert_ok!(Pools::claim_payout(RawOrigin::Signed(i).into())); + + let reward = Balances::free_balance(i) - pre_balance; + assert_eq!(reward, delegator_staked_balance * reward_amount / total_staked); + } + + // payout creator + let pre_balance = Balances::free_balance(creator); + assert_ok!(Pools::claim_payout(RawOrigin::Signed(creator).into())); + // verify they are paid out correctly + let reward = Balances::free_balance(creator) - pre_balance; + assert_eq!(reward, creator_stake * reward_amount / total_staked); + + // reward account should only have left minimum balance after paying out everyone. + assert_eq!(Balances::free_balance(reward_acc), ExistentialDeposit::get()); + }); + } + + #[test] + fn withdraw_from_pool() { + ExtBuilder::default().build_and_execute(|| { + // initial era + start_era(1); + + let pool_id = create_pool(100, 1000); + let bond_amount = 200; + add_delegators_to_pool(pool_id, (300..310).collect(), bond_amount); + let total_staked = 1000 + bond_amount * 10; + let pool_acc = Pools::generate_bonded_account(pool_id); + + start_era(2); + // nothing to release yet. 
+ assert_noop!( + Pools::withdraw_unbonded(RawOrigin::Signed(301).into(), 301, 0), + PoolsError::::SubPoolsNotFound + ); + + // 301 wants to unbond 50 in era 2, withdrawable in era 5. + assert_ok!(Pools::unbond(RawOrigin::Signed(301).into(), 301, 50)); + + // 302 wants to unbond 100 in era 3, withdrawable in era 6. + start_era(3); + assert_ok!(Pools::unbond(RawOrigin::Signed(302).into(), 302, 100)); + + // 303 wants to unbond 200 in era 4, withdrawable in era 7. + start_era(4); + assert_ok!(Pools::unbond(RawOrigin::Signed(303).into(), 303, 200)); + + // active stake is now reduced.. + let expected_active = total_staked - (50 + 100 + 200); + assert!(eq_stake(pool_acc, total_staked, expected_active)); + + // nothing to withdraw at era 4 + for i in 301..310 { + assert_noop!( + Pools::withdraw_unbonded(RawOrigin::Signed(i).into(), i, 0), + PoolsError::::CannotWithdrawAny + ); + } + + assert!(eq_stake(pool_acc, total_staked, expected_active)); + + start_era(5); + // at era 5, 301 can withdraw. + + System::reset_events(); + let held_301 = DelegatedStaking::held_balance_of(Delegator::from(301)); + let free_301 = Balances::free_balance(301); + + assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(301).into(), 301, 0)); + assert_eq!( + events_since_last_call(), + vec![Event::Released { agent: pool_acc, delegator: 301, amount: 50 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![PoolsEvent::Withdrawn { member: 301, pool_id, balance: 50, points: 50 }] + ); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(301)), held_301 - 50); + assert_eq!(Balances::free_balance(301), free_301 + 50); + + start_era(7); + // era 7 both delegators can withdraw + assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(302).into(), 302, 0)); + assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(303).into(), 303, 0)); + + assert_eq!( + events_since_last_call(), + vec![ + Event::Released { agent: pool_acc, delegator: 302, amount: 100 }, + Event::Released { agent: pool_acc, delegator: 303, amount: 200 }, + ] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Withdrawn { member: 302, pool_id, balance: 100, points: 100 }, + PoolsEvent::Withdrawn { member: 303, pool_id, balance: 200, points: 200 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 303 }, + ] + ); + + // 303 is killed + assert!(!Delegators::::contains_key(303)); + }); + } + + #[test] + fn pool_withdraw_unbonded() { + ExtBuilder::default().build_and_execute(|| { + // initial era + start_era(1); + let pool_id = create_pool(100, 1000); + add_delegators_to_pool(pool_id, (300..310).collect(), 200); + + start_era(2); + // 1000 tokens to be unbonded in era 5. + for i in 300..310 { + assert_ok!(Pools::unbond(RawOrigin::Signed(i).into(), i, 100)); + } + + start_era(3); + // 500 tokens to be unbonded in era 6. + for i in 300..310 { + assert_ok!(Pools::unbond(RawOrigin::Signed(i).into(), i, 50)); + } + + start_era(5); + // withdraw pool should withdraw 1000 tokens + assert_ok!(Pools::pool_withdraw_unbonded(RawOrigin::Signed(100).into(), pool_id, 0)); + assert_eq!(get_pool_agent(pool_id).total_unbonded(), 1000); + + start_era(6); + // should withdraw 500 more + assert_ok!(Pools::pool_withdraw_unbonded(RawOrigin::Signed(100).into(), pool_id, 0)); + assert_eq!(get_pool_agent(pool_id).total_unbonded(), 1000 + 500); + + start_era(7); + // Nothing to withdraw, still at 1500. 
+ assert_ok!(Pools::pool_withdraw_unbonded(RawOrigin::Signed(100).into(), pool_id, 0)); + assert_eq!(get_pool_agent(pool_id).total_unbonded(), 1500); + }); + } + + #[test] + fn update_nominations() { + ExtBuilder::default().build_and_execute(|| { + start_era(1); + // can't nominate for non-existent pool + assert_noop!( + Pools::nominate(RawOrigin::Signed(100).into(), 1, vec![99]), + PoolsError::::PoolNotFound + ); + + let pool_id = create_pool(100, 1000); + let pool_acc = Pools::generate_bonded_account(pool_id); + assert_ok!(Pools::nominate(RawOrigin::Signed(100).into(), 1, vec![20, 21, 22])); + assert!(Staking::status(&pool_acc) == Ok(StakerStatus::Nominator(vec![20, 21, 22]))); + + start_era(3); + assert_ok!(Pools::nominate(RawOrigin::Signed(100).into(), 1, vec![18, 19, 22])); + assert!(Staking::status(&pool_acc) == Ok(StakerStatus::Nominator(vec![18, 19, 22]))); + }); + } + + #[test] + fn destroy_pool() { + ExtBuilder::default().build_and_execute(|| { + start_era(1); + let creator = 100; + let creator_stake = 1000; + let pool_id = create_pool(creator, creator_stake); + add_delegators_to_pool(pool_id, (300..310).collect(), 200); + + start_era(3); + // lets destroy the pool + assert_ok!(Pools::set_state( + RawOrigin::Signed(creator).into(), + pool_id, + PoolState::Destroying + )); + assert_ok!(Pools::chill(RawOrigin::Signed(creator).into(), pool_id)); + + // unbond all members by the creator/admin + for i in 300..310 { + assert_ok!(Pools::unbond(RawOrigin::Signed(creator).into(), i, 200)); + } + + start_era(6); + // withdraw all members by the creator/admin + for i in 300..310 { + assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(creator).into(), i, 0)); + } + + // unbond creator + assert_ok!(Pools::unbond(RawOrigin::Signed(creator).into(), creator, creator_stake)); + + start_era(9); + System::reset_events(); + // Withdraw self + assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(creator).into(), creator, 0)); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Withdrawn { + member: creator, + pool_id, + balance: creator_stake, + points: creator_stake, + }, + PoolsEvent::MemberRemoved { pool_id, member: creator }, + PoolsEvent::Destroyed { pool_id }, + ] + ); + + // Make sure all data is cleaned up. + assert!(!Agents::::contains_key(Pools::generate_bonded_account(pool_id))); + assert!(!System::account_exists(&Pools::generate_bonded_account(pool_id))); + assert!(!Delegators::::contains_key(creator)); + for i in 300..310 { + assert!(!Delegators::::contains_key(i)); + } + }); + } + + #[test] + fn pool_partially_slashed() { + ExtBuilder::default().build_and_execute(|| { + start_era(1); + let creator = 100; + let creator_stake = 500; + let pool_id = create_pool(creator, creator_stake); + let delegator_stake = 100; + add_delegators_to_pool(pool_id, (300..306).collect(), delegator_stake); + let pool_acc = Pools::generate_bonded_account(pool_id); + + let total_staked = creator_stake + delegator_stake * 6; + assert_eq!(Staking::stake(&pool_acc).unwrap().total, total_staked); + + // lets unbond a delegator each in next eras (2, 3, 4). 
+ start_era(2); + assert_ok!(Pools::unbond(RawOrigin::Signed(300).into(), 300, delegator_stake)); + + start_era(3); + assert_ok!(Pools::unbond(RawOrigin::Signed(301).into(), 301, delegator_stake)); + + start_era(4); + assert_ok!(Pools::unbond(RawOrigin::Signed(302).into(), 302, delegator_stake)); + System::reset_events(); + + // slash the pool at era 3 + assert_eq!( + BondedPools::::get(1).unwrap().points, + creator_stake + delegator_stake * 6 - delegator_stake * 3 + ); + + // pool has currently no pending slash + assert_eq!(Pools::api_pool_pending_slash(pool_id), 0); + + // slash the pool partially + pallet_staking::slashing::do_slash::( + &pool_acc, + 500, + &mut Default::default(), + &mut Default::default(), + 3, + ); + + // pool has now pending slash of 500. + assert_eq!(Pools::api_pool_pending_slash(pool_id), 500); + + assert_eq!( + pool_events_since_last_call(), + vec![ + // 300 did not get slashed as all as it unbonded in an era before slash. + // 301 got slashed 50% of 100 = 50. + PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 6, balance: 50 }, + // 302 got slashed 50% of 100 = 50. + PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 7, balance: 50 }, + // Rest of the pool slashed 50% of 800 = 400. + PoolsEvent::PoolSlashed { pool_id: 1, balance: 400 }, + ] + ); + + // slash is lazy and balance is still locked in user's accounts. + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(creator)), creator_stake); + for i in 300..306 { + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(i)), delegator_stake); + } + assert_eq!( + get_pool_agent(pool_id).ledger.effective_balance(), + Staking::total_stake(&pool_acc).unwrap() + ); + + // pending slash is book kept. + assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, 500); + + // go in some distant future era. + start_era(10); + System::reset_events(); + + // 300 is not slashed and can withdraw all balance. + assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(300).into(), 300, 1)); + assert_eq!( + events_since_last_call(), + vec![Event::Released { agent: pool_acc, delegator: 300, amount: 100 }] + ); + assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, 500); + + // withdraw the other two delegators (301 and 302) who were unbonding. + for i in 301..=302 { + let pre_balance = Balances::free_balance(i); + let pre_pending_slash = get_pool_agent(pool_id).ledger.pending_slash; + assert_ok!(Pools::withdraw_unbonded(RawOrigin::Signed(i).into(), i, 0)); + assert_eq!( + events_since_last_call(), + vec![ + Event::Slashed { agent: pool_acc, delegator: i, amount: 50 }, + Event::Released { agent: pool_acc, delegator: i, amount: 50 }, + ] + ); + assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, pre_pending_slash - 50); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(i)), 0); + assert_eq!(Balances::free_balance(i) - pre_balance, 50); + } + + // let's update all the slash + let slash_reporter = 99; + // give our reporter some balance. + fund(&slash_reporter, 100); + + for i in 303..306 { + let pre_pending_slash = get_pool_agent(pool_id).ledger.pending_slash; + // pool api returns correct pending slash. + assert_eq!(Pools::api_pool_pending_slash(pool_id), pre_pending_slash); + // delegator has pending slash of 50. + assert_eq!(Pools::api_member_pending_slash(i), 50); + // apply slash + assert_ok!(Pools::apply_slash(RawOrigin::Signed(slash_reporter).into(), i)); + // nothing pending anymore. + assert_eq!(Pools::api_member_pending_slash(i), 0); + + // each member is slashed 50% of 100 = 50. 
+				assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, pre_pending_slash - 50);
+				// pool api returns correctly as well.
+				assert_eq!(Pools::api_pool_pending_slash(pool_id), pre_pending_slash - 50);
+				// left with 50.
+				assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(i)), 50);
+			}
+
+			// pool still has the creator's pending slash.
+			assert_eq!(Pools::api_pool_pending_slash(pool_id), 250);
+
+			// reporter is paid SlashRewardFraction of the slash, i.e. 10% of 50 = 5
+			assert_eq!(Balances::free_balance(slash_reporter), 100 + 5 * 3);
+			// creator has pending slash.
+			assert_eq!(Pools::api_member_pending_slash(creator), 250);
+			// slash creator
+			assert_ok!(Pools::apply_slash(RawOrigin::Signed(slash_reporter).into(), creator));
+			// no pending slash anymore.
+			assert_eq!(Pools::api_member_pending_slash(creator), 0);
+
+			// all slash should be applied now.
+			assert_eq!(get_pool_agent(pool_id).ledger.pending_slash, 0);
+			assert_eq!(Pools::api_pool_pending_slash(pool_id), 0);
+			// for creator, 50% of stake should be slashed (250), 10% of which should go to the
+			// reporter (25).
+			assert_eq!(Balances::free_balance(slash_reporter), 115 + 25);
+		});
+	}
+
+	fn create_pool(creator: AccountId, amount: Balance) -> u32 {
+		fund(&creator, amount * 2);
+		assert_ok!(Pools::create(
+			RawOrigin::Signed(creator).into(),
+			amount,
+			creator,
+			creator,
+			creator
+		));
+
+		pallet_nomination_pools::LastPoolId::<T>::get()
+	}
+
+	fn add_delegators_to_pool(pool_id: u32, delegators: Vec<AccountId>, amount: Balance) {
+		for delegator in delegators {
+			fund(&delegator, amount * 2);
+			assert_ok!(Pools::join(RawOrigin::Signed(delegator).into(), amount, pool_id));
+		}
+	}
+
+	fn get_pool_agent(pool_id: u32) -> AgentLedgerOuter<T> {
+		get_agent_ledger(&Pools::generate_bonded_account(pool_id))
+	}
+}
diff --git a/substrate/frame/delegated-staking/src/types.rs b/substrate/frame/delegated-staking/src/types.rs
new file mode 100644
index 0000000000000000000000000000000000000000..24b4573565441301c3a63e4adf159488b44feb73
--- /dev/null
+++ b/substrate/frame/delegated-staking/src/types.rs
@@ -0,0 +1,294 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Basic types used in delegated staking.
+
+use super::*;
+use frame_support::traits::DefensiveSaturating;
+
+/// The type of pot account being created.
+#[derive(Encode, Decode)]
+pub(crate) enum AccountType {
+	/// A proxy delegator account created for a nominator who migrated to an `Agent` account.
+	///
+	/// Funds for unmigrated `delegator` accounts of the `Agent` are kept here.
+	ProxyDelegator,
+}
+
+/// Information about delegation of a `delegator`.
+#[derive(Default, Encode, Clone, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[scale_info(skip_type_params(T))]
+pub struct Delegation<T: Config> {
+	/// The target of delegation.
+	pub agent: T::AccountId,
+	/// The amount delegated.
+	pub amount: BalanceOf<T>,
+}
+
+impl<T: Config> Delegation<T> {
+	/// Get delegation of a `delegator`.
+	pub(crate) fn get(delegator: &T::AccountId) -> Option<Self> {
+		<Delegators<T>>::get(delegator)
+	}
+
+	/// Create and return a new delegation instance.
+	pub(crate) fn new(agent: &T::AccountId, amount: BalanceOf<T>) -> Self {
+		Delegation { agent: agent.clone(), amount }
+	}
+
+	/// Ensure the delegator is either a new delegator or they are adding more delegation to the
+	/// existing agent.
+	///
+	/// Delegators are prevented from delegating to multiple agents at the same time.
+	pub(crate) fn can_delegate(delegator: &T::AccountId, agent: &T::AccountId) -> bool {
+		Delegation::<T>::get(delegator)
+			.map(|delegation| delegation.agent == *agent)
+			.unwrap_or(
+				// all good if it is a new delegator except it should not be an existing agent.
+				!<Agents<T>>::contains_key(delegator),
+			)
+	}
+
+	/// Save self to storage. If the delegation amount is zero, remove the delegation.
+	pub(crate) fn update_or_kill(self, key: &T::AccountId) {
+		// Clean up if no delegation left.
+		if self.amount == Zero::zero() {
+			<Delegators<T>>::remove(key);
+			return
+		}
+
+		<Delegators<T>>::insert(key, self)
+	}
+}
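A minimal sketch of the `can_delegate` rules above, assuming a test runtime `T` and hypothetical accounts `alice`, `agent_a` and `agent_b` (none of these bindings are part of the upstream file):

	// A fresh account may start delegating to any agent.
	assert!(Delegation::<T>::can_delegate(&alice, &agent_a));

	// Once delegating, it may only add to the same agent, never a second one.
	<Delegators<T>>::insert(&alice, Delegation::<T>::new(&agent_a, 10u32.into()));
	assert!(Delegation::<T>::can_delegate(&alice, &agent_a));
	assert!(!Delegation::<T>::can_delegate(&alice, &agent_b));

	// An account that is already an agent can never become a delegator itself.
	assert!(!Delegation::<T>::can_delegate(&agent_a, &agent_b));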
+/// Ledger of all delegations to an `Agent`.
+///
+/// This keeps track of the active balance of the `Agent` that is made up from the funds that
+/// are currently delegated to this `Agent`. It also tracks the pending slashes yet to be
+/// applied among other things.
+#[derive(Default, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
+#[scale_info(skip_type_params(T))]
+pub struct AgentLedger<T: Config> {
+	/// Where the reward should be paid out.
+	pub payee: T::AccountId,
+	/// Sum of all delegated funds to this `Agent`.
+	#[codec(compact)]
+	pub total_delegated: BalanceOf<T>,
+	/// Funds that are withdrawn from core staking but not released to delegator/s. It is a subset
+	/// of `total_delegated` and can never be greater than it.
+	///
+	/// We need this register to ensure that the `Agent` does not bond funds from delegated
+	/// funds that are withdrawn and should be claimed by delegators.
+	#[codec(compact)]
+	pub unclaimed_withdrawals: BalanceOf<T>,
+	/// Slashes that are not yet applied. This affects the effective balance of the `Agent`.
+	#[codec(compact)]
+	pub pending_slash: BalanceOf<T>,
+}
+
+impl<T: Config> AgentLedger<T> {
+	/// Create a new instance of `AgentLedger`.
+	pub(crate) fn new(reward_destination: &T::AccountId) -> Self {
+		AgentLedger {
+			payee: reward_destination.clone(),
+			total_delegated: Zero::zero(),
+			unclaimed_withdrawals: Zero::zero(),
+			pending_slash: Zero::zero(),
+		}
+	}
+
+	/// Get `AgentLedger` from storage.
+	pub(crate) fn get(key: &T::AccountId) -> Option<Self> {
+		<Agents<T>>::get(key)
+	}
+
+	/// Save self to storage with the given key.
+	pub(crate) fn update(self, key: &T::AccountId) {
+		<Agents<T>>::insert(key, self)
+	}
+
+	/// Effective total balance of the `Agent`.
+	///
+	/// This takes into account any slashes reported to `Agent` but unapplied.
+	pub(crate) fn effective_balance(&self) -> BalanceOf<T> {
+		defensive_assert!(
+			self.total_delegated >= self.pending_slash,
+			"slash cannot be higher than actual balance of delegator"
+		);
+
+		// pending slash needs to be burned and cannot be used for stake.
+		self.total_delegated.saturating_sub(self.pending_slash)
+	}
+
+	/// Agent balance that can be staked/bonded in [`T::CoreStaking`].
+	pub(crate) fn stakeable_balance(&self) -> BalanceOf<T> {
+		self.effective_balance().saturating_sub(self.unclaimed_withdrawals)
+	}
+}
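The three registers above are easiest to relate with concrete numbers; a worked example with invented values (`available_to_bond` lives on the `AgentLedgerOuter` wrapper further below):

	// total_delegated       = 1_000  (all funds currently delegated to this agent)
	// pending_slash         =   200  (reported to the agent but not yet applied)
	// unclaimed_withdrawals =   100  (withdrawn from staking, reserved for delegator claims)
	//
	// effective_balance() = 1_000 - 200 = 800  // the slashable part is unusable
	// stakeable_balance() =   800 - 100 = 700  // withdrawn funds must stay claimable
	//
	// and, with 600 of that already bonded in `T::CoreStaking`:
	// available_to_bond() =   700 - 600 = 100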
+/// Wrapper around `AgentLedger` to provide some helper functions to mutate the ledger.
+#[derive(Clone)]
+pub struct AgentLedgerOuter<T: Config> {
+	/// storage key
+	pub key: T::AccountId,
+	/// storage value
+	pub ledger: AgentLedger<T>,
+}
+
+impl<T: Config> AgentLedgerOuter<T> {
+	/// Get `Agent` from storage if it exists or return an error.
+	pub(crate) fn get(agent: &T::AccountId) -> Result<AgentLedgerOuter<T>, DispatchError> {
+		let ledger = AgentLedger::<T>::get(agent).ok_or(Error::<T>::NotAgent)?;
+		Ok(AgentLedgerOuter { key: agent.clone(), ledger })
+	}
+
+	/// Remove funds that are withdrawn from [Config::CoreStaking] but not claimed by a delegator.
+	///
+	/// Checked decrease of delegation amount from `total_delegated` and `unclaimed_withdrawals`
+	/// registers. Consumes self and returns a new instance of self on success.
+	pub(crate) fn remove_unclaimed_withdraw(
+		self,
+		amount: BalanceOf<T>,
+	) -> Result<Self, DispatchError> {
+		let new_total_delegated = self
+			.ledger
+			.total_delegated
+			.checked_sub(&amount)
+			.defensive_ok_or(ArithmeticError::Overflow)?;
+		let new_unclaimed_withdrawals = self
+			.ledger
+			.unclaimed_withdrawals
+			.checked_sub(&amount)
+			.defensive_ok_or(ArithmeticError::Overflow)?;
+
+		Ok(AgentLedgerOuter {
+			ledger: AgentLedger {
+				total_delegated: new_total_delegated,
+				unclaimed_withdrawals: new_unclaimed_withdrawals,
+				..self.ledger
+			},
+			..self
+		})
+	}
+
+	/// Add funds that are withdrawn from [Config::CoreStaking] to be claimed by delegators later.
+	pub(crate) fn add_unclaimed_withdraw(
+		self,
+		amount: BalanceOf<T>,
+	) -> Result<Self, DispatchError> {
+		let new_unclaimed_withdrawals = self
+			.ledger
+			.unclaimed_withdrawals
+			.checked_add(&amount)
+			.defensive_ok_or(ArithmeticError::Overflow)?;
+
+		Ok(AgentLedgerOuter {
+			ledger: AgentLedger { unclaimed_withdrawals: new_unclaimed_withdrawals, ..self.ledger },
+			..self
+		})
+	}
+
+	/// Amount that is delegated but not bonded yet.
+	///
+	/// This importantly does not include `unclaimed_withdrawals` as those should not be bonded
+	/// again unless explicitly requested.
+	pub(crate) fn available_to_bond(&self) -> BalanceOf<T> {
+		let bonded_stake = self.bonded_stake();
+		let stakeable = self.ledger.stakeable_balance();
+
+		defensive_assert!(
+			stakeable >= bonded_stake,
+			"cannot be bonded with more than total amount delegated to agent"
+		);
+
+		stakeable.saturating_sub(bonded_stake)
+	}
+
+	/// Remove slashes from the `AgentLedger`.
+	pub(crate) fn remove_slash(self, amount: BalanceOf<T>) -> Self {
+		let pending_slash = self.ledger.pending_slash.defensive_saturating_sub(amount);
+		let total_delegated = self.ledger.total_delegated.defensive_saturating_sub(amount);
+
+		AgentLedgerOuter {
+			ledger: AgentLedger { pending_slash, total_delegated, ..self.ledger },
+			..self
+		}
+	}
+
+	/// Get the total stake of agent bonded in [`Config::CoreStaking`].
+	pub(crate) fn bonded_stake(&self) -> BalanceOf<T> {
+		T::CoreStaking::total_stake(&self.key).unwrap_or(Zero::zero())
+	}
+
+	/// Returns true if the agent is bonded in [`Config::CoreStaking`].
+	pub(crate) fn is_bonded(&self) -> bool {
+		T::CoreStaking::stake(&self.key).is_ok()
+	}
+
+	/// Returns the reward account registered by the agent.
+	pub(crate) fn reward_account(&self) -> &T::AccountId {
+		&self.ledger.payee
+	}
+
+	/// Save self to storage.
+	pub(crate) fn save(self) {
+		let key = self.key;
+		self.ledger.update(&key)
+	}
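The mutators above consume the ledger and return an updated copy, so call sites follow a read, checked-mutate, persist pattern. A sketch of that flow, with `who` and `amount` as assumed bindings from a hypothetical caller:

	// Read the ledger (fails with `Error::<T>::NotAgent` if `who` is not an agent), apply a
	// checked mutation, and only then write the result back to storage.
	let updated = AgentLedgerOuter::<T>::get(&who)?.add_unclaimed_withdraw(amount)?;
	updated.save();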
+	/// Save self and remove if no delegation left.
+	///
+	/// Returns:
+	/// - true if agent killed.
+	/// - error if the delegate is in an unexpected state.
+	pub(crate) fn update_or_kill(self) -> Result<bool, DispatchError> {
+		let key = self.key;
+		// see if delegate can be killed
+		if self.ledger.total_delegated == Zero::zero() {
+			ensure!(
+				self.ledger.unclaimed_withdrawals == Zero::zero() &&
+					self.ledger.pending_slash == Zero::zero(),
+				Error::<T>::BadState
+			);
+			<Agents<T>>::remove(key);
+			return Ok(true)
+		}
+		self.ledger.update(&key);
+		Ok(false)
+	}
+
+	/// Reloads self from storage.
+	pub(crate) fn reload(self) -> Result<AgentLedgerOuter<T>, DispatchError> {
+		Self::get(&self.key)
+	}
+
+	/// Balance of `Agent` that is not bonded.
+	///
+	/// This is similar to [Self::available_to_bond] except it also includes
+	/// `unclaimed_withdrawals` of `Agent`.
+	#[cfg(test)]
+	pub(crate) fn total_unbonded(&self) -> BalanceOf<T> {
+		let bonded_stake = self.bonded_stake();
+
+		let net_balance = self.ledger.effective_balance();
+
+		assert!(net_balance >= bonded_stake, "cannot be bonded with more than the agent balance");
+
+		net_balance.saturating_sub(bonded_stake)
+	}
+}
diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml
index edd2d742b5069a73212a291aaf440de40e96fd24..7f182447ead61a0b694b7dc21a888fd4480e1197 100644
--- a/substrate/frame/democracy/Cargo.toml
+++ b/substrate/frame/democracy/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [
 	"derive",
 ] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs
index aa66137ad880403a3a3934e121e358bc193d0826..799d614c37f4a87a147c1d1e0c21febc1ad68cd3 100644
--- a/substrate/frame/democracy/src/benchmarking.rs
+++ b/substrate/frame/democracy/src/benchmarking.rs
@@ -108,7 +108,7 @@ benchmarks! {
 		whitelist_account!(caller);
 	}: _(RawOrigin::Signed(caller), proposal, value)
 	verify {
-		assert_eq!(Democracy::<T>::public_props().len(), p as usize, "Proposals not created.");
+		assert_eq!(PublicProps::<T>::get().len(), p as usize, "Proposals not created.");
 	}
 
 	second {
@@ -122,12 +122,12 @@ benchmarks! {
 			Democracy::<T>::second(RawOrigin::Signed(seconder).into(), 0)?;
 		}
 
-		let deposits = Democracy::<T>::deposit_of(0).ok_or("Proposal not created")?;
+		let deposits = DepositOf::<T>::get(0).ok_or("Proposal not created")?;
 		assert_eq!(deposits.0.len(), (T::MaxDeposits::get() - 1) as usize, "Seconds not recorded");
 		whitelist_account!(caller);
 	}: _(RawOrigin::Signed(caller), 0)
 	verify {
-		let deposits = Democracy::<T>::deposit_of(0).ok_or("Proposal not created")?;
+		let deposits = DepositOf::<T>::get(0).ok_or("Proposal not created")?;
 		assert_eq!(deposits.0.len(), (T::MaxDeposits::get()) as usize, "`second` benchmark did not work");
 	}
 
@@ -175,7 +175,7 @@ benchmarks! {
 		// Change vote from aye to nay
 		let nay = Vote { aye: false, conviction: Conviction::Locked1x };
 		let new_vote = AccountVote::Standard { vote: nay, balance: 1000u32.into() };
-		let ref_index = Democracy::<T>::referendum_count() - 1;
+		let ref_index = ReferendumCount::<T>::get() - 1;
 
 		// This tests when a user changes a vote
 		whitelist_account!(caller);
@@ -186,7 +186,7 @@ benchmarks!
{ _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was incorrectly added"); - let referendum_info = Democracy::::referendum_info(ref_index) + let referendum_info = ReferendumInfoOf::::get(ref_index) .ok_or("referendum doesn't exist")?; let tally = match referendum_info { ReferendumInfo::Ongoing(r) => r.tally, @@ -261,7 +261,7 @@ benchmarks! { }: _(origin, proposal) verify { // External proposal created - ensure!(>::exists(), "External proposal didn't work"); + ensure!(NextExternal::::exists(), "External proposal didn't work"); } external_propose_majority { @@ -271,7 +271,7 @@ benchmarks! { }: _(origin, proposal) verify { // External proposal created - ensure!(>::exists(), "External proposal didn't work"); + ensure!(NextExternal::::exists(), "External proposal didn't work"); } external_propose_default { @@ -281,7 +281,7 @@ benchmarks! { }: _(origin, proposal) verify { // External proposal created - ensure!(>::exists(), "External proposal didn't work"); + ensure!(NextExternal::::exists(), "External proposal didn't work"); } fast_track { @@ -303,7 +303,7 @@ benchmarks! { let delay = 0u32; }: _(origin_fast_track, proposal_hash, voting_period, delay.into()) verify { - assert_eq!(Democracy::::referendum_count(), 1, "referendum not created"); + assert_eq!(ReferendumCount::::get(), 1, "referendum not created"); assert_last_event::(crate::Event::MetadataTransferred { prev_owner: MetadataOwner::External, owner: MetadataOwner::Referendum(0), @@ -338,7 +338,7 @@ benchmarks! { }: _(origin, proposal_hash) verify { assert!(NextExternal::::get().is_none()); - let (_, new_vetoers) = >::get(&proposal_hash).ok_or("no blacklist")?; + let (_, new_vetoers) = Blacklist::::get(&proposal_hash).ok_or("no blacklist")?; assert_eq!(new_vetoers.len(), T::MaxBlacklisted::get() as usize, "vetoers not added"); } @@ -382,7 +382,7 @@ benchmarks! { add_referendum::(i); } - assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); + assert_eq!(ReferendumCount::::get(), r, "referenda not created"); // Launch external LastTabledWasExternal::::put(false); @@ -393,15 +393,15 @@ benchmarks! { let call = Call::::external_propose_majority { proposal }; call.dispatch_bypass_filter(origin)?; // External proposal created - ensure!(>::exists(), "External proposal didn't work"); + ensure!(NextExternal::::exists(), "External proposal didn't work"); let block_number = T::LaunchPeriod::get(); }: { Democracy::::on_initialize(block_number) } verify { // One extra because of next external - assert_eq!(Democracy::::referendum_count(), r + 1, "referenda not created"); - ensure!(!>::exists(), "External wasn't taken"); + assert_eq!(ReferendumCount::::get(), r + 1, "referenda not created"); + ensure!(!NextExternal::::exists(), "External wasn't taken"); // All but the new next external should be finished for i in 0 .. r { @@ -422,7 +422,7 @@ benchmarks! { add_referendum::(i); } - assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); + assert_eq!(ReferendumCount::::get(), r, "referenda not created"); // Launch public assert!(add_proposal::(r).is_ok(), "proposal not created"); @@ -433,7 +433,7 @@ benchmarks! { }: { Democracy::::on_initialize(block_number) } verify { // One extra because of next public - assert_eq!(Democracy::::referendum_count(), r + 1, "proposal not accepted"); + assert_eq!(ReferendumCount::::get(), r + 1, "proposal not accepted"); // All should be finished for i in 0 .. r { @@ -461,8 +461,8 @@ benchmarks! 
{ ReferendumInfoOf::::insert(key, info); } - assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); - assert_eq!(Democracy::::lowest_unbaked(), 0, "invalid referenda init"); + assert_eq!(ReferendumCount::::get(), r, "referenda not created"); + assert_eq!(LowestUnbaked::::get(), 0, "invalid referenda init"); }: { Democracy::::on_initialize(1u32.into()) } verify { @@ -491,8 +491,8 @@ benchmarks! { ReferendumInfoOf::::insert(key, info); } - assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); - assert_eq!(Democracy::::lowest_unbaked(), 0, "invalid referenda init"); + assert_eq!(ReferendumCount::::get(), r, "referenda not created"); + assert_eq!(LowestUnbaked::::get(), 0, "invalid referenda init"); let block_number = T::LaunchPeriod::get(); diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index f3d33a72f3ad1a088d4dc050d1e637acd162db7d..19cdc754659d34b08b3c149e5766ea7bb6c01219 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -346,12 +346,10 @@ pub mod pallet { /// The number of (public) proposals that have been made so far. #[pallet::storage] - #[pallet::getter(fn public_prop_count)] pub type PublicPropCount = StorageValue<_, PropIndex, ValueQuery>; /// The public proposals. Unsorted. The second item is the proposal. #[pallet::storage] - #[pallet::getter(fn public_props)] pub type PublicProps = StorageValue< _, BoundedVec<(PropIndex, BoundedCallOf, T::AccountId), T::MaxProposals>, @@ -362,7 +360,6 @@ pub mod pallet { /// /// TWOX-NOTE: Safe, as increasing integer keys are safe. #[pallet::storage] - #[pallet::getter(fn deposit_of)] pub type DepositOf = StorageMap< _, Twox64Concat, @@ -372,20 +369,17 @@ pub mod pallet { /// The next free referendum index, aka the number of referenda started so far. #[pallet::storage] - #[pallet::getter(fn referendum_count)] pub type ReferendumCount = StorageValue<_, ReferendumIndex, ValueQuery>; /// The lowest referendum index representing an unbaked referendum. Equal to /// `ReferendumCount` if there isn't a unbaked referendum. #[pallet::storage] - #[pallet::getter(fn lowest_unbaked)] pub type LowestUnbaked = StorageValue<_, ReferendumIndex, ValueQuery>; /// Information concerning any given referendum. /// /// TWOX-NOTE: SAFE as indexes are not under an attacker’s control. 
 	#[pallet::storage]
-	#[pallet::getter(fn referendum_info)]
 	pub type ReferendumInfoOf<T: Config> = StorageMap<
 		_,
 		Twox64Concat,
@@ -595,15 +589,15 @@ pub mod pallet {
 			let who = T::SubmitOrigin::ensure_origin(origin)?;
 			ensure!(value >= T::MinimumDeposit::get(), Error::<T>::ValueLow);
 
-			let index = Self::public_prop_count();
+			let index = PublicPropCount::<T>::get();
 			let real_prop_count = PublicProps::<T>::decode_len().unwrap_or(0) as u32;
 			let max_proposals = T::MaxProposals::get();
 			ensure!(real_prop_count < max_proposals, Error::<T>::TooMany);
 			let proposal_hash = proposal.hash();
 
-			if let Some((until, _)) = <Blacklist<T>>::get(proposal_hash) {
+			if let Some((until, _)) = Blacklist::<T>::get(proposal_hash) {
 				ensure!(
-					<frame_system::Pallet<T>>::block_number() >= until,
+					frame_system::Pallet::<T>::block_number() >= until,
 					Error::<T>::ProposalBlacklisted,
 				);
 			}
@@ -638,11 +632,11 @@ pub mod pallet {
 			let seconds =
 				Self::len_of_deposit_of(proposal).ok_or(Error::<T>::ProposalMissing)?;
 			ensure!(seconds < T::MaxDeposits::get(), Error::<T>::TooMany);
-			let mut deposit = Self::deposit_of(proposal).ok_or(Error::<T>::ProposalMissing)?;
+			let mut deposit = DepositOf::<T>::get(proposal).ok_or(Error::<T>::ProposalMissing)?;
 			T::Currency::reserve(&who, deposit.1)?;
 			let ok = deposit.0.try_push(who.clone()).is_ok();
 			debug_assert!(ok, "`seconds` is below static limit; `try_insert` should succeed; qed");
-			<DepositOf<T>>::insert(proposal, deposit);
+			DepositOf::<T>::insert(proposal, deposit);
 			Self::deposit_event(Event::<T>::Seconded { seconder: who, prop_index: proposal });
 			Ok(())
 		}
@@ -683,9 +677,9 @@ pub mod pallet {
 			let status = Self::referendum_status(ref_index)?;
 			let h = status.proposal.hash();
-			ensure!(!<Cancellations<T>>::contains_key(h), Error::<T>::AlreadyCanceled);
+			ensure!(!Cancellations::<T>::contains_key(h), Error::<T>::AlreadyCanceled);
 
-			<Cancellations<T>>::insert(h, true);
+			Cancellations::<T>::insert(h, true);
 			Self::internal_cancel_referendum(ref_index);
 			Ok(())
 		}
@@ -703,14 +697,14 @@ pub mod pallet {
 			proposal: BoundedCallOf<T>,
 		) -> DispatchResult {
 			T::ExternalOrigin::ensure_origin(origin)?;
-			ensure!(!<NextExternal<T>>::exists(), Error::<T>::DuplicateProposal);
-			if let Some((until, _)) = <Blacklist<T>>::get(proposal.hash()) {
+			ensure!(!NextExternal::<T>::exists(), Error::<T>::DuplicateProposal);
+			if let Some((until, _)) = Blacklist::<T>::get(proposal.hash()) {
 				ensure!(
-					<frame_system::Pallet<T>>::block_number() >= until,
+					frame_system::Pallet::<T>::block_number() >= until,
 					Error::<T>::ProposalBlacklisted,
 				);
 			}
-			<NextExternal<T>>::put((proposal, VoteThreshold::SuperMajorityApprove));
+			NextExternal::<T>::put((proposal, VoteThreshold::SuperMajorityApprove));
 			Ok(())
 		}
@@ -732,7 +726,7 @@ pub mod pallet {
 			proposal: BoundedCallOf<T>,
 		) -> DispatchResult {
 			T::ExternalMajorityOrigin::ensure_origin(origin)?;
-			<NextExternal<T>>::put((proposal, VoteThreshold::SimpleMajority));
+			NextExternal::<T>::put((proposal, VoteThreshold::SimpleMajority));
 			Ok(())
 		}
@@ -754,7 +748,7 @@ pub mod pallet {
 			proposal: BoundedCallOf<T>,
 		) -> DispatchResult {
 			T::ExternalDefaultOrigin::ensure_origin(origin)?;
-			<NextExternal<T>>::put((proposal, VoteThreshold::SuperMajorityAgainst));
+			NextExternal::<T>::put((proposal, VoteThreshold::SuperMajorityAgainst));
 			Ok(())
 		}
@@ -800,15 +794,15 @@ pub mod pallet {
 			ensure!(voting_period > Zero::zero(), Error::<T>::VotingPeriodLow);
 			let (ext_proposal, threshold) =
-				<NextExternal<T>>::get().ok_or(Error::<T>::ProposalMissing)?;
+				NextExternal::<T>::get().ok_or(Error::<T>::ProposalMissing)?;
 			ensure!(
 				threshold != VoteThreshold::SuperMajorityApprove,
 				Error::<T>::NotSimpleMajority,
 			);
 			ensure!(proposal_hash == ext_proposal.hash(), Error::<T>::InvalidHash);
-			<NextExternal<T>>::kill();
-			let now = <frame_system::Pallet<T>>::block_number();
+			NextExternal::<T>::kill();
+			let now = frame_system::Pallet::<T>::block_number();
 			let ref_index =
Self::inject_referendum( now.saturating_add(voting_period), ext_proposal, @@ -840,7 +834,7 @@ pub mod pallet { } let mut existing_vetoers = - >::get(&proposal_hash).map(|pair| pair.1).unwrap_or_default(); + Blacklist::::get(&proposal_hash).map(|pair| pair.1).unwrap_or_default(); let insert_position = existing_vetoers.binary_search(&who).err().ok_or(Error::::AlreadyVetoed)?; existing_vetoers @@ -848,11 +842,11 @@ pub mod pallet { .map_err(|_| Error::::TooMany)?; let until = - >::block_number().saturating_add(T::CooloffPeriod::get()); - >::insert(&proposal_hash, (until, existing_vetoers)); + frame_system::Pallet::::block_number().saturating_add(T::CooloffPeriod::get()); + Blacklist::::insert(&proposal_hash, (until, existing_vetoers)); Self::deposit_event(Event::::Vetoed { who, proposal_hash, until }); - >::kill(); + NextExternal::::kill(); Self::clear_metadata(MetadataOwner::External); Ok(()) } @@ -943,7 +937,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::clear_public_proposals())] pub fn clear_public_proposals(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; - >::kill(); + PublicProps::::kill(); Ok(()) } @@ -1146,7 +1140,7 @@ pub mod pallet { ) -> DispatchResult { match owner { MetadataOwner::External => { - let (_, threshold) = >::get().ok_or(Error::::NoProposal)?; + let (_, threshold) = NextExternal::::get().ok_or(Error::::NoProposal)?; Self::ensure_external_origin(threshold, origin)?; }, MetadataOwner::Proposal(index) => { @@ -1201,7 +1195,7 @@ impl Pallet { /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal /// index. pub fn backing_for(proposal: PropIndex) -> Option> { - Self::deposit_of(proposal).map(|(l, d)| d.saturating_mul((l.len() as u32).into())) + DepositOf::::get(proposal).map(|(l, d)| d.saturating_mul((l.len() as u32).into())) } /// Get all referenda ready for tally at block `n`. @@ -1209,8 +1203,8 @@ impl Pallet { n: BlockNumberFor, ) -> Vec<(ReferendumIndex, ReferendumStatus, BoundedCallOf, BalanceOf>)> { - let next = Self::lowest_unbaked(); - let last = Self::referendum_count(); + let next = LowestUnbaked::::get(); + let last = ReferendumCount::::get(); Self::maturing_referenda_at_inner(n, next..last) } @@ -1221,7 +1215,7 @@ impl Pallet { { range .into_iter() - .map(|i| (i, Self::referendum_info(i))) + .map(|i| (i, ReferendumInfoOf::::get(i))) .filter_map(|(i, maybe_info)| match maybe_info { Some(ReferendumInfo::Ongoing(status)) => Some((i, status)), _ => None, @@ -1238,8 +1232,8 @@ impl Pallet { threshold: VoteThreshold, delay: BlockNumberFor, ) -> ReferendumIndex { - >::inject_referendum( - >::block_number().saturating_add(T::VotingPeriod::get()), + Pallet::::inject_referendum( + frame_system::Pallet::::block_number().saturating_add(T::VotingPeriod::get()), proposal, threshold, delay, @@ -1529,12 +1523,12 @@ impl Pallet { threshold: VoteThreshold, delay: BlockNumberFor, ) -> ReferendumIndex { - let ref_index = Self::referendum_count(); + let ref_index = ReferendumCount::::get(); ReferendumCount::::put(ref_index + 1); let status = ReferendumStatus { end, proposal, threshold, delay, tally: Default::default() }; let item = ReferendumInfo::Ongoing(status); - >::insert(ref_index, item); + ReferendumInfoOf::::insert(ref_index, item); Self::deposit_event(Event::::Started { ref_index, threshold }); ref_index } @@ -1551,7 +1545,7 @@ impl Pallet { /// Table the waiting external proposal for a vote, if there is one. 
fn launch_external(now: BlockNumberFor) -> DispatchResult { - if let Some((proposal, threshold)) = >::take() { + if let Some((proposal, threshold)) = NextExternal::::take() { LastTabledWasExternal::::put(true); Self::deposit_event(Event::::ExternalTabled); let ref_index = Self::inject_referendum( @@ -1569,15 +1563,15 @@ impl Pallet { /// Table the waiting public proposal with the highest backing for a vote. fn launch_public(now: BlockNumberFor) -> DispatchResult { - let mut public_props = Self::public_props(); + let mut public_props = PublicProps::::get(); if let Some((winner_index, _)) = public_props.iter().enumerate().max_by_key( // defensive only: All current public proposals have an amount locked |x| Self::backing_for((x.1).0).defensive_unwrap_or_else(Zero::zero), ) { let (prop_index, proposal, _) = public_props.swap_remove(winner_index); - >::put(public_props); + PublicProps::::put(public_props); - if let Some((depositors, deposit)) = >::take(prop_index) { + if let Some((depositors, deposit)) = DepositOf::::take(prop_index) { // refund depositors for d in depositors.iter() { T::Currency::unreserve(d, deposit); @@ -1642,8 +1636,8 @@ impl Pallet { let max_block_weight = T::BlockWeights::get().max_block; let mut weight = Weight::zero(); - let next = Self::lowest_unbaked(); - let last = Self::referendum_count(); + let next = LowestUnbaked::::get(); + let last = ReferendumCount::::get(); let r = last.saturating_sub(next); // pick out another public referendum if it's time. @@ -1674,9 +1668,9 @@ impl Pallet { // * We shouldn't iterate more than `LaunchPeriod/VotingPeriod + 1` times because the number // of unbaked referendum is bounded by this number. In case those number have changed in a // runtime upgrade the formula should be adjusted but the bound should still be sensible. - >::mutate(|ref_index| { + LowestUnbaked::::mutate(|ref_index| { while *ref_index < last && - Self::referendum_info(*ref_index) + ReferendumInfoOf::::get(*ref_index) .map_or(true, |info| matches!(info, ReferendumInfo::Finished { .. })) { *ref_index += 1 @@ -1692,7 +1686,7 @@ impl Pallet { fn len_of_deposit_of(proposal: PropIndex) -> Option { // DepositOf first tuple element is a vec, decoding its len is equivalent to decode a // `Compact`. - decode_compact_u32_at(&>::hashed_key_for(proposal)) + decode_compact_u32_at(&DepositOf::::hashed_key_for(proposal)) } /// Return a proposal of an index. 
diff --git a/substrate/frame/democracy/src/migrations/v1.rs b/substrate/frame/democracy/src/migrations/v1.rs index 5e423b9ab6eff7d2a96f94416bdd425827869fba..47f8df017f1e18d19289477936dc2a3c380f8e2c 100644 --- a/substrate/frame/democracy/src/migrations/v1.rs +++ b/substrate/frame/democracy/src/migrations/v1.rs @@ -108,7 +108,7 @@ pub mod v1 { .collect::>(); let bounded = BoundedVec::<_, T::MaxProposals>::truncate_from(props.clone()); PublicProps::::put(bounded); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); if props.len() as u32 > T::MaxProposals::get() { log::error!( @@ -126,7 +126,7 @@ pub mod v1 { StorageVersion::new(1).put::>(); - weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)) + weight.saturating_add(T::DbWeight::get().reads_writes(1, 3)) } #[cfg(feature = "try-runtime")] diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index e2946ba98156ad3480f85ae4e86ba4a2036ff59c..9303c0da504f366288f6f06cb33c54abbd125321 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -194,7 +194,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { #[test] fn params_should_work() { new_test_ext().execute_with(|| { - assert_eq!(Democracy::referendum_count(), 0); + assert_eq!(ReferendumCount::::get(), 0); assert_eq!(Balances::free_balance(42), 0); assert_eq!(Balances::total_issuance(), 210); }); diff --git a/substrate/frame/democracy/src/tests/cancellation.rs b/substrate/frame/democracy/src/tests/cancellation.rs index b4c42f9c79053ad6db1641e7e431dc4be7661e9f..eeb1df301db8ffda60e349bb7dd2fa7c673b4be4 100644 --- a/substrate/frame/democracy/src/tests/cancellation.rs +++ b/substrate/frame/democracy/src/tests/cancellation.rs @@ -30,14 +30,14 @@ fn cancel_referendum_should_work() { ); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_ok!(Democracy::cancel_referendum(RuntimeOrigin::root(), r.into())); - assert_eq!(Democracy::lowest_unbaked(), 0); + assert_eq!(LowestUnbaked::::get(), 0); next_block(); next_block(); - assert_eq!(Democracy::lowest_unbaked(), 1); - assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); + assert_eq!(LowestUnbaked::::get(), 1); + assert_eq!(LowestUnbaked::::get(), ReferendumCount::::get()); assert_eq!(Balances::free_balance(42), 0); }); } @@ -56,7 +56,7 @@ fn emergency_cancel_should_work() { assert_noop!(Democracy::emergency_cancel(RuntimeOrigin::signed(3), r), BadOrigin); assert_ok!(Democracy::emergency_cancel(RuntimeOrigin::signed(4), r)); - assert!(Democracy::referendum_info(r).is_none()); + assert!(ReferendumInfoOf::::get(r).is_none()); // some time later... diff --git a/substrate/frame/democracy/src/tests/external_proposing.rs b/substrate/frame/democracy/src/tests/external_proposing.rs index 08b497ab4b90e6ce458b7462c521b0d9d6d78640..78ef2904e5b6c29bf78f594ac3ffaff5c00c1c2a 100644 --- a/substrate/frame/democracy/src/tests/external_proposing.rs +++ b/substrate/frame/democracy/src/tests/external_proposing.rs @@ -24,12 +24,12 @@ fn veto_external_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); - assert!(>::exists()); + assert!(NextExternal::::exists()); let h = set_balance_proposal(2).hash(); assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); // cancelled. 
- assert!(!>::exists()); + assert!(!NextExternal::::exists()); // fails - same proposal can't be resubmitted. assert_noop!( Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),), @@ -46,7 +46,7 @@ fn veto_external_works() { fast_forward_to(2); // works; as we're out of the cooloff period. assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); - assert!(>::exists()); + assert!(NextExternal::::exists()); // 3 can't veto the same thing twice. assert_noop!( @@ -57,7 +57,7 @@ fn veto_external_works() { // 4 vetoes. assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(4), h)); // cancelled again. - assert!(!>::exists()); + assert!(!NextExternal::::exists()); fast_forward_to(3); // same proposal fails as we're still in cooloff diff --git a/substrate/frame/democracy/src/tests/fast_tracking.rs b/substrate/frame/democracy/src/tests/fast_tracking.rs index 85e7792a4c2edf3728f2b0215fe94fc0dc3efdbb..89dce1dffe13eb5f950ee874d0366af3983f24a9 100644 --- a/substrate/frame/democracy/src/tests/fast_tracking.rs +++ b/substrate/frame/democracy/src/tests/fast_tracking.rs @@ -33,13 +33,13 @@ fn fast_track_referendum_works() { set_balance_proposal(2) )); let hash = note_preimage(1); - assert!(>::get(MetadataOwner::External).is_none()); + assert!(MetadataOf::::get(MetadataOwner::External).is_none()); assert_ok!(Democracy::set_metadata( RuntimeOrigin::signed(3), MetadataOwner::External, Some(hash), ),); - assert!(>::get(MetadataOwner::External).is_some()); + assert!(MetadataOf::::get(MetadataOwner::External).is_some()); assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); assert_ok!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 2, 0)); assert_eq!( @@ -53,8 +53,8 @@ fn fast_track_referendum_works() { }) ); // metadata reset from the external proposal to the referendum. - assert!(>::get(MetadataOwner::External).is_none()); - assert!(>::get(MetadataOwner::Referendum(0)).is_some()); + assert!(MetadataOf::::get(MetadataOwner::External).is_none()); + assert!(MetadataOf::::get(MetadataOwner::Referendum(0)).is_some()); }); } diff --git a/substrate/frame/democracy/src/tests/metadata.rs b/substrate/frame/democracy/src/tests/metadata.rs index 1b6d66a8bc4486c555a5f0e1241f76280b33eeb7..341f14e5586bf9e0e3191eb8dbf87f1515739636 100644 --- a/substrate/frame/democracy/src/tests/metadata.rs +++ b/substrate/frame/democracy/src/tests/metadata.rs @@ -33,7 +33,7 @@ fn set_external_metadata_works() { ); // create an external proposal. assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2))); - assert!(>::exists()); + assert!(NextExternal::::exists()); // fails to set metadata with non external origin. assert_noop!( Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(invalid_hash)), @@ -61,7 +61,7 @@ fn clear_metadata_works() { let owner = MetadataOwner::External; // create an external proposal. assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2))); - assert!(>::exists()); + assert!(NextExternal::::exists()); // set metadata. let hash = note_preimage(1); assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(2), owner.clone(), Some(hash))); @@ -87,7 +87,7 @@ fn set_proposal_metadata_works() { // create an external proposal. assert_ok!(propose_set_balance(1, 2, 5)); // metadata owner is a public proposal. 
- let owner = MetadataOwner::Proposal(Democracy::public_prop_count() - 1); + let owner = MetadataOwner::Proposal(PublicPropCount::::get() - 1); // fails to set non-existing preimage. assert_noop!( Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(invalid_hash)), @@ -115,7 +115,7 @@ fn clear_proposal_metadata_works() { // create an external proposal. assert_ok!(propose_set_balance(1, 2, 5)); // metadata owner is a public proposal. - let owner = MetadataOwner::Proposal(Democracy::public_prop_count() - 1); + let owner = MetadataOwner::Proposal(PublicPropCount::::get() - 1); // set metadata. let hash = note_preimage(1); assert_ok!(Democracy::set_metadata(RuntimeOrigin::signed(1), owner.clone(), Some(hash))); diff --git a/substrate/frame/democracy/src/tests/public_proposals.rs b/substrate/frame/democracy/src/tests/public_proposals.rs index 69a2d3e25686f6a17337ff78c81d3c69a13ac345..01f47947f8e50e9be8fbcb9072b861fdf84f923e 100644 --- a/substrate/frame/democracy/src/tests/public_proposals.rs +++ b/substrate/frame/democracy/src/tests/public_proposals.rs @@ -97,10 +97,10 @@ fn cancel_proposal_should_work() { MetadataOwner::Proposal(0), Some(hash) )); - assert!(>::get(MetadataOwner::Proposal(0)).is_some()); + assert!(MetadataOf::::get(MetadataOwner::Proposal(0)).is_some()); assert_ok!(Democracy::cancel_proposal(RuntimeOrigin::root(), 0)); // metadata cleared, preimage unrequested. - assert!(>::get(MetadataOwner::Proposal(0)).is_none()); + assert!(MetadataOf::::get(MetadataOwner::Proposal(0)).is_none()); System::assert_has_event(crate::Event::ProposalCanceled { prop_index: 0 }.into()); System::assert_last_event( crate::Event::MetadataCleared { owner: MetadataOwner::Proposal(0), hash }.into(), diff --git a/substrate/frame/democracy/src/tests/scheduling.rs b/substrate/frame/democracy/src/tests/scheduling.rs index fdbc8fdb34947ae1454355ebd4afd6493e7de992..43f51628aaf5e6929434185787ad481851628bd7 100644 --- a/substrate/frame/democracy/src/tests/scheduling.rs +++ b/substrate/frame/democracy/src/tests/scheduling.rs @@ -30,10 +30,10 @@ fn simple_passing_should_work() { ); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); - assert_eq!(Democracy::lowest_unbaked(), 0); + assert_eq!(LowestUnbaked::::get(), 0); next_block(); next_block(); - assert_eq!(Democracy::lowest_unbaked(), 1); + assert_eq!(LowestUnbaked::::get(), 1); assert_eq!(Balances::free_balance(42), 2); }); } @@ -140,16 +140,16 @@ fn lowest_unbaked_should_be_sensible() { assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r2, aye(1))); // r3 is canceled assert_ok!(Democracy::cancel_referendum(RuntimeOrigin::root(), r3.into())); - assert_eq!(Democracy::lowest_unbaked(), 0); + assert_eq!(LowestUnbaked::::get(), 0); next_block(); // r2 ends with approval - assert_eq!(Democracy::lowest_unbaked(), 0); + assert_eq!(LowestUnbaked::::get(), 0); next_block(); // r1 ends with approval - assert_eq!(Democracy::lowest_unbaked(), 3); - assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); + assert_eq!(LowestUnbaked::::get(), 3); + assert_eq!(LowestUnbaked::::get(), ReferendumCount::::get()); // r2 is executed assert_eq!(Balances::free_balance(42), 2); diff --git a/substrate/frame/democracy/src/tests/voting.rs b/substrate/frame/democracy/src/tests/voting.rs index f096b633ee6d4a2b226dc5be0421523edb453de5..61b80cc97fed6ad458a1fcda01efe5bd89799ad4 100644 --- a/substrate/frame/democracy/src/tests/voting.rs +++ 
b/substrate/frame/democracy/src/tests/voting.rs @@ -65,13 +65,13 @@ fn single_proposal_should_work() { System::set_block_number(0); assert_ok!(propose_set_balance(1, 2, 1)); let r = 0; - assert!(Democracy::referendum_info(r).is_none()); + assert!(ReferendumInfoOf::::get(r).is_none()); // start of 2 => next referendum scheduled. fast_forward_to(2); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - assert_eq!(Democracy::referendum_count(), 1); + assert_eq!(ReferendumCount::::get(), 1); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index 2074b51f50f475f077f814593da74b31b5dba6e0..43e3e7079d2fa78aa1e3515b41df87446ac5b370 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index ea47530966c2092e2091812e3dab2eb901a0ee99..43d051b1a89f3818ee1d5225f8de590bef7fc20f 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] parking_lot = "0.12.1" -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } scale-info = { version = "2.11.1", features = ["derive"] } log = { workspace = true } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index c00bb66ea13044f8b11aa781e30896e03225aea8..2b1f1335c6fe888c3ff756b00327b74d5ba9f279 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -322,7 +322,7 @@ fn automatic_unbonding_pools() { let init_free_balance_2 = Balances::free_balance(2); let init_free_balance_3 = Balances::free_balance(3); - let pool_bonded_account = Pools::create_bonded_account(1); + let pool_bonded_account = Pools::generate_bonded_account(1); // creates a pool with 5 bonded, owned by 1. 
assert_ok!(Pools::create(RuntimeOrigin::signed(1), 5, 1, 1, 1)); diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 3bdd9be17bdfde31f3f52a78d20e6b3701169b96..2e900f807ba605b966b8744c0b03392d1b0768fe 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -93,7 +93,6 @@ pub(crate) type Moment = u32; impl frame_system::Config for Runtime { type Block = Block; type AccountData = pallet_balances::AccountData; - type BlockHashCount = ConstU32<10>; } const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); @@ -301,7 +300,7 @@ impl pallet_nomination_pools::Config for Runtime { type RewardCounter = sp_runtime::FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakeAdapter = pallet_nomination_pools::adapter::TransferStake; type PostUnbondingPoolsWindow = ConstU32<2>; type PalletId = PoolsPalletId; type MaxMetadataLen = ConstU32<256>; diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index 0d9748ee34e56fa5d43f0e238d2d69eda548eaec..1c63f90720f7b010fb584dc44f2e84ad78c61021 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = "solution-type" } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/election-provider-support/benchmarking/Cargo.toml b/substrate/frame/election-provider-support/benchmarking/Cargo.toml index 6e13f17bec1bbf237914df6baffdc2ede46c05f9..c2e644cfefab99145b839bf1c57917369a5f5cb2 100644 --- a/substrate/frame/election-provider-support/benchmarking/Cargo.toml +++ b/substrate/frame/election-provider-support/benchmarking/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 09c6a492dd0d6ba498111fc2c8926212e7b35fc2..3f8893dad6f2094ae75dec2e6e9de00d0b091495 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -24,7 +24,7 @@ proc-macro2 = "1.0.56" proc-macro-crate = "3.0.0" [dev-dependencies] -parity-scale-codec = "3.6.1" +parity-scale-codec = "3.6.12" scale-info = "2.11.1" sp-arithmetic = { path = "../../../primitives/arithmetic" } # used by generate_solution_type: diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml 
b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index 1fb9e2387ed8de1be004faf876315c21eeac9696..98da507384fd956e21a01cf075556bbe0b6b916b 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -20,7 +20,7 @@ clap = { version = "4.5.3", features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["small_rng", "std"] } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = ".." } frame-election-provider-support = { path = "../.." } diff --git a/substrate/frame/election-provider-support/solution-type/src/lib.rs b/substrate/frame/election-provider-support/solution-type/src/lib.rs index 80773f6fb476859746677711401d70bd955b8ad7..1a88f0cf835fbe813da9d990ef4fd98651096614 100644 --- a/substrate/frame/election-provider-support/solution-type/src/lib.rs +++ b/substrate/frame/election-provider-support/solution-type/src/lib.rs @@ -263,7 +263,16 @@ fn imports() -> Result { use _feps::private as _fepsp; )) }, - Err(e) => Err(syn::Error::new(Span::call_site(), e)), + Err(e) => match crate_name("polkadot-sdk") { + Ok(FoundCrate::Name(polkadot_sdk)) => { + let ident = syn::Ident::new(&polkadot_sdk, Span::call_site()); + Ok(quote!( + use #ident::frame_election_provider_support as _feps; + use _feps::private as _fepsp; + )) + }, + _ => Err(syn::Error::new(Span::call_site(), e)), + }, } } diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml index 81dc48476a065b60e41ce6f703317b77ae75b0fe..dbcb740518b1379f54cd4265973b445f53109b7b 100644 --- a/substrate/frame/elections-phragmen/Cargo.toml +++ b/substrate/frame/elections-phragmen/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } log = { workspace = true } diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index 43b37c6beba2c6c2545c4b6bc5191e40009065fa..ba9f9eca27d79c854dd67a8d81130062f3d1f70d 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index de37bcf75569575b5eb42336424af2e3a917bf1d..d351b27eecde3c506e17339f9f7300d522fcc87f 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -60,7 +60,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block 
= Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/examples/default-config/Cargo.toml b/substrate/frame/examples/default-config/Cargo.toml index 2aa062ee6c1a0765f9e20d955d1f312c06157966..0ad5b56cb6faa473d337f54fe7eaedaf31a70d9d 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index 71b97796ecddf89275c3594362d9ece122b7387f..d7570f570946f1df05665ad8804e62ad9c18030a 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } diff --git a/substrate/frame/examples/dev-mode/src/tests.rs b/substrate/frame/examples/dev-mode/src/tests.rs index 1c79b5f5fa6050758f5158bdcbede75e914eaf97..e8a18ec13fe912497e32a821e4f10d75582609e1 100644 --- a/substrate/frame/examples/dev-mode/src/tests.rs +++ b/substrate/frame/examples/dev-mode/src/tests.rs @@ -54,7 +54,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 48cb25f90949a9401a8261999c0332d16d497d04..29984bab3e0ffc5ac881eb086d91820c01e7d504 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame = { package = "polkadot-sdk-frame", path = "../..", default-features = false, features = ["experimental", "runtime"] } diff --git a/substrate/frame/examples/kitchensink/Cargo.toml b/substrate/frame/examples/kitchensink/Cargo.toml index d8311897c6e1158f8a94520bfd1c6f2712aecbbc..db3e22daa01bd044cae876aabeac25543e814dcb 100644 --- a/substrate/frame/examples/kitchensink/Cargo.toml +++ b/substrate/frame/examples/kitchensink/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", 
default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/examples/multi-block-migrations/Cargo.toml b/substrate/frame/examples/multi-block-migrations/Cargo.toml index 28eca8577154e85c7fbb0d22fca8aefb3c7d9237..61bb2bc61b4e3758fc47c413f35ac5420bbfb4ef 100644 --- a/substrate/frame/examples/multi-block-migrations/Cargo.toml +++ b/substrate/frame/examples/multi-block-migrations/Cargo.toml @@ -13,7 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.5", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } pallet-migrations = { path = "../../migrations", default-features = false } frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index 9363f7533526650e69bcc791e99d37fac17155a7..23ce79c34402da91cea1f1e02ae7ebd90dc6e5d3 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } lite-json = { version = "0.2.0", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index 3525b3b67edf1569f00600e1e210e426c5f858dc..e2c57a8c1e1abcb855a97db9590edc19c7bf881b 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -61,7 +61,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml index b1d560a85f3ff59de2ab6560223f7b7657f2b27a..080500f629671837316e724b6825493c3fcb4b67 100644 --- a/substrate/frame/examples/single-block-migrations/Cargo.toml +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] docify = "0.2.8" log = { version = "0.4.21", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../../support", default-features = false } frame-executive = { path = "../../executive", default-features = false } diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs index 
18ef4e72cc4f5ce938abf03d5eaea3039ec0b030..7b543d72c98404d84ba8f68b3d864176c4182fb9 100644 --- a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -67,10 +67,10 @@ impl UncheckedOnRuntimeUpgrade for InnerMigrateV0ToV1 { // Write the new value to storage let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; crate::Value::::put(new); - // One read for the old value, one write for the new value - T::DbWeight::get().reads_writes(1, 1) + // One read + write for taking the old value, and one write for setting the new value + T::DbWeight::get().reads_writes(1, 2) } else { - // One read for trying to access the old value + // No writes since there was no old value, just one read for checking T::DbWeight::get().reads(1) } } @@ -184,7 +184,7 @@ mod test { // value. assert_eq!( weight, - ::DbWeight::get().reads_writes(1, 1) + ::DbWeight::get().reads_writes(1, 2) ); // After the migration, the new value should be set as the `current` value. diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml index 1ef3521e06063cf15679e248c6b46ae27a14e946..6cb4d7ddd6c06951b749d796746a7f5a63ee343f 100644 --- a/substrate/frame/examples/split/Cargo.toml +++ b/substrate/frame/examples/split/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/examples/tasks/Cargo.toml b/substrate/frame/examples/tasks/Cargo.toml index 3f59d57ea0f202729f11ac549daa828424343671..95246ef3f6643aafe215bb4bacf0e2e2598149ae 100644 --- a/substrate/frame/examples/tasks/Cargo.toml +++ b/substrate/frame/examples/tasks/Cargo.toml @@ -14,7 +14,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index 22fcaa993ab8093625443d4c46398673e4612110..4cce0fa9f9504ae48ca1929f87311896b81ca5cf 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] aquamarine = "0.5.0" -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } log = { workspace = true } diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index f05f22f764164ce15704a9200c78e65b2867c908..5b7121e2eae3788fdf37c0467871f0233dd62393 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", 
default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/fast-unstake/src/lib.rs b/substrate/frame/fast-unstake/src/lib.rs index 8ba306201310e220afdb4cf8cb0d92a043bcafa6..f31c9c640260b2bccad684ce5aa23ef384847d43 100644 --- a/substrate/frame/fast-unstake/src/lib.rs +++ b/substrate/frame/fast-unstake/src/lib.rs @@ -141,7 +141,7 @@ macro_rules! log { ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { log::$level!( target: crate::LOG_TARGET, - concat!("[{:?}] 💨 ", $patter), <frame_system::Pallet<T>>::block_number() $(, $values)* + concat!("[{:?}] 💨 ", $patter), frame_system::Pallet::<T>::block_number() $(, $values)* ) }; } @@ -227,7 +227,6 @@ pub mod pallet { /// checked. The checking is represented by updating [`UnstakeRequest::checked`], which is /// stored in [`Head`]. #[pallet::storage] - #[pallet::getter(fn eras_to_check_per_block)] pub type ErasToCheckPerBlock<T: Config> = StorageValue<_, u32, ValueQuery>; #[pallet::event] @@ -332,7 +331,7 @@ pub mod pallet { pub fn register_fast_unstake(origin: OriginFor<T>) -> DispatchResult { let ctrl = ensure_signed(origin)?; - ensure!(ErasToCheckPerBlock::<T>::get() != 0, <Error<T>>::CallNotAllowed); + ensure!(ErasToCheckPerBlock::<T>::get() != 0, Error::<T>::CallNotAllowed); let stash_account = T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::<T>::NotController)?; ensure!(!Queue::<T>::contains_key(&stash_account), Error::<T>::AlreadyQueued); @@ -373,7 +372,7 @@ pub mod pallet { pub fn deregister(origin: OriginFor<T>) -> DispatchResult { let ctrl = ensure_signed(origin)?; - ensure!(ErasToCheckPerBlock::<T>::get() != 0, <Error<T>>::CallNotAllowed); + ensure!(ErasToCheckPerBlock::<T>::get() != 0, Error::<T>::CallNotAllowed); let stash_account = T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::<T>::NotController)?; diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index 5ce010f1c2622fbbefa053bf1bb45383e853af4c..730c4e70935c0be3cb840087011a37ca9668b317 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] blake2 = { version = "0.10.4", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index f4dd92129f38aeb76a1df8afd21494102eb68bd9..302ce327aed4b037432b660cf65b0c9aba5123f6 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml index
8c0052004ae3d595f3d1f0bef75d92584d3c3ff5..e0bce8a77bdc68c26a9de9b95438b8b06ff4228b 100644 --- a/substrate/frame/identity/Cargo.toml +++ b/substrate/frame/identity/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } enumflags2 = { version = "0.7.7" } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs index 4a977880b3150cf9ffa7e2b4032e258483a6d242..5a36101cc2f793c6e1f858b7e0cc2b53d4bf74b2 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -1169,7 +1169,9 @@ pub mod pallet { pub fn set_primary_username(origin: OriginFor<T>, username: Username<T>) -> DispatchResult { // ensure `username` maps to `origin` (i.e. has already been set by an authority). let who = ensure_signed(origin)?; - ensure!(AccountOfUsername::<T>::contains_key(&username), Error::<T>::NoUsername); + let account_of_username = + AccountOfUsername::<T>::get(&username).ok_or(Error::<T>::NoUsername)?; + ensure!(who == account_of_username, Error::<T>::InvalidUsername); let (registration, _maybe_username) = IdentityOf::<T>::get(&who).ok_or(Error::<T>::NoIdentity)?; IdentityOf::<T>::insert(&who, (registration, Some(username.clone()))); diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index 0a9464256ce3fbf7bf84d0a463b9c00ad87bef29..60579a23b91b9d51bbe850085ae97b283d83e47f 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -25,7 +25,7 @@ use crate::{ use codec::{Decode, Encode}; use frame_support::{ - assert_noop, assert_ok, derive_impl, parameter_types, + assert_err, assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, ConstU64, Get, OnFinalize, OnInitialize}, BoundedVec, }; @@ -1491,6 +1491,76 @@ fn setting_primary_should_work() { }); } +#[test] +fn must_own_primary() { + new_test_ext().execute_with(|| { + // set up authority + let [authority, _] = unfunded_accounts(); + let suffix: Vec<u8> = b"test".to_vec(); + let allocation: u32 = 10; + assert_ok!(Identity::add_username_authority( + RuntimeOrigin::root(), + authority.clone(), + suffix.clone(), + allocation + )); + + // Set up first user ("pi") and a username. + let pi_public = sr25519_generate(0.into(), None); + let pi_account: AccountIdOf<Test> = MultiSigner::Sr25519(pi_public).into_account().into(); + let (pi_username, pi_to_sign) = + test_username_of(b"username314159".to_vec(), suffix.clone()); + let encoded_pi_username = Encode::encode(&pi_to_sign.to_vec()); + let pi_signature = MultiSignature::Sr25519( + sr25519_sign(0.into(), &pi_public, &encoded_pi_username).unwrap(), + ); + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + pi_account.clone(), + pi_username.clone(), + Some(pi_signature) + )); + + // Set up second user ("e") and a username.
+ let e_public = sr25519_generate(1.into(), None); + let e_account: AccountIdOf<Test> = MultiSigner::Sr25519(e_public).into_account().into(); + let (e_username, e_to_sign) = test_username_of(b"username271828".to_vec(), suffix.clone()); + let encoded_e_username = Encode::encode(&e_to_sign.to_vec()); + let e_signature = MultiSignature::Sr25519( + sr25519_sign(1.into(), &e_public, &encoded_e_username).unwrap(), + ); + assert_ok!(Identity::set_username_for( + RuntimeOrigin::signed(authority.clone()), + e_account.clone(), + e_username.clone(), + Some(e_signature) + )); + + // Ensure that both users have their usernames. + assert_eq!( + AccountOfUsername::<Test>::get::<&Username<Test>>(&pi_to_sign), + Some(pi_account.clone()) + ); + assert_eq!( + AccountOfUsername::<Test>::get::<&Username<Test>>(&e_to_sign), + Some(e_account.clone()) + ); + + // Cannot set primary to a username that does not exist. + let (_, c_username) = test_username_of(b"speedoflight".to_vec(), suffix.clone()); + assert_err!( + Identity::set_primary_username(RuntimeOrigin::signed(pi_account.clone()), c_username,), + Error::<Test>::NoUsername + ); + + // Cannot take someone else's username as your primary. + assert_err!( + Identity::set_primary_username(RuntimeOrigin::signed(pi_account.clone()), e_to_sign,), + Error::<Test>::InvalidUsername + ); + }); +} + #[test] fn unaccepted_usernames_should_expire() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml index 46b416f0f9a8d72a12aa12f9d8eeb99aaf4c1c2a..78192a81d7b461d0c834662b8bd0e4c50ea31333 100644 --- a/substrate/frame/im-online/Cargo.toml +++ b/substrate/frame/im-online/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index cc448dc1ae10bcc80a4389348b5969042bc02caf..2aff9a0e26df5cfbd96813c741ea184c6b11a308 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -127,7 +127,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup<Self::AccountId>; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index 8684f347270f59b3032a0056ba25bbb133cd3a22..248bae003ed856a828ce82863ad2fadfb7f0a927 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git
a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index 9f8bf8c3758832d203d088616b4cb5247a116805..87b8d79a7f83b0c6f20ad1b11b99fb9b8dcbe9a8 100644 --- a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -53,7 +53,6 @@ impl frame_system::Config for Test { type Lookup = Indices; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index f4d65d9e560a51afe6211ca2889a95eb864a42f3..c2ec14cb4bc77c5668750684ae983b41ac46e780 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } safe-mix = { version = "1.0", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index 5f79704445f964eae544f0c8678f08f86ab85213..be59e5ec8935102bd03782f7c745775c46bacc18 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index 6f67db0ae709901f30c2339c40fd6d3399b06606..9f19c40973687a41936372f6e116eb2dc8417306 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml index 8a301387ae6972768786cb67e6abe3bbdb1b49ef..0d73c567cf4e16babb1488d0ad6e5d43c11639d8 100644 --- a/substrate/frame/merkle-mountain-range/Cargo.toml +++ b/substrate/frame/merkle-mountain-range/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { 
path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs index e2b40974579e8376723f466198d74755603285d0..a86443f2e0114120cac8b14e85de08ee183eda93 100644 --- a/substrate/frame/merkle-mountain-range/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/src/lib.rs @@ -260,15 +260,15 @@ pub mod pallet { /// Stateless MMR proof verification for batch of leaves. /// -/// This function can be used to verify received MMR [primitives::Proof] (`proof`) +/// This function can be used to verify received MMR [primitives::LeafProof] (`proof`) /// for given leaves set (`leaves`) against a known MMR root hash (`root`). /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the -/// [primitives::Proof]. +/// [primitives::LeafProof]. pub fn verify_leaves_proof( root: H::Output, leaves: Vec>, - proof: primitives::Proof, + proof: primitives::LeafProof, ) -> Result<(), primitives::Error> where H: traits::Hash, @@ -342,7 +342,7 @@ impl, I: 'static> Pallet { pub fn generate_proof( block_numbers: Vec>, best_known_block_number: Option>, - ) -> Result<(Vec>, primitives::Proof>), primitives::Error> { + ) -> Result<(Vec>, primitives::LeafProof>), primitives::Error> { // check whether best_known_block_number provided, else use current best block let best_known_block_number = best_known_block_number.unwrap_or_else(|| >::block_number()); @@ -362,11 +362,6 @@ impl, I: 'static> Pallet { mmr.generate_proof(leaf_indices) } - /// Return the on-chain MMR root hash. - pub fn mmr_root() -> HashOf { - RootHash::::get() - } - /// Verify MMR proof for given `leaves`. /// /// This method is safe to use within the runtime code. @@ -375,7 +370,7 @@ impl, I: 'static> Pallet { /// or the proof is invalid. pub fn verify_leaves( leaves: Vec>, - proof: primitives::Proof>, + proof: primitives::LeafProof>, ) -> Result<(), primitives::Error> { if proof.leaf_count > NumberOfLeaves::::get() || proof.leaf_count == 0 || @@ -393,4 +388,37 @@ impl, I: 'static> Pallet { Err(primitives::Error::Verify.log_debug("The proof is incorrect.")) } } + + pub fn generate_ancestry_proof( + prev_block_number: BlockNumberFor, + best_known_block_number: Option>, + ) -> Result>, Error> { + // check whether best_known_block_number provided, else use current best block + let best_known_block_number = + best_known_block_number.unwrap_or_else(|| >::block_number()); + + let leaf_count = Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); + let prev_leaf_count = Self::block_num_to_leaf_index(prev_block_number)?.saturating_add(1); + + let mmr: ModuleMmr = mmr::Mmr::new(leaf_count); + mmr.generate_ancestry_proof(prev_leaf_count) + } + + pub fn verify_ancestry_proof( + ancestry_proof: primitives::AncestryProof>, + ) -> Result<(), Error> { + let mmr: ModuleMmr = + mmr::Mmr::new(ancestry_proof.leaf_count); + let is_valid = mmr.verify_ancestry_proof(ancestry_proof)?; + if is_valid { + Ok(()) + } else { + Err(Error::Verify.log_debug("The ancestry proof is incorrect.")) + } + } + + /// Return the on-chain MMR root hash. 
+ pub fn mmr_root() -> HashOf<T, I> { + RootHash::<T, I>::get() + } } diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs index aeb3e7ea66414a062a238da3f84795b53da4f68b..5efc172d1e93f1d7a56e3b36da20f93c6e1df1f9 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -23,17 +23,17 @@ use crate::{ primitives::{self, Error, NodeIndex}, Config, HashOf, HashingOf, }; -use sp_mmr_primitives::{mmr_lib, utils::NodesUtils}; +use sp_mmr_primitives::{mmr_lib, mmr_lib::MMRStoreReadOps, utils::NodesUtils, LeafIndex}; use sp_std::prelude::*; /// Stateless verification of the proof for a batch of leaves. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the -/// [primitives::Proof] +/// [primitives::LeafProof] pub fn verify_leaves_proof<H, L>( root: H::Output, leaves: Vec<Node<H, L>>, - proof: primitives::Proof<H::Output>, + proof: primitives::LeafProof<H::Output>, ) -> Result<bool, Error> where H: sp_runtime::traits::Hash, @@ -69,7 +69,8 @@ where T: Config<I>, I: 'static, L: primitives::FullLeaf, - Storage: mmr_lib::MMRStore<NodeOf<T, I, L>>, + Storage: + MMRStoreReadOps<NodeOf<T, I, L>> + mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>>, { mmr: mmr_lib::MMR<NodeOf<T, I, L>, Hasher<HashingOf<T, I>, L>, Storage>, leaves: NodeIndex, @@ -80,7 +81,8 @@ where T: Config<I>, I: 'static, L: primitives::FullLeaf, - Storage: mmr_lib::MMRStore<NodeOf<T, I, L>>, + Storage: + MMRStoreReadOps<NodeOf<T, I, L>> + mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>>, { /// Create a pointer to an existing MMR with given number of leaves. pub fn new(leaves: NodeIndex) -> Self { @@ -91,11 +93,11 @@ where /// Verify proof for a set of leaves. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have /// the same position in both the `leaves` vector and the `leaf_indices` vector contained in the - /// [primitives::Proof] + /// [primitives::LeafProof] pub fn verify_leaves_proof( &self, leaves: Vec<L>, - proof: primitives::Proof<HashOf<T, I>>, + proof: primitives::LeafProof<HashOf<T, I>>, ) -> Result<bool, Error> { let p = mmr_lib::MerkleProof::<NodeOf<T, I, L>, Hasher<HashingOf<T, I>, L>>::new( self.mmr.mmr_size(), @@ -117,6 +119,44 @@ where .map_err(|e| Error::Verify.log_debug(e)) } + pub fn verify_ancestry_proof( + &self, + ancestry_proof: primitives::AncestryProof<HashOf<T, I>>, + ) -> Result<bool, Error> { + let prev_peaks_proof = + mmr_lib::NodeMerkleProof::<NodeOf<T, I, L>, Hasher<HashingOf<T, I>, L>>::new( + self.mmr.mmr_size(), + ancestry_proof + .items + .into_iter() + .map(|(index, hash)| (index, Node::Hash(hash))) + .collect(), + ); + + let raw_ancestry_proof = mmr_lib::AncestryProof::< + NodeOf<T, I, L>, + Hasher<HashingOf<T, I>, L>, + > { + prev_peaks: ancestry_proof + .prev_peaks + .into_iter() + .map(|hash| Node::Hash(hash)) + .collect(), + prev_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1), + proof: prev_peaks_proof, + }; + + let prev_root = mmr_lib::ancestry_proof::bagging_peaks_hashes::< + NodeOf<T, I, L>, + Hasher<HashingOf<T, I>, L>, + >(raw_ancestry_proof.prev_peaks.clone()) + .map_err(|e| Error::Verify.log_debug(e))?; + let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; + raw_ancestry_proof + .verify_ancestor(root, prev_root) + .map_err(|e| Error::Verify.log_debug(e)) + } + /// Return the internal size of the MMR (number of nodes). #[cfg(test)] pub fn size(&self) -> NodeIndex { @@ -145,7 +185,7 @@ where /// Commit the changes to underlying storage, return current number of leaves and /// calculate the new MMR's root hash.
- pub fn finalize(self) -> Result<(NodeIndex, HashOf<T, I>), Error> { + pub fn finalize(mut self) -> Result<(NodeIndex, HashOf<T, I>), Error> { let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; self.mmr.commit().map_err(|e| Error::Commit.log_error(e))?; Ok((self.leaves, root.hash())) @@ -166,7 +206,7 @@ where pub fn generate_proof( &self, leaf_indices: Vec<NodeIndex>, - ) -> Result<(Vec<L>, primitives::Proof<HashOf<T, I>>), Error> { + ) -> Result<(Vec<L>, primitives::LeafProof<HashOf<T, I>>), Error> { let positions = leaf_indices .iter() .map(|index| mmr_lib::leaf_index_to_pos(*index)) @@ -174,7 +214,7 @@ where let store = <Storage<OffchainStorage, T, I, L>>::default(); let leaves = positions .iter() - .map(|pos| match mmr_lib::MMRStore::get_elem(&store, *pos) { + .map(|pos| match store.get_elem(*pos) { Ok(Some(Node::Data(leaf))) => Ok(leaf), e => Err(Error::LeafNotFound.log_debug(e)), }) @@ -184,11 +224,34 @@ where self.mmr .gen_proof(positions) .map_err(|e| Error::GenerateProof.log_error(e)) - .map(|p| primitives::Proof { + .map(|p| primitives::LeafProof { leaf_indices, leaf_count, items: p.proof_items().iter().map(|x| x.hash()).collect(), }) .map(|p| (leaves, p)) } + + pub fn generate_ancestry_proof( + &self, + prev_leaf_count: LeafIndex, + ) -> Result<primitives::AncestryProof<HashOf<T, I>>, Error> { + let prev_mmr_size = NodesUtils::new(prev_leaf_count).size(); + let raw_ancestry_proof = self + .mmr + .gen_ancestry_proof(prev_mmr_size) + .map_err(|e| Error::GenerateProof.log_error(e))?; + + Ok(primitives::AncestryProof { + prev_peaks: raw_ancestry_proof.prev_peaks.into_iter().map(|p| p.hash()).collect(), + prev_leaf_count, + leaf_count: self.leaves, + items: raw_ancestry_proof + .proof + .proof_items() + .iter() + .map(|(index, item)| (*index, item.hash())) + .collect(), + }) + } } diff --git a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs index f2acc35a137ffb46a39396762eafe30d7296e017..6848b8f1b9906b85bfc7c3b9ca9d5a52b4ddaed8 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs @@ -60,7 +60,7 @@ impl<StorageType, T, I, L> Default for Storage<StorageType, T, I, L> { } } -impl<T, I, L> mmr_lib::MMRStore<NodeOf<T, I, L>> for Storage<OffchainStorage, T, I, L> +impl<T, I, L> mmr_lib::MMRStoreReadOps<NodeOf<T, I, L>> for Storage<OffchainStorage, T, I, L> where T: Config<I>, I: 'static, @@ -98,13 +98,20 @@ where Ok(sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &temp_key) .and_then(|v| codec::Decode::decode(&mut &*v).ok())) } +} +impl<T, I, L> mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>> for Storage<OffchainStorage, T, I, L> +where + T: Config<I>, + I: 'static, + L: primitives::FullLeaf + codec::Decode, +{ fn append(&mut self, _: NodeIndex, _: Vec<NodeOf<T, I, L>>) -> mmr_lib::Result<()> { panic!("MMR must not be altered in the off-chain context.") } } -impl<T, I, L> mmr_lib::MMRStore<NodeOf<T, I, L>> for Storage<RuntimeStorage, T, I, L> +impl<T, I, L> mmr_lib::MMRStoreReadOps<NodeOf<T, I, L>> for Storage<RuntimeStorage, T, I, L> where T: Config<I>, I: 'static, @@ -113,7 +120,14 @@ where fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result<Option<NodeOf<T, I, L>>> { Ok(Nodes::<T, I>::get(pos).map(Node::Hash)) } +} +impl<T, I, L> mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>> for Storage<RuntimeStorage, T, I, L> +where + T: Config<I>, + I: 'static, + L: primitives::FullLeaf, +{ fn append(&mut self, pos: NodeIndex, elems: Vec<NodeOf<T, I, L>>) -> mmr_lib::Result<()> { if elems.is_empty() { return Ok(()) diff --git a/substrate/frame/merkle-mountain-range/src/tests.rs b/substrate/frame/merkle-mountain-range/src/tests.rs index 88de7511c9f280b6996419f07ac1f5c458d8a395..f8cfcb4e2c286f949207826990ba715485247ca2 100644 --- a/substrate/frame/merkle-mountain-range/src/tests.rs +++ b/substrate/frame/merkle-mountain-range/src/tests.rs @@ -22,7 +22,7 @@ use sp_core::{ offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, H256, }; -use
sp_mmr_primitives::{mmr_lib::helper, utils, Compact, Proof}; +use sp_mmr_primitives::{mmr_lib::helper, utils, Compact, LeafProof}; use sp_runtime::BuildStorage; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { @@ -283,7 +283,7 @@ fn should_generate_proofs_correctly() { proofs[0], ( vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], - Proof { + LeafProof { leaf_indices: vec![0], leaf_count: 7, items: vec![ @@ -298,7 +298,7 @@ fn should_generate_proofs_correctly() { historical_proofs[0][0], ( vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], - Proof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } + LeafProof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } ) ); @@ -314,7 +314,7 @@ fn should_generate_proofs_correctly() { proofs[2], ( vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - Proof { + LeafProof { leaf_indices: vec![2], leaf_count: 7, items: vec![ @@ -334,7 +334,7 @@ fn should_generate_proofs_correctly() { historical_proofs[2][0], ( vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - Proof { + LeafProof { leaf_indices: vec![2], leaf_count: 3, items: vec![hex( @@ -354,7 +354,7 @@ fn should_generate_proofs_correctly() { historical_proofs[2][2], ( vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - Proof { + LeafProof { leaf_indices: vec![2], leaf_count: 5, items: vec![ @@ -372,7 +372,7 @@ fn should_generate_proofs_correctly() { ( // NOTE: the leaf index is equivalent to the block number(in this case 5) - 1 vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], - Proof { + LeafProof { leaf_indices: vec![4], leaf_count: 7, items: vec![ @@ -387,7 +387,7 @@ fn should_generate_proofs_correctly() { historical_proofs[4][0], ( vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], - Proof { + LeafProof { leaf_indices: vec![4], leaf_count: 5, items: vec![hex( @@ -402,7 +402,7 @@ fn should_generate_proofs_correctly() { proofs[6], ( vec![Compact::new(((6, H256::repeat_byte(7)).into(), LeafData::new(7).into(),))], - Proof { + LeafProof { leaf_indices: vec![6], leaf_count: 7, items: vec![ @@ -433,7 +433,7 @@ fn should_generate_batch_proof_correctly() { // then assert_eq!( proof, - Proof { + LeafProof { // the leaf indices are equivalent to the above specified block numbers - 1. 
leaf_indices: vec![0, 4, 5], leaf_count: 7, @@ -451,7 +451,7 @@ fn should_generate_batch_proof_correctly() { // then assert_eq!( historical_proof, - Proof { + LeafProof { leaf_indices: vec![0, 4, 5], leaf_count: 6, items: vec![ @@ -516,43 +516,40 @@ fn should_verify() { }); } -#[test] -fn should_verify_batch_proofs() { - fn generate_and_verify_batch_proof( - ext: &mut sp_io::TestExternalities, - block_numbers: &Vec<u64>, - blocks_to_add: usize, - ) { - let (leaves, proof) = ext.execute_with(|| { - crate::Pallet::<Test>::generate_proof(block_numbers.to_vec(), None).unwrap() - }); +fn generate_and_verify_batch_proof( + ext: &mut sp_io::TestExternalities, + block_numbers: &Vec<u64>, + blocks_to_add: usize, +) { + let (leaves, proof) = ext.execute_with(|| { + crate::Pallet::<Test>::generate_proof(block_numbers.to_vec(), None).unwrap() + }); - let max_block_number = ext.execute_with(|| frame_system::Pallet::<Test>::block_number()); - let min_block_number = block_numbers.iter().max().unwrap(); + let max_block_number = ext.execute_with(|| frame_system::Pallet::<Test>::block_number()); + let min_block_number = block_numbers.iter().max().unwrap(); - // generate all possible historical proofs for the given blocks - let historical_proofs = (*min_block_number..=max_block_number) - .map(|best_block| { - ext.execute_with(|| { - crate::Pallet::<Test>::generate_proof(block_numbers.to_vec(), Some(best_block)) - .unwrap() - }) + // generate all possible historical proofs for the given blocks + let historical_proofs = (*min_block_number..=max_block_number) + .map(|best_block| { + ext.execute_with(|| { + crate::Pallet::<Test>::generate_proof(block_numbers.to_vec(), Some(best_block)) + .unwrap() }) - .collect::<Vec<_>>(); - - ext.execute_with(|| { - add_blocks(blocks_to_add); - // then - assert_eq!(crate::Pallet::<Test>::verify_leaves(leaves, proof), Ok(())); - historical_proofs.iter().for_each(|(leaves, proof)| { - assert_eq!( - crate::Pallet::<Test>::verify_leaves(leaves.clone(), proof.clone()), - Ok(()) - ); - }); }) - } + .collect::<Vec<_>>(); + + ext.execute_with(|| { + add_blocks(blocks_to_add); + // then + assert_eq!(crate::Pallet::<Test>::verify_leaves(leaves, proof), Ok(())); + historical_proofs.iter().for_each(|(leaves, proof)| { + assert_eq!(crate::Pallet::<Test>::verify_leaves(leaves.clone(), proof.clone()), Ok(())); + }); + }) +} +#[test] +fn should_verify_batch_proofs() { let _ = env_logger::try_init(); use itertools::Itertools; @@ -790,3 +787,24 @@ fn does_not_panic_when_generating_historical_proofs() { ); }); } + +#[test] +fn generating_and_verifying_ancestry_proofs_works_correctly() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + ext.execute_with(|| add_blocks(500)); + ext.persist_offchain_overlay(); + register_offchain_ext(&mut ext); + + ext.execute_with(|| { + // Check that generating and verifying ancestry proofs works correctly + // for each previous block + for prev_block_number in 1..501 { + let proof = Pallet::<Test>::generate_ancestry_proof(prev_block_number, None).unwrap(); + Pallet::<Test>::verify_ancestry_proof(proof).unwrap(); + } + + // Check that we can't generate ancestry proofs for a future block.
+ assert_eq!(Pallet::<Test>::generate_ancestry_proof(501, None), Err(Error::GenerateProof)); + }); +} diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index f263c41831beab8f77aa04522e6cec40d75af703..e44cbeb1550ccfdb0daf8e871e924df261a59227 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -12,7 +12,7 @@ description = "FRAME pallet to queue and process messages" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..13d4bd0c2ea909700dda8d1ec621892c4a728724 --- /dev/null +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "frame-metadata-hash-extension" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "FRAME signed extension for verifying the metadata hash" + +[dependencies] +array-bytes = "6.2.2" +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } +sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +log = { workspace = true, default-features = false } +docify = "0.2.8" + +[dev-dependencies] +substrate-wasm-builder = { path = "../../utils/wasm-builder", features = ["metadata-hash"] } +substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +sp-api = { path = "../../primitives/api" } +sp-transaction-pool = { path = "../../primitives/transaction-pool" } +merkleized-metadata = "0.1.0" +frame-metadata = { version = "16.0.0", features = ["current"] } +sp-tracing = { path = "../../primitives/tracing" } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-runtime/std", +] diff --git a/substrate/frame/metadata-hash-extension/src/lib.rs b/substrate/frame/metadata-hash-extension/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d09acbfb3df22e6c3e6706019ede1f16621ef767 --- /dev/null +++ b/substrate/frame/metadata-hash-extension/src/lib.rs @@ -0,0 +1,168 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! The [`CheckMetadataHash`] signed extension. +//! +//! The extension for optionally checking the metadata hash. For information on how it works and +//! what it does exactly, see the docs of [`CheckMetadataHash`]. +//! +//! # Integration +//! +//! As with any signed extension, you will need to add it to your runtime's signed extensions: +#![doc = docify::embed!("src/tests.rs", add_metadata_hash_extension)] +//! As the extension requires the `RUNTIME_METADATA_HASH` environment variable to be present at +//! compile time, it requires a little bit more setup. To have this environment variable available +//! at compile time, you need to tell the `substrate-wasm-builder` to provide it: +#![doc = docify::embed!("src/tests.rs", enable_metadata_hash_in_wasm_builder)] +//! As generating the metadata hash requires compiling the runtime twice, it is +//! recommended to enable metadata hash generation only for release builds or when you want to +//! test this feature. + +extern crate alloc; +/// For our tests +extern crate self as frame_metadata_hash_extension; + +use codec::{Decode, Encode}; +use frame_support::DebugNoBound; +use frame_system::Config; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, SignedExtension}, + transaction_validity::{TransactionValidityError, UnknownTransaction}, +}; + +#[cfg(test)] +mod tests; + +/// The mode of [`CheckMetadataHash`]. +#[derive(Decode, Encode, PartialEq, Debug, TypeInfo, Clone, Copy, Eq)] +enum Mode { + Disabled, + Enabled, +} + +/// Wrapper around the metadata hash and where to get it from. +#[derive(Default, Debug, PartialEq, Clone, Copy, Eq)] +enum MetadataHash { + /// Fetch it from the `RUNTIME_METADATA_HASH` env variable at compile time. + #[default] + FetchFromEnv, + /// Use the given metadata hash. + Custom([u8; 32]), +} + +impl MetadataHash { + /// Returns the metadata hash. + fn hash(&self) -> Option<[u8; 32]> { + match self { + Self::FetchFromEnv => + option_env!("RUNTIME_METADATA_HASH").map(array_bytes::hex2array_unchecked), + Self::Custom(hash) => Some(*hash), + } + } +} + +/// Extension for optionally verifying the metadata hash. +/// +/// The metadata hash is a cryptographic representation of the runtime metadata. This metadata hash +/// is built as described in [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). +/// This metadata hash should give users the confidence that what they build with an online wallet +/// is the same as what they are signing with their offline wallet and then applying on chain. To +/// ensure that the online wallet is not tricking the offline wallet into decoding and showing an +/// incorrect extrinsic, the offline wallet will include the metadata hash into the additional +/// signed data and the runtime will then do the same. If the metadata hash doesn't match, the +/// signature verification will fail and thus, the transaction will be rejected. The RFC contains +/// more details on how it works. +/// +/// The extension adds one byte (the `mode`) to the size of the extrinsic.
This one byte /// controls whether the metadata hash is added to the signed data or not. Mode `0` means that /// the metadata hash is not added and thus, `None` is added to the signed data. Mode `1` means that /// the metadata hash is added and thus, `Some(metadata_hash)` is added to the signed data. Further /// values of `mode` are reserved for future changes. /// /// The metadata hash is read from the environment variable `RUNTIME_METADATA_HASH`. This /// environment variable is for example set by the `substrate-wasm-builder` when the feature for /// generating the metadata hash is enabled. If the environment variable is not set and `mode = 1` /// is passed, the transaction is rejected with [`UnknownTransaction::CannotLookup`]. +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, DebugNoBound)] +#[scale_info(skip_type_params(T))] +pub struct CheckMetadataHash<T> { + _phantom: core::marker::PhantomData<T>, + mode: Mode, + #[codec(skip)] + metadata_hash: MetadataHash, +} + +impl<T> CheckMetadataHash<T> { + /// Creates a new `SignedExtension` to check the metadata hash. + pub fn new(enable: bool) -> Self { + Self { + _phantom: core::marker::PhantomData, + mode: if enable { Mode::Enabled } else { Mode::Disabled }, + metadata_hash: MetadataHash::FetchFromEnv, + } + } + + /// Create an instance that uses the given `metadata_hash`. + /// + /// This is useful for testing the extension. + pub fn new_with_custom_hash(metadata_hash: [u8; 32]) -> Self { + Self { + _phantom: core::marker::PhantomData, + mode: Mode::Enabled, + metadata_hash: MetadataHash::Custom(metadata_hash), + } + } +} + +impl<T: Config + Send + Sync> SignedExtension for CheckMetadataHash<T> { + type AccountId = T::AccountId; + type Call = <T as Config>::RuntimeCall; + type AdditionalSigned = Option<[u8; 32]>; + type Pre = (); + const IDENTIFIER: &'static str = "CheckMetadataHash"; + + fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> { + let signed = match self.mode { + Mode::Disabled => None, + Mode::Enabled => match self.metadata_hash.hash() { + Some(hash) => Some(hash), + None => return Err(UnknownTransaction::CannotLookup.into()), + }, + }; + + log::debug!( + target: "runtime::metadata-hash", + "CheckMetadataHash::additional_signed => {:?}", + signed.as_ref().map(|h| array_bytes::bytes2hex("0x", h)), + ); + + Ok(signed) + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf<Self::Call>, + len: usize, + ) -> Result<Self::Pre, TransactionValidityError> { + self.validate(who, call, info, len).map(|_| ()) + } +} diff --git a/substrate/frame/metadata-hash-extension/src/tests.rs b/substrate/frame/metadata-hash-extension/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..f13eecfd94bfb5f78224e7dc40f2b08e1ce7c034 --- /dev/null +++ b/substrate/frame/metadata-hash-extension/src/tests.rs @@ -0,0 +1,179 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
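To make the `mode` byte concrete, here is a minimal sketch, not part of the patch: since `_phantom` encodes to nothing and `metadata_hash` is `#[codec(skip)]`, the extension encodes to exactly one byte on the wire, while the 32-byte hash only ever enters the additional signed data. The generic `Runtime` bound is an assumption for illustration:

use codec::Encode;
use frame_metadata_hash_extension::CheckMetadataHash;

fn mode_byte_sketch<Runtime: frame_system::Config + Send + Sync>() {
    // Disabled: encodes as `0x00`; `None` goes into the additional signed data.
    let disabled = CheckMetadataHash::<Runtime>::new(false);
    assert_eq!(disabled.encode(), vec![0u8]);

    // Enabled with a custom hash: still a single `0x01` byte in the extrinsic;
    // `Some([1u8; 32])` is what gets mixed into the signed payload instead.
    let enabled = CheckMetadataHash::<Runtime>::new_with_custom_hash([1u8; 32]);
    assert_eq!(enabled.encode(), vec![1u8]);
}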
+ +use crate::CheckMetadataHash; +use codec::{Decode, Encode}; +use frame_metadata::RuntimeMetadataPrefixed; +use frame_support::{ + derive_impl, + pallet_prelude::{InvalidTransaction, TransactionValidityError}, +}; +use merkleized_metadata::{generate_metadata_digest, ExtraInfo}; +use sp_api::{Metadata, ProvideRuntimeApi}; +use sp_runtime::{ + traits::{Extrinsic as _, SignedExtension}, + transaction_validity::{TransactionSource, UnknownTransaction}, +}; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; +use substrate_test_runtime_client::{ + prelude::*, + runtime::{self, ExtrinsicBuilder}, + DefaultTestClientBuilderExt, TestClientBuilder, +}; + +type Block = frame_system::mocking::MockBlock<Test>; + +frame_support::construct_runtime! { + pub enum Test { + System: frame_system, + } +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; +} + +#[test] +fn rejects_when_no_metadata_hash_was_passed() { + let ext = CheckMetadataHash::<Test>::decode(&mut &1u8.encode()[..]).unwrap(); + assert_eq!(Err(UnknownTransaction::CannotLookup.into()), ext.additional_signed()); +} + +#[test] +fn rejects_unknown_mode() { + assert!(CheckMetadataHash::<Test>::decode(&mut &50u8.encode()[..]).is_err()); +} + +/// Generate the metadata hash for the `test-runtime`. +fn generate_metadata_hash(metadata: RuntimeMetadataPrefixed) -> [u8; 32] { + let runtime_version = runtime::VERSION; + let base58_prefix = 0; + + let extra_info = ExtraInfo { + spec_version: runtime_version.spec_version, + spec_name: runtime_version.spec_name.into(), + base58_prefix, + decimals: 10, + token_symbol: "TOKEN".into(), + }; + + generate_metadata_digest(&metadata.1, extra_info).unwrap().hash() +} + +#[test] +fn ensure_check_metadata_works_on_real_extrinsics() { + sp_tracing::try_init_simple(); + + let client = TestClientBuilder::new().build(); + let runtime_api = client.runtime_api(); + let best_hash = client.chain_info().best_hash; + + let metadata = RuntimeMetadataPrefixed::decode( + &mut &runtime_api.metadata_at_version(best_hash, 15).unwrap().unwrap()[..], + ) + .unwrap(); + + let valid_transaction = ExtrinsicBuilder::new_include_data(vec![1, 2, 3]) + .metadata_hash(generate_metadata_hash(metadata)) + .build(); + // Ensure that the transaction is signed. + assert!(valid_transaction.is_signed().unwrap()); + + runtime_api + .validate_transaction(best_hash, TransactionSource::External, valid_transaction, best_hash) + .unwrap() + .unwrap(); + + // Including some random metadata hash should make the transaction invalid. + let invalid_transaction = ExtrinsicBuilder::new_include_data(vec![1, 2, 3]) + .metadata_hash([10u8; 32]) + .build(); + // Ensure that the transaction is signed. + assert!(invalid_transaction.is_signed().unwrap()); + + assert_eq!( + TransactionValidityError::from(InvalidTransaction::BadProof), + runtime_api + .validate_transaction( + best_hash, + TransactionSource::External, + invalid_transaction, + best_hash + ) + .unwrap() + .unwrap_err() + ); +} + +#[allow(unused)] +mod docs { + use super::*; + + #[docify::export] + mod add_metadata_hash_extension { + frame_support::construct_runtime! { + pub enum Runtime { + System: frame_system, + } + } + + /// The `SignedExtension` to the basic transaction logic.
+ pub type SignedExtra = ( + frame_system::CheckNonZeroSender<Runtime>, + frame_system::CheckSpecVersion<Runtime>, + frame_system::CheckTxVersion<Runtime>, + frame_system::CheckGenesis<Runtime>, + frame_system::CheckMortality<Runtime>, + frame_system::CheckNonce<Runtime>, + frame_system::CheckWeight<Runtime>, + // Add the `CheckMetadataHash` extension. + // The position in this list is not important, so we could also add it to the beginning. + frame_metadata_hash_extension::CheckMetadataHash<Runtime>, + ); + + /// In your runtime this will be your real address type. + type Address = (); + /// In your runtime this will be your real signature type. + type Signature = (); + + /// Unchecked extrinsic type as expected by this runtime. + pub type UncheckedExtrinsic = + sp_runtime::generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>; + } + + // Defined here to keep it out of the docs as well. + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for add_metadata_hash_extension::Runtime { + type Block = Block; + type RuntimeEvent = add_metadata_hash_extension::RuntimeEvent; + type RuntimeOrigin = add_metadata_hash_extension::RuntimeOrigin; + type RuntimeCall = add_metadata_hash_extension::RuntimeCall; + type PalletInfo = add_metadata_hash_extension::PalletInfo; + } + + #[docify::export] + fn enable_metadata_hash_in_wasm_builder() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + // Requires the `metadata-hash` feature to be activated. + // You need to pass the main token symbol and its number of decimals. + .enable_metadata_hash("TOKEN", 12) + // The runtime will be built twice and the second time the `RUNTIME_METADATA_HASH` + // environment variable will be set for the `CheckMetadataHash` extension. + .build() + } +} diff --git a/substrate/frame/migrations/Cargo.toml b/substrate/frame/migrations/Cargo.toml index 4726ac5c521e7395a29d374ab195a4a5a2147ff5..69e910a4e4f6e56b2b848f9b0f402da40d73f77d 100644 --- a/substrate/frame/migrations/Cargo.toml +++ b/substrate/frame/migrations/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } docify = "0.2.8" impl-trait-for-tuples = "0.2.2" log = "0.4.21" diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 964d6acb889ad0e02b900efb0264f761c5afbeb7..44a567d668fb3c3452b4492831b0be66595a7c05 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } frame-benchmarking = { default-features = false, optional = true, path = "../benchmarking" } frame-support = { default-features = false, path = "../support" } frame-system = { default-features = false, path = "../system" } diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index 2437acbc2e231b8d9c12fef4e2089e0110193be4..649a7100325f931e940760688710f88a1fbb6154 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec =
{ package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/multisig/src/tests.rs b/substrate/frame/multisig/src/tests.rs index 0d73e3db66153f6c5fe16dd4974567c3584a2372..cfdd33f7dfcc3d3f5a49c302c0784718a70dfaca 100644 --- a/substrate/frame/multisig/src/tests.rs +++ b/substrate/frame/multisig/src/tests.rs @@ -41,7 +41,6 @@ frame_support::construct_runtime!( #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; - type BlockHashCount = ConstU32<250>; type AccountData = pallet_balances::AccountData; // This pallet wishes to overwrite this. type BaseCallFilter = TestBaseCallFilter; diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml index b5a929468f7dc734ac3efb394b8a99475bb00a41..e2a7e34c637b430dbda56f1690938c1cde2b100e 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index 4f818ea3e08c52e741124f42463f3b9ecc3ae9c9..5c5c011c94ea27467e5b8bbd18fda890a181acf2 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } enumflags2 = { version = "0.7.7" } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/nfts/runtime-api/Cargo.toml b/substrate/frame/nfts/runtime-api/Cargo.toml index 84cbd1f51c98c673df2f9084d34fad77ec69dd54..6bee98fb51e0c908dfb70d28891e91cb55a7c6ea 100644 --- a/substrate/frame/nfts/runtime-api/Cargo.toml +++ b/substrate/frame/nfts/runtime-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } pallet-nfts = { path = "..", default-features = false } sp-api = { path = "../../../primitives/api", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index d0ba74a9273152dcac8e541ee8be8a41d1a5b584..1e3a0609c46bbfc40b044eae839f9bd0303e0178 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml 
@@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index 63376163cdcb78baa03a266189d3e52ee13466a7..17ed16d2623368826f1ad93c6b4f5032775319cf 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index eddcc8e4e1ddb9a51fcbec8dd1c922d20c43833c..bf4e01a3184793d638b1c87f5984a517a18e2ceb 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index 4985d7acbec92f8258f32338bc348981922c53a9..3f9463a9c429b93da7cf5945a6ebd2ac46077196 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # parity -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } # FRAME @@ -27,6 +27,7 @@ frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } pallet-bags-list = { path = "../../bags-list", default-features = false } pallet-staking = { path = "../../staking", default-features = false } +pallet-delegated-staking = { path = "../../delegated-staking", default-features = false } pallet-nomination-pools = { path = "..", default-features = false } # Substrate Primitives @@ -53,6 +54,7 @@ std = [ "frame-system/std", "pallet-bags-list/std", "pallet-balances/std", + "pallet-delegated-staking/std", "pallet-nomination-pools/std", "pallet-staking/std", "pallet-timestamp/std", @@ -72,6 +74,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", 
"pallet-bags-list/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-delegated-staking/runtime-benchmarks", "pallet-nomination-pools/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", diff --git a/substrate/frame/nomination-pools/benchmarking/src/inner.rs b/substrate/frame/nomination-pools/benchmarking/src/inner.rs index 277060e7f640f962bd112a6f337339ec1cf2559e..b8c978945e9ee18657e5307c0aebd1d4aaf742fd 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/inner.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/inner.rs @@ -23,22 +23,24 @@ use frame_support::{ assert_ok, ensure, traits::{ fungible::{Inspect, Mutate, Unbalanced}, - Get, + tokens::Preservation, + Get, Imbalance, }, }; use frame_system::RawOrigin as RuntimeOrigin; use pallet_nomination_pools::{ + adapter::{Member, Pool, StakeStrategy, StakeStrategyType}, BalanceOf, BondExtra, BondedPoolInner, BondedPools, ClaimPermission, ClaimPermissions, Commission, CommissionChangeRate, CommissionClaimPermission, ConfigOp, GlobalMaxCommission, MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, - Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, + Pallet as Pools, PoolId, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, }; use pallet_staking::MaxNominationsOf; use sp_runtime::{ traits::{Bounded, StaticLookup, Zero}, Perbill, }; -use sp_staking::{EraIndex, StakingInterface}; +use sp_staking::EraIndex; use sp_std::{vec, vec::Vec}; // `frame_benchmarking::benchmarks!` macro needs this use pallet_nomination_pools::Call; @@ -101,18 +103,50 @@ fn create_pool_account( let pool_account = pallet_nomination_pools::BondedPools::::iter() .find(|(_, bonded_pool)| bonded_pool.roles.depositor == pool_creator) - .map(|(pool_id, _)| Pools::::create_bonded_account(pool_id)) + .map(|(pool_id, _)| Pools::::generate_bonded_account(pool_id)) .expect("pool_creator created a pool above"); (pool_creator, pool_account) } +fn migrate_to_transfer_stake(pool_id: PoolId) { + if T::StakeAdapter::strategy_type() == StakeStrategyType::Transfer { + // should already be in the correct strategy + return; + } + let pool_acc = Pools::::generate_bonded_account(pool_id); + // drop the agent and its associated delegators . + T::StakeAdapter::remove_as_agent(Pool::from(pool_acc.clone())); + + // tranfer funds from all members to the pool account. + PoolMembers::::iter() + .filter(|(_, member)| member.pool_id == pool_id) + .for_each(|(member_acc, member)| { + let member_balance = member.total_balance(); + ::Currency::transfer( + &member_acc, + &pool_acc, + member_balance, + Preservation::Preserve, + ) + .expect("member should have enough balance to transfer"); + }); +} + fn vote_to_balance( vote: u64, ) -> Result, &'static str> { vote.try_into().map_err(|_| "could not convert u64 to Balance") } +/// `assertion` should strictly be true if the adapter is using `Delegate` strategy and strictly +/// false if the adapter is not using `Delegate` strategy. +fn assert_if_delegate(assertion: bool) { + let legacy_adapter_used = T::StakeAdapter::strategy_type() != StakeStrategyType::Delegate; + // one and only one of the two should be true. + assert!(assertion ^ legacy_adapter_used); +} + #[allow(unused)] struct ListScenario { /// Stash/Controller that is expected to be moved. 
@@ -151,8 +185,8 @@ impl ListScenario { let (pool_creator1, pool_origin1) = create_pool_account::(USER_SEED + 1, origin_weight, Some(Perbill::from_percent(50))); - T::Staking::nominate( - &pool_origin1, + T::StakeAdapter::nominate( + Pool::from(pool_origin1.clone()), // NOTE: these don't really need to be validators. vec![account("random_validator", 0, USER_SEED)], )?; @@ -160,8 +194,8 @@ impl ListScenario { let (_, pool_origin2) = create_pool_account::(USER_SEED + 2, origin_weight, Some(Perbill::from_percent(50))); - T::Staking::nominate( - &pool_origin2, + T::StakeAdapter::nominate( + Pool::from(pool_origin2.clone()), vec![account("random_validator", 0, USER_SEED)].clone(), )?; @@ -178,7 +212,10 @@ impl ListScenario { let (_, pool_dest1) = create_pool_account::(USER_SEED + 3, dest_weight, Some(Perbill::from_percent(50))); - T::Staking::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?; + T::StakeAdapter::nominate( + Pool::from(pool_dest1.clone()), + vec![account("random_validator", 0, USER_SEED)], + )?; let weight_of = pallet_staking::Pallet::::weight_of_fn(); assert_eq!(vote_to_balance::(weight_of(&pool_origin1)).unwrap(), origin_weight); @@ -204,11 +241,12 @@ impl ListScenario { self.origin1_member = Some(joiner.clone()); CurrencyOf::::set_balance(&joiner, amount * 2u32.into()); - let original_bonded = T::Staking::active_stake(&self.origin1).unwrap(); + let original_bonded = T::StakeAdapter::active_stake(Pool::from(self.origin1.clone())); // Unbond `amount` from the underlying pool account so when the member joins // we will maintain `current_bonded`. - T::Staking::unbond(&self.origin1, amount).expect("the pool was created in `Self::new`."); + T::StakeAdapter::unbond(Pool::from(self.origin1.clone()), amount) + .expect("the pool was created in `Self::new`."); // Account pool points for the unbonded balance. BondedPools::::mutate(&1, |maybe_pool| { @@ -231,13 +269,20 @@ impl ListScenario { } frame_benchmarking::benchmarks! { + where_clause { + where + T: pallet_staking::Config, + pallet_staking::BalanceOf: From, + BalanceOf: Into, + } + join { let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); // setup the worst case list scenario. let scenario = ListScenario::::new(origin_weight, true)?; assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), + T::StakeAdapter::active_stake(Pool::from(scenario.origin1.clone())), origin_weight ); @@ -252,7 +297,7 @@ frame_benchmarking::benchmarks! { verify { assert_eq!(CurrencyOf::::balance(&joiner), joiner_free - max_additional); assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), + T::StakeAdapter::active_stake(Pool::from(scenario.origin1)), scenario.dest_weight ); } @@ -267,7 +312,7 @@ frame_benchmarking::benchmarks! { }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) verify { assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= + T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= scenario.dest_weight ); } @@ -283,7 +328,7 @@ frame_benchmarking::benchmarks! 
{ let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(scenario.creator1.clone()).into(), ClaimPermission::PermissionlessAll); // transfer exactly `extra` to the depositor of the src pool (1), - let reward_account1 = Pools::::create_reward_account(1); + let reward_account1 = Pools::::generate_reward_account(1); assert!(extra >= CurrencyOf::::minimum_balance()); let _ = CurrencyOf::::mint_into(&reward_account1, extra); @@ -291,7 +336,7 @@ frame_benchmarking::benchmarks! { verify { // commission of 50% deducted here. assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= + T::StakeAdapter::active_stake(Pool::from(scenario.origin1)) >= scenario.dest_weight / 2u32.into() ); } @@ -302,7 +347,7 @@ frame_benchmarking::benchmarks! { let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); - let reward_account = Pools::::create_reward_account(1); + let reward_account = Pools::::generate_reward_account(1); // Send funds to the reward account of the pool CurrencyOf::::set_balance(&reward_account, ed + origin_weight); @@ -345,7 +390,7 @@ frame_benchmarking::benchmarks! { whitelist_account!(member_id); }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) verify { - let bonded_after = T::Staking::active_stake(&scenario.origin1).unwrap(); + let bonded_after = T::StakeAdapter::active_stake(Pool::from(scenario.origin1)); // We at least went down to the destination bag assert!(bonded_after <= scenario.dest_weight); let member = PoolMembers::::get( @@ -354,7 +399,7 @@ frame_benchmarking::benchmarks! { .unwrap(); assert_eq!( member.unbonding_eras.keys().cloned().collect::>(), - vec![0 + T::Staking::bonding_duration()] + vec![0 + T::StakeAdapter::bonding_duration()] ); assert_eq!( member.unbonding_eras.values().cloned().collect::>(), @@ -376,7 +421,7 @@ frame_benchmarking::benchmarks! { // Sanity check join worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); @@ -386,7 +431,7 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -419,7 +464,7 @@ frame_benchmarking::benchmarks! { // Sanity check join worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); @@ -430,7 +475,7 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -470,18 +515,18 @@ frame_benchmarking::benchmarks! { // here to ensure the complete flow for destroying a pool works - the reward pool account // should never exist by time the depositor withdraws so we test that it gets cleaned // up when unbonding. 
- let reward_account = Pools::::create_reward_account(1); + let reward_account = Pools::::generate_reward_account(1); assert!(frame_system::Account::::contains_key(&reward_account)); Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), Zero::zero() ); assert_eq!( - CurrencyOf::::balance(&pool_account), - min_create_bond + T::StakeAdapter::total_balance(Pool::from(pool_account.clone())), + Some(min_create_bond) ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -522,8 +567,8 @@ frame_benchmarking::benchmarks! { let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // Give the depositor some balance to bond - CurrencyOf::::set_balance(&depositor, min_create_bond * 2u32.into()); - + // it needs to transfer min balance to reward account as well so give additional min balance. + CurrencyOf::::set_balance(&depositor, min_create_bond + CurrencyOf::::minimum_balance() * 2u32.into()); // Make sure no Pools exist at a pre-condition for our verify checks assert_eq!(RewardPools::::count(), 0); assert_eq!(BondedPools::::count(), 0); @@ -556,8 +601,8 @@ frame_benchmarking::benchmarks! { } ); assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) + T::StakeAdapter::active_stake(Pool::from(Pools::::generate_bonded_account(1))), + min_create_bond ); } @@ -596,8 +641,8 @@ frame_benchmarking::benchmarks! { } ); assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) + T::StakeAdapter::active_stake(Pool::from(Pools::::generate_bonded_account(1))), + min_create_bond ); } @@ -681,13 +726,13 @@ frame_benchmarking::benchmarks! { .map(|i| account("stash", USER_SEED, i)) .collect(); - assert_ok!(T::Staking::nominate(&pool_account, validators)); - assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_some()); + assert_ok!(T::StakeAdapter::nominate(Pool::from(pool_account.clone()), validators)); + assert!(T::StakeAdapter::nominations(Pool::from(pool_account.clone())).is_some()); whitelist_account!(depositor); }:_(RuntimeOrigin::Signed(depositor.clone()), 1) verify { - assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_none()); + assert!(T::StakeAdapter::nominations(Pool::from(pool_account.clone())).is_none()); } set_commission { @@ -786,7 +831,7 @@ frame_benchmarking::benchmarks! { // Sanity check join worked assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), + T::StakeAdapter::active_stake(Pool::from(pool_account.clone())), min_create_bond + min_join_bond ); }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) @@ -800,7 +845,7 @@ frame_benchmarking::benchmarks! { let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); - let reward_account = Pools::::create_reward_account(1); + let reward_account = Pools::::generate_reward_account(1); CurrencyOf::::set_balance(&reward_account, ed + origin_weight); // member claims a payout to make some commission available. @@ -829,7 +874,7 @@ frame_benchmarking::benchmarks! 
{
	let (depositor, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None);
	// Remove ed freeze to create a scenario where the ed deposit needs to be adjusted.
-	let _ = Pools::::unfreeze_pool_deposit(&Pools::::create_reward_account(1));
+	let _ = Pools::::unfreeze_pool_deposit(&Pools::::generate_reward_account(1));
	assert!(&Pools::::check_ed_imbalance().is_err());

	whitelist_account!(depositor);
@@ -838,6 +883,141 @@ frame_benchmarking::benchmarks! {
		assert!(&Pools::::check_ed_imbalance().is_ok());
	}

+	apply_slash {
+		// Note: With the older `TransferStake` strategy, slashing is greedy and apply_slash
+		// should always fail.
+
+		// We want to fill the member's unbonding pools. So let's bond with a big enough amount.
+		let deposit_amount = Pools::::depositor_min_bond() * T::MaxUnbonding::get().into() * 4u32.into();
+		let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None);
+		let depositor_lookup = T::Lookup::unlookup(depositor.clone());
+
+		// verify user balance in the pool.
+		assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount);
+		// verify delegated balance.
+		assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount));
+
+		// ugly type conversion between balances of pallet staking and pools (which really are the
+		// same type). Maybe there is a better way?
+		let slash_amount: u128 = deposit_amount.into()/2;
+
+		// slash pool by half
+		pallet_staking::slashing::do_slash::(
+			&pool_account,
+			slash_amount.into(),
+			&mut pallet_staking::BalanceOf::::zero(),
+			&mut pallet_staking::NegativeImbalanceOf::::zero(),
+			EraIndex::zero()
+		);
+
+		// verify user balance is slashed in the pool.
+		assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into());
+		// verify the delegated balance is not yet slashed.
+		assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount));
+
+		// Fill member's sub pools for the worst case.
+		for i in 1..(T::MaxUnbonding::get() + 1) {
+			pallet_staking::CurrentEra::::put(i);
+			assert!(Pools::::unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone(), Pools::::depositor_min_bond()).is_ok());
+		}
+
+		pallet_staking::CurrentEra::::put(T::MaxUnbonding::get() + 2);
+
+		let slash_reporter = create_funded_user_with_balance::("slasher", 0, CurrencyOf::::minimum_balance());
+		whitelist_account!(depositor);
+	}:
+	{
+		assert_if_delegate::(Pools::::apply_slash(RuntimeOrigin::Signed(slash_reporter.clone()).into(), depositor_lookup.clone()).is_ok());
+	}
+	verify {
+		// verify balances are correct and the slash is applied.
+		assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount/2u32.into());
+		assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount/2u32.into()));
+	}
+
+	apply_slash_fail {
+		// Bench the scenario where the pool has some unapplied slash but the member does not
+		// have any slash to be applied.
+		let deposit_amount = Pools::::depositor_min_bond() * 10u32.into();
+		// Create pool.
+ let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + + // slash pool by half + let slash_amount: u128 = deposit_amount.into()/2; + pallet_staking::slashing::do_slash::( + &pool_account, + slash_amount.into(), + &mut pallet_staking::BalanceOf::::zero(), + &mut pallet_staking::NegativeImbalanceOf::::zero(), + EraIndex::zero() + ); + + pallet_staking::CurrentEra::::put(1); + + // new member joins the pool who should not be affected by slash. + let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); + let join_amount = min_join_bond * T::MaxUnbonding::get().into() * 2u32.into(); + let joiner = create_funded_user_with_balance::("joiner", 0, join_amount * 2u32.into()); + let joiner_lookup = T::Lookup::unlookup(joiner.clone()); + assert!(Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), join_amount, 1).is_ok()); + + // Fill member's sub pools for the worst case. + for i in 0..T::MaxUnbonding::get() { + pallet_staking::CurrentEra::::put(i + 2); // +2 because we already set the current era to 1. + assert!(Pools::::unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner_lookup.clone(), min_join_bond).is_ok()); + } + + pallet_staking::CurrentEra::::put(T::MaxUnbonding::get() + 3); + whitelist_account!(joiner); + + }: { + // Since the StakeAdapter can be different based on the runtime config, the errors could be different as well. + assert!(Pools::::apply_slash(RuntimeOrigin::Signed(joiner.clone()).into(), joiner_lookup.clone()).is_err()); + } + + + pool_migrate { + // create a pool. + let deposit_amount = Pools::::depositor_min_bond() * 2u32.into(); + let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + + // migrate pool to transfer stake. + let _ = migrate_to_transfer_stake::(1); + }: { + assert_if_delegate::(Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()).is_ok()); + } + verify { + // this queries agent balance if `DelegateStake` strategy. + assert!(T::StakeAdapter::total_balance(Pool::from(pool_account.clone())) == Some(deposit_amount)); + } + + migrate_delegation { + // create a pool. + let deposit_amount = Pools::::depositor_min_bond() * 2u32.into(); + let (depositor, pool_account) = create_pool_account::(0, deposit_amount, None); + let depositor_lookup = T::Lookup::unlookup(depositor.clone()); + + // migrate pool to transfer stake. + let _ = migrate_to_transfer_stake::(1); + + // Now migrate pool to delegate stake keeping delegators unmigrated. + assert_if_delegate::(Pools::::migrate_pool_to_delegate_stake(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into()).is_ok()); + + // delegation does not exist. + assert!(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())).is_none()); + // contribution exists in the pool. + assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); + + whitelist_account!(depositor); + }: { + assert_if_delegate::(Pools::::migrate_delegation(RuntimeOrigin::Signed(depositor.clone()).into(), depositor_lookup.clone()).is_ok()); + } + verify { + // verify balances once more. 
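+		// after a successful migration the member's stake is held in their own account as a
+		// delegation, while the pool-side accounting stays unchanged.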
+ assert_if_delegate::(T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) == Some(deposit_amount)); + assert_eq!(PoolMembers::::get(&depositor).unwrap().total_balance(), deposit_amount); + } + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(), diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs index 45e8f1f27e99a52b5900aa5e18c5439d9d7baa5c..910cdf2e3dff6353d90a2ba138c4731b0b020a0f 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs @@ -18,6 +18,7 @@ //! Benchmarks for the nomination pools coupled with the staking and bags list pallets. #![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] #[cfg(feature = "runtime-benchmarks")] pub mod inner; diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index 2752d53a6b9f3364c234d9144a6c40b10b449a5b..def98b4d2945e20c6c3475383e8e526f19b8110d 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -77,7 +77,7 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); type FreezeIdentifier = RuntimeFreezeReason; type MaxFreezes = ConstU32<1>; - type RuntimeHoldReason = (); + type RuntimeHoldReason = RuntimeHoldReason; type RuntimeFreezeReason = (); } @@ -120,7 +120,7 @@ impl pallet_staking::Config for Runtime { type MaxControllersInDeprecationBatch = ConstU32<100>; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; - type EventListeners = Pools; + type EventListeners = (Pools, DelegatedStaking); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; @@ -166,7 +166,8 @@ impl pallet_nomination_pools::Config for Runtime { type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = Staking; + type StakeAdapter = + pallet_nomination_pools::adapter::DelegateStake; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; @@ -175,6 +176,20 @@ impl pallet_nomination_pools::Config for Runtime { type AdminOrigin = frame_system::EnsureRoot; } +parameter_types! 
{ + pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk"); + pub const SlashRewardFraction: Perbill = Perbill::from_percent(1); +} +impl pallet_delegated_staking::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = DelegatedStakingPalletId; + type Currency = Balances; + type OnSlash = (); + type SlashRewardFraction = SlashRewardFraction; + type RuntimeHoldReason = RuntimeHoldReason; + type CoreStaking = Staking; +} + impl crate::Config for Runtime {} type Block = frame_system::mocking::MockBlock; @@ -187,6 +202,7 @@ frame_support::construct_runtime!( Staking: pallet_staking, VoterList: pallet_bags_list::, Pools: pallet_nomination_pools, + DelegatedStaking: pallet_delegated_staking, } ); diff --git a/substrate/frame/nomination-pools/fuzzer/src/call.rs b/substrate/frame/nomination-pools/fuzzer/src/call.rs index 027fb2b69138c64f8903f99737187b7cd65c12c2..9e10d87da6750e4171d2736eb6fb7dcbc7324640 100644 --- a/substrate/frame/nomination-pools/fuzzer/src/call.rs +++ b/substrate/frame/nomination-pools/fuzzer/src/call.rs @@ -306,7 +306,7 @@ fn main() { BondedPools::::iter().for_each(|(id, _)| { let amount = random_ed_multiple(&mut rng); let _ = - Balances::deposit_creating(&Pools::create_reward_account(id), amount); + Balances::deposit_creating(&Pools::generate_reward_account(id), amount); // if we just paid out the reward agent, let's calculate how much we expect // our reward agent to have earned. if reward_agent.pool_id.map_or(false, |mid| mid == id) { diff --git a/substrate/frame/nomination-pools/runtime-api/Cargo.toml b/substrate/frame/nomination-pools/runtime-api/Cargo.toml index 7828f26fe6fe1378942479f44f40e325da265817..a0ddac9e045675aefe7b8139e0048f3399663da8 100644 --- a/substrate/frame/nomination-pools/runtime-api/Cargo.toml +++ b/substrate/frame/nomination-pools/runtime-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-api = { path = "../../../primitives/api", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } pallet-nomination-pools = { path = "..", default-features = false } diff --git a/substrate/frame/nomination-pools/runtime-api/src/lib.rs b/substrate/frame/nomination-pools/runtime-api/src/lib.rs index 881c3c36331b64168eceed8eeebfa5b62b7e5f41..67627e0acb13de6120fdd7f9b0c0cf910e8d6dc3 100644 --- a/substrate/frame/nomination-pools/runtime-api/src/lib.rs +++ b/substrate/frame/nomination-pools/runtime-api/src/lib.rs @@ -38,5 +38,30 @@ sp_api::decl_runtime_apis! { /// Returns the equivalent points of `new_funds` for a given pool. fn balance_to_points(pool_id: PoolId, new_funds: Balance) -> Balance; + + /// Returns the pending slash for a given pool. + fn pool_pending_slash(pool_id: PoolId) -> Balance; + + /// Returns the pending slash for a given pool member. + fn member_pending_slash(member: AccountId) -> Balance; + + /// Returns true if the pool with `pool_id` needs migration. + /// + /// This can happen when the `pallet-nomination-pools` has switched to using strategy + /// [`DelegateStake`](pallet_nomination_pools::adapter::DelegateStake) but the pool + /// still has funds that were staked using the older strategy + /// [TransferStake](pallet_nomination_pools::adapter::TransferStake). 
Use
+	/// [`migrate_pool_to_delegate_stake`](pallet_nomination_pools::Call::migrate_pool_to_delegate_stake)
+	/// to migrate the pool.
+	fn pool_needs_delegate_migration(pool_id: PoolId) -> bool;
+
+	/// Returns true if the delegated funds of the pool `member` need migration.
+	///
+	/// Once a pool has successfully migrated to the strategy
+	/// [`DelegateStake`](pallet_nomination_pools::adapter::DelegateStake), the funds of the
+	/// member can be migrated from the pool account to the member's account. Use
+	/// [`migrate_delegation`](pallet_nomination_pools::Call::migrate_delegation)
+	/// to migrate the funds of the pool member.
+	fn member_needs_delegate_migration(member: AccountId) -> bool;
 	}
 }
diff --git a/substrate/frame/nomination-pools/src/adapter.rs b/substrate/frame/nomination-pools/src/adapter.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4809fbc0e9da08f3f8d84f662acec5ce5a4de6cd
--- /dev/null
+++ b/substrate/frame/nomination-pools/src/adapter.rs
@@ -0,0 +1,438 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::*;
+use sp_staking::{Agent, DelegationInterface, DelegationMigrator, Delegator};
+
+/// Types of stake strategies.
+///
+/// Useful for determining the current staking strategy of a runtime and enforcing integrity
+/// tests.
+#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebugNoBound, PartialEq)]
+pub enum StakeStrategyType {
+	/// Member funds are transferred to the pool account and staked.
+	///
+	/// This is the older staking strategy used by pools. For a new runtime, it is recommended
+	/// to use the [`StakeStrategyType::Delegate`] strategy instead.
+	Transfer,
+	/// Member funds are delegated to the pool account and staked.
+	Delegate,
+}
+
+/// A type that only belongs in the context of a pool.
+///
+/// Maps directly to the [`Agent`] account.
+#[derive(Clone, Debug)]
+pub struct Pool(T);
+impl Into> for Pool {
+	fn into(self) -> Agent {
+		Agent::from(self.0)
+	}
+}
+impl From for Pool {
+	fn from(acc: T) -> Self {
+		Pool(acc)
+	}
+}
+
+impl Pool {
+	pub fn get(self) -> T {
+		self.0
+	}
+}
+
+/// A type that only belongs in the context of a pool member.
+///
+/// Maps directly to the [`Delegator`] account.
+#[derive(Clone, Debug)]
+pub struct Member(T);
+impl Into> for Member {
+	fn into(self) -> Delegator {
+		Delegator::from(self.0)
+	}
+}
+impl From for Member {
+	fn from(acc: T) -> Self {
+		Member(acc)
+	}
+}
+
+impl Member {
+	pub fn get(self) -> T {
+		self.0
+	}
+}
+
+/// An adapter trait that can support multiple staking strategies.
+///
+/// Depending on which staking strategy we want to use, the staking logic can be slightly
+/// different. Refer to the two currently available strategies, [`TransferStake`] and
+/// [`DelegateStake`], for more detail.
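+///
+/// As a rough sketch of how a runtime selects a strategy (the `Runtime`, `Staking` and
+/// `DelegatedStaking` names are illustrative, mirroring this crate's benchmarking mock):
+///
+/// ```ignore
+/// // legacy, transfer based staking:
+/// type StakeAdapter = pallet_nomination_pools::adapter::TransferStake<Runtime, Staking>;
+/// // or the newer, delegation based staking:
+/// type StakeAdapter =
+/// 	pallet_nomination_pools::adapter::DelegateStake<Runtime, Staking, DelegatedStaking>;
+/// ```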
+pub trait StakeStrategy { + type Balance: frame_support::traits::tokens::Balance; + type AccountId: Clone + sp_std::fmt::Debug; + type CoreStaking: StakingInterface; + + /// The type of staking strategy of the current adapter. + fn strategy_type() -> StakeStrategyType; + + /// See [`StakingInterface::bonding_duration`]. + fn bonding_duration() -> EraIndex { + Self::CoreStaking::bonding_duration() + } + + /// See [`StakingInterface::current_era`]. + fn current_era() -> EraIndex { + Self::CoreStaking::current_era() + } + + /// See [`StakingInterface::minimum_nominator_bond`]. + fn minimum_nominator_bond() -> Self::Balance { + Self::CoreStaking::minimum_nominator_bond() + } + + /// Balance that can be transferred from pool account to member. + /// + /// This is part of the pool balance that is not actively staked. That is, tokens that are + /// in unbonding period or unbonded. + fn transferable_balance(pool_account: Pool) -> Self::Balance; + + /// Total balance of the pool including amount that is actively staked. + fn total_balance(pool_account: Pool) -> Option; + + /// Amount of tokens delegated by the member. + fn member_delegation_balance(member_account: Member) -> Option; + + /// See [`StakingInterface::active_stake`]. + fn active_stake(pool_account: Pool) -> Self::Balance { + Self::CoreStaking::active_stake(&pool_account.0).unwrap_or_default() + } + + /// See [`StakingInterface::total_stake`]. + fn total_stake(pool_account: Pool) -> Self::Balance { + Self::CoreStaking::total_stake(&pool_account.0).unwrap_or_default() + } + + /// Which strategy the pool account is using. + /// + /// This can be different from the [`Self::strategy_type`] of the adapter if the pool has not + /// migrated to the new strategy yet. + fn pool_strategy(pool_account: Pool) -> StakeStrategyType { + match Self::CoreStaking::is_virtual_staker(&pool_account.0) { + true => StakeStrategyType::Delegate, + false => StakeStrategyType::Transfer, + } + } + + /// See [`StakingInterface::nominate`]. + fn nominate( + pool_account: Pool, + validators: Vec, + ) -> DispatchResult { + Self::CoreStaking::nominate(&pool_account.0, validators) + } + + /// See [`StakingInterface::chill`]. + fn chill(pool_account: Pool) -> DispatchResult { + Self::CoreStaking::chill(&pool_account.0) + } + + /// Pledge `amount` towards `pool_account` and update the pool bond. Also see + /// [`StakingInterface::bond`]. + fn pledge_bond( + who: Member, + pool_account: Pool, + reward_account: &Self::AccountId, + amount: Self::Balance, + bond_type: BondType, + ) -> DispatchResult; + + /// See [`StakingInterface::unbond`]. + fn unbond(pool_account: Pool, amount: Self::Balance) -> DispatchResult { + Self::CoreStaking::unbond(&pool_account.0, amount) + } + + /// See [`StakingInterface::withdraw_unbonded`]. + fn withdraw_unbonded( + pool_account: Pool, + num_slashing_spans: u32, + ) -> Result { + Self::CoreStaking::withdraw_unbonded(pool_account.0, num_slashing_spans) + } + + /// Withdraw funds from pool account to member account. + fn member_withdraw( + who: Member, + pool_account: Pool, + amount: Self::Balance, + num_slashing_spans: u32, + ) -> DispatchResult; + + /// Check if there is any pending slash for the pool. + fn pending_slash(pool_account: Pool) -> Self::Balance; + + /// Slash the member account with `amount` against pending slashes for the pool. 
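+	///
+	/// Note that only the [`DelegateStake`] strategy supports this; with [`TransferStake`],
+	/// slashes are applied greedily by the staking system, so there is never a pending slash and
+	/// this call returns an error (see the implementations below).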
+ fn member_slash( + who: Member, + pool_account: Pool, + amount: Self::Balance, + maybe_reporter: Option, + ) -> DispatchResult; + + /// Migrate pool account from being a direct nominator to a delegated agent. + /// + /// This is useful for migrating a pool account from [`StakeStrategyType::Transfer`] to + /// [`StakeStrategyType::Delegate`]. + fn migrate_nominator_to_agent( + pool_account: Pool, + reward_account: &Self::AccountId, + ) -> DispatchResult; + + /// Migrate member balance from pool account to member account. + /// + /// This is useful for a pool account that migrated from [`StakeStrategyType::Transfer`] to + /// [`StakeStrategyType::Delegate`]. Its members can then migrate their delegated balance + /// back to their account. + /// + /// Internally, the member funds that are locked in the pool account are transferred back and + /// locked in the member account. + fn migrate_delegation( + pool: Pool, + delegator: Member, + value: Self::Balance, + ) -> DispatchResult; + + /// List of validators nominated by the pool account. + #[cfg(feature = "runtime-benchmarks")] + fn nominations(pool_account: Pool) -> Option> { + Self::CoreStaking::nominations(&pool_account.0) + } + + /// Remove the pool account as agent. + /// + /// Useful for migrating pool account from a delegated agent to a direct nominator. Only used + /// in tests and benchmarks. + #[cfg(feature = "runtime-benchmarks")] + fn remove_as_agent(_pool: Pool) { + // noop by default + } +} + +/// A staking strategy implementation that supports transfer based staking. +/// +/// In order to stake, this adapter transfers the funds from the member/delegator account to the +/// pool account and stakes through the pool account on `Staking`. +/// +/// This is the older Staking strategy used by pools. To switch to the newer [`DelegateStake`] +/// strategy in an existing runtime, storage migration is required. See +/// [`migration::unversioned::DelegationStakeMigration`]. For new runtimes, it is highly recommended +/// to use the [`DelegateStake`] strategy. +pub struct TransferStake(PhantomData<(T, Staking)>); + +impl, AccountId = T::AccountId>> + StakeStrategy for TransferStake +{ + type Balance = BalanceOf; + type AccountId = T::AccountId; + type CoreStaking = Staking; + + fn strategy_type() -> StakeStrategyType { + StakeStrategyType::Transfer + } + + fn transferable_balance(pool_account: Pool) -> BalanceOf { + T::Currency::balance(&pool_account.0).saturating_sub(Self::active_stake(pool_account)) + } + + fn total_balance(pool_account: Pool) -> Option> { + Some(T::Currency::total_balance(&pool_account.0)) + } + + fn member_delegation_balance( + _member_account: Member, + ) -> Option { + // for transfer stake, no delegation exists. 
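+		// (callers treat `None` as the member not having migrated to delegation yet)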
+		None
+	}
+
+	fn pledge_bond(
+		who: Member,
+		pool_account: Pool,
+		reward_account: &Self::AccountId,
+		amount: BalanceOf,
+		bond_type: BondType,
+	) -> DispatchResult {
+		match bond_type {
+			BondType::Create => {
+				// first bond
+				T::Currency::transfer(&who.0, &pool_account.0, amount, Preservation::Expendable)?;
+				Staking::bond(&pool_account.0, amount, &reward_account)
+			},
+			BondType::Extra => {
+				// additional bond
+				T::Currency::transfer(&who.0, &pool_account.0, amount, Preservation::Preserve)?;
+				Staking::bond_extra(&pool_account.0, amount)
+			},
+		}
+	}
+
+	fn member_withdraw(
+		who: Member,
+		pool_account: Pool,
+		amount: BalanceOf,
+		_num_slashing_spans: u32,
+	) -> DispatchResult {
+		T::Currency::transfer(&pool_account.0, &who.0, amount, Preservation::Expendable)?;
+
+		Ok(())
+	}
+
+	fn pending_slash(_: Pool) -> Self::Balance {
+		// for the transfer stake strategy, slashing is greedy and never deferred.
+		Zero::zero()
+	}
+
+	fn member_slash(
+		_who: Member,
+		_pool: Pool,
+		_amount: Staking::Balance,
+		_maybe_reporter: Option,
+	) -> DispatchResult {
+		Err(Error::::Defensive(DefensiveError::DelegationUnsupported).into())
+	}
+
+	fn migrate_nominator_to_agent(
+		_pool: Pool,
+		_reward_account: &Self::AccountId,
+	) -> DispatchResult {
+		Err(Error::::Defensive(DefensiveError::DelegationUnsupported).into())
+	}
+
+	fn migrate_delegation(
+		_pool: Pool,
+		_delegator: Member,
+		_value: Self::Balance,
+	) -> DispatchResult {
+		Err(Error::::Defensive(DefensiveError::DelegationUnsupported).into())
+	}
+}
+
+/// A staking strategy implementation that supports delegation based staking.
+///
+/// In this approach, the funds are first delegated from the delegator to the pool account and
+/// later staked with `Staking`. The advantage of this approach is that the funds are held in the
+/// user account itself and not in the pool account.
+///
+/// This is the newer staking strategy used by pools. Once switched to it and migrated, the
+/// `TransferStake` strategy should ideally not be used again; switching back would require a
+/// separate migration that this pallet does not provide.
+///
+/// Use [`migration::unversioned::DelegationStakeMigration`] to migrate to this strategy.
+pub struct DelegateStake(
+	PhantomData<(T, Staking, Delegation)>,
+);
+
+impl<
+		T: Config,
+		Staking: StakingInterface, AccountId = T::AccountId>,
+		Delegation: DelegationInterface, AccountId = T::AccountId>
+			+ DelegationMigrator, AccountId = T::AccountId>,
+	> StakeStrategy for DelegateStake
+{
+	type Balance = BalanceOf;
+	type AccountId = T::AccountId;
+	type CoreStaking = Staking;
+
+	fn strategy_type() -> StakeStrategyType {
+		StakeStrategyType::Delegate
+	}
+
+	fn transferable_balance(pool_account: Pool) -> BalanceOf {
+		Delegation::agent_balance(pool_account.clone().into())
+			// pool should always be an agent.
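+			// (if it is not, defensively fall back to zero instead of panicking)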
+ .defensive_unwrap_or_default() + .saturating_sub(Self::active_stake(pool_account)) + } + + fn total_balance(pool_account: Pool) -> Option> { + Delegation::agent_balance(pool_account.into()) + } + + fn member_delegation_balance(member_account: Member) -> Option> { + Delegation::delegator_balance(member_account.into()) + } + + fn pledge_bond( + who: Member, + pool_account: Pool, + reward_account: &Self::AccountId, + amount: BalanceOf, + bond_type: BondType, + ) -> DispatchResult { + match bond_type { + BondType::Create => { + // first delegation + Delegation::delegate(who.into(), pool_account.into(), reward_account, amount) + }, + BondType::Extra => { + // additional delegation + Delegation::delegate_extra(who.into(), pool_account.into(), amount) + }, + } + } + + fn member_withdraw( + who: Member, + pool_account: Pool, + amount: BalanceOf, + num_slashing_spans: u32, + ) -> DispatchResult { + Delegation::withdraw_delegation(who.into(), pool_account.into(), amount, num_slashing_spans) + } + + fn pending_slash(pool_account: Pool) -> Self::Balance { + Delegation::pending_slash(pool_account.into()).defensive_unwrap_or_default() + } + + fn member_slash( + who: Member, + pool_account: Pool, + amount: BalanceOf, + maybe_reporter: Option, + ) -> DispatchResult { + Delegation::delegator_slash(pool_account.into(), who.into(), amount, maybe_reporter) + } + + fn migrate_nominator_to_agent( + pool: Pool, + reward_account: &Self::AccountId, + ) -> DispatchResult { + Delegation::migrate_nominator_to_agent(pool.into(), reward_account) + } + + fn migrate_delegation( + pool: Pool, + delegator: Member, + value: Self::Balance, + ) -> DispatchResult { + Delegation::migrate_delegation(pool.into(), delegator.into(), value) + } + + #[cfg(feature = "runtime-benchmarks")] + fn remove_as_agent(pool: Pool) { + Delegation::drop_agent(pool.into()) + } +} diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 0fdb7e3eff5ccca340a5b26f00010e0240377e03..2aaea04463661d6421588e791e7fc281cd872813 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -351,6 +351,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +use adapter::{Member, Pool, StakeStrategy}; use codec::Codec; use frame_support::{ defensive, defensive_assert, ensure, @@ -397,6 +398,7 @@ pub mod mock; #[cfg(test)] mod tests; +pub mod adapter; pub mod migration; pub mod weights; @@ -425,11 +427,11 @@ pub enum ConfigOp { } /// The type of bonding that can happen to a pool. -enum BondType { +pub enum BondType { /// Someone is bonding into the pool upon creation. Create, /// Someone is adding more funds later to this pool. - Later, + Extra, } /// How to increase the bond of a member. @@ -549,9 +551,19 @@ impl PoolMember { /// Total balance of the member, both active and unbonding. /// Doesn't mutate state. - #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] - fn total_balance(&self) -> BalanceOf { - let pool = BondedPool::::get(self.pool_id).unwrap(); + /// + /// Worst case, iterates over [`TotalUnbondingPools`] member unbonding pools to calculate member + /// balance. + pub fn total_balance(&self) -> BalanceOf { + let pool = match BondedPool::::get(self.pool_id) { + Some(pool) => pool, + None => { + // this internal function is always called with a valid pool id. 
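+				// reaching this branch indicates a bug: raise a defensive error and report a
+				// zero balance rather than panic.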
+ defensive!("pool should exist; qed"); + return Zero::zero(); + }, + }; + let active_balance = pool.points_to_balance(self.active_points()); let sub_pools = match SubPoolsStorage::::get(self.pool_id) { @@ -973,12 +985,12 @@ impl BondedPool { /// Get the bonded account id of this pool. fn bonded_account(&self) -> T::AccountId { - Pallet::::create_bonded_account(self.id) + Pallet::::generate_bonded_account(self.id) } /// Get the reward account id of this pool. fn reward_account(&self) -> T::AccountId { - Pallet::::create_reward_account(self.id) + Pallet::::generate_reward_account(self.id) } /// Consume self and put into storage. @@ -995,8 +1007,7 @@ impl BondedPool { /// /// This is often used for bonding and issuing new funds into the pool. fn balance_to_point(&self, new_funds: BalanceOf) -> BalanceOf { - let bonded_balance = - T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + let bonded_balance = T::StakeAdapter::active_stake(Pool::from(self.bonded_account())); Pallet::::balance_to_point(bonded_balance, self.points, new_funds) } @@ -1004,8 +1015,7 @@ impl BondedPool { /// /// This is often used for unbonding. fn points_to_balance(&self, points: BalanceOf) -> BalanceOf { - let bonded_balance = - T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + let bonded_balance = T::StakeAdapter::active_stake(Pool::from(self.bonded_account())); Pallet::::point_to_balance(bonded_balance, self.points, points) } @@ -1052,18 +1062,6 @@ impl BondedPool { self } - /// The pools balance that is transferable provided it is expendable by staking pallet. - fn transferable_balance(&self) -> BalanceOf { - let account = self.bonded_account(); - // Note on why we can't use `Currency::reducible_balance`: Since pooled account has a - // provider (staking pallet), the account can not be set expendable by - // `pallet-nomination-pool`. This means reducible balance always returns balance preserving - // ED in the account. What we want though is transferable balance given the account can be - // dusted. - T::Currency::balance(&account) - .saturating_sub(T::Staking::active_stake(&account).unwrap_or_default()) - } - fn is_root(&self, who: &T::AccountId) -> bool { self.roles.root.as_ref().map_or(false, |root| root == who) } @@ -1127,8 +1125,7 @@ impl BondedPool { fn ok_to_be_open(&self) -> Result<(), DispatchError> { ensure!(!self.is_destroying(), Error::::CanNotChangeState); - let bonded_balance = - T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + let bonded_balance = T::StakeAdapter::active_stake(Pool::from(self.bonded_account())); ensure!(!bonded_balance.is_zero(), Error::::OverflowRisk); let points_to_balance_ratio_floor = self @@ -1257,28 +1254,17 @@ impl BondedPool { amount: BalanceOf, ty: BondType, ) -> Result, DispatchError> { - // Cache the value - let bonded_account = self.bonded_account(); - T::Currency::transfer( - who, - &bonded_account, - amount, - match ty { - BondType::Create => Preservation::Expendable, - BondType::Later => Preservation::Preserve, - }, - )?; // We must calculate the points issued *before* we bond who's funds, else points:balance // ratio will be wrong. let points_issued = self.issue(amount); - match ty { - BondType::Create => T::Staking::bond(&bonded_account, amount, &self.reward_account())?, - // The pool should always be created in such a way its in a state to bond extra, but if - // the active balance is slashed below the minimum bonded or the account cannot be - // found, we exit early. 
- BondType::Later => T::Staking::bond_extra(&bonded_account, amount)?, - } + T::StakeAdapter::pledge_bond( + Member::from(who.clone()), + Pool::from(self.bonded_account()), + &self.reward_account(), + amount, + ty, + )?; TotalValueLocked::::mutate(|tvl| { tvl.saturating_accrue(amount); }); @@ -1456,7 +1442,7 @@ impl RewardPool { /// This is sum of all the rewards that are claimable by pool members. fn current_balance(id: PoolId) -> BalanceOf { T::Currency::reducible_balance( - &Pallet::::create_reward_account(id), + &Pallet::::generate_reward_account(id), Preservation::Expendable, Fortitude::Polite, ) @@ -1569,7 +1555,7 @@ impl Get for TotalUnbondingPools { // NOTE: this may be dangerous in the scenario bonding_duration gets decreased because // we would no longer be able to decode `BoundedBTreeMap::, // TotalUnbondingPools>`, which uses `TotalUnbondingPools` as the bound - T::Staking::bonding_duration() + T::PostUnbondingPoolsWindow::get() + T::StakeAdapter::bonding_duration() + T::PostUnbondingPoolsWindow::get() } } @@ -1646,7 +1632,9 @@ pub mod pallet { type U256ToBalance: Convert>; /// The interface for nominating. - type Staking: StakingInterface, AccountId = Self::AccountId>; + /// + /// Note: Switching to a new [`StakeStrategy`] might require a migration of the storage. + type StakeAdapter: StakeStrategy>; /// The amount of eras a `SubPools::with_era` pool can exist before it gets merged into the /// `SubPools::no_era` pool. In other words, this is the amount of eras a member will be @@ -1950,6 +1938,14 @@ pub mod pallet { BondExtraRestricted, /// No imbalance in the ED deposit for the pool. NothingToAdjust, + /// No slash pending that can be applied to the member. + NothingToSlash, + /// The pool or member delegation has already migrated to delegate stake. + AlreadyMigrated, + /// The pool or member delegation has not migrated yet to delegate stake. + NotMigrated, + /// This call is not allowed in the current state of the pallet. + NotSupported, } #[derive(Encode, Decode, PartialEq, TypeInfo, PalletError, RuntimeDebug)] @@ -1965,6 +1961,10 @@ pub mod pallet { /// The bonded account should only be killed by the staking system when the depositor is /// withdrawing BondedStashKilledPrematurely, + /// The delegation feature is unsupported. + DelegationUnsupported, + /// Unable to slash to the member of the pool. + SlashNotApplied, } impl From for Error { @@ -2019,7 +2019,7 @@ pub mod pallet { )?; bonded_pool.try_inc_members()?; - let points_issued = bonded_pool.try_bond_funds(&who, amount, BondType::Later)?; + let points_issued = bonded_pool.try_bond_funds(&who, amount, BondType::Extra)?; PoolMembers::insert( who.clone(), @@ -2141,12 +2141,12 @@ pub mod pallet { &mut reward_pool, )?; - let current_era = T::Staking::current_era(); - let unbond_era = T::Staking::bonding_duration().saturating_add(current_era); + let current_era = T::StakeAdapter::current_era(); + let unbond_era = T::StakeAdapter::bonding_duration().saturating_add(current_era); // Unbond in the actual underlying nominator. let unbonding_balance = bonded_pool.dissolve(unbonding_points); - T::Staking::unbond(&bonded_pool.bonded_account(), unbonding_balance)?; + T::StakeAdapter::unbond(Pool::from(bonded_pool.bonded_account()), unbonding_balance)?; // Note that we lazily create the unbonding pools here if they don't already exist let mut sub_pools = SubPoolsStorage::::get(member.pool_id) @@ -2209,7 +2209,10 @@ pub mod pallet { // For now we only allow a pool to withdraw unbonded if its not destroying. 
If the pool // is destroying then `withdraw_unbonded` can be used. ensure!(pool.state != PoolState::Destroying, Error::::NotDestroying); - T::Staking::withdraw_unbonded(pool.bonded_account(), num_slashing_spans)?; + T::StakeAdapter::withdraw_unbonded( + Pool::from(pool.bonded_account()), + num_slashing_spans, + )?; Ok(()) } @@ -2232,7 +2235,10 @@ pub mod pallet { /// /// # Note /// - /// If the target is the depositor, the pool will be destroyed. + /// - If the target is the depositor, the pool will be destroyed. + /// - If the pool has any pending slash, we also try to slash the member before letting them + /// withdraw. This calculation adds some weight overhead and is only defensive. In reality, + /// pool slashes must have been already applied via permissionless [`Call::apply_slash`]. #[pallet::call_index(5)] #[pallet::weight( T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans) @@ -2246,23 +2252,43 @@ pub mod pallet { let member_account = T::Lookup::lookup(member_account)?; let mut member = PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?; - let current_era = T::Staking::current_era(); + let current_era = T::StakeAdapter::current_era(); let bonded_pool = BondedPool::::get(member.pool_id) .defensive_ok_or::>(DefensiveError::PoolNotFound.into())?; let mut sub_pools = SubPoolsStorage::::get(member.pool_id).ok_or(Error::::SubPoolsNotFound)?; + let slash_weight = + // apply slash if any before withdraw. + match Self::do_apply_slash(&member_account, None) { + Ok(_) => T::WeightInfo::apply_slash(), + Err(e) => { + let no_pending_slash: DispatchResult = Err(Error::::NothingToSlash.into()); + // This is an expected error. We add appropriate fees and continue withdrawal. + if Err(e) == no_pending_slash { + T::WeightInfo::apply_slash_fail() + } else { + // defensive: if we can't apply slash for some reason, we abort. + return Err(Error::::Defensive(DefensiveError::SlashNotApplied).into()); + } + } + + }; + bonded_pool.ok_to_withdraw_unbonded_with(&caller, &member_account)?; + let pool_account = bonded_pool.bonded_account(); // NOTE: must do this after we have done the `ok_to_withdraw_unbonded_other_with` check. let withdrawn_points = member.withdraw_unlocked(current_era); ensure!(!withdrawn_points.is_empty(), Error::::CannotWithdrawAny); // Before calculating the `balance_to_unbond`, we call withdraw unbonded to ensure the - // `transferrable_balance` is correct. - let stash_killed = - T::Staking::withdraw_unbonded(bonded_pool.bonded_account(), num_slashing_spans)?; + // `transferable_balance` is correct. + let stash_killed = T::StakeAdapter::withdraw_unbonded( + Pool::from(bonded_pool.bonded_account()), + num_slashing_spans, + )?; // defensive-only: the depositor puts enough funds into the stash so that it will only // be destroyed when they are leaving. @@ -2271,6 +2297,20 @@ pub mod pallet { Error::::Defensive(DefensiveError::BondedStashKilledPrematurely) ); + if stash_killed { + // Maybe an extra consumer left on the pool account, if so, remove it. + if frame_system::Pallet::::consumers(&pool_account) == 1 { + frame_system::Pallet::::dec_consumers(&pool_account); + } + + // Note: This is not pretty, but we have to do this because of a bug where old pool + // accounts might have had an extra consumer increment. We know at this point no + // other pallet should depend on pool account so safe to do this. 
+			// Refer to the following issues:
+			// - https://github.com/paritytech/polkadot-sdk/issues/4440
+			// - https://github.com/paritytech/polkadot-sdk/issues/2037
+		}
+
 		let mut sum_unlocked_points: BalanceOf = Zero::zero();
 		let balance_to_unbond = withdrawn_points
 			.iter()
@@ -2295,15 +2335,18 @@
 			// don't exist. This check is also defensive in cases where the unbond pool does not
 			// update its balance (e.g. a bug in the slashing hook.) We gracefully proceed in
 			// order to ensure members can leave the pool and it can be destroyed.
-			.min(bonded_pool.transferable_balance());
-
-		T::Currency::transfer(
-			&bonded_pool.bonded_account(),
-			&member_account,
+			.min(T::StakeAdapter::transferable_balance(Pool::from(
+				bonded_pool.bonded_account(),
+			)));
+
+		// this can fail if the pool uses the `DelegateStake` strategy and the member delegation
+		// has not been claimed yet. See `Call::migrate_delegation()`.
+		T::StakeAdapter::member_withdraw(
+			Member::from(member_account.clone()),
+			Pool::from(bonded_pool.bonded_account()),
 			balance_to_unbond,
-			Preservation::Expendable,
-		)
-		.defensive()?;
+			num_slashing_spans,
+		)?;

 		Self::deposit_event(Event::::Withdrawn {
 			member: member_account.clone(),
@@ -2325,20 +2368,20 @@
 			if member_account == bonded_pool.roles.depositor {
 				Pallet::::dissolve_pool(bonded_pool);
-				None
+				Weight::default()
 			} else {
 				bonded_pool.dec_members().put();
 				SubPoolsStorage::::insert(member.pool_id, sub_pools);
-				Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans))
+				T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)
 			}
 		} else {
 			// we certainly don't need to delete any pools, because no one is being removed.
 			SubPoolsStorage::::insert(member.pool_id, sub_pools);
 			PoolMembers::::insert(&member_account, member);
-			Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans))
+			T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)
 		};

-		Ok(post_info_weight.into())
+		Ok(Some(post_info_weight.saturating_add(slash_weight)).into())
 	}

 	/// Create a new delegation pool.
@@ -2433,7 +2476,7 @@
 			Error::::MinimumBondNotMet
 		);

-		T::Staking::nominate(&bonded_pool.bonded_account(), validators)
+		T::StakeAdapter::nominate(Pool::from(bonded_pool.bonded_account()), validators)
 	}

 	/// Set a new state for the pool.
@@ -2621,12 +2664,12 @@
 			.active_points();

 		if bonded_pool.points_to_balance(depositor_points) >=
-			T::Staking::minimum_nominator_bond()
+			T::StakeAdapter::minimum_nominator_bond()
 		{
 			ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator);
 		}

-		T::Staking::chill(&bonded_pool.bonded_account())
+		T::StakeAdapter::chill(Pool::from(bonded_pool.bonded_account()))
 	}

 	/// `origin` bonds funds from `extra` for some pool member `member` into their respective
@@ -2823,6 +2866,120 @@
 			Ok(())
 		}
+
+		/// Apply a pending slash on a member.
+		///
+		/// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type:
+		/// [`adapter::StakeStrategyType::Delegate`].
+		///
+		/// This call can be dispatched permissionlessly (i.e. by any account). If the member has
+		/// a pending slash to be applied, the caller may be rewarded with part of the slash.
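+		///
+		/// How large the caller's share is, if any, is decided by the delegation implementation
+		/// backing the adapter, e.g. the `SlashRewardFraction` that `pallet-delegated-staking`
+		/// is configured with in this crate's benchmarking mock.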
+ #[pallet::call_index(23)] + #[pallet::weight(T::WeightInfo::apply_slash())] + pub fn apply_slash( + origin: OriginFor, + member_account: AccountIdLookupOf, + ) -> DispatchResultWithPostInfo { + ensure!( + T::StakeAdapter::strategy_type() == adapter::StakeStrategyType::Delegate, + Error::::NotSupported + ); + + let who = ensure_signed(origin)?; + let member_account = T::Lookup::lookup(member_account)?; + Self::do_apply_slash(&member_account, Some(who))?; + + // If successful, refund the fees. + Ok(Pays::No.into()) + } + + /// Migrates delegated funds from the pool account to the `member_account`. + /// + /// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type: + /// [`adapter::StakeStrategyType::Delegate`]. + /// + /// This is a permission-less call and refunds any fee if claim is successful. + /// + /// If the pool has migrated to delegation based staking, the staked tokens of pool members + /// can be moved and held in their own account. See [`adapter::DelegateStake`] + #[pallet::call_index(24)] + #[pallet::weight(T::WeightInfo::migrate_delegation())] + pub fn migrate_delegation( + origin: OriginFor, + member_account: AccountIdLookupOf, + ) -> DispatchResultWithPostInfo { + let _caller = ensure_signed(origin)?; + + ensure!( + T::StakeAdapter::strategy_type() == adapter::StakeStrategyType::Delegate, + Error::::NotSupported + ); + + let member_account = T::Lookup::lookup(member_account)?; + let member = + PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?; + + // ensure pool is migrated. + ensure!( + T::StakeAdapter::pool_strategy(Pool::from(Self::generate_bonded_account( + member.pool_id + ))) == adapter::StakeStrategyType::Delegate, + Error::::NotMigrated + ); + + let pool_contribution = member.total_balance(); + ensure!(pool_contribution >= MinJoinBond::::get(), Error::::MinimumBondNotMet); + // the member must have some contribution to be migrated. + ensure!(pool_contribution > Zero::zero(), Error::::AlreadyMigrated); + + let delegation = + T::StakeAdapter::member_delegation_balance(Member::from(member_account.clone())); + // delegation should not exist. + ensure!(delegation.is_none(), Error::::AlreadyMigrated); + + T::StakeAdapter::migrate_delegation( + Pool::from(Pallet::::generate_bonded_account(member.pool_id)), + Member::from(member_account), + pool_contribution, + )?; + + // if successful, we refund the fee. + Ok(Pays::No.into()) + } + + /// Migrate pool from [`adapter::StakeStrategyType::Transfer`] to + /// [`adapter::StakeStrategyType::Delegate`]. + /// + /// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type: + /// [`adapter::StakeStrategyType::Delegate`]. + /// + /// This call can be dispatched permissionlessly, and refunds any fee if successful. + /// + /// If the pool has already migrated to delegation based staking, this call will fail. + #[pallet::call_index(25)] + #[pallet::weight(T::WeightInfo::pool_migrate())] + pub fn migrate_pool_to_delegate_stake( + origin: OriginFor, + pool_id: PoolId, + ) -> DispatchResultWithPostInfo { + // gate this call to be called only if `DelegateStake` strategy is used. + ensure!( + T::StakeAdapter::strategy_type() == adapter::StakeStrategyType::Delegate, + Error::::NotSupported + ); + + let _caller = ensure_signed(origin)?; + // ensure pool exists. 
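+			// (and, just below, that it is still on the legacy `Transfer` strategy, i.e. has
+			// not already been migrated)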
+ let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + ensure!( + T::StakeAdapter::pool_strategy(Pool::from(bonded_pool.bonded_account())) == + adapter::StakeStrategyType::Transfer, + Error::::AlreadyMigrated + ); + + Self::migrate_to_delegate_stake(pool_id)?; + Ok(Pays::No.into()) + } } #[pallet::hooks] @@ -2838,7 +2995,7 @@ pub mod pallet { "Minimum points to balance ratio must be greater than 0" ); assert!( - T::Staking::bonding_duration() < TotalUnbondingPools::::get(), + T::StakeAdapter::bonding_duration() < TotalUnbondingPools::::get(), "There must be more unbonding pools then the bonding duration / so a slash can be applied to relevant unbonding pools. (We assume / the bonding duration > slash deffer duration.", @@ -2856,7 +3013,7 @@ impl Pallet { /// It is essentially `max { MinNominatorBond, MinCreateBond, MinJoinBond }`, where the former /// is coming from the staking pallet and the latter two are configured in this pallet. pub fn depositor_min_bond() -> BalanceOf { - T::Staking::minimum_nominator_bond() + T::StakeAdapter::minimum_nominator_bond() .max(MinCreateBond::::get()) .max(MinJoinBond::::get()) .max(T::Currency::minimum_balance()) @@ -2892,7 +3049,7 @@ impl Pallet { "bonded account of dissolving pool should have no consumers" ); defensive_assert!( - T::Staking::total_stake(&bonded_account).unwrap_or_default() == Zero::zero(), + T::StakeAdapter::total_stake(Pool::from(bonded_pool.bonded_account())) == Zero::zero(), "dissolving pool should not have any stake in the staking pallet" ); @@ -2915,11 +3072,14 @@ impl Pallet { "could not transfer all amount to depositor while dissolving pool" ); defensive_assert!( - T::Currency::total_balance(&bonded_pool.bonded_account()) == Zero::zero(), + T::StakeAdapter::total_balance(Pool::from(bonded_pool.bonded_account())) + .unwrap_or_default() == + Zero::zero(), "dissolving pool should not have any balance" ); // NOTE: Defensively force set balance to zero. T::Currency::set_balance(&reward_account, Zero::zero()); + // NOTE: With `DelegateStake` strategy, this won't do anything. T::Currency::set_balance(&bonded_pool.bonded_account(), Zero::zero()); Self::deposit_event(Event::::Destroyed { pool_id: bonded_pool.id }); @@ -2930,12 +3090,19 @@ impl Pallet { } /// Create the main, bonded account of a pool with the given id. - pub fn create_bonded_account(id: PoolId) -> T::AccountId { + pub fn generate_bonded_account(id: PoolId) -> T::AccountId { T::PalletId::get().into_sub_account_truncating((AccountType::Bonded, id)) } + fn migrate_to_delegate_stake(id: PoolId) -> DispatchResult { + T::StakeAdapter::migrate_nominator_to_agent( + Pool::from(Self::generate_bonded_account(id)), + &Self::generate_reward_account(id), + ) + } + /// Create the reward account of a pool with the given id. - pub fn create_reward_account(id: PoolId) -> T::AccountId { + pub fn generate_reward_account(id: PoolId) -> T::AccountId { // NOTE: in order to have a distinction in the test account id type (u128), we put // account_type first so it does not get truncated out. 
T::PalletId::get().into_sub_account_truncating((AccountType::Reward, id)) @@ -3179,9 +3346,9 @@ impl Pallet { let (points_issued, bonded) = match extra { BondExtra::FreeBalance(amount) => - (bonded_pool.try_bond_funds(&member_account, amount, BondType::Later)?, amount), + (bonded_pool.try_bond_funds(&member_account, amount, BondType::Extra)?, amount), BondExtra::Rewards => - (bonded_pool.try_bond_funds(&member_account, claimed, BondType::Later)?, claimed), + (bonded_pool.try_bond_funds(&member_account, claimed, BondType::Extra)?, claimed), }; bonded_pool.ok_to_be_open()?; @@ -3302,6 +3469,64 @@ impl Pallet { Ok(()) } + /// Slash member against the pending slash for the pool. + fn do_apply_slash( + member_account: &T::AccountId, + reporter: Option, + ) -> DispatchResult { + let member = PoolMembers::::get(member_account).ok_or(Error::::PoolMemberNotFound)?; + + let pending_slash = + Self::member_pending_slash(Member::from(member_account.clone()), member.clone())?; + + // if nothing to slash, return error. + ensure!(!pending_slash.is_zero(), Error::::NothingToSlash); + + T::StakeAdapter::member_slash( + Member::from(member_account.clone()), + Pool::from(Pallet::::generate_bonded_account(member.pool_id)), + pending_slash, + reporter, + ) + } + + /// Pending slash for a member. + /// + /// Takes the pool_member object corresponding to the `member_account`. + fn member_pending_slash( + member_account: Member, + pool_member: PoolMember, + ) -> Result, DispatchError> { + // only executed in tests: ensure the member account is correct. + debug_assert!( + PoolMembers::::get(member_account.clone().get()).expect("member must exist") == + pool_member + ); + + let pool_account = Pallet::::generate_bonded_account(pool_member.pool_id); + // if the pool doesn't have any pending slash, it implies the member also does not have any + // pending slash. + if T::StakeAdapter::pending_slash(Pool::from(pool_account.clone())).is_zero() { + return Ok(Zero::zero()) + } + + // this is their actual held balance that may or may not have been slashed. + let actual_balance = T::StakeAdapter::member_delegation_balance(member_account) + // no delegation implies the member delegation is not migrated yet to `DelegateStake`. + .ok_or(Error::::NotMigrated)?; + + // this is their balance in the pool + let expected_balance = pool_member.total_balance(); + + defensive_assert!( + actual_balance >= expected_balance, + "actual balance should always be greater or equal to the expected" + ); + + // return the amount to be slashed. + Ok(actual_balance.defensive_saturating_sub(expected_balance)) + } + /// Apply freeze on reward account to restrict it from going below ED. 
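The `member_pending_slash` helper above boils down to simple balance arithmetic. A self-contained sketch with plain integers standing in for `BalanceOf<T>`:

```rust
/// Sketch of the arithmetic at the heart of `member_pending_slash`: a pool-level
/// slash immediately lowers a member's bookkept pool value (computed from points),
/// while their held delegation only shrinks once the slash is applied to them.
/// The difference is what `apply_slash` still has to take.
fn pending_slash(actual_delegated: u128, expected_in_pool: u128) -> u128 {
    // Mirrors `actual_balance.defensive_saturating_sub(expected_balance)`:
    // saturates instead of underflowing if bookkeeping were ever inconsistent.
    actual_delegated.saturating_sub(expected_in_pool)
}

#[test]
fn pending_slash_examples() {
    assert_eq!(pending_slash(100, 75), 25); // pool slashed by 25, not yet applied
    assert_eq!(pending_slash(75, 75), 0); // slash fully applied, nothing pending
}
```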
pub(crate) fn freeze_pool_deposit(reward_acc: &T::AccountId) -> DispatchResult { T::Currency::set_freeze( @@ -3380,7 +3605,7 @@ impl Pallet { ); for id in reward_pools { - let account = Self::create_reward_account(id); + let account = Self::generate_reward_account(id); if T::Currency::reducible_balance(&account, Preservation::Expendable, Fortitude::Polite) < T::Currency::minimum_balance() { @@ -3465,8 +3690,7 @@ impl Pallet { pool is being destroyed and the depositor is the last member", ); - expected_tvl += - T::Staking::total_stake(&bonded_pool.bonded_account()).unwrap_or_default(); + expected_tvl += T::StakeAdapter::total_stake(Pool::from(bonded_pool.bonded_account())); Ok(()) })?; @@ -3491,12 +3715,15 @@ impl Pallet { } for (pool_id, _pool) in BondedPools::::iter() { - let pool_account = Pallet::::create_bonded_account(pool_id); + let pool_account = Pallet::::generate_bonded_account(pool_id); let subs = SubPoolsStorage::::get(pool_id).unwrap_or_default(); let sum_unbonding_balance = subs.sum_unbonding_balance(); - let bonded_balance = T::Staking::active_stake(&pool_account).unwrap_or_default(); - let total_balance = T::Currency::total_balance(&pool_account); + let bonded_balance = T::StakeAdapter::active_stake(Pool::from(pool_account.clone())); + let total_balance = T::StakeAdapter::total_balance(Pool::from(pool_account.clone())) + // At the time when StakeAdapter is changed to `DelegateStake` but pool is not yet + // migrated, the total balance would be none. + .unwrap_or(T::Currency::total_balance(&pool_account)); assert!( total_balance >= bonded_balance + sum_unbonding_balance, @@ -3529,7 +3756,7 @@ impl Pallet { pub fn check_ed_imbalance() -> Result<(), DispatchError> { let mut failed: u32 = 0; BondedPools::::iter_keys().for_each(|id| { - let reward_acc = Self::create_reward_account(id); + let reward_acc = Self::generate_reward_account(id); let frozen_balance = T::Currency::balance_frozen(&FreezeReason::PoolMinBalance.into(), &reward_acc); @@ -3600,12 +3827,75 @@ impl Pallet { pub fn api_balance_to_points(pool_id: PoolId, new_funds: BalanceOf) -> BalanceOf { if let Some(pool) = BondedPool::::get(pool_id) { let bonded_balance = - T::Staking::active_stake(&pool.bonded_account()).unwrap_or(Zero::zero()); + T::StakeAdapter::active_stake(Pool::from(Self::generate_bonded_account(pool_id))); Pallet::::balance_to_point(bonded_balance, pool.points, new_funds) } else { Zero::zero() } } + + /// Returns the unapplied slash of the pool. + /// + /// Pending slash is only applicable with [`adapter::DelegateStake`] strategy. + pub fn api_pool_pending_slash(pool_id: PoolId) -> BalanceOf { + T::StakeAdapter::pending_slash(Pool::from(Self::generate_bonded_account(pool_id))) + } + + /// Returns the unapplied slash of a member. + /// + /// Pending slash is only applicable with [`adapter::DelegateStake`] strategy. + pub fn api_member_pending_slash(who: T::AccountId) -> BalanceOf { + PoolMembers::::get(who.clone()) + .map(|pool_member| { + Self::member_pending_slash(Member::from(who), pool_member).unwrap_or_default() + }) + .unwrap_or_default() + } + + /// Checks whether pool needs to be migrated to [`adapter::StakeStrategyType::Delegate`]. Only + /// applicable when the [`Config::StakeAdapter`] is [`adapter::DelegateStake`]. + /// + /// Useful to check this before calling [`Call::migrate_pool_to_delegate_stake`]. + pub fn api_pool_needs_delegate_migration(pool_id: PoolId) -> bool { + // if the `Delegate` strategy is not used in the pallet, then no migration required. 
+ if T::StakeAdapter::strategy_type() != adapter::StakeStrategyType::Delegate { + return false + } + + let pool_account = Self::generate_bonded_account(pool_id); + // true if pool is still not migrated to `DelegateStake`. + T::StakeAdapter::pool_strategy(Pool::from(pool_account)) != + adapter::StakeStrategyType::Delegate + } + + /// Checks whether member delegation needs to be migrated to + /// [`adapter::StakeStrategyType::Delegate`]. Only applicable when the [`Config::StakeAdapter`] + /// is [`adapter::DelegateStake`]. + /// + /// Useful to check this before calling [`Call::migrate_delegation`]. + pub fn api_member_needs_delegate_migration(who: T::AccountId) -> bool { + // if the `Delegate` strategy is not used in the pallet, then no migration required. + if T::StakeAdapter::strategy_type() != adapter::StakeStrategyType::Delegate { + return false + } + + PoolMembers::::get(who.clone()) + .map(|pool_member| { + if Self::api_pool_needs_delegate_migration(pool_member.pool_id) { + // the pool needs to be migrated before members can be migrated. + return false + } + + let member_balance = pool_member.total_balance(); + let delegated_balance = + T::StakeAdapter::member_delegation_balance(Member::from(who.clone())); + + // if the member has no delegation but has some balance in the pool, then it needs + // to be migrated. + delegated_balance.is_none() && !member_balance.is_zero() + }) + .unwrap_or_default() + } } impl sp_staking::OnStakingUpdate> for Pallet { diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index 796b310862afcb8b897d4dafd66395e33e1970fb..a9222ea53d75fde59ac47ca0c6fd4d209aa3177b 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -107,6 +107,137 @@ pub mod unversioned { Ok(()) } } + + /// Migrate existing pools from [`adapter::StakeStrategyType::Transfer`] to + /// [`adapter::StakeStrategyType::Delegate`]. + /// + /// Note: This only migrates the pools, the members are not migrated. They can use the + /// permission-less [`Pallet::migrate_delegation()`] to migrate their funds. + /// + /// This migration does not break any existing pool storage item, does not need to happen in any + /// sequence and hence can be applied unversioned on a production runtime. + /// + /// Takes `MaxPools` as type parameter to limit the number of pools that should be migrated in a + /// single block. It should be set such that migration weight does not exceed the block weight + /// limit. If all pools can be safely migrated, it is good to keep this number a little higher + /// than the actual number of pools to handle any extra pools created while the migration is + /// proposed, and before it is executed. + /// + /// If there are pools that fail to migrate or did not fit in the bounds, the remaining pools + /// can be migrated via the permission-less extrinsic [`Call::migrate_pool_to_delegate_stake`]. + pub struct DelegationStakeMigration(sp_std::marker::PhantomData<(T, MaxPools)>); + + impl> OnRuntimeUpgrade for DelegationStakeMigration { + fn on_runtime_upgrade() -> Weight { + let mut count: u32 = 0; + + BondedPools::::iter_keys().take(MaxPools::get() as usize).for_each(|id| { + let pool_acc = Pallet::::generate_bonded_account(id); + + // only migrate if the pool is in Transfer Strategy. 
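These `api_*` helpers pair with the permissionless extrinsics to enable cheap off-chain upkeep: probe first, dispatch only when needed. A hypothetical keeper loop sketched against the pallet's public functions (the `keeper` account and iteration bounds are illustrative, not part of this diff):

```rust
use frame_system::RawOrigin;
use pallet_nomination_pools::{Pallet, PoolId};

/// Migrate every pool still on the legacy `Transfer` strategy. Both the probe and
/// the dispatch use items introduced in this diff; successful calls refund their
/// fee (`Pays::No`), so honest upkeep is free for the keeper.
fn run_keeper<T: pallet_nomination_pools::Config>(
    keeper: T::AccountId,
    pool_ids: impl Iterator<Item = PoolId>,
) {
    for pool_id in pool_ids {
        // Cheap read-only check first: avoids paying for a dispatch that would fail.
        if Pallet::<T>::api_pool_needs_delegate_migration(pool_id) {
            let _ = Pallet::<T>::migrate_pool_to_delegate_stake(
                RawOrigin::Signed(keeper.clone()).into(),
                pool_id,
            );
        }
        // Members follow the same shape via `api_member_needs_delegate_migration`
        // and the permissionless `migrate_delegation` call.
    }
}
```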
+				if T::StakeAdapter::pool_strategy(Pool::from(pool_acc)) ==
+					adapter::StakeStrategyType::Transfer
+				{
+					let _ = Pallet::<T>::migrate_to_delegate_stake(id).map_err(|err| {
+						log!(
+							warn,
+							"failed to migrate pool {:?} to delegate stake strategy with err: {:?}",
+							id,
+							err
+						)
+					});
+					count.saturating_inc();
+				}
+			});
+
+			log!(info, "migrated {:?} pools to delegate stake strategy", count);
+
+			// reads: (bonded pool key + current pool strategy) * MaxPools (worst case)
+			T::DbWeight::get()
+				.reads_writes(2, 0)
+				.saturating_mul(MaxPools::get() as u64)
+				// migration weight: `pool_migrate` weight * count
+				.saturating_add(T::WeightInfo::pool_migrate().saturating_mul(count.into()))
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
+			// ensure stake adapter is correct.
+			ensure!(
+				T::StakeAdapter::strategy_type() == adapter::StakeStrategyType::Delegate,
+				"Current strategy is not `Delegate`"
+			);
+
+			if BondedPools::<T>::count() > MaxPools::get() {
+				// we log a warning if the number of pools exceeds the bound.
+				log!(
+					warn,
+					"Number of pools {} exceeds the maximum bound {}. This would leave some pools unmigrated.",
+					BondedPools::<T>::count(),
+					MaxPools::get()
+				);
+			}
+
+			let mut pool_balances: Vec<BalanceOf<T>> = Vec::new();
+			BondedPools::<T>::iter_keys().take(MaxPools::get() as usize).for_each(|id| {
+				let pool_account = Pallet::<T>::generate_bonded_account(id);
+
+				// we ensure migration is idempotent.
+				let pool_balance = T::StakeAdapter::total_balance(Pool::from(pool_account.clone()))
+					// we check the actual account balance if the pool has not migrated yet.
+					.unwrap_or(T::Currency::total_balance(&pool_account));
+
+				pool_balances.push(pool_balance);
+			});
+
+			Ok(pool_balances.encode())
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(data: Vec<u8>) -> Result<(), TryRuntimeError> {
+			let expected_pool_balances: Vec<BalanceOf<T>> = Decode::decode(&mut &data[..]).unwrap();
+
+			for (index, id) in
+				BondedPools::<T>::iter_keys().take(MaxPools::get() as usize).enumerate()
+			{
+				let pool_account = Pallet::<T>::generate_bonded_account(id);
+				if T::StakeAdapter::pool_strategy(Pool::from(pool_account.clone())) ==
+					adapter::StakeStrategyType::Transfer
+				{
+					log!(error, "Pool {} failed to migrate", id);
+					return Err(TryRuntimeError::Other("Pool failed to migrate"));
+				}
+
+				let actual_balance =
+					T::StakeAdapter::total_balance(Pool::from(pool_account.clone()))
+						.expect("after migration, this should return a value");
+				let expected_balance = expected_pool_balances.get(index).unwrap();
+
+				if actual_balance != *expected_balance {
+					log!(
+						error,
+						"Pool {} balance mismatch. Expected: {:?}, Actual: {:?}",
+						id,
+						expected_balance,
+						actual_balance
+					);
+					return Err(TryRuntimeError::Other("Pool balance mismatch"));
+				}
+
+				// account balance should be zero.
+				let pool_account_balance = T::Currency::total_balance(&pool_account);
+				if pool_account_balance != Zero::zero() {
+					log!(
+						error,
+						"Pool account balance was expected to be zero.
Pool: {}, Balance: {:?}", + id, + pool_account_balance + ); + return Err(TryRuntimeError::Other("Pool account balance not migrated")); + } + } + + Ok(()) + } + } } pub mod v8 { @@ -201,7 +332,7 @@ pub(crate) mod v7 { impl V7BondedPool { #[allow(dead_code)] fn bonded_account(&self) -> T::AccountId { - Pallet::::create_bonded_account(self.id) + Pallet::::generate_bonded_account(self.id) } } @@ -275,7 +406,7 @@ mod v6 { impl MigrateToV6 { fn freeze_ed(pool_id: PoolId) -> Result<(), ()> { - let reward_acc = Pallet::::create_reward_account(pool_id); + let reward_acc = Pallet::::generate_reward_account(pool_id); Pallet::::freeze_pool_deposit(&reward_acc).map_err(|e| { log!(error, "Failed to freeze ED for pool {} with error: {:?}", pool_id, e); () @@ -760,7 +891,7 @@ pub mod v2 { }; let accumulated_reward = RewardPool::::current_balance(id); - let reward_account = Pallet::::create_reward_account(id); + let reward_account = Pallet::::generate_reward_account(id); let mut sum_paid_out = BalanceOf::::zero(); members @@ -882,7 +1013,7 @@ pub mod v2 { // all reward accounts must have more than ED. RewardPools::::iter().try_for_each(|(id, _)| -> Result<(), TryRuntimeError> { ensure!( - >::balance(&Pallet::::create_reward_account(id)) >= + >::balance(&Pallet::::generate_reward_account(id)) >= T::Currency::minimum_balance(), "Reward accounts must have greater balance than ED." ); @@ -1022,10 +1153,9 @@ mod helpers { use super::*; pub(crate) fn calculate_tvl_by_total_stake() -> BalanceOf { - BondedPools::::iter() - .map(|(id, inner)| { - T::Staking::total_stake(&BondedPool { id, inner: inner.clone() }.bonded_account()) - .unwrap_or_default() + BondedPools::::iter_keys() + .map(|id| { + T::StakeAdapter::total_stake(Pool::from(Pallet::::generate_bonded_account(id))) }) .reduce(|acc, total_balance| acc + total_balance) .unwrap_or_default() diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index 686402b843492e8f92bb26231a7ed3becf0c0e03..b659c975a8395c0d8afb486909da3695a28ba102 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -36,12 +36,12 @@ pub type Currency = ::Currency; // Ext builder creates a pool with id 1. pub fn default_bonded_account() -> AccountId { - Pools::create_bonded_account(1) + Pools::generate_bonded_account(1) } // Ext builder creates a pool with id 1. pub fn default_reward_account() -> AccountId { - Pools::create_reward_account(1) + Pools::generate_reward_account(1) } parameter_types! { @@ -71,7 +71,7 @@ impl StakingMock { /// Does not modify any [`SubPools`] of the pool as [`Default::default`] is passed for /// `slashed_unlocking`. 
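One detail of `DelegationStakeMigration::on_runtime_upgrade` above worth spelling out is its weight accounting: two reads are charged per inspected pool regardless of outcome, plus the full `pool_migrate` weight for each pool actually migrated. A sketch of that formula (parameter names are illustrative):

```rust
use frame_support::weights::Weight;

/// Sketch of the migration's returned weight:
///   weight = (2 reads) * MaxPools + pool_migrate_weight * migrated_count
fn migration_weight(
    read_weight: Weight,  // e.g. `T::DbWeight::get().reads(1)`
    max_pools: u64,       // the `MaxPools` bound on pools inspected per block
    pool_migrate: Weight, // `T::WeightInfo::pool_migrate()`
    migrated: u64,        // pools actually moved to `DelegateStake`
) -> Weight {
    read_weight
        .saturating_mul(2)
        .saturating_mul(max_pools)
        .saturating_add(pool_migrate.saturating_mul(migrated))
}
```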
pub fn slash_by(pool_id: PoolId, amount: Balance) { - let acc = Pools::create_bonded_account(pool_id); + let acc = Pools::generate_bonded_account(pool_id); let bonded = BondedBalanceMap::get(); let pre_total = bonded.get(&acc).unwrap(); Self::set_bonded_balance(acc, pre_total - amount); @@ -111,6 +111,10 @@ impl sp_staking::StakingInterface for StakingMock { .ok_or(DispatchError::Other("NotStash")) } + fn is_virtual_staker(_who: &Self::AccountId) -> bool { + false + } + fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult { let mut x = BondedBalanceMap::get(); x.get_mut(who).map(|v| *v += extra); @@ -160,7 +164,8 @@ impl sp_staking::StakingInterface for StakingMock { Pools::on_withdraw(&who, unlocking_before.saturating_sub(unlocking(&staker_map))); UnbondingBalanceMap::set(&unbonding_map); - Ok(UnbondingBalanceMap::get().is_empty() && BondedBalanceMap::get().is_empty()) + Ok(UnbondingBalanceMap::get().get(&who).unwrap().is_empty() && + BondedBalanceMap::get().get(&who).unwrap().is_zero()) } fn bond(stash: &Self::AccountId, value: Self::Balance, _: &Self::AccountId) -> DispatchResult { @@ -313,7 +318,7 @@ impl pools::Config for Runtime { type RewardCounter = RewardCounter; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type Staking = StakingMock; + type StakeAdapter = adapter::TransferStake; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type PalletId = PoolsPalletId; type MaxMetadataLen = MaxMetadataLen; diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index f6ef1e6eaac217816e410bc85a837f30bda179f4..28063c2ecaecd8caa6214553d2e16484ba5f5af8 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -95,8 +95,8 @@ fn test_setup_works() { PoolMember:: { pool_id: last_pool, points: 10, ..Default::default() } ); - let bonded_account = Pools::create_bonded_account(last_pool); - let reward_account = Pools::create_reward_account(last_pool); + let bonded_account = Pools::generate_bonded_account(last_pool); + let reward_account = Pools::generate_reward_account(last_pool); // the bonded_account should be bonded by the depositor's funds. 
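The mock's `withdraw_unbonded` fix above is easy to miss: the old code answered "is this stash fully withdrawn?" from global state, which only holds while a single staker exists. A distilled sketch of the bug and the fix, with plain maps standing in for the mock storage:

```rust
use std::collections::BTreeMap;

type AccountId = u128;
type Balance = u128;

/// Buggy variant: answers from global state, so any *other* staker's remaining
/// funds make the check fail even when `who` is fully withdrawn.
fn withdrawn_buggy(
    unbonding: &BTreeMap<AccountId, Vec<(u64, Balance)>>,
    bonded: &BTreeMap<AccountId, Balance>,
    _who: AccountId,
) -> bool {
    unbonding.is_empty() && bonded.is_empty()
}

/// Fixed variant, mirroring the diff: inspect only `who`'s entries. (The mock
/// `unwrap`s the entries; `map_or` here just keeps the sketch total.)
fn withdrawn_fixed(
    unbonding: &BTreeMap<AccountId, Vec<(u64, Balance)>>,
    bonded: &BTreeMap<AccountId, Balance>,
    who: AccountId,
) -> bool {
    unbonding.get(&who).map_or(true, |v| v.is_empty()) &&
        bonded.get(&who).map_or(true, |b| *b == 0)
}
```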
assert_eq!(StakingMock::active_stake(&bonded_account).unwrap(), 10); @@ -728,7 +728,7 @@ mod join { ); // Force the pools bonded balance to 0, simulating a 100% slash - StakingMock::set_bonded_balance(Pools::create_bonded_account(1), 0); + StakingMock::set_bonded_balance(Pools::generate_bonded_account(1), 0); assert_noop!( Pools::join(RuntimeOrigin::signed(11), 420, 1), Error::::OverflowRisk @@ -755,7 +755,7 @@ mod join { <::MaxPointsToBalance as Get>::get().into(); StakingMock::set_bonded_balance( - Pools::create_bonded_account(123), + Pools::generate_bonded_account(123), max_points_to_balance, ); assert_noop!( @@ -764,7 +764,7 @@ mod join { ); StakingMock::set_bonded_balance( - Pools::create_bonded_account(123), + Pools::generate_bonded_account(123), Balance::MAX / max_points_to_balance, ); // Balance needs to be gt Balance::MAX / `MaxPointsToBalance` @@ -773,7 +773,10 @@ mod join { TokenError::FundsUnavailable, ); - StakingMock::set_bonded_balance(Pools::create_bonded_account(1), max_points_to_balance); + StakingMock::set_bonded_balance( + Pools::generate_bonded_account(1), + max_points_to_balance, + ); // Cannot join a pool that isn't open unsafe_set_state(123, PoolState::Blocked); @@ -804,7 +807,7 @@ mod join { #[cfg_attr(not(debug_assertions), should_panic)] fn join_panics_when_reward_pool_not_found() { ExtBuilder::default().build_and_execute(|| { - StakingMock::set_bonded_balance(Pools::create_bonded_account(123), 100); + StakingMock::set_bonded_balance(Pools::generate_bonded_account(123), 100); BondedPool:: { id: 123, inner: BondedPoolInner { @@ -1979,7 +1982,7 @@ mod claim_payout { assert_eq!(member_20.last_recorded_reward_counter, 0.into()); // pre-fund the reward account of pool id 3 with some funds. - Currency::set_balance(&Pools::create_reward_account(3), 10); + Currency::set_balance(&Pools::generate_reward_account(3), 10); // create pool 3 Currency::set_balance(&30, 100); @@ -1988,7 +1991,7 @@ mod claim_payout { // reward counter is still the same. let (member_30, _, reward_pool_30) = Pools::get_member_with_pools(&30).unwrap(); assert_eq!( - Currency::free_balance(&Pools::create_reward_account(3)), + Currency::free_balance(&Pools::generate_reward_account(3)), 10 + Currency::minimum_balance() ); @@ -4594,6 +4597,92 @@ mod withdraw_unbonded { assert_eq!(ClaimPermissions::::contains_key(20), false); }); } + + #[test] + fn destroy_works_without_erroneous_extra_consumer() { + ExtBuilder::default().ed(1).build_and_execute(|| { + // 10 is the depositor for pool 1, with min join bond 10. + // set pool to destroying. + unsafe_set_state(1, PoolState::Destroying); + + // set current era + CurrentEra::set(1); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10)); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 }, + ] + ); + + // move to era when unbonded funds can be withdrawn. + CurrentEra::set(4); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Withdrawn { member: 10, pool_id: 1, points: 10, balance: 10 }, + Event::MemberRemoved { pool_id: 1, member: 10 }, + Event::Destroyed { pool_id: 1 }, + ] + ); + + // pool is destroyed. + assert!(!Metadata::::contains_key(1)); + // ensure the pool account is reaped. 
+ assert!(!frame_system::Account::::contains_key(&Pools::generate_bonded_account(1))); + }) + } + + #[test] + fn destroy_works_with_erroneous_extra_consumer() { + ExtBuilder::default().ed(1).build_and_execute(|| { + // 10 is the depositor for pool 1, with min join bond 10. + let pool_one = Pools::generate_bonded_account(1); + + // set pool to destroying. + unsafe_set_state(1, PoolState::Destroying); + + // set current era + CurrentEra::set(1); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10)); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 }, + ] + ); + + // move to era when unbonded funds can be withdrawn. + CurrentEra::set(4); + + // increment consumer by 1 reproducing the erroneous consumer bug. + // refer https://github.com/paritytech/polkadot-sdk/issues/4440. + assert_ok!(frame_system::Pallet::::inc_consumers(&pool_one)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Withdrawn { member: 10, pool_id: 1, points: 10, balance: 10 }, + Event::MemberRemoved { pool_id: 1, member: 10 }, + Event::Destroyed { pool_id: 1 }, + ] + ); + + // pool is destroyed. + assert!(!Metadata::::contains_key(1)); + // ensure the pool account is reaped. + assert!(!frame_system::Account::::contains_key(&pool_one)); + }) + } } mod create { @@ -4604,7 +4693,7 @@ mod create { fn create_works() { ExtBuilder::default().build_and_execute(|| { // next pool id is 2. - let next_pool_stash = Pools::create_bonded_account(2); + let next_pool_stash = Pools::generate_bonded_account(2); let ed = Currency::minimum_balance(); assert_eq!(TotalValueLocked::::get(), 10); @@ -4925,6 +5014,17 @@ mod set_state { // surpassed. Making this pool destroyable by anyone. StakingMock::slash_by(1, 10); + // in mock we are using transfer stake which implies slash is greedy. Extrinsic to + // apply pending slash should fail. + assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(11), 10), + Error::::NotSupported + ); + + // pending slash api should return zero as well. + assert_eq!(Pools::api_pool_pending_slash(1), 0); + assert_eq!(Pools::api_member_pending_slash(10), 0); + // When assert_ok!(Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Destroying)); // Then @@ -7387,3 +7487,63 @@ mod chill { }) } } + +// the test mock is using `TransferStake` and so `DelegateStake` is not tested here. Extrinsics +// meant for `DelegateStake` should be gated. +// +// `DelegateStake` tests are in `pallet-nomination-pools-test-delegate-stake`. Since we support both +// strategies currently, we keep these tests as it is but in future we may remove `TransferStake` +// completely. 
+mod delegate_stake { + use super::*; + #[test] + fn delegation_specific_calls_are_gated() { + ExtBuilder::default().with_check(0).build_and_execute(|| { + // Given + Currency::set_balance(&11, ExistentialDeposit::get() + 2); + assert!(!PoolMembers::::contains_key(11)); + + // When + assert_ok!(Pools::join(RuntimeOrigin::signed(11), 2, 1)); + + // Then + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true }, + Event::Bonded { member: 11, pool_id: 1, bonded: 2, joined: true }, + ] + ); + + assert_eq!( + PoolMembers::::get(11).unwrap(), + PoolMember:: { pool_id: 1, points: 2, ..Default::default() } + ); + + // ensure pool 1 cannot be migrated. + assert!(!Pools::api_pool_needs_delegate_migration(1)); + assert_noop!( + Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1), + Error::::NotSupported + ); + + // members cannot be migrated either. + assert!(!Pools::api_member_needs_delegate_migration(10)); + assert_noop!( + Pools::migrate_delegation(RuntimeOrigin::signed(10), 11), + Error::::NotSupported + ); + + // Given + // The bonded balance is slashed in half + StakingMock::slash_by(1, 6); + + // since slash is greedy with `TransferStake`, `apply_slash` should not work either. + assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(10), 11), + Error::::NotSupported + ); + }); + } +} diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 57ea8dc388f6869c8f4f2be761816cced9758e19..21711a499b623fad15198b265b99476545f6b2b5 100644 --- a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_nomination_pools` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-04-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-dcu62vjg-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_nomination_pools -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/nomination-pools/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_nomination_pools +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/nomination-pools/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -73,6 +71,10 @@ pub trait WeightInfo { fn set_claim_permission() -> Weight; fn claim_commission() -> Weight; fn adjust_pool_deposit() -> Weight; + fn apply_slash() -> Weight; + fn apply_slash_fail() -> Weight; + fn pool_migrate() -> Weight; + fn migrate_delegation() -> Weight; } /// Weights for `pallet_nomination_pools` using the Substrate node and recommended hardware. 
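The four additions to the `WeightInfo` trait follow the usual FRAME pattern: the generated `SubstrateWeight<T>` supplies benchmarked values for production runtimes while `()` keeps tests compiling. A small generic sketch showing how the new entries can be consumed independently of which implementation a runtime wires in:

```rust
use frame_support::weights::Weight;
use pallet_nomination_pools::weights::WeightInfo;

/// Worst-case budget for migrating one pool plus one of its members, generic over
/// whichever `WeightInfo` implementation is configured
/// (`SubstrateWeight<Runtime>` in production, `()` in tests).
fn full_migration_weight<W: WeightInfo>() -> Weight {
    W::pool_migrate().saturating_add(W::migrate_delegation())
}

// For example, with the unit implementation used in tests:
// let budget = full_migration_weight::<()>();
```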
@@ -100,6 +102,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -112,11 +116,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3425` + // Measured: `3458` // Estimated: `8877` - // Minimum execution time: 201_783_000 picoseconds. - Weight::from_parts(206_014_000, 8877) - .saturating_add(T::DbWeight::get().reads(20_u64)) + // Minimum execution time: 195_962_000 picoseconds. + Weight::from_parts(201_682_000, 8877) + .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -133,6 +137,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -145,11 +151,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3435` + // Measured: `3468` // Estimated: `8877` - // Minimum execution time: 204_124_000 picoseconds. - Weight::from_parts(207_910_000, 8877) - .saturating_add(T::DbWeight::get().reads(17_u64)) + // Minimum execution time: 197_466_000 picoseconds. 
+ Weight::from_parts(201_356_000, 8877) + .saturating_add(T::DbWeight::get().reads(18_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) @@ -168,6 +174,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -180,11 +188,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3500` + // Measured: `3533` // Estimated: `8877` - // Minimum execution time: 240_342_000 picoseconds. - Weight::from_parts(245_735_000, 8877) - .saturating_add(T::DbWeight::get().reads(18_u64)) + // Minimum execution time: 232_623_000 picoseconds. + Weight::from_parts(236_970_000, 8877) + .saturating_add(T::DbWeight::get().reads(19_u64)) .saturating_add(T::DbWeight::get().writes(14_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) @@ -203,8 +211,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 81_054_000 picoseconds. - Weight::from_parts(83_324_000, 3719) + // Minimum execution time: 77_992_000 picoseconds. + Weight::from_parts(79_927_000, 3719) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -228,6 +236,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -242,11 +252,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3622` + // Measured: `3655` // Estimated: `27847` - // Minimum execution time: 188_835_000 picoseconds. - Weight::from_parts(192_565_000, 27847) - .saturating_add(T::DbWeight::get().reads(20_u64)) + // Minimum execution time: 182_368_000 picoseconds. 
+ Weight::from_parts(185_387_000, 27847) + .saturating_add(T::DbWeight::get().reads(21_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) @@ -257,6 +267,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -268,13 +280,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1848` + // Measured: `1881` // Estimated: `4764` - // Minimum execution time: 73_556_000 picoseconds. - Weight::from_parts(76_075_881, 4764) - // Standard Error: 1_419 - .saturating_add(Weight::from_parts(54_476, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(8_u64)) + // Minimum execution time: 72_179_000 picoseconds. + Weight::from_parts(75_031_092, 4764) + // Standard Error: 1_487 + .saturating_add(Weight::from_parts(56_741, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -289,6 +301,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -306,13 +320,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2238` + // Measured: `2271` // Estimated: `27847` - // Minimum execution time: 144_177_000 picoseconds. - Weight::from_parts(148_686_524, 27847) - // Standard Error: 2_475 - .saturating_add(Weight::from_parts(77_460, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(12_u64)) + // Minimum execution time: 137_277_000 picoseconds. 
+ Weight::from_parts(143_537_793, 27847) + // Standard Error: 3_049 + .saturating_add(Weight::from_parts(71_178, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -329,6 +343,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) @@ -364,14 +380,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2525` + // Measured: `2558` // Estimated: `27847` - // Minimum execution time: 255_957_000 picoseconds. - Weight::from_parts(264_206_788, 27847) - // Standard Error: 4_229 - .saturating_add(Weight::from_parts(3_064, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(24_u64)) - .saturating_add(T::DbWeight::get().writes(20_u64)) + // Minimum execution time: 242_522_000 picoseconds. + Weight::from_parts(250_740_608, 27847) + // Standard Error: 4_517 + .saturating_add(Weight::from_parts(13_231, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(25_u64)) + .saturating_add(T::DbWeight::get().writes(21_u64)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -393,12 +409,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) @@ -419,11 +437,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof 
Size summary in bytes: - // Measured: `1284` + // Measured: `1317` // Estimated: `8538` - // Minimum execution time: 193_527_000 picoseconds. - Weight::from_parts(197_140_000, 8538) - .saturating_add(T::DbWeight::get().reads(24_u64)) + // Minimum execution time: 182_740_000 picoseconds. + Weight::from_parts(188_820_000, 8538) + .saturating_add(T::DbWeight::get().reads(25_u64)) .saturating_add(T::DbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) @@ -459,12 +477,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1976` + // Measured: `2009` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 86_054_000 picoseconds. - Weight::from_parts(88_743_932, 4556) - // Standard Error: 12_699 - .saturating_add(Weight::from_parts(1_829_097, 0).saturating_mul(n.into())) + // Minimum execution time: 83_649_000 picoseconds. + Weight::from_parts(85_754_306, 4556) + // Standard Error: 12_757 + .saturating_add(Weight::from_parts(1_616_356, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) @@ -478,10 +496,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1434` + // Measured: `1467` // Estimated: `4556` - // Minimum execution time: 34_544_000 picoseconds. - Weight::from_parts(35_910_000, 4556) + // Minimum execution time: 34_594_000 picoseconds. + Weight::from_parts(36_173_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -496,10 +514,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 14_111_000 picoseconds. - Weight::from_parts(15_204_218, 3735) - // Standard Error: 226 - .saturating_add(Weight::from_parts(1_291, 0).saturating_mul(n.into())) + // Minimum execution time: 13_945_000 picoseconds. + Weight::from_parts(14_764_062, 3735) + // Standard Error: 127 + .saturating_add(Weight::from_parts(1_406, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -519,8 +537,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_524_000 picoseconds. - Weight::from_parts(4_882_000, 0) + // Minimum execution time: 4_523_000 picoseconds. + Weight::from_parts(4_727_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -529,8 +547,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 17_975_000 picoseconds. - Weight::from_parts(18_549_000, 3719) + // Minimum execution time: 17_124_000 picoseconds. 
+ Weight::from_parts(17_718_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -558,10 +576,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2143` + // Measured: `2176` // Estimated: `4556` - // Minimum execution time: 81_574_000 picoseconds. - Weight::from_parts(83_519_000, 4556) + // Minimum execution time: 78_293_000 picoseconds. + Weight::from_parts(81_177_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -577,8 +595,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 35_015_000 picoseconds. - Weight::from_parts(36_159_000, 3719) + // Minimum execution time: 33_105_000 picoseconds. + Weight::from_parts(34_106_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -590,8 +608,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 17_775_000 picoseconds. - Weight::from_parts(18_358_000, 3719) + // Minimum execution time: 16_710_000 picoseconds. + Weight::from_parts(17_269_000, 3719) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -601,8 +619,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_997_000 picoseconds. - Weight::from_parts(18_041_000, 3719) + // Minimum execution time: 16_557_000 picoseconds. + Weight::from_parts(17_431_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -612,8 +630,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 17_000_000 picoseconds. - Weight::from_parts(17_807_000, 3719) + // Minimum execution time: 16_723_000 picoseconds. + Weight::from_parts(17_155_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -625,8 +643,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_803_000 picoseconds. - Weight::from_parts(15_401_000, 3702) + // Minimum execution time: 14_667_000 picoseconds. + Weight::from_parts(15_242_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -642,8 +660,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 69_759_000 picoseconds. - Weight::from_parts(71_985_000, 3719) + // Minimum execution time: 64_219_000 picoseconds. + Weight::from_parts(66_718_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -659,11 +677,58 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 73_829_000 picoseconds. - Weight::from_parts(75_966_000, 4764) + // Minimum execution time: 70_284_000 picoseconds. 
+ Weight::from_parts(71_375_000, 4764) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `694` + // Estimated: `3702` + // Minimum execution time: 13_403_000 picoseconds. + Weight::from_parts(14_064_000, 3702) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + fn apply_slash_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `732` + // Estimated: `3702` + // Minimum execution time: 14_419_000 picoseconds. + Weight::from_parts(15_004_000, 3702) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + fn pool_migrate() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 759_000 picoseconds. + Weight::from_parts(819_000, 0) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn migrate_delegation() -> Weight { + // Proof Size summary in bytes: + // Measured: `1648` + // Estimated: `27847` + // Minimum execution time: 36_192_000 picoseconds. + Weight::from_parts(37_038_000, 27847) + .saturating_add(T::DbWeight::get().reads(6_u64)) + } } // For backwards compatibility and tests. 
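Each generated entry composes a benchmarked base, `Weight::from_parts(ref_time_ps, proof_size_bytes)`, with the configured database weights. Reconstructing the new `apply_slash` entry from the table above (using `RocksDbWeight`, as the fallback `()` implementation does):

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

/// Mirrors the generated `apply_slash()` weight: a 14_064_000 ps (≈14 µs) base
/// with 3702 bytes of proof size, plus one database read
/// (`NominationPools::PoolMembers`).
fn apply_slash_weight() -> Weight {
    Weight::from_parts(14_064_000, 3702)
        .saturating_add(RocksDbWeight::get().reads(1_u64))
}
```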
@@ -690,6 +755,8 @@ impl WeightInfo for () { /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -702,11 +769,11 @@ impl WeightInfo for () { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3425` + // Measured: `3458` // Estimated: `8877` - // Minimum execution time: 201_783_000 picoseconds. - Weight::from_parts(206_014_000, 8877) - .saturating_add(RocksDbWeight::get().reads(20_u64)) + // Minimum execution time: 195_962_000 picoseconds. + Weight::from_parts(201_682_000, 8877) + .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -723,6 +790,8 @@ impl WeightInfo for () { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -735,11 +804,11 @@ impl WeightInfo for () { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3435` + // Measured: `3468` // Estimated: `8877` - // Minimum execution time: 204_124_000 picoseconds. - Weight::from_parts(207_910_000, 8877) - .saturating_add(RocksDbWeight::get().reads(17_u64)) + // Minimum execution time: 197_466_000 picoseconds. 
+ Weight::from_parts(201_356_000, 8877) + .saturating_add(RocksDbWeight::get().reads(18_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) @@ -758,6 +827,8 @@ impl WeightInfo for () { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -770,11 +841,11 @@ impl WeightInfo for () { /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3500` + // Measured: `3533` // Estimated: `8877` - // Minimum execution time: 240_342_000 picoseconds. - Weight::from_parts(245_735_000, 8877) - .saturating_add(RocksDbWeight::get().reads(18_u64)) + // Minimum execution time: 232_623_000 picoseconds. + Weight::from_parts(236_970_000, 8877) + .saturating_add(RocksDbWeight::get().reads(19_u64)) .saturating_add(RocksDbWeight::get().writes(14_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) @@ -793,8 +864,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1172` // Estimated: `3719` - // Minimum execution time: 81_054_000 picoseconds. - Weight::from_parts(83_324_000, 3719) + // Minimum execution time: 77_992_000 picoseconds. + Weight::from_parts(79_927_000, 3719) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -818,6 +889,8 @@ impl WeightInfo for () { /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) /// Storage: `Staking::MinNominatorBond` (r:1 w:0) /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -832,11 +905,11 @@ impl WeightInfo for () { /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3622` + // Measured: `3655` // Estimated: `27847` - // Minimum execution time: 188_835_000 picoseconds. - Weight::from_parts(192_565_000, 27847) - .saturating_add(RocksDbWeight::get().reads(20_u64)) + // Minimum execution time: 182_368_000 picoseconds. 
+ Weight::from_parts(185_387_000, 27847) + .saturating_add(RocksDbWeight::get().reads(21_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) @@ -847,6 +920,8 @@ impl WeightInfo for () { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -858,13 +933,13 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1848` + // Measured: `1881` // Estimated: `4764` - // Minimum execution time: 73_556_000 picoseconds. - Weight::from_parts(76_075_881, 4764) - // Standard Error: 1_419 - .saturating_add(Weight::from_parts(54_476, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(8_u64)) + // Minimum execution time: 72_179_000 picoseconds. + Weight::from_parts(75_031_092, 4764) + // Standard Error: 1_487 + .saturating_add(Weight::from_parts(56_741, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -879,6 +954,8 @@ impl WeightInfo for () { /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) @@ -896,13 +973,13 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2238` + // Measured: `2271` // Estimated: `27847` - // Minimum execution time: 144_177_000 picoseconds. - Weight::from_parts(148_686_524, 27847) - // Standard Error: 2_475 - .saturating_add(Weight::from_parts(77_460, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(12_u64)) + // Minimum execution time: 137_277_000 picoseconds. 
+ Weight::from_parts(143_537_793, 27847) + // Standard Error: 3_049 + .saturating_add(Weight::from_parts(71_178, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) @@ -919,6 +996,8 @@ impl WeightInfo for () { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) @@ -954,14 +1033,14 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2525` + // Measured: `2558` // Estimated: `27847` - // Minimum execution time: 255_957_000 picoseconds. - Weight::from_parts(264_206_788, 27847) - // Standard Error: 4_229 - .saturating_add(Weight::from_parts(3_064, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(24_u64)) - .saturating_add(RocksDbWeight::get().writes(20_u64)) + // Minimum execution time: 242_522_000 picoseconds. + Weight::from_parts(250_740_608, 27847) + // Standard Error: 4_517 + .saturating_add(Weight::from_parts(13_231, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(25_u64)) + .saturating_add(RocksDbWeight::get().writes(21_u64)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -983,12 +1062,14 @@ impl WeightInfo for () { /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) @@ -1009,11 +1090,11 @@ impl WeightInfo for () { /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: 
`1284` + // Measured: `1317` // Estimated: `8538` - // Minimum execution time: 193_527_000 picoseconds. - Weight::from_parts(197_140_000, 8538) - .saturating_add(RocksDbWeight::get().reads(24_u64)) + // Minimum execution time: 182_740_000 picoseconds. + Weight::from_parts(188_820_000, 8538) + .saturating_add(RocksDbWeight::get().reads(25_u64)) .saturating_add(RocksDbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) @@ -1049,12 +1130,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1976` + // Measured: `2009` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 86_054_000 picoseconds. - Weight::from_parts(88_743_932, 4556) - // Standard Error: 12_699 - .saturating_add(Weight::from_parts(1_829_097, 0).saturating_mul(n.into())) + // Minimum execution time: 83_649_000 picoseconds. + Weight::from_parts(85_754_306, 4556) + // Standard Error: 12_757 + .saturating_add(Weight::from_parts(1_616_356, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(15_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) @@ -1068,10 +1149,10 @@ impl WeightInfo for () { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1434` + // Measured: `1467` // Estimated: `4556` - // Minimum execution time: 34_544_000 picoseconds. - Weight::from_parts(35_910_000, 4556) + // Minimum execution time: 34_594_000 picoseconds. + Weight::from_parts(36_173_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1086,10 +1167,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3735` - // Minimum execution time: 14_111_000 picoseconds. - Weight::from_parts(15_204_218, 3735) - // Standard Error: 226 - .saturating_add(Weight::from_parts(1_291, 0).saturating_mul(n.into())) + // Minimum execution time: 13_945_000 picoseconds. + Weight::from_parts(14_764_062, 3735) + // Standard Error: 127 + .saturating_add(Weight::from_parts(1_406, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1109,8 +1190,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_524_000 picoseconds. - Weight::from_parts(4_882_000, 0) + // Minimum execution time: 4_523_000 picoseconds. + Weight::from_parts(4_727_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) @@ -1119,8 +1200,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 17_975_000 picoseconds. - Weight::from_parts(18_549_000, 3719) + // Minimum execution time: 17_124_000 picoseconds. 
+ Weight::from_parts(17_718_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1148,10 +1229,10 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2143` + // Measured: `2176` // Estimated: `4556` - // Minimum execution time: 81_574_000 picoseconds. - Weight::from_parts(83_519_000, 4556) + // Minimum execution time: 78_293_000 picoseconds. + Weight::from_parts(81_177_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1167,8 +1248,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `804` // Estimated: `3719` - // Minimum execution time: 35_015_000 picoseconds. - Weight::from_parts(36_159_000, 3719) + // Minimum execution time: 33_105_000 picoseconds. + Weight::from_parts(34_106_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1180,8 +1261,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `572` // Estimated: `3719` - // Minimum execution time: 17_775_000 picoseconds. - Weight::from_parts(18_358_000, 3719) + // Minimum execution time: 16_710_000 picoseconds. + Weight::from_parts(17_269_000, 3719) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1191,8 +1272,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 16_997_000 picoseconds. - Weight::from_parts(18_041_000, 3719) + // Minimum execution time: 16_557_000 picoseconds. + Weight::from_parts(17_431_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1202,8 +1283,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `532` // Estimated: `3719` - // Minimum execution time: 17_000_000 picoseconds. - Weight::from_parts(17_807_000, 3719) + // Minimum execution time: 16_723_000 picoseconds. + Weight::from_parts(17_155_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1215,8 +1296,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 14_803_000 picoseconds. - Weight::from_parts(15_401_000, 3702) + // Minimum execution time: 14_667_000 picoseconds. + Weight::from_parts(15_242_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1232,8 +1313,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1002` // Estimated: `3719` - // Minimum execution time: 69_759_000 picoseconds. - Weight::from_parts(71_985_000, 3719) + // Minimum execution time: 64_219_000 picoseconds. + Weight::from_parts(66_718_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1249,9 +1330,56 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `901` // Estimated: `4764` - // Minimum execution time: 73_829_000 picoseconds. - Weight::from_parts(75_966_000, 4764) + // Minimum execution time: 70_284_000 picoseconds. 
+ Weight::from_parts(71_375_000, 4764) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `694` + // Estimated: `3702` + // Minimum execution time: 13_403_000 picoseconds. + Weight::from_parts(14_064_000, 3702) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + fn apply_slash_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `732` + // Estimated: `3702` + // Minimum execution time: 14_419_000 picoseconds. + Weight::from_parts(15_004_000, 3702) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + fn pool_migrate() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 759_000 picoseconds. + Weight::from_parts(819_000, 0) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn migrate_delegation() -> Weight { + // Proof Size summary in bytes: + // Measured: `1648` + // Estimated: `27847` + // Minimum execution time: 36_192_000 picoseconds. 
+ Weight::from_parts(37_038_000, 27847) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + } } diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ea8eb20696931f8c45edc6f67e4d5af49412eccd --- /dev/null +++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "pallet-nomination-pools-test-delegate-stake" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME nomination pools pallet tests with the staking pallet" +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dev-dependencies] +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } +scale-info = { version = "2.11.1", features = ["derive"] } + +sp-runtime = { path = "../../../primitives/runtime" } +sp-io = { path = "../../../primitives/io" } +sp-std = { path = "../../../primitives/std" } +sp-staking = { path = "../../../primitives/staking" } +sp-core = { path = "../../../primitives/core" } + +frame-system = { path = "../../system" } +frame-support = { path = "../../support" } +frame-election-provider-support = { path = "../../election-provider-support" } + +pallet-timestamp = { path = "../../timestamp" } +pallet-balances = { path = "../../balances" } +pallet-staking = { path = "../../staking" } +pallet-delegated-staking = { path = "../../delegated-staking" } +pallet-bags-list = { path = "../../bags-list" } +pallet-staking-reward-curve = { path = "../../staking/reward-curve" } +pallet-nomination-pools = { path = ".." } + +sp-tracing = { path = "../../../primitives/tracing" } +log = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..51f6470f90d028caefe38f1a0bdd2ce868a21643 --- /dev/null +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs @@ -0,0 +1,1185 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
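+
+//! End-to-end tests for nomination pools running against `pallet-staking`: pool
+//! lifecycle, permissionless `chill`, slashing (proportional and otherwise), and
+//! migration from the legacy `TransferStake` adapter to `DelegateStake`.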
+
+#![cfg(test)]
+
+mod mock;
+
+use frame_support::{
+	assert_noop, assert_ok,
+	traits::{fungible::InspectHold, Currency},
+};
+use mock::*;
+use pallet_nomination_pools::{
+	BondExtra, BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember,
+	PoolMembers, PoolState,
+};
+use pallet_staking::{
+	CurrentEra, Error as StakingError, Event as StakingEvent, Payee, RewardDestination,
+};
+
+use pallet_delegated_staking::{Error as DelegatedStakingError, Event as DelegatedStakingEvent};
+
+use sp_runtime::{bounded_btree_map, traits::Zero};
+use sp_staking::Agent;
+
+#[test]
+fn pool_lifecycle_e2e() {
+	new_test_ext().execute_with(|| {
+		assert_eq!(Balances::minimum_balance(), 5);
+		assert_eq!(Staking::current_era(), None);
+
+		// create the pool, we know this has id 1.
+		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
+		assert_eq!(LastPoolId::<T>::get(), 1);
+
+		// have the pool nominate.
+		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Created { depositor: 10, pool_id: 1 },
+				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
+			]
+		);
+
+		// have two members join
+		assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1));
+		assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true },
+			]
+		);
+
+		// pool goes into destroying
+		assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying));
+
+		// depositor cannot unbond yet.
+		assert_noop!(
+			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
+			PoolsError::<T>::MinimumBondNotMet,
+		);
+
+		// now the members want to unbond.
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+
+		assert_eq!(PoolMembers::<T>::get(20).unwrap().unbonding_eras.len(), 1);
+		assert_eq!(PoolMembers::<T>::get(20).unwrap().points, 0);
+		assert_eq!(PoolMembers::<T>::get(21).unwrap().unbonding_eras.len(), 1);
+		assert_eq!(PoolMembers::<T>::get(21).unwrap().points, 0);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying },
+				PoolsEvent::Unbonded { member: 20, pool_id: 1, points: 10, balance: 10, era: 3 },
+				PoolsEvent::Unbonded { member: 21, pool_id: 1, points: 10, balance: 10, era: 3 },
+			]
+		);
+
+		// the depositor still cannot unbond.
+		assert_noop!(
+			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
+			PoolsError::<T>::MinimumBondNotMet,
+		);
+
+		for e in 1..BondingDuration::get() {
+			CurrentEra::<T>::set(Some(e));
+			assert_noop!(
+				Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0),
+				PoolsError::<T>::CannotWithdrawAny
+			);
+		}
+
+		// members are now unlocked.
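+		// (`BondingDuration` is 3 in this mock, so the chunks scheduled above for era 3
+		// become withdrawable once the current era reaches 3.)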
+		CurrentEra::<T>::set(Some(BondingDuration::get()));
+
+		// the depositor still cannot unbond.
+		assert_noop!(
+			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
+			PoolsError::<T>::MinimumBondNotMet,
+		);
+
+		// but members can now withdraw.
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0));
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0));
+		assert!(PoolMembers::<T>::get(20).is_none());
+		assert!(PoolMembers::<T>::get(21).is_none());
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Withdrawn { member: 20, pool_id: 1, points: 10, balance: 10 },
+				PoolsEvent::MemberRemoved { pool_id: 1, member: 20 },
+				PoolsEvent::Withdrawn { member: 21, pool_id: 1, points: 10, balance: 10 },
+				PoolsEvent::MemberRemoved { pool_id: 1, member: 21 },
+			]
+		);
+
+		// as soon as all members have left, the depositor can try to unbond, but since the
+		// pool's nominator intention is still set, they must chill first.
+		assert_noop!(
+			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
+			pallet_staking::Error::<T>::InsufficientBond
+		);
+
+		assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 50));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Chilled { stash: POOL1_BONDED },
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 },
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 50, balance: 50, era: 6 }]
+		);
+
+		// waiting another bonding duration:
+		CurrentEra::<T>::set(Some(BondingDuration::get() * 2));
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 1));
+
+		// the pool is fully destroyed now.
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Withdrawn { member: 10, pool_id: 1, points: 50, balance: 50 },
+				PoolsEvent::MemberRemoved { pool_id: 1, member: 10 },
+				PoolsEvent::Destroyed { pool_id: 1 }
+			]
+		);
+	})
+}
+
+#[test]
+fn pool_chill_e2e() {
+	new_test_ext().execute_with(|| {
+		assert_eq!(Balances::minimum_balance(), 5);
+		assert_eq!(Staking::current_era(), None);
+
+		// create the pool, we know this has id 1.
+		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
+		assert_eq!(LastPoolId::<T>::get(), 1);
+
+		// have the pool nominate.
+		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Created { depositor: 10, pool_id: 1 },
+				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
+			]
+		);
+
+		// have two members join
+		assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1));
+		assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true },
+			]
+		);
+
+		// if the depositor does not have more than `MinNominatorBond` staked, we can end up
+		// in a situation where a member unbonding would cause the pool balance to drop below
+		// `MinNominatorBond`, and is hence not allowed. This can happen if `MinNominatorBond`
+		// is increased after the pool is created.
+		assert_ok!(Staking::set_staking_configs(
+			RuntimeOrigin::root(),
+			pallet_staking::ConfigOp::Set(55), // minimum nominator bond
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+		));
+
+		// members can unbond as long as the total stake of the pool stays above the minimum
+		// nominator bond
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10),);
+		assert_eq!(PoolMembers::<T>::get(20).unwrap().unbonding_eras.len(), 1);
+		assert_eq!(PoolMembers::<T>::get(20).unwrap().points, 0);
+
+		// this member cannot unbond since it would cause `pool stake < MinNominatorBond`
+		assert_noop!(
+			Pools::unbond(RuntimeOrigin::signed(21), 21, 10),
+			StakingError::<T>::InsufficientBond,
+		);
+
+		// members can call `chill` permissionlessly now
+		assert_ok!(Pools::chill(RuntimeOrigin::signed(20), 1));
+
+		// now another member can unbond.
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+		assert_eq!(PoolMembers::<T>::get(21).unwrap().unbonding_eras.len(), 1);
+		assert_eq!(PoolMembers::<T>::get(21).unwrap().points, 0);
+
+		// the pool cannot resume nomination until the depositor has enough stake
+		assert_noop!(
+			Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]),
+			PoolsError::<T>::MinimumBondNotMet,
+		);
+
+		// other members joining the pool does not affect the depositor's ability to resume
+		// nomination
+		assert_ok!(Pools::join(RuntimeOrigin::signed(22), 10, 1));
+
+		assert_noop!(
+			Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]),
+			PoolsError::<T>::MinimumBondNotMet,
+		);
+
+		// depositor can bond extra stake
+		assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10)));
+
+		// `chill` cannot be called permissionlessly anymore
+		assert_noop!(
+			Pools::chill(RuntimeOrigin::signed(20), 1),
+			PoolsError::<T>::NotNominator,
+		);
+
+		// now the nominator can resume nomination
+		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+		// skip to make the unbonding period end.
+		CurrentEra::<T>::set(Some(BondingDuration::get()));
+
+		// members can now withdraw.
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0));
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Chilled { stash: POOL1_BONDED },
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // other member bonding
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // depositor bond extra
+				StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },
+			]
+		);
+	})
+}
+
+#[test]
+fn pool_slash_e2e() {
+	new_test_ext().execute_with(|| {
+		ExistentialDeposit::set(1);
+		assert_eq!(Balances::minimum_balance(), 1);
+		assert_eq!(Staking::current_era(), None);
+
+		// create the pool, we know this has id 1.
+		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
+		assert_eq!(LastPoolId::<T>::get(), 1);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Created { depositor: 10, pool_id: 1 },
+				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+			]
+		);
+
+		assert_eq!(
+			Payee::<T>::get(POOL1_BONDED),
+			Some(RewardDestination::Account(POOL1_REWARD))
+		);
+
+		// have two members join
+		assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1));
+		assert_ok!(Pools::join(RuntimeOrigin::signed(21), 20, 1));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 },
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 }
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
+				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 20, joined: true },
+			]
+		);
+
+		// now let's progress a bit.
+		CurrentEra::<T>::set(Some(1));
+
+		// 20 / 80 of the total funds are unlocked, and safe from any further slash.
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 },
+				PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 4 }
+			]
+		);
+
+		CurrentEra::<T>::set(Some(2));
+
+		// note: depositor cannot fully unbond at this point.
+		// these funds will still get slashed.
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+			]
+		);
+
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 5 },
+				PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 5 },
+				PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 10, points: 10, era: 5 },
+			]
+		);
+
+		// At this point, 20 are safe from slash, 30 are unlocking but vulnerable to slash, and
+		// another 30 are active and vulnerable to slash. Let's slash half of them.
+		pallet_staking::slashing::do_slash::<T>(
+			&POOL1_BONDED,
+			30,
+			&mut Default::default(),
+			&mut Default::default(),
+			2, // slash era 2, affects chunks at era 5 onwards.
+		);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				// 30 has been slashed to 15 (15 slash)
+				PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 5, balance: 15 },
+				// 30 has been slashed to 15 (15 slash)
+				PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 }
+			]
+		);
+
+		CurrentEra::<T>::set(Some(3));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+
+		assert_eq!(
+			PoolMembers::<T>::get(21).unwrap(),
+			PoolMember {
+				pool_id: 1,
+				points: 0,
+				last_recorded_reward_counter: Zero::zero(),
+				// the 10 points unlocked just now correspond to 5 points in the unbond pool.
+				unbonding_eras: bounded_btree_map!(5 => 10, 6 => 5)
+			}
+		);
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5, era: 6 }]
+		);
+
+		// now we start withdrawing. we do it all at once, at era 6 where 20 and 21 are fully free.
+		CurrentEra::<T>::set(Some(6));
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0));
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0));
+
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				// 20 had unbonded 10 safely, and 10 got slashed by half.
+				PoolsEvent::Withdrawn { member: 20, pool_id: 1, balance: 10 + 5, points: 20 },
+				PoolsEvent::MemberRemoved { pool_id: 1, member: 20 },
+				// 21 unbonded all of it after the slash
+				PoolsEvent::Withdrawn { member: 21, pool_id: 1, balance: 5 + 5, points: 15 },
+				PoolsEvent::MemberRemoved { pool_id: 1, member: 21 }
+			]
+		);
+		assert_eq!(
+			staking_events_since_last_call(),
+			// a 10 (un-slashed) + 10/2 (slashed) balance from 10 has also been unlocked
+			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 15 + 10 + 15 }]
+		);
+
+		// now, finally, we can unbond the depositor further than their current limit.
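+		// (moving the pool into `Destroying` below is what lifts the depositor's
+		// minimum-bond restriction that blocked the earlier unbond attempts.)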
+		assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 20));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying },
+				PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10, era: 9 }
+			]
+		);
+
+		CurrentEra::<T>::set(Some(9));
+		assert_eq!(
+			PoolMembers::<T>::get(10).unwrap(),
+			PoolMember {
+				pool_id: 1,
+				points: 0,
+				last_recorded_reward_counter: Zero::zero(),
+				unbonding_eras: bounded_btree_map!(4 => 10, 5 => 10, 9 => 10)
+			}
+		);
+		// withdraw the depositor: of their 40 bonded they get 25 back (10 + 5 + 10), i.e. they
+		// lose 15 balance in total due to the slash.
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Withdrawn { member: 10, pool_id: 1, balance: 10 + 15, points: 30 },
+				PoolsEvent::MemberRemoved { pool_id: 1, member: 10 },
+				PoolsEvent::Destroyed { pool_id: 1 }
+			]
+		);
+	});
+}
+
+#[test]
+fn pool_slash_proportional() {
+	// a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that
+	// happened in era 100 should only affect the latter two.
+	new_test_ext().execute_with(|| {
+		ExistentialDeposit::set(1);
+		BondingDuration::set(28);
+		assert_eq!(Balances::minimum_balance(), 1);
+		assert_eq!(Staking::current_era(), None);
+
+		// create the pool, we know this has id 1.
+		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
+		assert_eq!(LastPoolId::<T>::get(), 1);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
+		);
+		assert_eq!(
+			delegated_staking_events_since_last_call(),
+			vec![DelegatedStakingEvent::Delegated {
+				agent: POOL1_BONDED,
+				delegator: 10,
+				amount: 40
+			}]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Created { depositor: 10, pool_id: 1 },
+				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+			]
+		);
+
+		// have three members join
+		let bond = 20;
+		assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1));
+		assert_ok!(Pools::join(RuntimeOrigin::signed(21), bond, 1));
+		assert_ok!(Pools::join(RuntimeOrigin::signed(22), bond, 1));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond },
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond },
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond },
+			]
+		);
+		assert_eq!(
+			delegated_staking_events_since_last_call(),
+			vec![
+				DelegatedStakingEvent::Delegated {
+					agent: POOL1_BONDED,
+					delegator: 20,
+					amount: bond
+				},
+				DelegatedStakingEvent::Delegated {
+					agent: POOL1_BONDED,
+					delegator: 21,
+					amount: bond
+				},
+				DelegatedStakingEvent::Delegated {
+					agent: POOL1_BONDED,
+					delegator: 22,
+					amount: bond
+				}
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true },
+				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: bond, joined: true },
+				PoolsEvent::Bonded { member: 22, pool_id: 1, bonded: bond, joined: true },
+			]
+		);
+
+		// now let's progress a lot.
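+		// For reference: with `BondingDuration` set to 28, unbonding in eras 99/100/101
+		// creates unlock chunks at eras 127/128/129. The slash of 50 applied below for
+		// era 100 must skip the era-127 chunk (unbonded before the slash era) and is
+		// spread across the remaining 80: the era-128 and era-129 chunks plus the 40
+		// still-active bond, i.e. 5/8 of each.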
+		CurrentEra::<T>::set(Some(99));
+
+		// and unbond
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Unbonded {
+				member: 20,
+				pool_id: 1,
+				balance: bond,
+				points: bond,
+				era: 127
+			}]
+		);
+
+		CurrentEra::<T>::set(Some(100));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, bond));
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Unbonded {
+				member: 21,
+				pool_id: 1,
+				balance: bond,
+				points: bond,
+				era: 128
+			}]
+		);
+
+		CurrentEra::<T>::set(Some(101));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, bond));
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Unbonded {
+				member: 22,
+				pool_id: 1,
+				balance: bond,
+				points: bond,
+				era: 129
+			}]
+		);
+
+		// Apply a slash that happened in era 100. This is typically applied with a delay.
+		// Of the total 100, 50 is slashed.
+		assert_eq!(BondedPools::<T>::get(1).unwrap().points, 40);
+
+		// no pending slash yet.
+		assert_eq!(Pools::api_pool_pending_slash(1), 0);
+
+		pallet_staking::slashing::do_slash::<T>(
+			&POOL1_BONDED,
+			50,
+			&mut Default::default(),
+			&mut Default::default(),
+			100,
+		);
+
+		// Pools api returns correct slash amount.
+		assert_eq!(Pools::api_pool_pending_slash(1), 50);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				// This era got slashed 12.5, which rounded up to 13.
+				PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 128, balance: 7 },
+				// This era got slashed 12 instead of 12.5 because an earlier chunk got 0.5 more
+				// slashed, and 12 is all the remaining slash
+				PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 129, balance: 8 },
+				// Bonded pool got slashed for 25, remaining 15 in it.
+				PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 }
+			]
+		);
+
+		// 21's balance in the pool is slashed.
+		assert_eq!(PoolMembers::<T>::get(21).unwrap().total_balance(), 7);
+		// But their actual balance is still unslashed.
+		assert_eq!(Balances::total_balance_on_hold(&21), bond);
+		// 21 has pending slash
+		assert_eq!(Pools::api_member_pending_slash(21), bond - 7);
+		// apply slash permissionlessly.
+		assert_ok!(Pools::apply_slash(RuntimeOrigin::signed(10), 21));
+		// member balance is slashed.
+		assert_eq!(Balances::total_balance_on_hold(&21), 7);
+		// 21 has no pending slash anymore
+		assert_eq!(Pools::api_member_pending_slash(21), 0);
+
+		assert_eq!(
+			delegated_staking_events_since_last_call(),
+			vec![DelegatedStakingEvent::Slashed {
+				agent: POOL1_BONDED,
+				delegator: 21,
+				amount: bond - 7
+			}]
+		);
+
+		// 22's balance isn't slashed yet either.
+		assert_eq!(PoolMembers::<T>::get(22).unwrap().total_balance(), 8);
+		assert_eq!(Balances::total_balance_on_hold(&22), bond);
+
+		// they try to withdraw. This should slash them.
+		CurrentEra::<T>::set(Some(129));
+		let pre_balance = Balances::free_balance(&22);
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(22), 22, 0));
+		// all balance should be released.
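+		// (arithmetic check: 22's era-129 chunk went from 20 to 8 in the slash, so the
+		// pending slash of 12 is applied on withdraw and the remaining 8 is released.)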
+		assert_eq!(Balances::total_balance_on_hold(&22), 0);
+		assert_eq!(Balances::free_balance(&22), pre_balance + 8);
+
+		assert_eq!(
+			delegated_staking_events_since_last_call(),
+			vec![
+				DelegatedStakingEvent::Slashed {
+					agent: POOL1_BONDED,
+					delegator: 22,
+					amount: bond - 8
+				},
+				DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: 22, amount: 8 },
+			]
+		);
+	});
+}
+
+#[test]
+fn pool_slash_non_proportional_only_bonded_pool() {
+	// A typical example where a pool member unbonds in era 99, and they can get away with a slash
+	// that happened in era 100, as long as the pool has enough active bond to cover the slash. If
+	// everything else in the slashing/staking system works, this should always be the case.
+	// Nonetheless, `ledger.slash` has been written such that it will slash greedily from any chunk
+	// if it runs out of chunks that it thinks should be affected by the slash.
+	new_test_ext().execute_with(|| {
+		ExistentialDeposit::set(1);
+		BondingDuration::set(28);
+		assert_eq!(Balances::minimum_balance(), 1);
+		assert_eq!(Staking::current_era(), None);
+
+		// create the pool, we know this has id 1.
+		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Created { depositor: 10, pool_id: 1 },
+				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+			]
+		);
+
+		// have one member join
+		let bond = 20;
+		assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1));
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
+		);
+
+		// progress and unbond.
+		CurrentEra::<T>::set(Some(99));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond));
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Unbonded {
+				member: 20,
+				pool_id: 1,
+				balance: bond,
+				points: bond,
+				era: 127
+			}]
+		);
+
+		// slash for 30. This will be deducted only from the bonded pool.
+		CurrentEra::<T>::set(Some(100));
+		assert_eq!(BondedPools::<T>::get(1).unwrap().points, 40);
+		pallet_staking::slashing::do_slash::<T>(
+			&POOL1_BONDED,
+			30,
+			&mut Default::default(),
+			&mut Default::default(),
+			100,
+		);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::PoolSlashed { pool_id: 1, balance: 10 }]
+		);
+	});
+}
+
+#[test]
+fn pool_slash_non_proportional_bonded_pool_and_chunks() {
+	// An uncommon example where even though some funds are unlocked such that they should not be
+	// affected by a slash, we still slash out of them. This should not happen at all. If a
+	// nominator has unbonded, their exposure will drop from the next era onwards, so if a slash
+	// happens in that era, their share of that slash should naturally be smaller, such that their
+	// active ledger stake alone is enough to cover it.
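+	// Concretely, below: the bonded pool (40) absorbs as much of the 50 slash as it can,
+	// and the remaining 10 is taken greedily from the era-127 chunk, even though that
+	// chunk was unbonded before the slash era.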
+	new_test_ext().execute_with(|| {
+		ExistentialDeposit::set(1);
+		BondingDuration::set(28);
+		assert_eq!(Balances::minimum_balance(), 1);
+		assert_eq!(Staking::current_era(), None);
+
+		// create the pool, we know this has id 1.
+		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Created { depositor: 10, pool_id: 1 },
+				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
+			]
+		);
+
+		// have one member join
+		let bond = 20;
+		assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1));
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
+		);
+
+		// progress and unbond.
+		CurrentEra::<T>::set(Some(99));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond));
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Unbonded {
+				member: 20,
+				pool_id: 1,
+				balance: bond,
+				points: bond,
+				era: 127
+			}]
+		);
+
+		// slash 50. This will be deducted only from the bonded pool and one of the unbonding pools.
+		CurrentEra::<T>::set(Some(100));
+		assert_eq!(BondedPools::<T>::get(1).unwrap().points, 40);
+		pallet_staking::slashing::do_slash::<T>(
+			&POOL1_BONDED,
+			50,
+			&mut Default::default(),
+			&mut Default::default(),
+			100,
+		);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				// out of 20, 10 was taken.
+				PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 127, balance: 10 },
+				// out of 40, all was taken.
+				PoolsEvent::PoolSlashed { pool_id: 1, balance: 0 }
+			]
+		);
+	});
+}
+
+#[test]
+fn pool_migration_e2e() {
+	new_test_ext().execute_with(|| {
+		LegacyAdapter::set(true);
+		assert_eq!(Balances::minimum_balance(), 5);
+		assert_eq!(Staking::current_era(), None);
+
+		// create the pool with the `TransferStake` strategy.
+		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
+		assert_eq!(LastPoolId::<T>::get(), 1);
+
+		// have the pool nominate.
+		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Created { depositor: 10, pool_id: 1 },
+				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
+			]
+		);
+
+		// have three members join
+		let pre_20 = Balances::free_balance(20);
+		assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1));
+		let pre_21 = Balances::free_balance(21);
+		assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1));
+		let pre_22 = Balances::free_balance(22);
+		assert_ok!(Pools::join(RuntimeOrigin::signed(22), 10, 1));
+
+		// verify the members' balances moved to the pool.
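+		// (under the legacy `TransferStake` adapter the joined funds physically move to
+		// the pool's bonded account, so nothing is held in the members' own accounts yet;
+		// compare the `total_balance_on_hold` checks after the migration below.)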
+		assert_eq!(Balances::free_balance(20), pre_20 - 10);
+		assert_eq!(Balances::free_balance(21), pre_21 - 10);
+		assert_eq!(Balances::free_balance(22), pre_22 - 10);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
+				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true },
+				PoolsEvent::Bonded { member: 22, pool_id: 1, bonded: 10, joined: true },
+			]
+		);
+
+		CurrentEra::<T>::set(Some(2));
+		// 20 is partially unbonding
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5));
+
+		CurrentEra::<T>::set(Some(3));
+		// 21 is fully unbonding
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 },
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 5, points: 5, era: 5 },
+				PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 10, points: 10, era: 6 },
+			]
+		);
+
+		// with `TransferStake`, we can't migrate.
+		assert!(!Pools::api_pool_needs_delegate_migration(1));
+		assert_noop!(
+			Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1),
+			PoolsError::<T>::NotSupported
+		);
+
+		// we reset the adapter to `DelegateStake`.
+		LegacyAdapter::set(false);
+
+		// cannot migrate the member delegation unless the pool is migrated first.
+		assert!(!Pools::api_member_needs_delegate_migration(20));
+		assert_noop!(
+			Pools::migrate_delegation(RuntimeOrigin::signed(10), 20),
+			PoolsError::<T>::NotMigrated
+		);
+
+		// migrate the pool.
+		assert!(Pools::api_pool_needs_delegate_migration(1));
+		assert_ok!(Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1));
+
+		// migrating again does not work.
+		assert!(!Pools::api_pool_needs_delegate_migration(1));
+		assert_noop!(
+			Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1),
+			PoolsError::<T>::AlreadyMigrated
+		);
+
+		// unclaimed delegations to the pool are stored in this account.
+		let proxy_delegator_1 =
+			DelegatedStaking::generate_proxy_delegator(Agent::from(POOL1_BONDED)).get();
+
+		assert_eq!(
+			delegated_staking_events_since_last_call(),
+			vec![DelegatedStakingEvent::Delegated {
+				agent: POOL1_BONDED,
+				delegator: proxy_delegator_1,
+				amount: 50 + 10 * 3
+			}]
+		);
+
+		// move to era 5 when 20 can withdraw unbonded funds.
+		CurrentEra::<T>::set(Some(5));
+		// unbonding works even without claiming the delegation; let's unbond 22.
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, 5));
+
+		// withdraw fails for 20 before claiming the delegation
+		assert_noop!(
+			Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 10),
+			DelegatedStakingError::<T>::NotDelegator
+		);
+
+		let pre_claim_balance_20 = Balances::total_balance(&20);
+		assert_eq!(Balances::total_balance_on_hold(&20), 0);
+
+		// migrate delegation for 20. This is permissionless and can be called by anyone.
+		assert!(Pools::api_member_needs_delegate_migration(20));
+		assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 20));
+
+		// tokens moved to 20's account and held there.
+		assert_eq!(Balances::total_balance(&20), pre_claim_balance_20 + 10);
+		assert_eq!(Balances::total_balance_on_hold(&20), 10);
+
+		// withdraw works now
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 5));
+
+		// balance unlocked in 20's account
+		assert_eq!(Balances::total_balance_on_hold(&20), 5);
+		assert_eq!(Balances::total_balance(&20), pre_claim_balance_20 + 10);
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 },
+				StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 5 }
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Unbonded { member: 22, pool_id: 1, balance: 5, points: 5, era: 8 },
+				PoolsEvent::Withdrawn { member: 20, pool_id: 1, balance: 5, points: 5 },
+			]
+		);
+		assert_eq!(
+			delegated_staking_events_since_last_call(),
+			vec![
+				DelegatedStakingEvent::MigratedDelegation {
+					agent: POOL1_BONDED,
+					delegator: 20,
+					amount: 10
+				},
+				DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: 20, amount: 5 }
+			]
+		);
+
+		// MIGRATE 21
+		let pre_migrate_balance_21 = Balances::total_balance(&21);
+		assert_eq!(Balances::total_balance_on_hold(&21), 0);
+
+		// migrate delegation for 21.
+		assert!(Pools::api_member_needs_delegate_migration(21));
+		assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 21));
+
+		// tokens moved to 21's account and held there.
+		assert_eq!(Balances::total_balance(&21), pre_migrate_balance_21 + 10);
+		assert_eq!(Balances::total_balance_on_hold(&21), 10);
+
+		// withdraw fails since 21's funds only unlock at era 6.
+		assert_noop!(
+			Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 10),
+			PoolsError::<T>::CannotWithdrawAny
+		);
+
+		// go to the era when 21 can withdraw
+		CurrentEra::<T>::set(Some(6));
+
+		// withdraw works now
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 10));
+
+		// all balance unlocked in 21's account
+		assert_eq!(Balances::total_balance_on_hold(&21), 0);
+		assert_eq!(Balances::total_balance(&21), pre_migrate_balance_21 + 10);
+
+		// MIGRATE 22
+		let pre_migrate_balance_22 = Balances::total_balance(&22);
+		assert_eq!(Balances::total_balance_on_hold(&22), 0);
+
+		// migrate delegation for 22.
+		assert!(Pools::api_member_needs_delegate_migration(22));
+		assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 22));
+
+		// cannot migrate a pool member again.
+		assert!(!Pools::api_member_needs_delegate_migration(22));
+		assert_noop!(
+			Pools::migrate_delegation(RuntimeOrigin::signed(10), 22),
+			PoolsError::<T>::AlreadyMigrated
+		);
+
+		// tokens moved to 22's account and held there.
+		assert_eq!(Balances::total_balance(&22), pre_migrate_balance_22 + 10);
+		assert_eq!(Balances::total_balance_on_hold(&22), 10);
+
+		// withdraw fails since 22's funds only unlock at era 8.
+		assert_noop!(
+			Pools::withdraw_unbonded(RuntimeOrigin::signed(22), 22, 5),
+			PoolsError::<T>::CannotWithdrawAny
+		);
+
+		// go to the era when 22 can withdraw
+		CurrentEra::<T>::set(Some(10));
+
+		// withdraw works now
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(22), 22, 10));
+
+		// balance of 5 unlocked in 22's account
+		assert_eq!(Balances::total_balance_on_hold(&22), 10 - 5);
+
+		// assert events for 21 and 22.
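+		// (21 unbonded their full 10 and is removed from the pool; 22 unbonded 5 of 10,
+		// so 5 is withdrawn and the other 5 stays held as delegation.)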
+ assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 5 } + ] + ); + + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Withdrawn { member: 21, pool_id: 1, balance: 10, points: 10 }, + // 21 was fully unbonding and removed from pool. + PoolsEvent::MemberRemoved { member: 21, pool_id: 1 }, + PoolsEvent::Withdrawn { member: 22, pool_id: 1, balance: 5, points: 5 }, + ] + ); + assert_eq!( + delegated_staking_events_since_last_call(), + vec![ + DelegatedStakingEvent::MigratedDelegation { + agent: POOL1_BONDED, + delegator: 21, + amount: 10 + }, + DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: 21, amount: 10 }, + DelegatedStakingEvent::MigratedDelegation { + agent: POOL1_BONDED, + delegator: 22, + amount: 10 + }, + DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: 22, amount: 5 } + ] + ); + }) +} diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..50182326359813a19a90deeec465cbeb509cc36c --- /dev/null +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -0,0 +1,409 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+use frame_election_provider_support::VoteWeight;
+use frame_support::{
+	assert_ok, derive_impl,
+	pallet_prelude::*,
+	parameter_types,
+	traits::{ConstU64, ConstU8},
+	PalletId,
+};
+use frame_system::EnsureRoot;
+use pallet_nomination_pools::{
+	adapter::{Member, Pool, StakeStrategyType},
+	BondType,
+};
+use sp_runtime::{
+	traits::{Convert, IdentityLookup},
+	BuildStorage, FixedU128, Perbill,
+};
+
+type AccountId = u128;
+type Nonce = u32;
+type BlockNumber = u64;
+type Balance = u128;
+
+pub(crate) type T = Runtime;
+
+pub(crate) const POOL1_BONDED: AccountId = 20318131474730217858575332831085u128;
+pub(crate) const POOL1_REWARD: AccountId = 20397359637244482196168876781421u128;
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+impl frame_system::Config for Runtime {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type BlockWeights = ();
+	type BlockLength = ();
+	type DbWeight = ();
+	type RuntimeOrigin = RuntimeOrigin;
+	type Nonce = Nonce;
+	type RuntimeCall = RuntimeCall;
+	type Hash = sp_core::H256;
+	type Hashing = sp_runtime::traits::BlakeTwo256;
+	type AccountId = AccountId;
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type Block = Block;
+	type RuntimeEvent = RuntimeEvent;
+	type BlockHashCount = ();
+	type Version = ();
+	type PalletInfo = PalletInfo;
+	type AccountData = pallet_balances::AccountData<Balance>;
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
+	type MaxConsumers = frame_support::traits::ConstU32<16>;
+}
+
+impl pallet_timestamp::Config for Runtime {
+	type Moment = u64;
+	type OnTimestampSet = ();
+	type MinimumPeriod = ConstU64<5>;
+	type WeightInfo = ();
+}
+
+parameter_types! {
+	pub static ExistentialDeposit: Balance = 5;
+}
+
+impl pallet_balances::Config for Runtime {
+	type MaxLocks = ();
+	type MaxReserves = ();
+	type ReserveIdentifier = [u8; 8];
+	type Balance = Balance;
+	type RuntimeEvent = RuntimeEvent;
+	type DustRemoval = ();
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore = System;
+	type WeightInfo = ();
+	type FreezeIdentifier = RuntimeFreezeReason;
+	type MaxFreezes = ConstU32<1>;
+	type RuntimeHoldReason = RuntimeHoldReason;
+	type RuntimeFreezeReason = ();
+}
+
+pallet_staking_reward_curve::build! {
+	const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!(
+		min_inflation: 0_025_000,
+		max_inflation: 0_100_000,
+		ideal_stake: 0_500_000,
+		falloff: 0_050_000,
+		max_piece_count: 40,
+		test_precision: 0_005_000,
+	);
+}
+
+parameter_types! {
+	pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS;
+	pub static BondingDuration: u32 = 3;
+}
+
+impl pallet_staking::Config for Runtime {
+	type Currency = Balances;
+	type CurrencyBalance = Balance;
+	type UnixTime = pallet_timestamp::Pallet<Self>;
+	type CurrencyToVote = ();
+	type RewardRemainder = ();
+	type RuntimeEvent = RuntimeEvent;
+	type Slash = ();
+	type Reward = ();
+	type SessionsPerEra = ();
+	type SlashDeferDuration = ();
+	type AdminOrigin = frame_system::EnsureRoot<Self::AccountId>;
+	type BondingDuration = BondingDuration;
+	type SessionInterface = ();
+	type EraPayout = pallet_staking::ConvertCurve<RewardCurve>;
+	type NextNewSession = ();
+	type MaxExposurePageSize = ConstU32<64>;
+	type ElectionProvider =
+		frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>;
+	type GenesisElectionProvider = Self::ElectionProvider;
+	type VoterList = VoterList;
+	type TargetList = pallet_staking::UseValidatorsMap<Self>;
+	type NominationsQuota = pallet_staking::FixedNominationsQuota<16>;
+	type MaxUnlockingChunks = ConstU32<32>;
+	type MaxControllersInDeprecationBatch = ConstU32<100>;
+	type HistoryDepth = ConstU32<84>;
+	type EventListeners = (Pools, DelegatedStaking);
+	type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig;
+	type WeightInfo = ();
+	type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy;
+}
+
+parameter_types! {
+	pub static BagThresholds: &'static [VoteWeight] = &[10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000];
+}
+
+type VoterBagsListInstance = pallet_bags_list::Instance1;
+impl pallet_bags_list::Config<VoterBagsListInstance> for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type WeightInfo = ();
+	type BagThresholds = BagThresholds;
+	type ScoreProvider = Staking;
+	type Score = VoteWeight;
+}
+
+pub struct BalanceToU256;
+impl Convert<Balance, sp_core::U256> for BalanceToU256 {
+	fn convert(n: Balance) -> sp_core::U256 {
+		n.into()
+	}
+}
+
+pub struct U256ToBalance;
+impl Convert<sp_core::U256, Balance> for U256ToBalance {
+	fn convert(n: sp_core::U256) -> Balance {
+		n.try_into().unwrap()
+	}
+}
+
+parameter_types! {
+parameter_types! {
+	pub const PostUnbondingPoolsWindow: u32 = 10;
+	pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls");
+	pub static LegacyAdapter: bool = false;
+}
+
+pub struct MockAdapter;
+type DelegateStake =
+	pallet_nomination_pools::adapter::DelegateStake<Runtime, Staking, DelegatedStaking>;
+type TransferStake = pallet_nomination_pools::adapter::TransferStake<Runtime, Staking>;
+impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter {
+	type Balance = Balance;
+	type AccountId = AccountId;
+	type CoreStaking = Staking;
+
+	fn strategy_type() -> StakeStrategyType {
+		if LegacyAdapter::get() {
+			return TransferStake::strategy_type()
+		}
+		DelegateStake::strategy_type()
+	}
+	fn transferable_balance(pool_account: Pool<Self::AccountId>) -> Self::Balance {
+		if LegacyAdapter::get() {
+			return TransferStake::transferable_balance(pool_account)
+		}
+		DelegateStake::transferable_balance(pool_account)
+	}
+
+	fn total_balance(pool_account: Pool<Self::AccountId>) -> Option<Self::Balance> {
+		if LegacyAdapter::get() {
+			return TransferStake::total_balance(pool_account)
+		}
+		DelegateStake::total_balance(pool_account)
+	}
+
+	fn member_delegation_balance(member_account: Member<Self::AccountId>) -> Option<Self::Balance> {
+		if LegacyAdapter::get() {
+			return TransferStake::member_delegation_balance(member_account)
+		}
+		DelegateStake::member_delegation_balance(member_account)
+	}
+
+	fn pledge_bond(
+		who: Member<Self::AccountId>,
+		pool_account: Pool<Self::AccountId>,
+		reward_account: &Self::AccountId,
+		amount: Self::Balance,
+		bond_type: BondType,
+	) -> DispatchResult {
+		if LegacyAdapter::get() {
+			return TransferStake::pledge_bond(who, pool_account, reward_account, amount, bond_type)
+		}
+		DelegateStake::pledge_bond(who, pool_account, reward_account, amount, bond_type)
+	}
+
+	fn member_withdraw(
+		who: Member<Self::AccountId>,
+		pool_account: Pool<Self::AccountId>,
+		amount: Self::Balance,
+		num_slashing_spans: u32,
+	) -> DispatchResult {
+		if LegacyAdapter::get() {
+			return TransferStake::member_withdraw(who, pool_account, amount, num_slashing_spans)
+		}
+		DelegateStake::member_withdraw(who, pool_account, amount, num_slashing_spans)
+	}
+
+	fn pending_slash(pool_account: Pool<Self::AccountId>) -> Self::Balance {
+		if LegacyAdapter::get() {
+			return TransferStake::pending_slash(pool_account)
+		}
+		DelegateStake::pending_slash(pool_account)
+	}
+
+	fn member_slash(
+		who: Member<Self::AccountId>,
+		pool_account: Pool<Self::AccountId>,
+		amount: Self::Balance,
+		maybe_reporter: Option<Self::AccountId>,
+	) -> DispatchResult {
+		if LegacyAdapter::get() {
+			return TransferStake::member_slash(who, pool_account, amount, maybe_reporter)
+		}
+		DelegateStake::member_slash(who, pool_account, amount, maybe_reporter)
+	}
+
+	fn migrate_nominator_to_agent(
+		agent: Pool<Self::AccountId>,
+		reward_account: &Self::AccountId,
+	) -> DispatchResult {
+		if LegacyAdapter::get() {
+			return TransferStake::migrate_nominator_to_agent(agent, reward_account)
+		}
+		DelegateStake::migrate_nominator_to_agent(agent, reward_account)
+	}
+
+	fn migrate_delegation(
+		agent: Pool<Self::AccountId>,
+		delegator: Member<Self::AccountId>,
+		value: Self::Balance,
+	) -> DispatchResult {
+		if LegacyAdapter::get() {
+			return TransferStake::migrate_delegation(agent, delegator, value)
+		}
+		DelegateStake::migrate_delegation(agent, delegator, value)
+	}
+}
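
[Editor's note, not part of the diff] `MockAdapter` forwards every `StakeStrategy` call to either the legacy `TransferStake` or the new `DelegateStake` implementation, keyed on the `LegacyAdapter` flag, so a single mock runtime can exercise both staking strategies. A hypothetical test using the toggle:

```rust
// Sketch only: run the same scenario under both stake strategies.
// `LegacyAdapter` is the `pub static` parameter type declared above.
#[test]
fn works_with_either_adapter() {
	for legacy in [false, true] {
		LegacyAdapter::set(legacy);
		new_test_ext().execute_with(|| {
			assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
			// ... identical assertions for both strategies ...
		});
	}
}
```
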
+impl pallet_nomination_pools::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type WeightInfo = ();
+	type Currency = Balances;
+	type RuntimeFreezeReason = RuntimeFreezeReason;
+	type RewardCounter = FixedU128;
+	type BalanceToU256 = BalanceToU256;
+	type U256ToBalance = U256ToBalance;
+	type StakeAdapter = MockAdapter;
+	type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow;
+	type MaxMetadataLen = ConstU32<256>;
+	type MaxUnbonding = ConstU32<8>;
+	type MaxPointsToBalance = ConstU8<10>;
+	type PalletId = PoolsPalletId;
+	type AdminOrigin = EnsureRoot<AccountId>;
+}
+
+parameter_types! {
+	pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk");
+	pub const SlashRewardFraction: Perbill = Perbill::from_percent(1);
+}
+impl pallet_delegated_staking::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type PalletId = DelegatedStakingPalletId;
+	type Currency = Balances;
+	type OnSlash = ();
+	type SlashRewardFraction = SlashRewardFraction;
+	type RuntimeHoldReason = RuntimeHoldReason;
+	type CoreStaking = Staking;
+}
+type Block = frame_system::mocking::MockBlock<Runtime>;
+
+frame_support::construct_runtime!(
+	pub enum Runtime {
+		System: frame_system,
+		Timestamp: pallet_timestamp,
+		Balances: pallet_balances,
+		Staking: pallet_staking,
+		VoterList: pallet_bags_list::<Instance1>,
+		Pools: pallet_nomination_pools,
+		DelegatedStaking: pallet_delegated_staking,
+	}
+);
+
+pub fn new_test_ext() -> sp_io::TestExternalities {
+	sp_tracing::try_init_simple();
+	let mut storage = frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
+	let _ = pallet_nomination_pools::GenesisConfig::<Runtime> {
+		min_join_bond: 2,
+		min_create_bond: 2,
+		max_pools: Some(3),
+		max_members_per_pool: Some(5),
+		max_members: Some(3 * 5),
+		global_max_commission: Some(Perbill::from_percent(90)),
+	}
+	.assimilate_storage(&mut storage)
+	.unwrap();
+
+	let _ = pallet_balances::GenesisConfig::<Runtime> {
+		balances: vec![(10, 100), (20, 100), (21, 100), (22, 100)],
+	}
+	.assimilate_storage(&mut storage)
+	.unwrap();
+
+	let mut ext = sp_io::TestExternalities::from(storage);
+
+	ext.execute_with(|| {
+		// for events to be deposited.
+		frame_system::Pallet::<Runtime>::set_block_number(1);
+
+		// set some limit for nominations.
+		assert_ok!(Staking::set_staking_configs(
+			RuntimeOrigin::root(),
+			pallet_staking::ConfigOp::Set(10), // minimum nominator bond
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+			pallet_staking::ConfigOp::Noop,
+		));
+	});
+
+	ext
+}
+
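
[Editor's note, not part of the diff] The hard-coded `POOL1_BONDED`/`POOL1_REWARD` constants near the top of this file are, by our reading, the derived sub-accounts of pool 1, truncated into this mock's `u128` `AccountId`. Roughly (the `(tag, pool_id)` encoding is an assumption mirroring the pools pallet's internal account derivation):

```rust
// Hypothetical sketch of the derivation, not code from the diff.
use sp_runtime::traits::AccountIdConversion;

fn pool_sub_account(tag: u8, pool_id: u32) -> AccountId {
	// tag 0 ~ bonded account, tag 1 ~ reward account (assumed encoding).
	PoolsPalletId::get().into_sub_account_truncating((tag, pool_id))
}
```
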
+parameter_types! {
+	static ObservedEventsPools: usize = 0;
+	static ObservedEventsStaking: usize = 0;
+	static ObservedEventsBalances: usize = 0;
+	static ObservedEventsDelegatedStaking: usize = 0;
+}
+
+pub(crate) fn pool_events_since_last_call() -> Vec<pallet_nomination_pools::Event<Runtime>> {
+	let events = System::events()
+		.into_iter()
+		.map(|r| r.event)
+		.filter_map(|e| if let RuntimeEvent::Pools(inner) = e { Some(inner) } else { None })
+		.collect::<Vec<_>>();
+	let already_seen = ObservedEventsPools::get();
+	ObservedEventsPools::set(events.len());
+	events.into_iter().skip(already_seen).collect()
+}
+
+pub(crate) fn staking_events_since_last_call() -> Vec<pallet_staking::Event<Runtime>> {
+	let events = System::events()
+		.into_iter()
+		.map(|r| r.event)
+		.filter_map(|e| if let RuntimeEvent::Staking(inner) = e { Some(inner) } else { None })
+		.collect::<Vec<_>>();
+	let already_seen = ObservedEventsStaking::get();
+	ObservedEventsStaking::set(events.len());
+	events.into_iter().skip(already_seen).collect()
+}
+
+pub(crate) fn delegated_staking_events_since_last_call(
+) -> Vec<pallet_delegated_staking::Event<Runtime>> {
+	let events = System::events()
+		.into_iter()
+		.map(|r| r.event)
+		.filter_map(
+			|e| if let RuntimeEvent::DelegatedStaking(inner) = e { Some(inner) } else { None },
+		)
+		.collect::<Vec<_>>();
+	let already_seen = ObservedEventsDelegatedStaking::get();
+	ObservedEventsDelegatedStaking::set(events.len());
+	events.into_iter().skip(already_seen).collect()
+}
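
[Editor's note, not part of the diff] These helpers keep a cursor (the `ObservedEvents*` statics) so each call returns only the events emitted since the previous call. Typical use, as in the tests below:

```rust
// Illustrative only: drain setup events first, then assert that the action
// under test emitted exactly the expected new events.
let _ = pool_events_since_last_call();
assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying));
assert_eq!(
	pool_events_since_last_call(),
	vec![PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying }],
);
```

(`PoolsEvent` is the alias the test crates use for `pallet_nomination_pools::Event`.)
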
diff --git a/substrate/frame/nomination-pools/test-staking/Cargo.toml b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
similarity index 89%
rename from substrate/frame/nomination-pools/test-staking/Cargo.toml
rename to substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
index 130a27752bf37768d34bcbc1012f51524c2df277..5f9bc9af3a214eb3d82da11786c93bf462565a26 100644
--- a/substrate/frame/nomination-pools/test-staking/Cargo.toml
+++ b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
@@ -1,5 +1,5 @@
 [package]
-name = "pallet-nomination-pools-test-staking"
+name = "pallet-nomination-pools-test-transfer-stake"
 version = "1.0.0"
 authors.workspace = true
 edition.workspace = true
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dev-dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] }
 scale-info = { version = "2.11.1", features = ["derive"] }
 sp-runtime = { path = "../../../primitives/runtime" }
diff --git a/substrate/frame/nomination-pools/test-staking/src/lib.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs
similarity index 90%
rename from substrate/frame/nomination-pools/test-staking/src/lib.rs
rename to substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs
index d84e09e32ba353267e3a2629a372bd189137454d..aa91350259000b810eb63a0036754517eeb8421d 100644
--- a/substrate/frame/nomination-pools/test-staking/src/lib.rs
+++ b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs
@@ -193,6 +193,95 @@ fn pool_lifecycle_e2e() {
 	})
 }
 
+#[test]
+fn destroy_pool_with_erroneous_consumer() {
+	new_test_ext().execute_with(|| {
+		// create the pool, we know this has id 1.
+		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
+		assert_eq!(LastPoolId::<Runtime>::get(), 1);
+
+		// expect consumers on pool account to be 2 (staking lock and an explicit inc by staking).
+		assert_eq!(frame_system::Pallet::<Runtime>::consumers(&POOL1_BONDED), 2);
+
+		// increment consumers by 1, reproducing the erroneous consumer bug.
+		// refer https://github.com/paritytech/polkadot-sdk/issues/4440.
+		assert_ok!(frame_system::Pallet::<Runtime>::inc_consumers(&POOL1_BONDED));
+		assert_eq!(frame_system::Pallet::<Runtime>::consumers(&POOL1_BONDED), 3);
+
+		// have the pool nominate.
+		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Created { depositor: 10, pool_id: 1 },
+				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
+			]
+		);
+
+		// pool goes into destroying
+		assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying));
+
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying },]
+		);
+
+		// move to era 1
+		CurrentEra::<Runtime>::set(Some(1));
+
+		// the depositor needs to chill before unbonding
+		assert_noop!(
+			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
+			pallet_staking::Error::<Runtime>::InsufficientBond
+		);
+
+		assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1));
+		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 50));
+
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![
+				StakingEvent::Chilled { stash: POOL1_BONDED },
+				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 },
+			]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![PoolsEvent::Unbonded {
+				member: 10,
+				pool_id: 1,
+				points: 50,
+				balance: 50,
+				era: 1 + 3
+			}]
+		);
+
+		// wait out the bonding duration:
+		CurrentEra::<Runtime>::set(Some(1 + 3));
+		// this should work even with an extra consumer count on the pool account.
+		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 1));
+
+		// the pool is fully destroyed now.
+		assert_eq!(
+			staking_events_since_last_call(),
+			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },]
+		);
+		assert_eq!(
+			pool_events_since_last_call(),
+			vec![
+				PoolsEvent::Withdrawn { member: 10, pool_id: 1, points: 50, balance: 50 },
+				PoolsEvent::MemberRemoved { pool_id: 1, member: 10 },
+				PoolsEvent::Destroyed { pool_id: 1 }
+			]
+		);
+	})
+}
+
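
[Editor's note, not part of the diff] The regression test above pins down issue #4440: a stray consumer reference on the bonded account used to make pool destruction impossible, so `withdraw_unbonded` must now tolerate it. The general idiom for simulating such a leak in a mock runtime is just the `frame_system` reference counters:

```rust
// Illustrative idiom: fake a leaked consumer reference, run the calls under
// test, then (optionally) undo the leak. `inc_consumers` is fallible, hence
// the `assert_ok!`.
assert_ok!(frame_system::Pallet::<Runtime>::inc_consumers(&POOL1_BONDED));
// ... exercise the extrinsics that must survive the extra reference ...
frame_system::Pallet::<Runtime>::dec_consumers(&POOL1_BONDED);
```
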
 #[test]
 fn pool_chill_e2e() {
 	new_test_ext().execute_with(|| {
diff --git a/substrate/frame/nomination-pools/test-staking/src/mock.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs
similarity index 98%
rename from substrate/frame/nomination-pools/test-staking/src/mock.rs
rename to substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs
index 93a05ddfae990108c7277c2448ff1470ae11d2ae..0970570453b469138774eeb783914990deb0938b 100644
--- a/substrate/frame/nomination-pools/test-staking/src/mock.rs
+++ b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs
@@ -180,7 +180,7 @@ impl pallet_nomination_pools::Config for Runtime {
 	type RewardCounter = FixedU128;
 	type BalanceToU256 = BalanceToU256;
 	type U256ToBalance = U256ToBalance;
-	type Staking = Staking;
+	type StakeAdapter = pallet_nomination_pools::adapter::TransferStake<Self, Staking>;
 	type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow;
 	type MaxMetadataLen = ConstU32<256>;
 	type MaxUnbonding = ConstU32<8>;
diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml
index f8efc88bafc1aa3ec4bceb9150d62268499d62b3..a59ef9334f0bc3cba6a92943a1cbbdbe4d3c94d1 100644
--- a/substrate/frame/offences/Cargo.toml
+++ b/substrate/frame/offences/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, workspace = true, default-features = true }
diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml
index 07905a1e0aa42d2aa859046b595569e275fe778a..bbd918a2883f30afb82310424375eb3ed5b5a8e8 100644
--- a/substrate/frame/offences/benchmarking/Cargo.toml
+++ b/substrate/frame/offences/benchmarking/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../../benchmarking", default-features = false }
 frame-election-provider-support = { path = "../../election-provider-support", default-features = false }
diff --git a/substrate/frame/offences/src/mock.rs b/substrate/frame/offences/src/mock.rs
index 9a3120e41eaa0ae04ef4d211868c53782e3a4cbf..1725f4158d3338c4484676fcacf22be12cd4d321 100644
--- a/substrate/frame/offences/src/mock.rs
+++ b/substrate/frame/offences/src/mock.rs
@@ -24,7 +24,7 @@ use crate::Config;
 use codec::Encode;
 use frame_support::{
 	derive_impl, parameter_types,
-	traits::{ConstU32, ConstU64},
+	traits::ConstU32,
 	weights::{constants::RocksDbWeight, Weight},
 };
 use sp_core::H256;
@@ -88,7 +88,6 @@ impl frame_system::Config for Runtime {
 	type Lookup = IdentityLookup<Self::AccountId>;
 	type Block = Block;
 	type RuntimeEvent = RuntimeEvent;
-	type BlockHashCount = ConstU64<250>;
 	type Version = ();
 	type PalletInfo = PalletInfo;
 	type AccountData = ();
diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml
index 26f3d7e48ced011697194abe734f87bee84756ee..f550e694349468efcca9bacf51242d378b64ba64 100644
--- a/substrate/frame/paged-list/Cargo.toml
+++ b/substrate/frame/paged-list/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 docify = "0.2.8"
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
diff --git a/substrate/frame/paged-list/src/mock.rs b/substrate/frame/paged-list/src/mock.rs
index 5d06170aae7f991ae1e0b77c530272cbd5ef14dd..e086b4ba2b27b300b63711dbc6436bb7974a2423 100644
--- a/substrate/frame/paged-list/src/mock.rs
+++ b/substrate/frame/paged-list/src/mock.rs
@@ -20,10 +20,7 @@
 #![cfg(feature = "std")]
 
 use crate::{paged_list::StoragePagedListMeta, Config, ListPrefix};
-use frame_support::{
-	derive_impl,
-	traits::{ConstU16, ConstU64},
-};
+use frame_support::{derive_impl, traits::ConstU16};
 use sp_core::H256;
 use sp_runtime::{
 	traits::{BlakeTwo256, IdentityLookup},
@@ -56,7 +53,6 @@ impl frame_system::Config for Test {
 	type Lookup = IdentityLookup<Self::AccountId>;
 	type Block = Block;
 	type RuntimeEvent = RuntimeEvent;
-	type BlockHashCount = ConstU64<250>;
 	type Version = ();
 	type PalletInfo = PalletInfo;
 	type AccountData = ();
diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml
index b718b391019a701194678e819a1c823371e15231..c4d6d189d2d2c2a33a428cdcf3290c629464662b 100644
--- a/substrate/frame/parameters/Cargo.toml
+++ b/substrate/frame/parameters/Cargo.toml
@@ -8,7 +8,7 @@ authors = ["Acala Developers", "Parity Technologies <admin@parity.io>"]
 edition.workspace = true
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["max-encoded-len"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["max-encoded-len"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 paste = { version = "1.0.14", default-features = false }
 serde = { features = ["derive"], optional = true, workspace = true, default-features = true }
diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml
index d67fc7bead04c4dc01aca6494d823c786d69715c..d420accbd6d914816d00a57879d7187f0d0d4ead 100644
--- a/substrate/frame/preimage/Cargo.toml
+++ b/substrate/frame/preimage/Cargo.toml
@@ -12,7 +12,7 @@ description = "FRAME pallet for storing preimages of hashes"
 workspace = true
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true }
 frame-support = { path = "../support", default-features = false }
diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml
index 
0a3b39e471d429f6bc5cfdec203aa085d8f6144e..fcebbb5f3e8a077c8505f333b073d384fb1646b2 100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index 0a6595807750388d3a99016276f69d57772bbba7..05ce76cad2bbe133a77e9c318ce8f32a57f3a659 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index 43608de37fc3c08ecb9a266388c2b2b8b07d0255..2fd63597da9caf7ede188eb4a0e70a7d809bae05 100644 --- a/substrate/frame/recovery/Cargo.toml +++ b/substrate/frame/recovery/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index f4e0171443aa5181e13866edf07a98e6d1b0bf8e..dde522ff89b59a647e87dd8436cb32bf1bb0274f 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] assert_matches = { version = "1.5", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index e746b0382aea105009861aa14d4a9d9c63940556..d251aacfb5b2c204a29d3098b41cba4f0317b3ca 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = 
"parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index f4d83c237b9cb55a81a2ab027934dd9021981d6f..e7317d737fac125e5ebbafa2be46b0426014bd6e 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } pallet-session = { path = "../session", default-features = false, features = ["historical"] } diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index bf14516ee32281558cbfba28d23f1e9f8526350a..74a3b8f479fa30df155de60d306251a7f8e7e694 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index b6b7e5a67e481a75914207410afe64a8c5b38280..7ecbdb6eeda5b16d35be7a98885ba0a027fbb89a 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index fbfc16f4aa2881b25db9b27c3be3c53973a925ab..0beb911267dc5265bdf14cd1f46afff27b422e55 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -47,7 +47,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index 8c77edcb173a03ed4ed8a8c039c3e670d4bf02ce..25911269a95ddd3b51e639f56c10a647d321ba77 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { 
package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index c9a70a730d42b450bec5ceafa7a3fd567f705c15..82fb9a1d8c5f1bd57073d9212003f6f35da42178 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 40a71736447076d10fd6d8f668a019581f5911f3..e851f876112e8829a7914b18f5695436040ebbb8 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index 92b70e01b9ab7e37d0274b434b357eb654773e83..f25bd1f1769ba458c8ea8c88ac9d0e18ce8fde2e 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index 86814f8276e7bed8625f726b17d5ea18c6cf604f..42ea957ac1581fa8434d94a0737597d82f4b74e5 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = 
["derive", "serde"] } diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index a00fbd8f6fdf5452b1e225332efaac7f0d09adfb..a306f9015c02913da0f2ebf96a252a86f4333c51 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } rand = { version = "0.8.5", default-features = false, features = ["std_rng"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } frame-support = { path = "../../support", default-features = false } @@ -28,7 +28,7 @@ sp-session = { path = "../../../primitives/session", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } scale-info = "2.11.1" frame-election-provider-support = { path = "../../election-provider-support" } pallet-balances = { path = "../../balances" } diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index df71f79a29f29338f5fef48716b5e6b55858bf6c..ed7fea523bffbd15a4da83e16f167ba239ecfca8 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = { workspace = true } rand_chacha = { version = "0.3.1", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-std = { path = "../../primitives/std", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index ec19f4303f9f3433e0ef65f48b90f57794375fd7..fde5c63c6ffba13a7550a159366c90bcf1ff5734 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } diff --git a/substrate/frame/staking/reward-curve/src/lib.rs b/substrate/frame/staking/reward-curve/src/lib.rs index 1986357edabe20cca94d0d0e26e1bee54e08a0ac..cfb8b896f9394ad06223da2097a91b620fcf30d4 100644 --- a/substrate/frame/staking/reward-curve/src/lib.rs +++ b/substrate/frame/staking/reward-curve/src/lib.rs @@ -88,7 +88,13 @@ pub fn build(input: TokenStream) -> TokenStream { let ident = syn::Ident::new(&sp_runtime, Span::call_site()); quote!( #[doc(hidden)] pub use #ident as _sp_runtime; ) }, - Err(e) => syn::Error::new(Span::call_site(), e).to_compile_error(), + Err(e) => match crate_name("polkadot-sdk") { + 
Ok(FoundCrate::Name(polkadot_sdk)) => {
+				let ident = syn::Ident::new(&polkadot_sdk, Span::call_site());
+				quote!( #[doc(hidden)] pub use #ident::sp_runtime as _sp_runtime; )
+			},
+			_ => syn::Error::new(Span::call_site(), e).to_compile_error(),
+		},
 	};
 
 	let const_name = input.ident;
diff --git a/substrate/frame/staking/runtime-api/Cargo.toml b/substrate/frame/staking/runtime-api/Cargo.toml
index 50a19be92da8e6be4ddaf8fe29e649ee7b59c256..19da2f24ff00e80321e20506b2fe13a0ddd71978 100644
--- a/substrate/frame/staking/runtime-api/Cargo.toml
+++ b/substrate/frame/staking/runtime-api/Cargo.toml
@@ -16,7 +16,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
 sp-api = { default-features = false, path = "../../../primitives/api" }
 sp-staking = { default-features = false, path = "../../../primitives/staking" }
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index 63d06ea988dc5f027f849bcc08274bc4f0a9fb87..ac24e744c544fc5f6f88ee1830065a9380544054 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -361,7 +361,7 @@ pub type BalanceOf<T> = <T as Config>::CurrencyBalance;
 type PositiveImbalanceOf<T> = <<T as Config>::Currency as Currency<
 	<T as frame_system::Config>::AccountId,
 >>::PositiveImbalance;
-type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<
+pub type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<
 	<T as frame_system::Config>::AccountId,
 >>::NegativeImbalance;
 
@@ -376,7 +376,7 @@ pub struct ActiveEraInfo {
 	///
 	/// Start can be none if start hasn't been set for the era yet,
 	/// Start is set on the first on_finalize of the era to guarantee usage of `Time`.
-	start: Option<u64>,
+	pub start: Option<u64>,
 }
 
 /// Reward points of an era. Used to split era total payout between validators.
diff --git a/substrate/frame/staking/src/migrations/single_block.rs b/substrate/frame/staking/src/migrations/single_block.rs
index 4a25dbdab4f7a334332c02dbac6bc29f9c723e12..f5a774cb19202283e9cccf15626fdbaf600fa7b7 100644
--- a/substrate/frame/staking/src/migrations/single_block.rs
+++ b/substrate/frame/staking/src/migrations/single_block.rs
@@ -379,7 +379,7 @@ pub mod v10 {
 			StorageVersion::<T>::put(ObsoleteReleases::V10_0_0);
 			log!(info, "MigrateToV10 executed successfully");
 
-			T::DbWeight::get().reads_writes(1, 1)
+			T::DbWeight::get().reads_writes(1, 2)
 		} else {
 			log!(warn, "MigrateToV10 should be removed.");
 			T::DbWeight::get().reads(1)
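
[Editor's note, not part of the diff] The `reads_writes(1, 1)` to `reads_writes(1, 2)` correction reflects that, by our reading, the migrating branch of `MigrateToV10` performs one read (the version check) but two writes (the migrated data plus the bumped `ObsoleteReleases` version), so the old accounting under-counted by one write:

```rust
// Sketch of the corrected accounting; `DbWeight` comes from the runtime's
// `frame_system::Config` implementation.
let weight = T::DbWeight::get().reads_writes(
	1, // version read
	2, // migrated data write + storage-version write
);
```
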
diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs
index 81a3c9c4a46a3a82dcc837ae809f7e12c727eaf3..ef376813119387b71716c10816ecda06e4eafd20 100644
--- a/substrate/frame/staking/src/pallet/impls.rs
+++ b/substrate/frame/staking/src/pallet/impls.rs
@@ -1306,11 +1306,6 @@ impl<T: Config> Pallet<T> {
 	) -> Exposure<T::AccountId, BalanceOf<T>> {
 		EraInfo::<T>::get_full_exposure(era, account)
 	}
-
-	/// Whether `who` is a virtual staker whose funds are managed by another pallet.
-	pub(crate) fn is_virtual_staker(who: &T::AccountId) -> bool {
-		VirtualStakers::<T>::contains_key(who)
-	}
 }
 
 impl<T: Config> Pallet<T> {
@@ -2019,6 +2014,11 @@ impl<T: Config> StakingInterface for Pallet<T> {
 		}
 	}
 
+	/// Whether `who` is a virtual staker whose funds are managed by another pallet.
+	fn is_virtual_staker(who: &T::AccountId) -> bool {
+		VirtualStakers::<T>::contains_key(who)
+	}
+
 	fn slash_reward_fraction() -> Perbill {
 		SlashRewardFraction::<T>::get()
 	}
diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs
index f831f625957d4c495fce3b239e68faed737cb3ab..1fe608cd3358bb8d7ab113ee96d720df4998c759 100644
--- a/substrate/frame/staking/src/slashing.rs
+++ b/substrate/frame/staking/src/slashing.rs
@@ -64,7 +64,7 @@ use sp_runtime::{
 	traits::{Saturating, Zero},
 	DispatchResult, RuntimeDebug,
 };
-use sp_staking::EraIndex;
+use sp_staking::{EraIndex, StakingInterface};
 use sp_std::vec::Vec;
 
 /// The proportion of the slashing reward to be paid out on the first slashing detection.
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index 689b7e232e4bd815f9ef3d82c9b9c1e9ffcb051d..0226600b2a8f2bd71478c232e7302755301259e5 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -5444,6 +5444,7 @@ mod election_data_provider {
 	// maybe_max_len`.
 	#[test]
 	#[should_panic]
+	#[cfg(debug_assertions)]
 	fn only_iterates_max_2_times_max_allowed_len() {
 		ExtBuilder::default()
 			.nominate(false)
@@ -6143,6 +6144,7 @@ fn min_commission_works() {
 
 #[test]
 #[should_panic]
+#[cfg(debug_assertions)]
 fn change_of_absolute_max_nominations() {
 	use frame_election_provider_support::ElectionDataProvider;
 	ExtBuilder::default()
diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml
index 613308c308e1f15eace901af1a192202f1aa8cad..0870989d81f15df54cec9c41a0fa408edb45af36 100644
--- a/substrate/frame/state-trie-migration/Cargo.toml
+++ b/substrate/frame/state-trie-migration/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false }
 log = { workspace = true }
 scale-info = { version = "2.11.1", default-features = false, features = ["derive"] }
 serde = { optional = true, workspace = true, default-features = true }
diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs
index 5c54c27966cd6951309ff320d942fac1ba15e9c7..4ec649f9080d463dcf72dbb96e9a34ce49124af0 100644
--- a/substrate/frame/state-trie-migration/src/lib.rs
+++ b/substrate/frame/state-trie-migration/src/lib.rs
@@ -1103,11 +1103,7 @@ mod benchmarks {
 mod mock {
 	use super::*;
 	use crate as pallet_state_trie_migration;
-	use frame_support::{
-		derive_impl, parameter_types,
-		traits::{ConstU32, Hooks},
-		weights::Weight,
-	};
+	use frame_support::{derive_impl, parameter_types, traits::Hooks, weights::Weight};
 	use frame_system::{EnsureRoot, EnsureSigned};
 	use sp_core::{
 		storage::{ChildInfo, StateVersion},
@@ -1134,7 +1130,6 @@ mod mock {
 	#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 	impl frame_system::Config for Test {
 		type Block = Block;
-		type BlockHashCount = ConstU32<250>;
 		type AccountData = pallet_balances::AccountData;
 	}
diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml
index 92bc32191ab84d41e9dd18ef5f3863d2c5502edd..989f0c330fc10b19bde46d92d853fcbdef2b463a 100644
--- a/substrate/frame/statement/Cargo.toml
+++ b/substrate/frame/statement/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-codec = { package = 
"parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index 805f46a77f2eb0c3f7433a83659972a9d7da65c7..fcbb00087e26c7b08a00227c142a2565b2c0c04b 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 9c977125673762c4fdb65699e5cfed1e256ea42b..a6c4fd6ee309ec4fc07495b59df341b854dfb1a9 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.2.2", default-features = false } serde = { features = ["alloc", "derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ] } diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 53f01329d1815d280031574ca232a166e7f8148e..e812ac071b2c9a2cf2e8ef199c2e1d656b7f0494 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1249,7 +1249,6 @@ pub fn import_section(attr: TokenStream, tokens: TokenStream) -> TokenStream { /// /// * The macro generates a type alias for each pallet to their `Pallet`. E.g. `type System = /// frame_system::Pallet` -#[cfg(feature = "experimental")] #[proc_macro_attribute] pub fn runtime(attr: TokenStream, item: TokenStream) -> TokenStream { runtime::runtime(attr, item) diff --git a/substrate/frame/support/procedural/src/runtime/mod.rs b/substrate/frame/support/procedural/src/runtime/mod.rs index aaae579eb086638f1bfacf02105a8fcd0e3d01b5..1d4242cd122eb798d82494ef2f4d630571f2d518 100644 --- a/substrate/frame/support/procedural/src/runtime/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/mod.rs @@ -200,8 +200,6 @@ //! +----------------------+ //! 
```
-#![cfg(feature = "experimental")]
-
 pub use parse::Def;
 use proc_macro::TokenStream;
 use syn::spanned::Spanned;
diff --git a/substrate/frame/support/procedural/src/runtime/parse/mod.rs b/substrate/frame/support/procedural/src/runtime/parse/mod.rs
index 893cb4726e2b6016fa2e728add86990e24087c42..dd83cd0da90a2185ebc5015daadba0a9d00ac438 100644
--- a/substrate/frame/support/procedural/src/runtime/parse/mod.rs
+++ b/substrate/frame/support/procedural/src/runtime/parse/mod.rs
@@ -152,8 +152,7 @@ impl Def {
 		let mut pallets = vec![];
 
 		for item in items.iter_mut() {
-			let mut pallet_item = None;
-			let mut pallet_index = 0;
+			let mut pallet_index_and_item = None;
 			let mut disable_call = false;
 			let mut disable_unsigned = false;
@@ -170,9 +169,8 @@ impl Def {
 					runtime_types = Some(types);
 				},
 				RuntimeAttr::PalletIndex(span, index) => {
-					pallet_index = index;
-					pallet_item = if let syn::Item::Type(item) = item {
-						Some(item.clone())
+					pallet_index_and_item = if let syn::Item::Type(item) = item {
+						Some((index, item.clone()))
 					} else {
 						let msg = "Invalid runtime::pallet_index, expected type definition";
 						return Err(syn::Error::new(span, msg))
@@ -187,7 +185,7 @@ impl Def {
 				}
 			}
 
-			if let Some(pallet_item) = pallet_item {
+			if let Some((pallet_index, pallet_item)) = pallet_index_and_item {
 				match *pallet_item.ty.clone() {
 					syn::Type::Path(ref path) => {
 						let pallet_decl =
@@ -230,6 +228,11 @@ impl Def {
 					},
 					_ => continue,
 				}
+			} else {
+				if let syn::Item::Type(item) = item {
+					let msg = "Missing pallet index for pallet declaration. Please add `#[runtime::pallet_index(...)]`";
+					return Err(syn::Error::new(item.span(), &msg))
+				}
 			}
 		}
diff --git a/substrate/frame/support/procedural/tools/src/lib.rs b/substrate/frame/support/procedural/tools/src/lib.rs
index 8952cd6011ff5bb717a9e731d09525d160d95739..ea53335a88fd4565e41680e2393b9affb248c975 100644
--- a/substrate/frame/support/procedural/tools/src/lib.rs
+++ b/substrate/frame/support/procedural/tools/src/lib.rs
@@ -70,6 +70,8 @@ pub fn is_using_frame_crate(path: &syn::Path) -> bool {
 pub fn generate_access_from_frame_or_crate(def_crate: &str) -> Result<syn::Path, Error> {
 	if let Some(path) = get_frame_crate_path(def_crate) {
 		Ok(path)
+	} else if let Some(path) = get_sdk_crate_path(def_crate) {
+		Ok(path)
 	} else {
 		let ident = match crate_name(def_crate) {
 			Ok(FoundCrate::Itself) => {
@@ -95,6 +97,13 @@ pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream
 				pub use #path as hidden_include;
 			}
 		)
+	} else if let Some(path) = get_sdk_crate_path(def_crate) {
+		quote::quote!(
+			#[doc(hidden)]
+			mod #mod_name {
+				pub use #path as hidden_include;
+			}
+		)
 	} else {
 		match crate_name(def_crate) {
 			Ok(FoundCrate::Itself) => quote!(),
@@ -128,6 +137,15 @@ fn get_frame_crate_path(def_crate: &str) -> Option<syn::Path> {
 	}
 }
 
+fn get_sdk_crate_path(def_crate: &str) -> Option<syn::Path> {
+	if let Ok(FoundCrate::Name(name)) = crate_name(&"polkadot-sdk") {
+		let path = format!("{}::{}", name, def_crate.to_string()).replace("-", "_");
+		Some(syn::parse_str::<syn::Path>(&path).expect("is a valid path; qed"))
+	} else {
+		None
+	}
+}
+
 // fn to remove white spaces around string types
 // (basically whitespaces around tokens)
 pub fn clean_type_string(input: &str) -> String {
diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs
index 7eddea1259d7d040e57084232feb53eb2b6b1270..8ae1f56b4d686e6d6aa637aac1f6f2a0973e6157 100644
--- a/substrate/frame/support/src/lib.rs
+++ b/substrate/frame/support/src/lib.rs
@@ -508,7 +508,6 @@ pub use frame_support_procedural::{
 	construct_runtime, 
match_and_insert, transactional, PalletError, RuntimeDebugNoBound, }; -#[cfg(feature = "experimental")] pub use frame_support_procedural::runtime; #[doc(hidden)] diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 88124e0a43b9cc10438e01e44d98e9f9130ca56c..6e861ad769cf71d0c1200b2039f28217a558b335 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" serde = { features = ["derive"], workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } sp-api = { path = "../../../primitives/api", default-features = false } diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 3f52b4664b187675afd7bd13c4c96db62523a73b..37c069247e1875b547baa047c71a7895c686abe6 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } renamed-frame-support = { package = "frame-support", path = "../..", default-features = false } renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false } diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index 7a20c3f2730629e709eb78c650b0ca4ca0d3e964..8607339a2b054530404499c23a402c2a673e2b5a 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], workspace = true } frame-support = { path = "../..", default-features = false } diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 554c81ab43decf222b516bf8bbd7660cab399eaf..5b97db60c00bb7c8b5b40dfa9f823fcc27b73d95 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } frame = { package = "polkadot-sdk-frame", path = "../../..", 
default-features = false, features = ["experimental", "runtime"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 96504b7ce77525331aa55abf670df9b53bb1ad76..b28cae2ddefab062666fc6ba1334deb2b02ce9a8 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -107,7 +107,7 @@ note: required because it appears within the type `RuntimeEvent` 28 | | } | |_^ note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.12/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -137,7 +137,7 @@ note: required because it appears within the type `RuntimeEvent` 28 | | } | |_^ note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs + --> $CARGO/parity-scale-codec-3.6.12/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` @@ -286,7 +286,7 @@ note: required because it appears within the type `RuntimeCall` 28 | | } | |_^ note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.12/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -317,7 +317,7 @@ note: required because it appears within the type `RuntimeCall` 28 | | } | |_^ note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs + --> $CARGO/parity-scale-codec-3.6.12/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` diff --git a/substrate/frame/support/test/tests/final_keys.rs b/substrate/frame/support/test/tests/final_keys.rs index a777c20a1e98f7a38c4a4cc4dead81e5565b6ebc..64f56d5200354c8957d2d10915bc49d820e1c4cb 100644 --- a/substrate/frame/support/test/tests/final_keys.rs +++ b/substrate/frame/support/test/tests/final_keys.rs @@ -19,7 +19,7 @@ use codec::Encode; use frame_support::{derive_impl, storage::unhashed, StoragePrefixedMap}; use frame_system::pallet_prelude::BlockNumberFor; -use sp_core::{sr25519, ConstU32}; +use sp_core::sr25519; use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, TestExternalities, @@ -213,7 +213,6 @@ frame_support::construct_runtime!( impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/genesisconfig.rs b/substrate/frame/support/test/tests/genesisconfig.rs index a82425cf6befe3c014543943ffb6a72e9f00770b..0673bcfdff3ca036d7be863ff1b169e3471ddb9e 100644 --- a/substrate/frame/support/test/tests/genesisconfig.rs +++ b/substrate/frame/support/test/tests/genesisconfig.rs @@ -17,7 +17,7 @@ use frame_support::derive_impl; use frame_system::pallet_prelude::BlockNumberFor; -use sp_core::{sr25519, ConstU32}; +use sp_core::sr25519; use sp_runtime::{ generic, traits::{BlakeTwo256, Verify}, @@ -83,7 +83,6 @@ frame_support::construct_runtime!( impl frame_system::Config for Test { type BaseCallFilter = 
frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/instance.rs b/substrate/frame/support/test/tests/instance.rs index 332f5725e055eb2aa9ca70dff987f021a76d41ba..30b8338bc5c7052a6a50474cd44e46a7202ed77e 100644 --- a/substrate/frame/support/test/tests/instance.rs +++ b/substrate/frame/support/test/tests/instance.rs @@ -293,7 +293,6 @@ frame_support::construct_runtime!( impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/issue2219.rs b/substrate/frame/support/test/tests/issue2219.rs index 1542c4a6c434cec98547c533ba3f3530a5aff853..20c2773406ff111ebd03d08a89924ddf30cccdba 100644 --- a/substrate/frame/support/test/tests/issue2219.rs +++ b/substrate/frame/support/test/tests/issue2219.rs @@ -165,7 +165,6 @@ pub type Block = generic::Block; impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU64<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/origin.rs b/substrate/frame/support/test/tests/origin.rs index a25c575cc5177a4511dec8a8b24918ffde6da8d3..4f14bda184c867b54aa10246a995943835abf375 100644 --- a/substrate/frame/support/test/tests/origin.rs +++ b/substrate/frame/support/test/tests/origin.rs @@ -23,7 +23,6 @@ use frame_support::{ derive_impl, traits::{Contains, OriginTrait}, }; -use sp_core::ConstU32; use sp_runtime::{generic, traits::BlakeTwo256}; mod nested { @@ -174,7 +173,6 @@ frame_support::construct_runtime!( impl frame_system::Config for RuntimeOriginTest { type BaseCallFilter = BaseCallFilter; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index f41e606ad7c3e927e9719afccde3d5e0266509e0..c441d4c371af0926a0508025ae5473c77fb98346 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -705,7 +705,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); type DbWeight = (); diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index c79cdf93e97dc5a2f257ab9ba62a5d26e078b53f..dfe4caa476d3b9e1fa2a1024adb48eea5cdf9be9 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -308,7 +308,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); type DbWeight = (); diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs 
index 6c71b544426512c54a4320452204b0a4b62e7764..326f3530e26ecc75c30420f5e1f4575f381fbf08 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_explicit.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::{derive_impl, traits::ConstU32}; +use frame_support::derive_impl; mod common; @@ -29,7 +29,6 @@ pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs index 79828119742c106763b97096ffaed85acf7381da..4149c4880cca29585d5d4dc45ec190f34e90d858 100644 --- a/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs +++ b/substrate/frame/support/test/tests/pallet_outer_enums_implicit.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::{derive_impl, traits::ConstU32}; +use frame_support::derive_impl; mod common; @@ -29,7 +29,6 @@ pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs b/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs index e4ea094d0692e2f39167da014e3c3185d997ac08..3386632c13a2af834651d765aecfa723c1d4d5f9 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs @@ -82,7 +82,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); type DbWeight = (); diff --git a/substrate/frame/support/test/tests/runtime.rs b/substrate/frame/support/test/tests/runtime.rs index 7c2a8139a1345a4903766970a13bbf955aeef7c8..1f4d9110a24fc4e8f18fec0f1d7e2b714f920fa4 100644 --- a/substrate/frame/support/test/tests/runtime.rs +++ b/substrate/frame/support/test/tests/runtime.rs @@ -27,7 +27,7 @@ use frame_support::{ }; use frame_system::limits::{BlockLength, BlockWeights}; use scale_info::TypeInfo; -use sp_core::{sr25519, ConstU64}; +use sp_core::sr25519; use sp_runtime::{ generic, traits::{BlakeTwo256, ValidateUnsigned, Verify}, @@ -351,7 +351,6 @@ impl frame_system::Config for Runtime { type PalletInfo = PalletInfo; type OnSetCode = (); type Block = Block; - type BlockHashCount = ConstU64<10>; } impl module1::Config for Runtime { diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs index 4c7012dca14979f0ed7f560bdc339146a676caa3..5b74cc172c6eb8ed057284ef9dd4c2f91fbf8d7c 100644 --- a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -27,7 +27,7 @@ use frame_support::{ }; use frame_system::limits::{BlockLength, BlockWeights}; use scale_info::TypeInfo; -use sp_core::{sr25519, ConstU64}; +use sp_core::sr25519; use sp_runtime::{ generic, traits::{BlakeTwo256, ValidateUnsigned, Verify}, @@ -351,7 +351,6 @@ impl 
frame_system::Config for Runtime { type PalletInfo = PalletInfo; type OnSetCode = (); type Block = Block; - type BlockHashCount = ConstU64<10>; } impl module1::Config for Runtime { diff --git a/substrate/frame/support/test/tests/runtime_metadata.rs b/substrate/frame/support/test/tests/runtime_metadata.rs index 819ec176d2b13c3fb5c90290339ef328a4c523ff..48e4d975eb083ec38baf6521cac1891b1e5e3d82 100644 --- a/substrate/frame/support/test/tests/runtime_metadata.rs +++ b/substrate/frame/support/test/tests/runtime_metadata.rs @@ -42,7 +42,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/utils/frame/frame-utilities-cli/src/lib.rs b/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.rs similarity index 79% rename from substrate/utils/frame/frame-utilities-cli/src/lib.rs rename to substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.rs index 97129e36f7e9c04664a793d75d00e8bf8a86a413..469a7833e5afce4c3e8b2afda124ce92a05df43e 100644 --- a/substrate/utils/frame/frame-utilities-cli/src/lib.rs +++ b/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.rs @@ -15,8 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! frame-system CLI utilities +#[frame_support::runtime] +mod runtime { + #[runtime::runtime] + #[runtime::derive(RuntimeCall)] + pub struct Runtime; -mod pallet_id; + pub type System = frame_system; +} -pub use pallet_id::PalletIdCmd; +fn main() {} diff --git a/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.stderr b/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.stderr new file mode 100644 index 0000000000000000000000000000000000000000..a2cbaa48199d915cef506af495751986e0509874 --- /dev/null +++ b/substrate/frame/support/test/tests/runtime_ui/missing_pallet_index.stderr @@ -0,0 +1,5 @@ +error: Missing pallet index for pallet declaration. 
Please add `#[runtime::pallet_index(...)]` + --> tests/runtime_ui/missing_pallet_index.rs:24:5 + | +24 | pub type System = frame_system; + | ^^^ diff --git a/substrate/frame/support/test/tests/storage_layers.rs b/substrate/frame/support/test/tests/storage_layers.rs index caa125153e9dc597f8ca859e2e5dd6ac16618992..0e8ef668531816c736cf4edb6807b612fcfe85b5 100644 --- a/substrate/frame/support/test/tests/storage_layers.rs +++ b/substrate/frame/support/test/tests/storage_layers.rs @@ -78,7 +78,6 @@ impl frame_system::Config for Runtime { type Lookup = sp_runtime::traits::IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/substrate/frame/support/test/tests/storage_transaction.rs b/substrate/frame/support/test/tests/storage_transaction.rs index a5bbfd24ab09a8084f58ad47e342174454133ef2..7f66a43b9afd735166847e2eb9c320a220458079 100644 --- a/substrate/frame/support/test/tests/storage_transaction.rs +++ b/substrate/frame/support/test/tests/storage_transaction.rs @@ -24,7 +24,7 @@ use frame_support::{ storage::{with_transaction, TransactionOutcome::*}, transactional, }; -use sp_core::{sr25519, ConstU32}; +use sp_core::sr25519; use sp_io::TestExternalities; use sp_runtime::{ generic, @@ -91,7 +91,6 @@ frame_support::construct_runtime!( impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU32<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/support/test/tests/versioned_migration.rs b/substrate/frame/support/test/tests/versioned_migration.rs index e7d146940cb92f31020e8b27fe014983f50396f0..c83dd6b71de9b3c72ee8c3f9b00cd81fecb0dfde 100644 --- a/substrate/frame/support/test/tests/versioned_migration.rs +++ b/substrate/frame/support/test/tests/versioned_migration.rs @@ -27,7 +27,6 @@ use frame_support::{ weights::constants::RocksDbWeight, }; use frame_system::Config; -use sp_core::ConstU64; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -75,7 +74,6 @@ construct_runtime!( impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type Block = Block; - type BlockHashCount = ConstU64<10>; type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type RuntimeEvent = RuntimeEvent; diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 346aa05415925c60eae260479339232ffc2b103f..a2a8970814b0a7c7c2c3f5ba9536371236c16bbd 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] cfg-if = "1.0" -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive", "serde"] } serde = { features = ["alloc", "derive"], workspace = true } diff --git a/substrate/frame/system/benches/bench.rs b/substrate/frame/system/benches/bench.rs index 87c5581b2a3409f511f0a8f83a7c7aeae7e7b2c6..b3029630409f6afab1a6895e8ce918b931b6a80f 100644 --- a/substrate/frame/system/benches/bench.rs +++ b/substrate/frame/system/benches/bench.rs @@ -16,10 +16,7 @@ // 
limitations under the License. use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::{ - derive_impl, - traits::{ConstU32, ConstU64}, -}; +use frame_support::{derive_impl, traits::ConstU32}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -75,7 +72,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index 473a6bb132d741cf9485868201f114234be9f5d8..022f0ffce6b5ee23168db0ccaad1da5ea767ddf3 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../../benchmarking", default-features = false } frame-support = { path = "../../support", default-features = false } diff --git a/substrate/frame/system/rpc/runtime-api/Cargo.toml b/substrate/frame/system/rpc/runtime-api/Cargo.toml index 70e66769a8b3de7c87e2350367ea4477d10d3776..b134cc3b617308265222d9dec6669dbbacf7f566 100644 --- a/substrate/frame/system/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/system/rpc/runtime-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } sp-api = { path = "../../../../primitives/api", default-features = false } [features] diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 70d1e75633278c665f4c06ca22f02a96668ef964..5d6c68989ed53bfec6d95f80f5c9b149aa4c3063 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{limits::BlockWeights, Config, Pallet, LOG_TARGET}; +use crate::{limits::BlockWeights, Config, DispatchClass, Pallet, LOG_TARGET}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, @@ -64,17 +64,6 @@ where } } - /// Checks if the current extrinsic can fit into the block with respect to block weight limits. - /// - /// Upon successes, it returns the new block weight as a `Result`. - fn check_block_weight( - info: &DispatchInfoOf, - ) -> Result { - let maximum_weight = T::BlockWeights::get(); - let all_weight = Pallet::::block_weight(); - calculate_consumed_weight::(maximum_weight, all_weight, info) - } - /// Checks if the current extrinsic can fit into the block with respect to block length limits. /// /// Upon successes, it returns the new block length as a `Result`. 
@@ -113,7 +102,12 @@ where len: usize, ) -> Result<(), TransactionValidityError> { let next_len = Self::check_block_length(info, len)?; - let next_weight = Self::check_block_weight(info)?; + + let all_weight = Pallet::<T>::block_weight(); + let maximum_weight = T::BlockWeights::get(); + let next_weight = + calculate_consumed_weight::<T::RuntimeCall>(&maximum_weight, all_weight, info)?; + check_combined_proof_size::<T::RuntimeCall>(info, &maximum_weight, next_len, &next_weight)?; Self::check_extrinsic_weight(info)?; crate::AllExtrinsicsLen::<T>::put(next_len); @@ -136,8 +130,41 @@ where } } +/// Check that the combined extrinsic length and proof size together do not exceed the PoV limit. +pub fn check_combined_proof_size<Call>( + info: &DispatchInfoOf<Call>, + maximum_weight: &BlockWeights, + next_len: u32, + next_weight: &crate::ConsumedWeight, +) -> Result<(), TransactionValidityError> +where + Call: Dispatchable<Info = DispatchInfo>, +{ + // This extra check ensures that the extrinsic length does not push the + // PoV over the limit. + let total_pov_size = next_weight.total().proof_size().saturating_add(next_len as u64); + if total_pov_size > maximum_weight.max_block.proof_size() { + log::debug!( + target: LOG_TARGET, + "Extrinsic exceeds total pov size. Still including if mandatory. size: {}kb, limit: {}kb, is_mandatory: {}", + total_pov_size as f64/1024.0, + maximum_weight.max_block.proof_size() as f64/1024.0, + info.class == DispatchClass::Mandatory + ); + return match info.class { + // Allow mandatory extrinsics + DispatchClass::Mandatory => Ok(()), + _ => Err(InvalidTransaction::ExhaustsResources.into()), + }; + } + Ok(()) +} + +/// Checks if the current extrinsic can fit into the block with respect to block weight limits. +/// +/// Upon success, it returns the new block weight as a `Result`. pub fn calculate_consumed_weight<Call>( - maximum_weight: BlockWeights, + maximum_weight: &BlockWeights, mut all_weight: crate::ConsumedWeight, info: &DispatchInfoOf<Call>, ) -> Result<crate::ConsumedWeight, TransactionValidityError> @@ -172,7 +199,7 @@ where "Exceeded the per-class allowance.", ); - return Err(InvalidTransaction::ExhaustsResources.into()) + return Err(InvalidTransaction::ExhaustsResources.into()); }, // There is no `max_total` limit (`None`), // or we are below the limit. @@ -190,7 +217,7 @@ where "Total block weight is exceeded.", ); - return Err(InvalidTransaction::ExhaustsResources.into()) + return Err(InvalidTransaction::ExhaustsResources.into()); }, // There is either no limit in reserved pool (`None`), // or we are below the limit.
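The arithmetic behind the new check is worth spelling out: the proof size already consumed by the block plus the encoded length of the incoming extrinsic must not exceed `max_block.proof_size()`, and only `DispatchClass::Mandatory` extrinsics are exempt. A minimal standalone sketch of that rule (the `exceeds_pov_limit` helper and its numbers are invented for illustration; the real logic lives in `check_combined_proof_size` above):

// Sketch of the arithmetic behind `check_combined_proof_size`, ignoring the
// `DispatchClass::Mandatory` exemption that the real check handles.
fn exceeds_pov_limit(consumed_proof_size: u64, extrinsic_len: u64, max_proof_size: u64) -> bool {
    // Saturating add mirrors the pallet's `saturating_add` on proof size.
    consumed_proof_size.saturating_add(extrinsic_len) > max_proof_size
}

fn main() {
    // With a 10-byte proof-size budget and 5 already consumed:
    assert!(!exceeds_pov_limit(5, 5, 10)); // 10 <= 10: still fits
    assert!(exceeds_pov_limit(5, 6, 10)); // 11 > 10: ExhaustsResources unless Mandatory
}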
@@ -742,7 +769,171 @@ mod tests { // when assert_ok!(calculate_consumed_weight::<<Test as Config>::RuntimeCall>( - maximum_weight.clone(), + &maximum_weight, all_weight.clone(), - &mandatory1 + &mandatory1, )); assert_err!( calculate_consumed_weight::<<Test as Config>::RuntimeCall>( - maximum_weight, + &maximum_weight, all_weight, - &mandatory2 + &mandatory2, + ), + InvalidTransaction::ExhaustsResources + ); + } + + #[test] + fn maximum_proof_size_includes_length() { + let maximum_weight = BlockWeights::builder() + .base_block(Weight::zero()) + .for_class(DispatchClass::non_mandatory(), |w| { + w.base_extrinsic = Weight::zero(); + w.max_total = Some(Weight::from_parts(20, 10)); + }) + .for_class(DispatchClass::Mandatory, |w| { + w.base_extrinsic = Weight::zero(); + w.reserved = Some(Weight::from_parts(5, 10)); + w.max_total = None; + }) + .build_or_panic(); + + assert_eq!(maximum_weight.max_block, Weight::from_parts(20, 10)); + + let info = DispatchInfo { class: DispatchClass::Normal, ..Default::default() }; + let mandatory = DispatchInfo { class: DispatchClass::Mandatory, ..Default::default() }; + // We have 10 reftime and 5 proof size left over. + let next_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(10, 5), + DispatchClass::Operational => Weight::from_parts(0, 0), + DispatchClass::Mandatory => Weight::zero(), + }); + + // Simple checks for the length + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 0, + &next_weight + )); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 5, + &next_weight + )); + assert_err!( + check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 6, + &next_weight + ), + InvalidTransaction::ExhaustsResources + ); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &mandatory, + &maximum_weight, + 6, + &next_weight + )); + + // We have 10 reftime and 0 proof size left over. + let next_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(10, 10), + DispatchClass::Operational => Weight::from_parts(0, 0), + DispatchClass::Mandatory => Weight::zero(), + }); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 0, + &next_weight + )); + assert_err!( + check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 1, + &next_weight ), InvalidTransaction::ExhaustsResources ); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &mandatory, + &maximum_weight, + 1, + &next_weight + )); + + // We have 10 reftime and 2 proof size left over. + // Used weight is spread across dispatch classes this time. + let next_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(10, 5), + DispatchClass::Operational => Weight::from_parts(0, 3), + DispatchClass::Mandatory => Weight::zero(), + }); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 0, + &next_weight + )); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 2, + &next_weight + )); + assert_err!( + check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 3, + &next_weight + ), + InvalidTransaction::ExhaustsResources + ); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &mandatory, + &maximum_weight, + 3, + &next_weight + )); + + // Ref time is over the limit. Should not happen, but we should make sure that it is + // ignored.
+ let next_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(30, 5), + DispatchClass::Operational => Weight::from_parts(0, 0), + DispatchClass::Mandatory => Weight::zero(), + }); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 0, + &next_weight + )); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 5, + &next_weight + )); + assert_err!( + check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &info, + &maximum_weight, + 6, + &next_weight + ), + InvalidTransaction::ExhaustsResources + ); + assert_ok!(check_combined_proof_size::<<Test as Config>::RuntimeCall>( + &mandatory, + &maximum_weight, + 6, + &next_weight + )); } } diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 30df4dcfd43e9058cb3acc1a8f9b1863bf925e11..84d00a1e917ec033d4fe534f932236db76569625 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -262,7 +262,19 @@ pub mod pallet { /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. pub mod config_preludes { use super::{inject_runtime_type, DefaultConfig}; - use frame_support::derive_impl; + use frame_support::{derive_impl, traits::Get}; + + /// A predefined adapter that covers `BlockNumberFor<T>` for `Config::Block::BlockNumber` of + /// the types `u32`, `u64`, and `u128`. + /// + /// NOTE: Avoids overriding `BlockHashCount` when using `mocking::{MockBlock, MockBlockU32, + /// MockBlockU128}`. + pub struct TestBlockHashCount<C: Get<u32>>(sp_std::marker::PhantomData<C>); + impl<I: From<u32>, C: Get<u32>> Get<I> for TestBlockHashCount<C> { + fn get() -> I { + C::get().into() + } + } /// Provides a viable default config that can be used with /// [`derive_impl`](`frame_support::derive_impl`) to derive a testing pallet config @@ -300,7 +312,7 @@ pub mod pallet { #[inject_runtime_type] type RuntimeTask = (); type BaseCallFilter = frame_support::traits::Everything; - type BlockHashCount = frame_support::traits::ConstU64<10>; + type BlockHashCount = TestBlockHashCount<frame_support::traits::ConstU32<10>>; type OnSetCode = (); type SingleBlockMigrations = (); type MultiBlockMigrator = (); @@ -397,7 +409,7 @@ pub mod pallet { /// Maximum number of block number to block hash mappings to keep (oldest pruned first). /// Using 256 as default. - type BlockHashCount = frame_support::traits::ConstU32<256>; + type BlockHashCount = TestBlockHashCount<frame_support::traits::ConstU32<256>>; /// The set code logic, just the default since we're not a parachain.
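A note on why the adapter takes this shape: `frame_system::Config::BlockHashCount` must return the runtime's block number type, which is `u64` for `mocking::MockBlock` but `u32` for `MockBlockU32`, so neither a plain `ConstU32` nor `ConstU64` can serve as a single default. Storing the count as a `Get<u32>` and widening via `From<u32>` covers all three mock widths. A self-contained sketch of the same pattern (standalone; the `main` demo and its constants are illustrative only):

use frame_support::traits::{ConstU32, Get};

// Mirrors the adapter added to `config_preludes` above.
pub struct TestBlockHashCount<C: Get<u32>>(core::marker::PhantomData<C>);
impl<I: From<u32>, C: Get<u32>> Get<I> for TestBlockHashCount<C> {
    fn get() -> I {
        // Fetch the configured `u32` and widen it to whatever the runtime uses.
        C::get().into()
    }
}

fn main() {
    // One default value, usable for `u32` and `u64` block numbers alike.
    let narrow: u32 = TestBlockHashCount::<ConstU32<10>>::get();
    let wide: u64 = TestBlockHashCount::<ConstU32<10>>::get();
    assert_eq!((narrow, wide), (10, 10));
}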
type OnSetCode = (); @@ -1780,7 +1792,7 @@ impl Pallet { "[{:?}] {} extrinsics, length: {} (normal {}%, op: {}%, mandatory {}%) / normal weight:\ {} ({}%) op weight {} ({}%) / mandatory weight {} ({}%)", Self::block_number(), - Self::extrinsic_index().unwrap_or_default(), + Self::extrinsic_count(), Self::all_extrinsics_len(), sp_runtime::Percent::from_rational( Self::all_extrinsics_len(), diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index da49b29c89b78f26b3f5eee35e81b7d2dacf8bb8..93ce09611b55dc49746ad5e99fa98b21cf25ea6d 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index a2acf0638ffb0717ece5c132009b983c86ef2072..bcd54461406ead0f1bd67ab427c023d91b63fa0e 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true, default-features = true } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 24e5a714f0fe3aee0a5c2d74d3d837e8b969cdb1..4f7da9ae46fabe7b4dcb92bd40eab2ea339175a8 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index fef9afdee05f37cbd2999b1bbfecb07f2601d3bd..177621d9adbd102369fe0466cf962441f7591e42 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -23,7 +23,7 @@ frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } pallet-transaction-payment = { path = "..", default-features = false } -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { 
package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } [dev-dependencies] diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index 9a2b22b817096916193db338df62269415ec7a52..0cafb35d52e1c85e3a24a72869319da712298500 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -100,7 +100,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index fc4f1aecc15ee910ef90ffd18b39173d5de096e9..a4a8efad869c84ef9fe8b905ae5c38a9ed94b614 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -28,7 +28,7 @@ pallet-transaction-payment = { path = "..", default-features = false } frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } # Other dependencies -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index b04d4ffd9e0b794f56fd7be0219f8150f45e7078..f27fcd53fecd2d9006314c18155d9b4ac89d7eca 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -86,7 +86,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/transaction-payment/rpc/Cargo.toml b/substrate/frame/transaction-payment/rpc/Cargo.toml index 7f5e0d0b466df117e27de498004b487488695487..2c9f814460f7cd1502779cc1645c0379611dc3d5 100644 --- a/substrate/frame/transaction-payment/rpc/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } pallet-transaction-payment-rpc-runtime-api = { path = "runtime-api" } sp-api = { path = "../../../primitives/api" } diff --git a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 913dccc05c496807a7698dd3445eaaba5431002a..6c0241ec5c03c4ceb757620d6ae4f528e379c74e 100644 --- 
a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } pallet-transaction-payment = { path = "../..", default-features = false } sp-api = { path = "../../../../primitives/api", default-features = false } sp-runtime = { path = "../../../../primitives/runtime", default-features = false } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml index 8a6ee09f8dd0a4d17566ef573f242e9762cf6b1e..4d32a5123cf3fc0ef1322cedca7d2c8ef65ba51a 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml @@ -22,7 +22,7 @@ frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } # Other dependencies -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } [features] diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index c1bb05ab5c7eb8e03c49e5f8d789276de296bba9..1ef95128f2a840a2349728d5576560067b45bb34 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -85,7 +85,6 @@ impl frame_system::Config for Runtime { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index f5a964207eab780e7ea82408a94d6fd07bf05b49..bf647ca13ec1cf355e35bc9307239979ba8c8460 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.2.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/transaction-storage/src/mock.rs b/substrate/frame/transaction-storage/src/mock.rs index f1e9e0591f6f521bad4fda1cb4ae4aa879efc483..73174b73dbacc7c803ad947c3108165688832767 100644 --- a/substrate/frame/transaction-storage/src/mock.rs +++ b/substrate/frame/transaction-storage/src/mock.rs @@ -23,7 +23,7 @@ use crate::{ }; use frame_support::{ derive_impl, - traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, + traits::{ConstU32, OnFinalize, OnInitialize}, }; use 
sp_runtime::{traits::IdentityLookup, BuildStorage}; @@ -44,7 +44,6 @@ impl frame_system::Config for Test { type Block = Block; type AccountData = pallet_balances::AccountData; type AccountId = u64; - type BlockHashCount = ConstU64<250>; type Lookup = IdentityLookup; } diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index 34037338a52bac4d18130e6a3c88b3c3a5d3dc60..c93272af11d4664495cac40984f99fbc60b23f11 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ] } diff --git a/substrate/frame/try-runtime/Cargo.toml b/substrate/frame/try-runtime/Cargo.toml index 15c8ca5d27a71f9d4172d5d8bd31808d7badcc3b..e4e5f1940b25b67a97ef6026daa13a83cab791a4 100644 --- a/substrate/frame/try-runtime/Cargo.toml +++ b/substrate/frame/try-runtime/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } frame-support = { path = "../support", default-features = false } sp-api = { path = "../../primitives/api", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index 5f028179037d1214aadb460fa464b79f55c05a23..e44bb90dd7f8438673f7b5a23b1e7b1c15da03b5 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } docify = "0.2.8" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index 5206023838b95290d1a153b37074dccb0b5635d9..7245fe7d5d72a68b1ff00078ea11a755f206744e 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -33,9 +33,6 @@ use sp_runtime::{ BuildStorage, }; -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; -} #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = InsideBoth; @@ -50,7 +47,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup; type RuntimeEvent = RuntimeEvent; type Block = Block; - type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index ee6af191d33f31142a109ad7b11b6d2ff1104759..65b727b40b254567d46bff7704b4f124b78820f0 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index 2ad575ed51ffe708b9369a81ea0a4aa5d50e600b..00e8be75a3de600eada40f33afd4af94156dd554 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index e71731e397788a956bd72a74e93b9ac10d0fd7a6..7372b84240364aff99bf4ecbe35929b219f8b50e 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } log = { workspace = true } diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index 5c28fe29142c786621eee8fc4355983e990a8981..61bbb278019de8b4c012c460ab4d836cdf8a556d 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index 
2f553819b1bc09b911ad3664aa555c661863a40b..f48480f398d00729a5fb10e0c9bcfba5d62f9776 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } sp-api-proc-macro = { path = "proc-macro", default-features = false } sp-core = { path = "../core", default-features = false } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs index d90b56058648ba8653992c210586238028629689..36577670a40ce48df9ea805adb2fde5f02399599 100644 --- a/substrate/primitives/api/proc-macro/src/utils.rs +++ b/substrate/primitives/api/proc-macro/src/utils.rs @@ -40,6 +40,10 @@ pub fn generate_crate_access() -> TokenStream { let path = format!("{}::deps::sp_api::__private", name); let path = syn::parse_str::(&path).expect("is a valid path; qed"); quote!( #path ) + } else if let Ok(FoundCrate::Name(name)) = crate_name(&"polkadot-sdk") { + let path = format!("{}::sp_api::__private", name); + let path = syn::parse_str::(&path).expect("is a valid path; qed"); + quote!( #path ) } else { let err = Error::new(Span::call_site(), e).to_compile_error(); quote!( #err ) diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index a4af08c4b896e22eca26d67472902e4b1202897c..b49f774161fd3f593872452a54e4f186a6743f6c 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -22,7 +22,7 @@ sp-tracing = { path = "../../tracing" } sp-runtime = { path = "../../runtime" } sp-consensus = { path = "../../consensus/common" } sc-block-builder = { path = "../../../client/block-builder" } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } sp-state-machine = { path = "../../state-machine" } trybuild = "1.0.88" rustversion = "1.0.6" diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index 20e2be4d1552f4874dad7e0902ccd9c00a660976..cbb9f2133577b93234c052dba329ed1d5f453be7 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-core = { path = "../core", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 8acb1e1992c629a6de2550675cbade0068f7a60a..a9f2b80156f5ec121eea75abb389e02bbf99884e 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ 
+codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", "max-encoded-len", ] } diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index 88d93f40059625dd271b734c23fd635113dcbac9..72a8bb7fc47d0f76a49de45a3828424eb7a0a240 100644 --- a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-application-crypto = { path = "../application-crypto", default-features = false } diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index e716b61bfeb1668a42434aed147fa0810a1b7dca..5e51a2d06ed7a8f1de6fda7b2b19bf49fa515e51 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } futures = "0.3.30" log = { workspace = true, default-features = true } parking_lot = "0.12.1" diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index 7a09865f858d3faa4ec1ee1fd5a2beb84e9267d7..06e5b682964a4e7672575c9e8b5460d7acb2de96 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -21,14 +21,17 @@ use log::warn; use parking_lot::RwLock; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating}, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justifications, }; -use std::collections::btree_set::BTreeSet; +use std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::header_metadata::HeaderMetadata; -use crate::error::{Error, Result}; +use crate::{ + error::{Error, Result}, + tree_route, TreeRoute, +}; /// Blockchain database header backend. Does not perform any validation. pub trait HeaderBackend<Block: BlockT>: Send + Sync { @@ -89,62 +92,32 @@ pub trait HeaderBackend<Block: BlockT>: Send + Sync { pub trait ForkBackend<Block: BlockT>: HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync { - /// Best effort to get all the header hashes that are part of the provided forks - /// starting only from the fork heads. + /// Returns block hashes for the provided fork heads. It skips a fork when blocks are missing + /// (e.g. warp-sync) and the internal `tree_route` function fails. /// - /// The function tries to reconstruct the route from the fork head to the canonical chain. - /// If any of the hashes on the route can't be found in the db, the function won't be able - /// to reconstruct the route anymore. In this case it will give up expanding the current fork, - /// move on to the next ones and at the end it will return an error that also contains - /// the partially expanded forks.
+ /// Example: + /// G --- A1 --- A2 --- A3 --- A4 ( < fork1 ) + /// \-----C4 --- C5 ( < fork2 ) + /// We finalize A3 and call expand_forks(C5). Result = (C5,C4). fn expand_forks( &self, fork_heads: &[Block::Hash], - ) -> std::result::Result<BTreeSet<Block::Hash>, (BTreeSet<Block::Hash>, Error)> { - let mut missing_blocks = vec![]; + ) -> std::result::Result<BTreeSet<Block::Hash>, Error> { let mut expanded_forks = BTreeSet::new(); for fork_head in fork_heads { - let mut route_head = *fork_head; - // Insert stale blocks hashes until canonical chain is reached. - // If we reach a block that is already part of the `expanded_forks` we can stop - // processing the fork. - while expanded_forks.insert(route_head) { - match self.header_metadata(route_head) { - Ok(meta) => { - // If the parent is part of the canonical chain or there doesn't exist a - // block hash for the parent number (bug?!), we can abort adding blocks. - let parent_number = meta.number.saturating_sub(1u32.into()); - match self.hash(parent_number) { - Ok(Some(parent_hash)) => - if parent_hash == meta.parent { - break - }, - Ok(None) | Err(_) => { - missing_blocks.push(BlockId::<Block>::Number(parent_number)); - break - }, - } - - route_head = meta.parent; - }, - Err(_e) => { - missing_blocks.push(BlockId::<Block>::Hash(route_head)); - break - }, - } + match tree_route(self, *fork_head, self.info().finalized_hash) { + Ok(tree_route) => { + for block in tree_route.retracted() { + expanded_forks.insert(block.hash); + } + continue + }, + Err(_) => { + // There are cases when blocks are missing (e.g. warp-sync). + }, + } } - if !missing_blocks.is_empty() { - return Err(( - expanded_forks, - Error::UnknownBlocks(format!( - "Missing stale headers {:?} while expanding forks {:?}.", - fork_heads, missing_blocks - )), - )) - } - Ok(expanded_forks) } } @@ -172,14 +145,6 @@ pub trait Backend<Block: BlockT>: /// Results must be ordered best (longest, highest) chain first. fn leaves(&self) -> Result<Vec<Block::Hash>>; - /// Returns displaced leaves after the given block would be finalized. - /// - /// The returned leaves do not contain the leaves from the same height as `block_number`. - fn displaced_leaves_after_finalizing( - &self, - block_number: NumberFor<Block>, - ) -> Result<Vec<Block::Hash>>; - /// Return hashes of all blocks that are children of the block with `parent_hash`. fn children(&self, parent_hash: Block::Hash) -> Result<Vec<Block::Hash>>; @@ -255,6 +220,67 @@ pub trait Backend<Block: BlockT>: } fn block_indexed_body(&self, hash: Block::Hash) -> Result<Option<Vec<Vec<u8>>>>; + + /// Returns all leaves that will be displaced after the block is finalized. + fn displaced_leaves_after_finalizing( + &self, + finalized_block_hash: Block::Hash, + finalized_block_number: NumberFor<Block>, + ) -> std::result::Result<DisplacedLeavesAfterFinalization<Block>, Error> { + let mut result = DisplacedLeavesAfterFinalization::default(); + + if finalized_block_number == Zero::zero() { + return Ok(result) + } + + // For each leaf determine whether it belongs to a non-canonical branch. + for leaf_hash in self.leaves()? { + let leaf_block_header = self.expect_header(leaf_hash)?; + let leaf_number = *leaf_block_header.number(); + + let leaf_tree_route = match tree_route(self, leaf_hash, finalized_block_hash) { + Ok(tree_route) => tree_route, + Err(Error::UnknownBlock(_)) => { + // Sometimes routes can't be calculated. E.g. after warp sync. + continue; + }, + Err(e) => Err(e)?, + }; + + // Is it a stale fork?
+ let needs_pruning = leaf_tree_route.common_block().hash != finalized_block_hash; + + if needs_pruning { + result.displaced_leaves.insert(leaf_hash, leaf_number); + result.tree_routes.insert(leaf_hash, leaf_tree_route); + } + } + + Ok(result) + } +} + +/// Result of [`Backend::displaced_leaves_after_finalizing`]. +#[derive(Clone, Debug)] +pub struct DisplacedLeavesAfterFinalization<Block: BlockT> { + /// A collection of hashes and block numbers for displaced leaves. + pub displaced_leaves: BTreeMap<Block::Hash, NumberFor<Block>>, + + /// A collection of tree routes from the leaves to the finalized block. + pub tree_routes: BTreeMap<Block::Hash, TreeRoute<Block>>, +} + +impl<Block: BlockT> Default for DisplacedLeavesAfterFinalization<Block> { + fn default() -> Self { + Self { displaced_leaves: Default::default(), tree_routes: Default::default() } + } +} + +impl<Block: BlockT> DisplacedLeavesAfterFinalization<Block> { + /// Returns a collection of hashes for the displaced leaves. + pub fn hashes(&self) -> impl Iterator<Item = Block::Hash> + '_ { + self.displaced_leaves.keys().cloned() + } } /// Blockchain info diff --git a/substrate/primitives/blockchain/src/header_metadata.rs b/substrate/primitives/blockchain/src/header_metadata.rs index ccd640c0567afc6c9f9341fca754471a15a2829c..27caaae71add1a440b8a772db131329c951befbc 100644 --- a/substrate/primitives/blockchain/src/header_metadata.rs +++ b/substrate/primitives/blockchain/src/header_metadata.rs @@ -97,7 +97,7 @@ pub fn lowest_common_ancestor<Block: BlockT, T: HeaderMetadata<Block> + ?Sized>( } /// Compute a tree-route between two blocks. See tree-route docs for more details. -pub fn tree_route<Block: BlockT, T: HeaderMetadata<Block>>( +pub fn tree_route<Block: BlockT, T: HeaderMetadata<Block> + ?Sized>( backend: &T, from: Block::Hash, to: Block::Hash, diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index b689c84f158c7bddc1e845449654056b67e78b6f..a54499178171dab260e953dd1082d512d48f6df0 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 799d474aebe45e26a94392899251b054cd39d84b..46c032ba61a6067bd7cfa9f786706701c7e429c3 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index c38d004cf9bc762c7c4e226babddc487df836bc5..a682939a02f95064f8fed2fe7f6be0a9ad228ede 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++
b/substrate/primitives/consensus/beefy/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-api = { path = "../../api", default-features = false } diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 390c0ff71273ad4b9993a5a3e7be0265148a8c5b..913184402aef7bf9d1ee906faea935262e177a93 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -50,9 +50,12 @@ use alloc::vec::Vec; use codec::{Codec, Decode, Encode}; use core::fmt::{Debug, Display}; use scale_info::TypeInfo; -use sp_application_crypto::{AppCrypto, AppPublic, ByteArray, RuntimeAppPublic}; +use sp_application_crypto::{AppPublic, RuntimeAppPublic}; use sp_core::H256; -use sp_runtime::traits::{Hash, Keccak256, NumberFor}; +use sp_runtime::{ + traits::{Hash, Keccak256, NumberFor}, + OpaqueValue, +}; /// Key type for BEEFY module. pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::BEEFY; @@ -73,17 +76,13 @@ pub type BeefySignatureHasher = sp_runtime::traits::Keccak256; /// A trait bound which lists all traits which are required to be implemented by /// a BEEFY AuthorityId type in order to be usable in the BEEFY Keystore pub trait AuthorityIdBound: - Codec - + Debug - + Clone - + AsRef<[u8]> - + ByteArray + Ord + AppPublic - + AppCrypto - + RuntimeAppPublic + Display - + BeefyAuthorityId<BeefySignatureHasher> + + BeefyAuthorityId<BeefySignatureHasher, Signature = Self::BoundedSignature> { + /// Necessary bounds on the Signature associated with the AuthorityId + type BoundedSignature: Debug + Eq + PartialEq + Clone + TypeInfo + Codec + Send + Sync; } /// BEEFY cryptographic types for ECDSA crypto @@ -124,7 +123,9 @@ pub mod ecdsa_crypto { } } } - impl AuthorityIdBound for AuthorityId {} + impl AuthorityIdBound for AuthorityId { + type BoundedSignature = Signature; + } } /// BEEFY cryptographic types for BLS crypto @@ -165,7 +166,9 @@ pub mod bls_crypto { BlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref()) } } - impl AuthorityIdBound for AuthorityId {} + impl AuthorityIdBound for AuthorityId { + type BoundedSignature = Signature; + } } /// BEEFY cryptographic types for (ECDSA,BLS) crypto pair @@ -213,7 +216,9 @@ pub mod ecdsa_bls_crypto { } } - impl AuthorityIdBound for AuthorityId {} + impl AuthorityIdBound for AuthorityId { + type BoundedSignature = Signature; + } } /// The `ConsensusEngineId` of BEEFY. @@ -399,21 +404,7 @@ impl<AuthorityId> OnNewValidatorSet<AuthorityId> for () { /// the runtime API boundary this type is unknown and as such we keep this /// opaque representation, implementors of the runtime API will have to make /// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. -#[derive(Decode, Encode, PartialEq, TypeInfo)] -pub struct OpaqueKeyOwnershipProof(Vec<u8>); -impl OpaqueKeyOwnershipProof { - /// Create a new `OpaqueKeyOwnershipProof` using the given encoded - /// representation. - pub fn new(inner: Vec<u8>) -> OpaqueKeyOwnershipProof { - OpaqueKeyOwnershipProof(inner) - } - - /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key - /// ownership proof type.
- pub fn decode<T: Decode>(self) -> Option<T> { - codec::Decode::decode(&mut &self.0[..]).ok() - } -} +pub type OpaqueKeyOwnershipProof = OpaqueValue; sp_api::decl_runtime_apis! { /// API necessary for BEEFY voters. diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index 6c228383d003fbafa879a88577b37609ce9d0278..f63f5f3122f41f76640dcebb8dca592f31268577 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/primitives/consensus/grandpa/src/lib.rs b/substrate/primitives/consensus/grandpa/src/lib.rs index 75ed81894c259ad7102b9b98bb3e6e29bea4cdcd..5320c9434041ce5c6f235719e200ee4e4cba5c7d 100644 --- a/substrate/primitives/consensus/grandpa/src/lib.rs +++ b/substrate/primitives/consensus/grandpa/src/lib.rs @@ -31,7 +31,7 @@ use scale_info::TypeInfo; use sp_keystore::KeystorePtr; use sp_runtime::{ traits::{Header as HeaderT, NumberFor}, - ConsensusEngineId, RuntimeDebug, + ConsensusEngineId, OpaqueValue, RuntimeDebug, }; /// The log target to be used by client code. @@ -465,22 +465,7 @@ where /// the runtime API boundary this type is unknown and as such we keep this /// opaque representation, implementors of the runtime API will have to make /// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. -#[derive(Decode, Encode, PartialEq, TypeInfo)] -pub struct OpaqueKeyOwnershipProof(Vec<u8>); - -impl OpaqueKeyOwnershipProof { - /// Create a new `OpaqueKeyOwnershipProof` using the given encoded - /// representation. - pub fn new(inner: Vec<u8>) -> OpaqueKeyOwnershipProof { - OpaqueKeyOwnershipProof(inner) - } - - /// Try to decode this `OpaqueKeyOwnershipProof` into the given concrete key - /// ownership proof type. - pub fn decode<T: Decode>(self) -> Option<T> { - codec::Decode::decode(&mut &self.0[..]).ok() - } -} +pub type OpaqueKeyOwnershipProof = OpaqueValue; sp_api::decl_runtime_apis! { /// APIs for integrating the GRANDPA finality gadget into runtimes.
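Since both the BEEFY and GRANDPA wrappers collapse into the shared `sp_runtime::OpaqueValue`, a quick round-trip shows how callers are expected to use the alias (assuming `OpaqueValue` keeps the `new`/`decode` surface of the structs it replaces; the `MembershipProof` type here is invented for illustration):

use codec::{Decode, Encode};
use sp_runtime::OpaqueValue;

// Hypothetical concrete key-ownership proof; real runtimes supply their own type.
#[derive(Encode, Decode, PartialEq, Debug)]
struct MembershipProof {
    session: u32,
}

fn main() {
    let proof = MembershipProof { session: 42 };
    // The runtime encodes the concrete proof and ships it across the API
    // boundary as an opaque blob...
    let opaque = OpaqueValue::new(proof.encode());
    // ...and the caller decodes it back into the concrete type it expects.
    assert_eq!(opaque.decode::<MembershipProof>(), Some(MembershipProof { session: 42 }));
}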
diff --git a/substrate/primitives/consensus/pow/Cargo.toml b/substrate/primitives/consensus/pow/Cargo.toml index 7a884f865fbeea0d6e7c0fd6a6699e0a5b513769..0700e2c4f8b9f1595ca820ab0cc3fe5e963dec91 100644 --- a/substrate/primitives/consensus/pow/Cargo.toml +++ b/substrate/primitives/consensus/pow/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } sp-api = { path = "../../api", default-features = false } sp-core = { path = "../../core", default-features = false } sp-runtime = { path = "../../runtime", default-features = false } diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 50348054da0118ca52137a5f1c62040e6dd86bea..c8eb9b76b93b0e65de11aa65116353d1e429cfb7 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -18,7 +18,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +scale-codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index a8b12900617960f953726d1b4eac5bf585b1f6bf..dd519eab46475fb16f4768de2fab6138c5a059b4 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-timestamp = { path = "../../timestamp", default-features = false } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index b7f3a999765a96050b008bc5f38669bdcb2e50e1..f931faf8bd043406d2d30943a594d94ad745ae05 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { workspace = true } serde = { optional = true, features = ["alloc", "derive"], workspace = true } diff --git a/substrate/primitives/externalities/Cargo.toml b/substrate/primitives/externalities/Cargo.toml index 20fa3e3e397c23632a67ca86cecf1a593e48a0e2..3a0d0315e9178a0e54ba001064fb6feb65c1b731 100644 --- 
a/substrate/primitives/externalities/Cargo.toml +++ b/substrate/primitives/externalities/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } environmental = { version = "1.1.3", default-features = false } sp-storage = { path = "../storage", default-features = false } diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index 96e99553294120698ae69d70e9da609326fc3489..4fc8a0416fbe5f2ffdbd021dd42a3d072c61b241 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index c08ac459de530e55d0064561fe7171e811840c8f..c63aca801a0d7e513963b06d2ce4375f17194d3d 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { optional = true, workspace = true } diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index dddea4ffa232541023c91d354aa6fb2013332a98..abb16d163da060ae1d152276189157b77fb6970d 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } sp-core = { path = "../core", default-features = false } sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } sp-keystore = { path = "../keystore", default-features = false, optional = true } diff --git a/substrate/primitives/keystore/Cargo.toml b/substrate/primitives/keystore/Cargo.toml index 3f1a71b62ac15a50de964cbc66f5df243f4b5a6d..313b9e1c0059cb86915ca6bfe60ddff21b2c2ffb 100644 --- a/substrate/primitives/keystore/Cargo.toml +++ b/substrate/primitives/keystore/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } parking_lot = { version = "0.12.1", default-features 
= false, optional = true } sp-core = { path = "../core", default-features = false } sp-externalities = { path = "../externalities", default-features = false } diff --git a/substrate/primitives/keystore/src/testing.rs b/substrate/primitives/keystore/src/testing.rs index d8610ecfa5b60d8891b6690e00ce6efb55342983..1403e4745ff11120606295ef3c69cbb116843e8b 100644 --- a/substrate/primitives/keystore/src/testing.rs +++ b/substrate/primitives/keystore/src/testing.rs @@ -516,7 +516,7 @@ mod tests { let suri = "//Alice"; let pair = ecdsa_bls377::Pair::from_string(suri, None).unwrap(); - let msg = b"this should be a normal unhashed message not "; + let msg = b"this should be a normal unhashed message not a hash of a message because bls scheme comes with its own hashing"; // insert key, sign again store.insert(ECDSA_BLS377, suri, pair.public().as_ref()).unwrap(); diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index b97cef138ed4a2c83ff921bc0cef702c97070d49..7b043355c723b0627b574284782a0252e0b536dc 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -15,10 +15,10 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } -mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } +mmr-lib = { package = "polkadot-ckb-merkle-mountain-range", version = "0.7.0", default-features = false } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/merkle-mountain-range/src/lib.rs b/substrate/primitives/merkle-mountain-range/src/lib.rs index c76d66bb08ea700bd6bcc1272d56310bda51881f..3740047e027829eb471a61244b460cdd58fe8cd5 100644 --- a/substrate/primitives/merkle-mountain-range/src/lib.rs +++ b/substrate/primitives/merkle-mountain-range/src/lib.rs @@ -352,15 +352,29 @@ impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); /// An MMR proof data for a group of leaves. #[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] -pub struct Proof<Hash> { +pub struct LeafProof<Hash> { /// The indices of the leaves the proof is for. pub leaf_indices: Vec<NodeIndex>, /// Number of leaves in MMR, when the proof was generated. pub leaf_count: NodeIndex, - /// Proof elements (hashes of siblings of inner nodes on the path to the leaf). + /// Proof elements (hashes of siblings of inner nodes on the path to the leaves). pub items: Vec<Hash>, } +/// An MMR ancestry proof for a prior MMR root. +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] +pub struct AncestryProof<Hash> { + /// Peaks of the ancestor's MMR. + pub prev_peaks: Vec<Hash>, + /// Number of leaves in the ancestor's MMR. + pub prev_leaf_count: u64, + /// Number of leaves in MMR, when the proof was generated. + pub leaf_count: NodeIndex, + /// Proof elements + /// (positions and hashes of siblings of inner nodes on the path to the previous peaks). + pub items: Vec<(u64, Hash)>, +} + /// Merkle Mountain Range operation error.
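Since `LeafProof` is a pure rename of `Proof`, the SCALE encoding is unchanged and previously generated proofs keep decoding. A minimal round-trip sketch, assuming `sp-mmr-primitives`, `sp-core` and `parity-scale-codec` as dependencies (the sibling hashes are dummies):

```rust
use codec::{Decode, Encode};
use sp_core::H256;
use sp_mmr_primitives::LeafProof;

fn main() {
	// A proof for leaves 3 and 5 of a 10-leaf MMR, with placeholder hashes.
	let proof = LeafProof::<H256> {
		leaf_indices: vec![3, 5],
		leaf_count: 10,
		items: vec![H256::repeat_byte(1), H256::repeat_byte(2)],
	};

	// Rename only: the encoded bytes are identical to the old `Proof` type,
	// so a round-trip recovers the same value.
	let encoded = proof.encode();
	assert_eq!(LeafProof::<H256>::decode(&mut &encoded[..]).unwrap(), proof);
}
```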
#[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq, TypeInfo)] @@ -437,14 +451,14 @@ sp_api::decl_runtime_apis! { fn generate_proof( block_numbers: Vec, best_known_block_number: Option - ) -> Result<(Vec, Proof), Error>; + ) -> Result<(Vec, LeafProof), Error>; /// Verify MMR proof against on-chain MMR for a batch of leaves. /// /// Note this function will use on-chain MMR root hash and check if the proof matches the hash. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the - /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [Proof] - fn verify_proof(leaves: Vec, proof: Proof) -> Result<(), Error>; + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [LeafProof] + fn verify_proof(leaves: Vec, proof: LeafProof) -> Result<(), Error>; /// Verify MMR proof against given root hash for a batch of leaves. /// @@ -452,8 +466,8 @@ sp_api::decl_runtime_apis! { /// proof is verified against given MMR root hash. /// /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the - /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [Proof] - fn verify_proof_stateless(root: Hash, leaves: Vec, proof: Proof) + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [LeafProof] + fn verify_proof_stateless(root: Hash, leaves: Vec, proof: LeafProof) -> Result<(), Error>; } } @@ -472,12 +486,12 @@ mod tests { type Test = DataOrHash; type TestCompact = Compact; - type TestProof = Proof<::Output>; + type TestProof = LeafProof<::Output>; #[test] fn should_encode_decode_proof() { // given - let proof: TestProof = Proof { + let proof: TestProof = LeafProof { leaf_indices: vec![5], leaf_count: 10, items: vec![ diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index ca8408d0ad97c0c5b880b8b60b59d693e7652b2f..90ecd1dfb13df810abbfd0ff460fa798a58e0033 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index 166609ad922c96aaa62cde3bc42156e125635686..ef32503000d95b2c3c2026c2bb8aeb1eedc573cc 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { default-features = false, path = "../api" } sp-application-crypto = { default-features = false, path = "../application-crypto" } diff --git a/substrate/primitives/npos-elections/Cargo.toml 
b/substrate/primitives/npos-elections/Cargo.toml index afa59af64d6d8599a9835f448f9a2031831dc694..2da74429a4813e8fd3d3834682a49acce5f950f3 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-arithmetic = { path = "../arithmetic", default-features = false } diff --git a/substrate/primitives/runtime-interface/Cargo.toml b/substrate/primitives/runtime-interface/Cargo.toml index b4fab17eeb7c1872404cbc9d03bc775be6736fe9..f853a532515bb2d2406fe00e82e5803d4e7a056e 100644 --- a/substrate/primitives/runtime-interface/Cargo.toml +++ b/substrate/primitives/runtime-interface/Cargo.toml @@ -23,7 +23,7 @@ sp-std = { path = "../std", default-features = false } sp-tracing = { path = "../tracing", default-features = false } sp-runtime-interface-proc-macro = { path = "proc-macro" } sp-externalities = { path = "../externalities", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["bytes"] } static_assertions = "1.0.0" primitive-types = { version = "0.12.0", default-features = false } sp-storage = { path = "../storage", default-features = false } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 0389c9f5b2f4e2181fd4eebb239e5664c15c9c0f..4d298b7ce5e3df4169945092d6ca53e8b616a11a 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive", "max-encoded-len"] } either = { version = "1.5", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } impl-trait-for-tuples = "0.2.2" diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 8f6c0c6f650daa36f18f132e3fc0fa13eaa76621..df1f5645f04827873fdee3e984f851d40b836c25 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -57,7 +57,7 @@ type UncheckedSignaturePayload = (Address, Signature, /// could in principle be any other interaction. Transactions are either signed or unsigned. A /// sensible transaction pool should ensure that only transactions that are worthwhile are /// considered for block-building. 
-#[cfg_attr(feature = "std", doc = simple_mermaid::mermaid!("../../docs/mermaid/extrinsics.mmd"))] +#[cfg_attr(all(feature = "std", not(windows)), doc = simple_mermaid::mermaid!("../../docs/mermaid/extrinsics.mmd"))] /// This type is by no means enforced within Substrate, but given its genericness, it is highly /// likely that for most use-cases it will suffice. Thus, the encoding of this type will dictate /// exactly what bytes should be sent to a runtime to transact with it. diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index e4e6b98ff77cf786ce9fb37958bdaeff9a36c76c..046909b9a38d732a80f5178d228090fe10939c8d 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -1009,6 +1009,21 @@ pub enum ExtrinsicInclusionMode { OnlyInherents, } +/// Simple blob that holds a value in an encoded form without committing to its type. +#[derive(Decode, Encode, PartialEq, TypeInfo)] +pub struct OpaqueValue(Vec<u8>); +impl OpaqueValue { + /// Create a new `OpaqueValue` using the given encoded representation. + pub fn new(inner: Vec<u8>) -> OpaqueValue { + OpaqueValue(inner) + } + + /// Try to decode this `OpaqueValue` into the given concrete type. + pub fn decode<T: Decode>(&self) -> Option<T> { + Decode::decode(&mut &self.0[..]).ok() + } +} + #[cfg(test)] mod tests { use crate::traits::BlakeTwo256; diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 5314ccd6d965c44de5371a4fbc0888821e725236..9355ab420107147883b545467b31fcbf95be6191 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index 8be375fda0e565936bd509ed945aea58590aa0b0..0d54b57f20f5875f209b1f88a18cb02c26cac363 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { features = ["alloc", "derive"], optional = true, workspace = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index 7a61c452b6e741d28175622c9d9b823134650c03..e7bb03bb6eecfc095d01df44de8b31cc19b45a07 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -377,6 +377,13 @@ pub trait StakingInterface { Self::status(who).map(|s| matches!(s, StakerStatus::Validator)).unwrap_or(false) }
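Stepping back to the `OpaqueValue` addition above: the blob carries no type information, so decoding is only ever checked against whatever bytes were stored. A minimal usage sketch, assuming `sp-runtime` and `parity-scale-codec` as dependencies:

```rust
use codec::Encode;
use sp_runtime::OpaqueValue;

fn main() {
	// Wrap an encoded value without committing to its type.
	let opaque = OpaqueValue::new(42u64.encode());

	// Decoding against the matching type recovers the value...
	assert_eq!(opaque.decode::<u64>(), Some(42));

	// ...while a type that rejects the bytes yields `None` (a `bool` must be
	// 0 or 1, but the first stored byte here is 42).
	assert_eq!(opaque.decode::<bool>(), None);
}
```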
+ /// Checks whether the staker is a virtual account. + /// + /// A virtual staker is an account whose locks are not managed by the [`StakingInterface`] + /// implementation but by an external pallet. See [`StakingUnchecked::virtual_bond`] for more + /// details. + fn is_virtual_staker(who: &Self::AccountId) -> bool; + /// Get the nominations of a stash, if they are a nominator, `None` otherwise. fn nominations(who: &Self::AccountId) -> Option<Vec<Self::AccountId>> { match Self::status(who) { @@ -548,4 +555,160 @@ pub struct PagedExposureMetadata { pub page_count: Page, } +/// A type that belongs only in the context of an `Agent`. +/// +/// `Agent` is someone that manages delegated funds from [`Delegator`] accounts. It can +/// then use these funds to participate in the staking system. It can never use its own funds to +/// stake. Instead, it [virtually bonds](`StakingUnchecked::virtual_bond`) into the staking system; +/// such accounts are also called `Virtual Stakers`. +/// +/// The `Agent` is also responsible for managing rewards and slashing for all the `Delegators` that +/// have delegated funds to it. +#[derive(Clone, Debug)] +pub struct Agent<T>(T); +impl<T> From<T> for Agent<T> { + fn from(acc: T) -> Self { + Agent(acc) + } +} + +impl<T> Agent<T> { + pub fn get(self) -> T { + self.0 + } +} + +/// A type that belongs only in the context of a `Delegator`. +/// +/// `Delegator` is someone that delegates funds to an `Agent`, allowing them to pool funds +/// along with other delegators and participate in the staking system. +#[derive(Clone, Debug)] +pub struct Delegator<T>(T); +impl<T> From<T> for Delegator<T> { + fn from(acc: T) -> Self { + Delegator(acc) + } +} + +impl<T> Delegator<T> { + pub fn get(self) -> T { + self.0 + } +} + +/// Trait to provide delegation functionality for stakers. +pub trait DelegationInterface { + /// Balance type used by the staking system. + type Balance: Sub<Output = Self::Balance> + + Ord + + PartialEq + + Default + + Copy + + MaxEncodedLen + + FullCodec + + TypeInfo + + Saturating; + + /// AccountId type used by the staking system. + type AccountId: Clone + core::fmt::Debug; + + /// Returns the effective balance of the `Agent` account. `None` if not an `Agent`. + /// + /// This takes into account any pending slashes to `Agent` against the delegated balance. + fn agent_balance(agent: Agent<Self::AccountId>) -> Option<Self::Balance>; + + /// Returns the total amount of funds delegated. `None` if not a `Delegator`. + fn delegator_balance(delegator: Delegator<Self::AccountId>) -> Option<Self::Balance>; + + /// Delegate funds to `Agent`. + /// + /// Only used for the initial delegation. Use [`Self::delegate_extra`] to add more delegation. + fn delegate( + delegator: Delegator<Self::AccountId>, + agent: Agent<Self::AccountId>, + reward_account: &Self::AccountId, + amount: Self::Balance, + ) -> DispatchResult; + + /// Add more delegation to the `Agent`. + /// + /// If this is the first delegation, use [`Self::delegate`] instead. + fn delegate_extra( + delegator: Delegator<Self::AccountId>, + agent: Agent<Self::AccountId>, + amount: Self::Balance, + ) -> DispatchResult; + + /// Withdraw or revoke delegation to `Agent`. + /// + /// If there are `Agent` funds up to `amount` available to withdraw, those funds are + /// released to the `delegator`. + fn withdraw_delegation( + delegator: Delegator<Self::AccountId>, + agent: Agent<Self::AccountId>, + amount: Self::Balance, + num_slashing_spans: u32, + ) -> DispatchResult; +
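The `Agent`/`Delegator` wrappers exist purely to keep the two account roles apart in signatures like the ones above. A small illustrative sketch (hedged: `AccountId = u64` is a stand-in and `apply_slash` is a hypothetical helper, not part of the trait; assumes `sp-staking` as a dependency):

```rust
use sp_staking::{Agent, Delegator};

type AccountId = u64; // stand-in for a real account type

// With two bare `AccountId`s the arguments below could be swapped silently;
// with the wrappers, swapping them is a compile-time error.
fn apply_slash(agent: Agent<AccountId>, delegator: Delegator<AccountId>) {
	let (_agent_acc, _delegator_acc) = (agent.get(), delegator.get());
}

fn main() {
	let (agent_acc, delegator_acc): (AccountId, AccountId) = (1, 2);
	apply_slash(Agent::from(agent_acc), Delegator::from(delegator_acc));
}
```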
+ /// Returns pending slashes posted to the `Agent` account. `None` if not an `Agent`. + /// + /// Slashes to the `Agent` account are not immediate and are applied lazily. Since `Agent` + /// has an unbounded number of delegators, immediate slashing is not possible. + fn pending_slash(agent: Agent<Self::AccountId>) -> Option<Self::Balance>; + + /// Apply a pending slash to an `Agent` by slashing `value` from `delegator`. + /// + /// A reporter may be provided (if one exists) in order for the implementor to reward them, + /// if applicable. + fn delegator_slash( + agent: Agent<Self::AccountId>, + delegator: Delegator<Self::AccountId>, + value: Self::Balance, + maybe_reporter: Option<Self::AccountId>, + ) -> DispatchResult; +} + +/// Trait to provide functionality for direct stakers to migrate to delegation agents. +/// See [`DelegationInterface`] for more details on delegation. +pub trait DelegationMigrator { + /// Balance type used by the staking system. + type Balance: Sub<Output = Self::Balance> + + Ord + + PartialEq + + Default + + Copy + + MaxEncodedLen + + FullCodec + + TypeInfo + + Saturating; + + /// AccountId type used by the staking system. + type AccountId: Clone + core::fmt::Debug; + + /// Migrate an existing `Nominator` to an `Agent` account. + /// + /// The implementation should ensure the `Nominator` account funds are moved to an escrow + /// from which the `Agent` can later release funds to its `Delegators`. + fn migrate_nominator_to_agent( + agent: Agent<Self::AccountId>, + reward_account: &Self::AccountId, + ) -> DispatchResult; + + /// Migrate `value` of delegation to `delegator` from a migrating agent. + /// + /// When a direct `Nominator` migrates to `Agent`, the funds are kept in escrow. This function + /// allows the `Agent` to release the funds to the `delegator`. + fn migrate_delegation( + agent: Agent<Self::AccountId>, + delegator: Delegator<Self::AccountId>, + value: Self::Balance, + ) -> DispatchResult; + + /// Drop the `Agent` account and its associated delegators. + /// + /// Also removes it from [`StakingUnchecked`] as a virtual staker. Useful for testing. + #[cfg(feature = "runtime-benchmarks")] + fn drop_agent(agent: Agent<Self::AccountId>); +} + sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index e00ff5c27dddd6590ba59c30d84a2d9688b8e2a9..c383a17cb006e3c4433172aedde957465fbb569f 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } parking_lot = { version = "0.12.1", optional = true } diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index b36bff69a007c669724950ca94f2cebc859890ed..bb893b25dc443f617ab5c0dd3d0d22c94646b689 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", default-features = false } sp-crypto-hashing = { path = "../crypto/hashing", default-features = false } diff --git a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index
acedc8d0004970fc10f5771ba4b4e2cb87a55198..c3318943d0d481a430da46502cdcb3a3c8aa22a7 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", optional = true, default-features = false } ref-cast = "1.0.0" serde = { features = ["alloc", "derive"], optional = true, workspace = true } diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index 0513555431561a4adb12f5e2b373859df1272af2..b7be614860910eb63f12eadaa3676f69b0e65ada 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { features = ["derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 5a1d4fcc985c520eede135b59d8fe5311fd8feac..c1bf9b3255eab83dea9eb9f80b93aa5440e816e1 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } thiserror = { optional = true, workspace = true } sp-inherents = { path = "../inherents", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/tracing/Cargo.toml b/substrate/primitives/tracing/Cargo.toml index ce30302d4bb0a7166ceb820e72bdc1de0b31cbc3..8adec1670dc2da99c315f58dbc37a92a3cf947f5 100644 --- a/substrate/primitives/tracing/Cargo.toml +++ b/substrate/primitives/tracing/Cargo.toml @@ -21,7 +21,7 @@ features = ["with-tracing"] targets = ["wasm32-unknown-unknown", "x86_64-unknown-linux-gnu"] [dependencies] -codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = [ +codec = { version = "3.6.12", package = "parity-scale-codec", default-features = false, features = [ "derive", ] } tracing = { version = "0.1.29", default-features = false } diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index 6cce469d3f915786ef93ba0ee0e170a94d40fbeb..1e874c3595acd4723e34c0cb8568256ae2444317 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.79", optional = true } -codec = { package = 
"parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-core = { path = "../core", optional = true, default-features = false } sp-inherents = { path = "../inherents", default-features = false } diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 29c3c787087a735b9d0f082e17d68d5c108c1260..45459c180d40d076b0d7b59232e9dc76cd0b2d27 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -22,7 +22,7 @@ harness = false [dependencies] ahash = { version = "0.8.2", optional = true } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } hash-db = { version = "0.16.0", default-features = false } lazy_static = { version = "1.4.0", optional = true } memory-db = { version = "0.32.0", default-features = false } diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index d686b0c75512dac9931c46ffb41c4e4a11522718..f8ef8f66c5355634dbb4a7e1f08adbdb2f035766 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index f7abf88c9a67b7e3102d44254b520a9bbeb7a8a8..3abd5c0910694cf14a8885ecebf4cc855e9e8266 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } proc-macro2 = "1.0.56" quote = { workspace = true } syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index 15a20fab5e5d83aa4635a38c43738a3007847481..a0c8342d2d3c5014527487faf187fc48fd5344e1 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -17,7 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { optional = true, workspace = true, default-features = true } wasmtime = { version = "8.0.1", default-features = false, optional = true } diff --git a/substrate/primitives/weights/Cargo.toml 
b/substrate/primitives/weights/Cargo.toml index e73d4a702b42e4ac5b0cd0a37f1c15eb70bec5e2..d2d72a7cb019fa5ad5c8fd66a32e953f9076068f 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bounded-collections = { version = "0.2.0", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["alloc", "derive"], workspace = true } smallvec = "1.11.0" diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index c4f876710005533dfe644bb6b2ee3cf736a7994e..87c595c66f3484cbbaa8d8a6d83ea09453ee99b9 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] substrate-rpc-client = { path = "../../utils/frame/rpc/client" } sp-rpc = { path = "../../primitives/rpc" } assert_cmd = "2.0.10" -nix = { version = "0.27.1", features = ["signal"] } +nix = { version = "0.28.0", features = ["signal"] } regex = "1.7.3" tokio = { version = "1.22.0", features = ["full"] } node-primitives = { path = "../../bin/node/primitives" } diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index a5f000057de781823a1ea278093a09f47cd9a62e..5871f1bf5b4d05ad35f0998e7b4ab9fb3b953d58 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.2.2" async-trait = "0.1.79" -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 1568ee500bdccc2ec08dfeb14f6ce20d03cfeb01..8733ff9fcebb36199c341cd6d90a96623212944c 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -21,7 +21,7 @@ sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, features = ["serde"] } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } sp-block-builder = { path = "../../primitives/block-builder", default-features = false } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-inherents = { path = "../../primitives/inherents", default-features = false } sp-keyring = { path = "../../primitives/keyring", default-features = false } @@ -37,6 +37,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false, feat pallet-babe = { path = "../../frame/babe", default-features = false } pallet-balances = { path = "../../frame/balances", default-features = false } frame-executive = { path = 
"../../frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../frame/metadata-hash-extension", default-features = false } frame-system = { path = "../../frame/system", default-features = false } frame-system-rpc-runtime-api = { path = "../../frame/system/rpc/runtime-api", default-features = false } pallet-timestamp = { path = "../../frame/timestamp", default-features = false } @@ -67,7 +68,7 @@ serde = { features = ["alloc", "derive"], workspace = true } serde_json = { features = ["alloc"], workspace = true } [build-dependencies] -substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true } +substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true, features = ["metadata-hash"] } [features] default = ["std"] @@ -76,6 +77,7 @@ std = [ "array-bytes", "codec/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-rpc-runtime-api/std", "frame-system/std", @@ -112,5 +114,6 @@ std = [ "substrate-wasm-builder", "trie-db/std", ] + # Special feature to disable logging disable-logging = ["sp-api/disable-logging"] diff --git a/substrate/test-utils/runtime/build.rs b/substrate/test-utils/runtime/build.rs index dd79ce2c5ae842ad7fc1c759a14fa8dedd200a1a..d38173fcfcb4eff6debb4b23c1341d334025d7ed 100644 --- a/substrate/test-utils/runtime/build.rs +++ b/substrate/test-utils/runtime/build.rs @@ -25,6 +25,7 @@ fn main() { // to this value by default. This is because some of our tests // (`restoration_of_globals`) depend on the stack-size. .append_to_rust_flags("-Clink-arg=-zstack-size=1048576") + .enable_metadata_hash("TOKEN", 10) .import_memory() .build(); } diff --git a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs index e355e5d099ad5a01c74512f79d722fdf968af02e..5ae0d8f8f6eca2517b3a2c4db6fc8f4501744511 100644 --- a/substrate/test-utils/runtime/src/extrinsic.rs +++ b/substrate/test-utils/runtime/src/extrinsic.rs @@ -22,10 +22,11 @@ use crate::{ CheckSubstrateCall, Extrinsic, Nonce, Pair, RuntimeCall, SignedPayload, TransferData, }; use codec::Encode; +use frame_metadata_hash_extension::CheckMetadataHash; use frame_system::{CheckNonce, CheckWeight}; use sp_core::crypto::Pair as TraitPair; use sp_keyring::AccountKeyring; -use sp_runtime::{transaction_validity::TransactionPriority, Perbill}; +use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionPriority, Perbill}; /// Transfer used in test substrate pallet. Extrinsic is created and signed using this data. #[derive(Clone)] @@ -81,17 +82,23 @@ pub struct ExtrinsicBuilder { function: RuntimeCall, signer: Option, nonce: Option, + metadata_hash: Option<[u8; 32]>, } impl ExtrinsicBuilder { /// Create builder for given `RuntimeCall`. By default `Extrinsic` will be signed by `Alice`. pub fn new(function: impl Into) -> Self { - Self { function: function.into(), signer: Some(AccountKeyring::Alice.pair()), nonce: None } + Self { + function: function.into(), + signer: Some(AccountKeyring::Alice.pair()), + nonce: None, + metadata_hash: None, + } } /// Create builder for given `RuntimeCall`. `Extrinsic` will be unsigned. pub fn new_unsigned(function: impl Into) -> Self { - Self { function: function.into(), signer: None, nonce: None } + Self { function: function.into(), signer: None, nonce: None, metadata_hash: None } } /// Create builder for `pallet_call::bench_transfer` from given `TransferData`. 
@@ -105,6 +112,7 @@ impl ExtrinsicBuilder { Self { nonce: Some(transfer.nonce), signer: Some(transfer.from.clone()), + metadata_hash: None, ..Self::new(BalancesCall::transfer_allow_death { dest: transfer.to, value: transfer.amount, @@ -186,6 +194,12 @@ impl ExtrinsicBuilder { self } + /// Metadata hash to put into the signed data of the extrinsic. + pub fn metadata_hash(mut self, metadata_hash: [u8; 32]) -> Self { + self.metadata_hash = Some(metadata_hash); + self + } + /// Build `Extrinsic` using embedded parameters pub fn build(self) -> Extrinsic { if let Some(signer) = self.signer { @@ -193,9 +207,15 @@ impl ExtrinsicBuilder { CheckNonce::from(self.nonce.unwrap_or(0)), CheckWeight::new(), CheckSubstrateCall {}, + self.metadata_hash + .map(CheckMetadataHash::new_with_custom_hash) + .unwrap_or_else(|| CheckMetadataHash::new(false)), + ); + let raw_payload = SignedPayload::from_raw( + self.function.clone(), + extra.clone(), + extra.additional_signed().unwrap(), ); - let raw_payload = - SignedPayload::from_raw(self.function.clone(), extra.clone(), ((), (), ())); let signature = raw_payload.using_encoded(|e| signer.sign(e)); Extrinsic::new_signed(self.function, signer.public(), signature, extra) diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 370aa0034fcd10db18007050c8b0701810655ada..ab87db0e70065e0a9ab55bbe92bfa36e2238043f 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -149,7 +149,12 @@ pub type Signature = sr25519::Signature; pub type Pair = sp_core::sr25519::Pair; /// The SignedExtension to the basic transaction logic. -pub type SignedExtra = (CheckNonce, CheckWeight, CheckSubstrateCall); +pub type SignedExtra = ( + CheckNonce, + CheckWeight, + CheckSubstrateCall, + frame_metadata_hash_extension::CheckMetadataHash, +); /// The payload being signed in transactions. pub type SignedPayload = sp_runtime::generic::SignedPayload; /// Unchecked extrinsic type as expected by this runtime. @@ -494,14 +499,14 @@ impl_runtime_apis! 
{ impl sp_api::Metadata for Runtime { fn metadata() -> OpaqueMetadata { - unimplemented!() + OpaqueMetadata::new(Runtime::metadata().into()) } - fn metadata_at_version(_version: u32) -> Option { - unimplemented!() + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) } fn metadata_versions() -> alloc::vec::Vec { - unimplemented!() + Runtime::metadata_versions() } } diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index 9b52706c739915690c7f734c399a3071b30a3630..360e2b7b810d1f40d4ac4884eedb60266e41cc5c 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } futures = "0.3.30" parking_lot = "0.12.1" thiserror = { workspace = true } diff --git a/substrate/utils/fork-tree/Cargo.toml b/substrate/utils/fork-tree/Cargo.toml index 87135ef2afb884249fd4dfb2683e453c2328180d..275f44623bd1455cb34caa544fbfa4a6e140111b 100644 --- a/substrate/utils/fork-tree/Cargo.toml +++ b/substrate/utils/fork-tree/Cargo.toml @@ -17,4 +17,4 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.12", features = ["derive"] } diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index fa270759c912c91564669db5523059f80cff36c4..7cfacdc2e5edea443de8742fbe23cf9245872f08 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "6.2.2" chrono = "0.4" clap = { version = "4.5.3", features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } comfy-table = { version = "7.1.0", default-features = false } handlebars = "5.1.0" Inflector = "0.11.4" diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/README.md b/substrate/utils/frame/benchmarking-cli/src/overhead/README.md index 648908010ba04cf6b19cf2aa50c0ea68491a1e28..cee095fb8cadd4fab0cc34c53bc8f0f7df6fc830 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/README.md +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/README.md @@ -108,7 +108,7 @@ The complete command for Polkadot looks like this: cargo run --profile=production -- benchmark overhead --chain=polkadot-dev --wasm-execution=compiled --weight-path=runtime/polkadot/constants/src/weights/ ``` -This will overwrite the the +This will overwrite the [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs) and [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs) diff --git a/substrate/utils/frame/frame-utilities-cli/Cargo.toml b/substrate/utils/frame/frame-utilities-cli/Cargo.toml deleted file mode 100644 index 3952c9fd219f495270c811f0f849eb4178a96a92..0000000000000000000000000000000000000000 --- 
a/substrate/utils/frame/frame-utilities-cli/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "substrate-frame-cli" -version = "32.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -homepage = "https://substrate.io" -repository.workspace = true -description = "cli interface for FRAME" -documentation = "https://docs.rs/substrate-frame-cli" -readme = "README.md" - -[lints] -workspace = true - -[dependencies] -clap = { version = "4.5.3", features = ["derive"] } -frame-support = { path = "../../../frame/support" } -frame-system = { path = "../../../frame/system" } -sc-cli = { path = "../../../client/cli" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } - -[features] -default = [] diff --git a/substrate/utils/frame/frame-utilities-cli/README.md b/substrate/utils/frame/frame-utilities-cli/README.md deleted file mode 100644 index 54467a3ad7704451f4064f8334ea374b8c7f6bca..0000000000000000000000000000000000000000 --- a/substrate/utils/frame/frame-utilities-cli/README.md +++ /dev/null @@ -1,3 +0,0 @@ -frame-system CLI utilities - -License: Apache-2.0 diff --git a/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs b/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs deleted file mode 100644 index abc0cdb3ff52b355596e4b1cd25c67ee0508d951..0000000000000000000000000000000000000000 --- a/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs +++ /dev/null @@ -1,88 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Implementation of the `palletid` subcommand - -use clap::Parser; -use frame_support::PalletId; -use sc_cli::{ - utils::print_from_uri, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, - OutputTypeFlag, -}; -use sp_core::crypto::{unwrap_or_default_ss58_version, Ss58AddressFormat, Ss58Codec}; -use sp_runtime::traits::AccountIdConversion; - -/// The `palletid` command -#[derive(Debug, Parser)] -#[command(name = "palletid", about = "Inspect a module ID address")] -pub struct PalletIdCmd { - /// The module ID used to derive the account - id: String, - - /// network address format - #[arg( - long, - value_name = "NETWORK", - value_parser = sc_cli::parse_ss58_address_format, - ignore_case = true, - )] - pub network: Option, - - #[allow(missing_docs)] - #[command(flatten)] - pub output_scheme: OutputTypeFlag, - - #[allow(missing_docs)] - #[command(flatten)] - pub crypto_scheme: CryptoSchemeFlag, - - #[allow(missing_docs)] - #[command(flatten)] - pub keystore_params: KeystoreParams, -} - -impl PalletIdCmd { - /// runs the command - pub fn run(&self) -> Result<(), Error> - where - R: frame_system::Config, - R::AccountId: Ss58Codec, - { - if self.id.len() != 8 { - return Err("a module id must be a string of 8 characters".into()) - } - let password = self.keystore_params.read_password()?; - - let id_fixed_array: [u8; 8] = self.id.as_bytes().try_into().map_err(|_| { - "Cannot convert argument to palletid: argument should be 8-character string" - })?; - - let account_id: R::AccountId = PalletId(id_fixed_array).into_account_truncating(); - - with_crypto_scheme!( - self.crypto_scheme.scheme, - print_from_uri( - &account_id.to_ss58check_with_version(unwrap_or_default_ss58_version(self.network)), - password, - self.network, - self.output_scheme.output_type - ) - ); - - Ok(()) - } -} diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 82b019154832aee4202d60d3a1a2b9015b7b774a..2911d5eef65902af9a0bec6a679a89db9eb6fb7a 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.22", features = ["http-client"] } -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } log = { workspace = true, default-features = true } serde = { workspace = true, default-features = true } sp-core = { path = "../../../primitives/core" } diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 58cb901470c17df20f877ed96930b9be05f7d88d..0ecb98f31343aa050d9daa8f33445e858631813d 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -36,7 +36,7 @@ use sp_core::{ }, }; use sp_runtime::{ - traits::{Block as BlockT, Hash, HashingFor}, + traits::{Block as BlockT, HashingFor}, StateVersion, }; use sp_state_machine::TestExternalities; @@ -58,37 +58,39 @@ type ChildKeyValues = Vec<(ChildInfo, Vec)>; type SnapshotVersion = Compact; const LOG_TARGET: &str = "remote-ext"; -const DEFAULT_HTTP_ENDPOINT: &str = "https://rpc.polkadot.io:443"; -const SNAPSHOT_VERSION: SnapshotVersion = Compact(3); +const DEFAULT_HTTP_ENDPOINT: &str = "https://polkadot-try-runtime-node.parity-chains.parity.io:443"; +const SNAPSHOT_VERSION: SnapshotVersion = Compact(4); /// The snapshot that we store on 
disk. #[derive(Decode, Encode)] -struct Snapshot { +struct Snapshot { snapshot_version: SnapshotVersion, state_version: StateVersion, - block_hash: H, // > raw_storage: Vec<(Vec, (Vec, i32))>, - storage_root: H, + // The storage root of the state. This may vary from the storage root in the header, if not the + // entire state was fetched. + storage_root: B::Hash, + header: B::Header, } -impl Snapshot { +impl Snapshot { pub fn new( state_version: StateVersion, - block_hash: H, raw_storage: Vec<(Vec, (Vec, i32))>, - storage_root: H, + storage_root: B::Hash, + header: B::Header, ) -> Self { Self { snapshot_version: SNAPSHOT_VERSION, state_version, - block_hash, raw_storage, storage_root, + header, } } - fn load(path: &PathBuf) -> Result, &'static str> { + fn load(path: &PathBuf) -> Result, &'static str> { let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; // The first item in the SCALE encoded struct bytes is the snapshot version. We decode and // check that first, before proceeding to decode the rest of the snapshot. @@ -105,21 +107,21 @@ impl Snapshot { /// An externalities that acts exactly the same as [`sp_io::TestExternalities`] but has a few extra /// bits and pieces to it, and can be loaded remotely. -pub struct RemoteExternalities { +pub struct RemoteExternalities { /// The inner externalities. - pub inner_ext: TestExternalities, - /// The block hash with which we created this externality env. - pub block_hash: H::Out, + pub inner_ext: TestExternalities>, + /// The block header which we created this externality env. + pub header: B::Header, } -impl Deref for RemoteExternalities { - type Target = TestExternalities; +impl Deref for RemoteExternalities { + type Target = TestExternalities>; fn deref(&self) -> &Self::Target { &self.inner_ext } } -impl DerefMut for RemoteExternalities { +impl DerefMut for RemoteExternalities { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner_ext } @@ -832,34 +834,55 @@ where ) -> Result, &'static str> { let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); - let get_child_keys_closure = || { - #[allow(deprecated)] - substrate_rpc_client::ChildStateApi::storage_keys( - client, - PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), - child_prefix.clone(), - Some(at), - ) - }; - let child_keys = - Retry::spawn(retry_strategy, get_child_keys_closure).await.map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc child_get_keys failed." - })?; + let mut all_child_keys = Vec::new(); + let mut start_key = None; + + loop { + let get_child_keys_closure = || { + let top_key = PrefixedStorageKey::new(prefixed_top_key.0.clone()); + substrate_rpc_client::ChildStateApi::storage_keys_paged( + client, + top_key, + Some(child_prefix.clone()), + Self::DEFAULT_KEY_DOWNLOAD_PAGE, + start_key.clone(), + Some(at), + ) + }; + + let child_keys = Retry::spawn(retry_strategy.clone(), get_child_keys_closure) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc child_get_keys failed." 
+ })?; + + let keys_count = child_keys.len(); + if keys_count == 0 { + break; + } + + start_key = child_keys.last().cloned(); + all_child_keys.extend(child_keys); + + if keys_count < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize { + break; + } + } debug!( target: LOG_TARGET, "[thread = {:?}] scraped {} child-keys of the child-bearing top key: {}", std::thread::current().id(), - child_keys.len(), + all_child_keys.len(), HexDisplay::from(prefixed_top_key) ); - Ok(child_keys) + Ok(all_child_keys) } } -impl Builder +impl Builder where B::Hash: DeserializeOwned, B::Header: DeserializeOwned, @@ -1030,6 +1053,21 @@ where Ok(()) } + async fn load_header(&self) -> Result { + let retry_strategy = + FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); + let get_header_closure = || { + ChainApi::<(), _, B::Header, ()>::header( + self.as_online().rpc_client(), + Some(self.as_online().at_expected()), + ) + }; + Retry::spawn(retry_strategy, get_header_closure) + .await + .map_err(|_| "Failed to fetch header for block from network")? + .ok_or("Network returned None block header") + } + /// Load the data from a remote server. The main code path is calling into `load_top_remote` and /// `load_child_remote`. /// @@ -1058,13 +1096,11 @@ where // If we need to save a snapshot, save the raw storage and root hash to the snapshot. if let Some(path) = self.as_online().state_snapshot.clone().map(|c| c.path) { let (raw_storage, storage_root) = pending_ext.into_raw_snapshot(); - let snapshot = Snapshot::::new( + let snapshot = Snapshot::::new( state_version, - self.as_online() - .at - .expect("set to `Some` in `init_remote_client`; must be called before; qed"), raw_storage.clone(), storage_root, + self.load_header().await?, ); let encoded = snapshot.encode(); log::info!( @@ -1086,22 +1122,21 @@ where Ok(pending_ext) } - async fn do_load_remote(&mut self) -> Result>, &'static str> { + async fn do_load_remote(&mut self) -> Result, &'static str> { self.init_remote_client().await?; - let block_hash = self.as_online().at_expected(); let inner_ext = self.load_remote_and_maybe_save().await?; - Ok(RemoteExternalities { block_hash, inner_ext }) + Ok(RemoteExternalities { header: self.load_header().await?, inner_ext }) } fn do_load_offline( &mut self, config: OfflineConfig, - ) -> Result>, &'static str> { + ) -> Result, &'static str> { let mut sp = Spinner::with_timer(Spinners::Dots, "Loading snapshot...".into()); let start = Instant::now(); info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path); - let Snapshot { snapshot_version: _, block_hash, state_version, raw_storage, storage_root } = - Snapshot::::load(&config.state_snapshot.path)?; + let Snapshot { snapshot_version: _, header, state_version, raw_storage, storage_root } = + Snapshot::::load(&config.state_snapshot.path)?; let inner_ext = TestExternalities::from_raw_snapshot( raw_storage, @@ -1110,12 +1145,10 @@ where ); sp.stop_with_message(format!("✅ Loaded snapshot ({:.2}s)", start.elapsed().as_secs_f32())); - Ok(RemoteExternalities { inner_ext, block_hash }) + Ok(RemoteExternalities { inner_ext, header }) } - pub(crate) async fn pre_build( - mut self, - ) -> Result>, &'static str> { + pub(crate) async fn pre_build(mut self) -> Result, &'static str> { let mut ext = match self.mode.clone() { Mode::Offline(config) => self.do_load_offline(config)?, Mode::Online(_) => self.do_load_remote().await?, @@ -1154,7 +1187,7 @@ where } // Public methods -impl Builder +impl Builder where B::Hash: DeserializeOwned, B::Header: 
DeserializeOwned, @@ -1191,7 +1224,7 @@ where self } - pub async fn build(self) -> Result>, &'static str> { + pub async fn build(self) -> Result, &'static str> { let mut ext = self.pre_build().await?; ext.commit_all().unwrap(); @@ -1226,7 +1259,7 @@ mod tests { init_logger(); Builder::::new() .mode(Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig::new("test_data/proxy_test"), + state_snapshot: SnapshotConfig::new("test_data/test.snap"), })) .build() .await @@ -1241,7 +1274,7 @@ mod tests { // get the first key from the snapshot file. let some_key = Builder::::new() .mode(Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig::new("test_data/proxy_test"), + state_snapshot: SnapshotConfig::new("test_data/test.snap"), })) .build() .await @@ -1255,7 +1288,7 @@ mod tests { Builder::::new() .mode(Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig::new("test_data/proxy_test"), + state_snapshot: SnapshotConfig::new("test_data/test.snap"), })) .blacklist_hashed_key(&some_key) .build() @@ -1341,7 +1374,7 @@ mod remote_tests { .await .unwrap(); - assert_eq!(ext.block_hash, cached_ext.block_hash); + assert_eq!(ext.header.hash(), cached_ext.header.hash()); } #[tokio::test] diff --git a/substrate/utils/frame/remote-externalities/test_data/proxy_test b/substrate/utils/frame/remote-externalities/test_data/proxy_test deleted file mode 100644 index f0b1b4f5af40bc8a159c9ee250bee7849cababae..0000000000000000000000000000000000000000 Binary files a/substrate/utils/frame/remote-externalities/test_data/proxy_test and /dev/null differ diff --git a/substrate/utils/frame/remote-externalities/test_data/test.snap b/substrate/utils/frame/remote-externalities/test_data/test.snap new file mode 100644 index 0000000000000000000000000000000000000000..28f2012d0f2a1b6a63ee8825deaf70257625279a Binary files /dev/null and b/substrate/utils/frame/remote-externalities/test_data/test.snap differ diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 3673b2790c524117f2984fab60bdd2b53395f648..ee3bf5eb68d716548a4a738f80254cfe811c651f 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false } serde = { features = ["derive"], workspace = true, default-features = true } sp-core = { path = "../../../../primitives/core" } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index 84db06da7b0d26fa52f066dc7ef9f6d879bcc34c..bf566f909ecb712d8b00a26717739243adb06129 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } jsonrpsee = { version = "0.22", features = ["jsonrpsee-types"] } serde = { workspace = true, default-features = true } frame-support = { path = "../../../../frame/support" } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 
diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 3e623daa14bbcd7f3c80ae9039ddedb885f46601..6829d753ed71327dea8ad4e65d91af67c14285f1 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } +codec = { package = "parity-scale-codec", version = "3.6.12" } jsonrpsee = { version = "0.22.5", features = ["client-core", "macros", "server-core"] } futures = "0.3.30" log = { workspace = true, default-features = true } diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index bac323e2e6a0905cf1d956589235c196ef2881a4..090955494f0a7572a08407c2aed26919863b3837 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -27,3 +27,34 @@ filetime = "0.2.16" wasm-opt = "0.116" parity-wasm = "0.45" polkavm-linker = { workspace = true } + +# Dependencies required for the `metadata-hash` feature. +merkleized-metadata = { version = "0.1.0", optional = true } +sc-executor = { path = "../../client/executor", optional = true } +sp-core = { path = "../../primitives/core", optional = true } +sp-io = { path = "../../primitives/io", optional = true } +sp-version = { path = "../../primitives/version", optional = true } +frame-metadata = { version = "16.0.0", features = ["current"], optional = true } +codec = { package = "parity-scale-codec", version = "3.1.5", optional = true } +array-bytes = { version = "6.1", optional = true } +sp-tracing = { path = "../../primitives/tracing", optional = true } + +[features] +# Enable support for generating the metadata hash. +# +# To generate the metadata hash the runtime is built once, executed to build the metadata, and then +# built a second time with the `RUNTIME_METADATA_HASH` environment variable set. The environment +# variable then contains the hash and can be used inside the runtime. +# +# This pulls in quite a lot of dependencies and thus is disabled by default. +metadata-hash = [ + "array-bytes", + "codec", + "frame-metadata", + "merkleized-metadata", + "sc-executor", + "sp-core", + "sp-io", + "sp-tracing", + "sp-version", +] diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index 163703fbec628327c0ba3dc0683972285af71e30..37c6c4aa74319321122dabb6d3da7295d753732e 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -23,6 +23,13 @@ use std::{ use crate::RuntimeTarget; +/// Extra information when generating the `metadata-hash`. +#[cfg(feature = "metadata-hash")] +pub(crate) struct MetadataExtraInfo { + pub decimals: u8, + pub token_symbol: String, +} + /// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. fn get_manifest_dir() -> PathBuf { env::var("CARGO_MANIFEST_DIR") @@ -53,6 +60,8 @@ impl WasmBuilderSelectProject { disable_runtime_version_section_check: false, export_heap_base: false, import_memory: false, + #[cfg(feature = "metadata-hash")] + enable_metadata_hash: None, } } @@ -71,6 +80,8 @@ impl WasmBuilderSelectProject { disable_runtime_version_section_check: false, export_heap_base: false, import_memory: false, + #[cfg(feature = "metadata-hash")] + enable_metadata_hash: None, }) } else { Err("Project path must point to the `Cargo.toml` of the project") @@ -108,6 +119,10 @@ pub struct WasmBuilder { export_heap_base: bool, /// Whether `--import-memory` should be added to the link args (WASM-only).
import_memory: bool, + + /// Whether to enable the metadata hash generation. + #[cfg(feature = "metadata-hash")] + enable_metadata_hash: Option, } impl WasmBuilder { @@ -191,6 +206,22 @@ impl WasmBuilder { self } + /// Enable generation of the metadata hash. + /// + /// This will compile the runtime once, fetch the metadata, build the metadata hash and + /// then compile again with the env `RUNTIME_METADATA_HASH` set. For more information + /// about the metadata hash see [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). + /// + /// - `token_symbol`: The symbol of the main native token of the chain. + /// - `decimals`: The number of decimals of the main native token. + #[cfg(feature = "metadata-hash")] + pub fn enable_metadata_hash(mut self, token_symbol: impl Into, decimals: u8) -> Self { + self.enable_metadata_hash = + Some(MetadataExtraInfo { token_symbol: token_symbol.into(), decimals }); + + self + } + /// Disable the check for the `runtime_version` wasm section. /// /// By default the `wasm-builder` will ensure that the `runtime_version` section will @@ -237,6 +268,8 @@ impl WasmBuilder { self.features_to_enable, self.file_name, !self.disable_runtime_version_section_check, + #[cfg(feature = "metadata-hash")] + self.enable_metadata_hash, ); // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't @@ -311,6 +344,7 @@ fn build_project( features_to_enable: Vec, wasm_binary_name: Option, check_for_runtime_version_section: bool, + #[cfg(feature = "metadata-hash")] enable_metadata_hash: Option, ) { let cargo_cmd = match crate::prerequisites::check(target) { Ok(cmd) => cmd, @@ -328,6 +362,8 @@ fn build_project( features_to_enable, wasm_binary_name, check_for_runtime_version_section, + #[cfg(feature = "metadata-hash")] + enable_metadata_hash, ); let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 9ebab38b9cb2f727594e310f1e6d27c221f528dc..07de4c15831b842e6ac559ebf445901744d7d98f 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -116,6 +116,8 @@ use std::{ use version::Version; mod builder; +#[cfg(feature = "metadata-hash")] +mod metadata_hash; mod prerequisites; mod version; mod wasm_project; @@ -238,7 +240,7 @@ fn get_rustup_command(target: RuntimeTarget) -> Option { } /// Wraps a specific command which represents a cargo invocation. -#[derive(Debug)] +#[derive(Debug, Clone)] struct CargoCommand { program: String, args: Vec, @@ -350,6 +352,7 @@ impl CargoCommand { } /// Wraps a [`CargoCommand`] and the version of `rustc` the cargo command uses. +#[derive(Clone)] struct CargoCommandVersioned { command: CargoCommand, version: String, diff --git a/substrate/utils/wasm-builder/src/metadata_hash.rs b/substrate/utils/wasm-builder/src/metadata_hash.rs new file mode 100644 index 0000000000000000000000000000000000000000..1003f2d18eafdab2c619f260c10ba5e30e0d589b --- /dev/null +++ b/substrate/utils/wasm-builder/src/metadata_hash.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::builder::MetadataExtraInfo; +use codec::{Decode, Encode}; +use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed}; +use merkleized_metadata::{generate_metadata_digest, ExtraInfo}; +use sc_executor::WasmExecutor; +use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode, WrappedRuntimeCode}; +use std::path::Path; + +/// The host functions that we provide when calling into the wasm file. +/// +/// Any other host function will return an error. +type HostFunctions = ( + // The allocator functions. + sp_io::allocator::HostFunctions, + // Logging is good to have for debugging issues. + sp_io::logging::HostFunctions, + // Give access to the "state". The state will actually be empty, but some chains read + // constants from the state when generating the metadata and would panic otherwise. Thus, we + // give them an empty state to not panic. + sp_io::storage::HostFunctions, + // The hashing functions. + sp_io::hashing::HostFunctions, +); + +/// Generate the metadata hash. +/// +/// The metadata hash is generated as specified in +/// [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). +/// +/// Returns the metadata hash. +pub fn generate_metadata_hash(wasm: &Path, extra_info: MetadataExtraInfo) -> [u8; 32] { + sp_tracing::try_init_simple(); + + let wasm = std::fs::read(wasm).expect("Wasm file was just created and should be readable."); + + let executor = WasmExecutor::::builder() + .with_allow_missing_host_functions(true) + .build(); + + let runtime_code = RuntimeCode { + code_fetcher: &WrappedRuntimeCode(wasm.into()), + heap_pages: None, + // The hash is only used for caching and thus not that important for our use case here. + hash: vec![1, 2, 3], + }; + + let metadata = executor + .call( + &mut sp_io::TestExternalities::default().ext(), + &runtime_code, + "Metadata_metadata_at_version", + &15u32.encode(), + CallContext::Offchain, + ) + .0 + .expect("`Metadata::metadata_at_version` should exist."); + + let metadata = Option::>::decode(&mut &metadata[..]) + .ok() + .flatten() + .expect("Metadata V15 support is required."); + + let metadata = RuntimeMetadataPrefixed::decode(&mut &metadata[..]) + .expect("Invalid encoded metadata?") + .1; + + let runtime_version = executor + .call( + &mut sp_io::TestExternalities::default().ext(), + &runtime_code, + "Core_version", + &[], + CallContext::Offchain, + ) + .0 + .expect("`Core_version` should exist."); + let runtime_version = sp_version::RuntimeVersion::decode(&mut &runtime_version[..]) + .expect("Invalid `RuntimeVersion` encoding"); + + let base58_prefix = extract_ss58_prefix(&metadata); + + let extra_info = ExtraInfo { + spec_version: runtime_version.spec_version, + spec_name: runtime_version.spec_name.into(), + base58_prefix, + decimals: extra_info.decimals, + token_symbol: extra_info.token_symbol, + }; + + generate_metadata_digest(&metadata, extra_info) + .expect("Failed to generate the metadata digest") + .hash() +}
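For orientation, a hedged sketch of how this new helper is meant to be driven (the wasm path and token values here are placeholders; the real call site lives in `wasm_project.rs` further below):

```rust
use crate::builder::MetadataExtraInfo;
use std::path::Path;

// Illustrative invocation only: compute the RFC78 digest for a freshly built
// runtime blob, using chain-specific token information.
fn example(raw_blob_path: &Path) -> [u8; 32] {
    let extra_info = MetadataExtraInfo { decimals: 12, token_symbol: "UNIT".into() };
    // The result is later exported to the second build via the
    // `RUNTIME_METADATA_HASH` environment variable.
    generate_metadata_hash(raw_blob_path, extra_info)
}
```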
+ +/// Extract the `SS58` prefix from the constants in the given `metadata`. +fn extract_ss58_prefix(metadata: &RuntimeMetadata) -> u16 { + let RuntimeMetadata::V15(ref metadata) = metadata else { + panic!("Metadata version 15 required") + }; + + let system = metadata + .pallets + .iter() + .find(|p| p.name == "System") + .expect("Each FRAME runtime has the `System` pallet; qed"); + + system + .constants + .iter() + .find_map(|c| { + (c.name == "SS58Prefix") + .then(|| u16::decode(&mut &c.value[..]).expect("SS58 is a `u16`; qed")) + }) + .expect("`SS58Prefix` exists in the `System` constants; qed") +} diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index b58e6bfa36b478b91995789a148b042d167d9f3f..ff6c8e38a332121d6e4c7fcdfa6fe4128867ac5a 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +#[cfg(feature = "metadata-hash")] +use crate::builder::MetadataExtraInfo; use crate::{write_file_if_changed, CargoCommandVersioned, RuntimeTarget, OFFLINE}; use build_helper::rerun_if_changed; @@ -113,57 +115,103 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { /// The path to the compact runtime binary and the bloaty runtime binary. pub(crate) fn create_and_compile( target: RuntimeTarget, - project_cargo_toml: &Path, + orig_project_cargo_toml: &Path, default_rustflags: &str, cargo_cmd: CargoCommandVersioned, features_to_enable: Vec, - bloaty_blob_out_name_override: Option, + blob_out_name_override: Option, check_for_runtime_version_section: bool, + #[cfg(feature = "metadata-hash")] enable_metadata_hash: Option, ) -> (Option, WasmBinaryBloaty) { let runtime_workspace_root = get_wasm_workspace_root(); let runtime_workspace = runtime_workspace_root.join(target.build_subdirectory()); - let crate_metadata = crate_metadata(project_cargo_toml); + let crate_metadata = crate_metadata(orig_project_cargo_toml); let project = create_project( target, - project_cargo_toml, + orig_project_cargo_toml, &runtime_workspace, &crate_metadata, crate_metadata.workspace_root.as_ref(), features_to_enable, ); + let wasm_project_cargo_toml = project.join("Cargo.toml"); let build_config = BuildConfiguration::detect(target, &project); - // Build the bloaty runtime blob - let raw_blob_path = build_bloaty_blob( - target, - &build_config.blob_build_profile, - &project, - default_rustflags, - cargo_cmd, - ); + #[cfg(feature = "metadata-hash")] + let raw_blob_path = match enable_metadata_hash { + Some(extra_info) => { + // When the metadata hash is enabled we need to build the runtime twice. + let raw_blob_path = build_bloaty_blob( + target, + &build_config.blob_build_profile, + &project, + default_rustflags, + cargo_cmd.clone(), + None, + ); - let (final_blob_binary, bloaty_blob_binary) = match target { - RuntimeTarget::Wasm => compile_wasm( - project_cargo_toml, + let hash = crate::metadata_hash::generate_metadata_hash(&raw_blob_path, extra_info); + + build_bloaty_blob( + target, + &build_config.blob_build_profile, + &project, + default_rustflags, + cargo_cmd, + Some(hash), + ) + }, + None => build_bloaty_blob( + target, + &build_config.blob_build_profile, &project, - bloaty_blob_out_name_override, - check_for_runtime_version_section, - &build_config, + default_rustflags, + cargo_cmd, + None, ), + }; + + // If the feature is not enabled, we only need to do it once.
+ #[cfg(not(feature = "metadata-hash"))] + let raw_blob_path = { + build_bloaty_blob( + target, + &build_config.blob_build_profile, + &project, + default_rustflags, + cargo_cmd, + ) + }; + + let blob_name = + blob_out_name_override.unwrap_or_else(|| get_blob_name(target, &wasm_project_cargo_toml)); + + let (final_blob_binary, bloaty_blob_binary) = match target { + RuntimeTarget::Wasm => { + let out_path = project.join(format!("{blob_name}.wasm")); + fs::copy(raw_blob_path, &out_path).expect("copying the runtime blob should never fail"); + + maybe_compact_and_compress_wasm( + &wasm_project_cargo_toml, + &project, + WasmBinaryBloaty(out_path), + &blob_name, + check_for_runtime_version_section, + &build_config, + ) + }, RuntimeTarget::Riscv => { - let out_name = bloaty_blob_out_name_override - .unwrap_or_else(|| get_blob_name(target, project_cargo_toml)); - let out_path = project.join(format!("{out_name}.polkavm")); + let out_path = project.join(format!("{blob_name}.polkavm")); fs::copy(raw_blob_path, &out_path).expect("copying the runtime blob should never fail"); (None, WasmBinaryBloaty(out_path)) }, }; generate_rerun_if_changed_instructions( - project_cargo_toml, + orig_project_cargo_toml, &project, &runtime_workspace, final_blob_binary.as_ref(), @@ -177,25 +225,14 @@ pub(crate) fn create_and_compile( (final_blob_binary, bloaty_blob_binary) } -fn compile_wasm( - project_cargo_toml: &Path, +fn maybe_compact_and_compress_wasm( + wasm_project_cargo_toml: &Path, project: &Path, - bloaty_blob_out_name_override: Option, + bloaty_blob_binary: WasmBinaryBloaty, + blob_name: &str, check_for_runtime_version_section: bool, build_config: &BuildConfiguration, ) -> (Option, WasmBinaryBloaty) { - // Get the name of the bloaty runtime blob. - let bloaty_blob_default_name = get_blob_name(RuntimeTarget::Wasm, project_cargo_toml); - let bloaty_blob_out_name = - bloaty_blob_out_name_override.unwrap_or_else(|| bloaty_blob_default_name.clone()); - - let bloaty_blob_binary = copy_bloaty_blob( - &project, - &build_config.blob_build_profile, - &bloaty_blob_default_name, - &bloaty_blob_out_name, - ); - // Try to compact and compress the bloaty blob, if the *outer* profile wants it. // // This is because, by default the inner profile will be set to `Release` even when the outer @@ -203,15 +240,9 @@ fn compile_wasm( // development activities. 
let (compact_blob_path, compact_compressed_blob_path) = if build_config.outer_build_profile.wants_compact() { - let compact_blob_path = compact_wasm( - &project, - &build_config.blob_build_profile, - project_cargo_toml, - &bloaty_blob_out_name, - ); - let compact_compressed_blob_path = compact_blob_path - .as_ref() - .and_then(|p| try_compress_blob(&p.0, &bloaty_blob_out_name)); + let compact_blob_path = compact_wasm(&project, blob_name, &bloaty_blob_binary); + let compact_compressed_blob_path = + compact_blob_path.as_ref().and_then(|p| try_compress_blob(&p.0, blob_name)); (compact_blob_path, compact_compressed_blob_path) } else { (None, None) @@ -221,15 +252,12 @@ fn compile_wasm( ensure_runtime_version_wasm_section_exists(bloaty_blob_binary.bloaty_path()); } - compact_blob_path - .as_ref() - .map(|wasm_binary| copy_blob_to_target_directory(project_cargo_toml, wasm_binary)); + let final_blob_binary = compact_compressed_blob_path.or(compact_blob_path); - compact_compressed_blob_path.as_ref().map(|wasm_binary_compressed| { - copy_blob_to_target_directory(project_cargo_toml, wasm_binary_compressed) - }); + final_blob_binary + .as_ref() + .map(|binary| copy_blob_to_target_directory(wasm_project_cargo_toml, binary)); - let final_blob_binary = compact_compressed_blob_path.or(compact_blob_path); (final_blob_binary, bloaty_blob_binary) } @@ -347,12 +375,25 @@ fn get_crate_name(cargo_manifest: &Path) -> String { .expect("Package name exists; qed") } +/// Extract the `lib.name` from the given `Cargo.toml`. +fn get_lib_name(cargo_manifest: &Path) -> Option { + let cargo_toml: Table = toml::from_str( + &fs::read_to_string(cargo_manifest).expect("File exists as checked before; qed"), + ) + .expect("Cargo manifest is a valid toml file; qed"); + + let lib = cargo_toml.get("lib").and_then(|t| t.as_table())?; + + lib.get("name").and_then(|p| p.as_str()).map(ToOwned::to_owned) +} + /// Returns the name for the blob binary. fn get_blob_name(target: RuntimeTarget, cargo_manifest: &Path) -> String { - let crate_name = get_crate_name(cargo_manifest); match target { - RuntimeTarget::Wasm => crate_name.replace('-', "_"), - RuntimeTarget::Riscv => crate_name, + RuntimeTarget::Wasm => get_lib_name(cargo_manifest) + .expect("The wasm project should have a `lib.name`; qed") + .replace('-', "_"), + RuntimeTarget::Riscv => get_crate_name(cargo_manifest), } } @@ -379,7 +420,6 @@ fn create_project_cargo_toml( workspace_root_path: &Path, crate_name: &str, crate_path: &Path, - wasm_binary: &str, enabled_features: impl Iterator, ) { let mut workspace_toml: Table = toml::from_str( @@ -443,7 +483,7 @@ fn create_project_cargo_toml( if target == RuntimeTarget::Wasm { let mut lib = Table::new(); - lib.insert("name".into(), wasm_binary.into()); + lib.insert("name".into(), crate_name.replace("-", "_").into()); lib.insert("crate-type".into(), vec!["cdylib".to_string()].into()); wasm_workspace_toml.insert("lib".into(), lib.into()); } @@ -588,7 +628,6 @@ fn create_project( ) -> PathBuf { let crate_name = get_crate_name(project_cargo_toml); let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed"); - let wasm_binary = get_blob_name(target, project_cargo_toml); let wasm_project_folder = wasm_workspace.join(&crate_name); fs::create_dir_all(wasm_project_folder.join("src")) @@ -610,7 +649,6 @@ fn create_project( workspace_root_path, &crate_name, crate_path, - &wasm_binary, enabled_features.into_iter(), ); @@ -775,12 +813,15 @@ fn offline_build() -> bool { } /// Build the project and create the bloaty runtime blob. 
+/// +/// Returns the path to the generated bloaty runtime blob. fn build_bloaty_blob( target: RuntimeTarget, blob_build_profile: &Profile, project: &Path, default_rustflags: &str, cargo_cmd: CargoCommandVersioned, + #[cfg(feature = "metadata-hash")] metadata_hash: Option<[u8; 32]>, ) -> PathBuf { let manifest_path = project.join("Cargo.toml"); let mut build_cmd = cargo_cmd.command(); @@ -820,6 +861,11 @@ fn build_bloaty_blob( // We don't want to call ourselves recursively .env(crate::SKIP_BUILD_ENV, ""); + #[cfg(feature = "metadata-hash")] + if let Some(hash) = metadata_hash { + build_cmd.env("RUNTIME_METADATA_HASH", array_bytes::bytes2hex("0x", &hash)); + } + if super::color_output_enabled() { build_cmd.arg("--color=always"); } @@ -908,23 +954,16 @@ fn build_bloaty_blob( fn compact_wasm( project: &Path, - inner_profile: &Profile, - cargo_manifest: &Path, - out_name: &str, + blob_name: &str, + bloaty_binary: &WasmBinaryBloaty, ) -> Option { - let default_out_name = get_blob_name(RuntimeTarget::Wasm, cargo_manifest); - let in_path = project - .join("target/wasm32-unknown-unknown") - .join(inner_profile.directory()) - .join(format!("{}.wasm", default_out_name)); - - let wasm_compact_path = project.join(format!("{}.compact.wasm", out_name)); + let wasm_compact_path = project.join(format!("{blob_name}.compact.wasm")); let start = std::time::Instant::now(); wasm_opt::OptimizationOptions::new_opt_level_0() .mvp_features_only() .debug_info(true) .add_pass(wasm_opt::Pass::StripDwarf) - .run(&in_path, &wasm_compact_path) + .run(bloaty_binary.bloaty_path(), &wasm_compact_path) .expect("Failed to compact generated WASM binary."); println!( "{} {}", @@ -934,22 +973,6 @@ fn compact_wasm( Some(WasmBinary(wasm_compact_path)) } -fn copy_bloaty_blob( - project: &Path, - inner_profile: &Profile, - in_name: &str, - out_name: &str, -) -> WasmBinaryBloaty { - let in_path = project - .join("target/wasm32-unknown-unknown") - .join(inner_profile.directory()) - .join(format!("{}.wasm", in_name)); - - let bloaty_path = project.join(format!("{}.wasm", out_name)); - fs::copy(in_path, &bloaty_path).expect("Copying the bloaty file to the project dir."); - WasmBinaryBloaty(bloaty_path) -} - fn try_compress_blob(compact_blob_path: &Path, out_name: &str) -> Option { use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT; diff --git a/templates/minimal/.dockerignore b/templates/minimal/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..da6a8f2620d64f3761669047dfbda397b685493c --- /dev/null +++ b/templates/minimal/.dockerignore @@ -0,0 +1,3 @@ +target/ +Dockerfile +.dockerignore diff --git a/templates/minimal/Cargo.toml b/templates/minimal/Cargo.toml index 6cd28c5a49364a911c9b93fa1269456cf07527d5..ca00cb84284524fc7d43d3b95f9cf4bb795ec506 100644 --- a/templates/minimal/Cargo.toml +++ b/templates/minimal/Cargo.toml @@ -2,16 +2,13 @@ name = "minimal-template" description = "A minimal template built with Substrate, part of Polkadot Sdk." 
version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [dependencies] minimal-template-node = { path = "./node" } minimal-template-runtime = { path = "./runtime" } diff --git a/templates/minimal/Dockerfile b/templates/minimal/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..0c59192208fe66961613f7adaec16284078b9d9f --- /dev/null +++ b/templates/minimal/Dockerfile @@ -0,0 +1,28 @@ +FROM docker.io/paritytech/ci-unified:latest as builder + +WORKDIR /polkadot +COPY . /polkadot + +RUN cargo fetch +RUN cargo build --locked --release + +FROM docker.io/parity/base-bin:latest + +COPY --from=builder /polkadot/target/release/minimal-template-node /usr/local/bin + +USER root +RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/minimal-template-node --version + +USER polkadot + +EXPOSE 30333 9933 9944 9615 +VOLUME ["/data"] + +ENTRYPOINT ["/usr/local/bin/minimal-template-node"] diff --git a/templates/minimal/LICENSE b/templates/minimal/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..cf1ab25da0349f84a3fdd40032f0ce99db813b8b --- /dev/null +++ b/templates/minimal/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/templates/minimal/README.md b/templates/minimal/README.md index 0541e393db93bd9a67ddfaefe208c7ef22627f44..3488bc43cc902e8cd89f06bcfd419cfb211c9ec7 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -1,13 +1,95 @@ -# Minimal Template +
-This is a minimal template for creating a blockchain using the Polkadot SDK. +# Polkadot SDK's Minimal Template -# Docs +Polkadot SDK Logo +Polkadot SDK Logo -You can generate and view the [Rust -Docs](https://doc.rust-lang.org/cargo/commands/cargo-doc.html) for this template -with this command: +> This is a minimal template for creating a blockchain based on Polkadot SDK. +> +> This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). + +
+ +🤏 This template is a minimal (in terms of complexity and the number of components) template for building a blockchain node. + +🔧 Its runtime is composed of a single custom pallet as a starting point, and a handful of ready-made pallets such as the [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html). + +👤 The template has no consensus configured - it is best for experimenting with a single-node network. + +## Template Structure + +A Polkadot SDK based project such as this one consists of: + +- 💿 a [Node](./node/README.md) - the binary application. +- 🧮 the [Runtime](./runtime/README.md) - the core logic of the blockchain. +- 🎨 the [Pallets](./pallets/README.md) - from which the runtime is constructed. + +## Getting Started + +🦀 The template uses the Rust language. + +👉 Check the +[Rust installation instructions](https://www.rust-lang.org/tools/install) for your system. + +🛠️ Depending on your operating system and Rust version, there might be additional +packages required to compile this template - please take note of the Rust compiler output. + +### Build + +🔨 Use the following command to build the node without launching it: ```sh -cargo doc -p minimal-template --open +cargo build --release ``` + +🐳 Alternatively, build the docker image: + +```sh +docker build . -t polkadot-sdk-minimal-template +``` + +### Single-Node Development Chain + +👤 The following command starts a single-node development chain: + +```sh +./target/release/minimal-template-node --dev + +# docker version: +docker run --rm polkadot-sdk-minimal-template --dev +``` + +Development chains: + +- 🧹 Do not persist the state. +- 💰 Are preconfigured with a genesis state that includes several prefunded development accounts. +- 🧑‍⚖️ Development accounts are used as `sudo` accounts. + +### Connect with the Polkadot-JS Apps Front-End + +🌐 You can interact with your local node using the +hosted version of the [Polkadot/Substrate +Portal](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944). + +🪐 A hosted version is also +available on [IPFS](https://dotapps.io/). + +🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the +[`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository. + +## Contributing + +🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk). + +➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/minimal). + +😇 Please refer to the monorepo's [contribution guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and [Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md). + +## Getting Help + +🧑‍🏫 To learn about Polkadot in general, the [Polkadot.network](https://polkadot.network/) website is a good starting point. + +🧑‍🔧 For a technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are the Polkadot SDK documentation resources. + +👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and [Substrate StackExchange](https://substrate.stackexchange.com/).
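Since the README above points at the template's single custom pallet, a hedged, illustrative pallet skeleton (not the template's actual code) may help orient readers; it uses the classic `frame_support` macros:

```rust
#[frame_support::pallet]
pub mod pallet {
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    /// No configuration beyond the base system pallet, to keep the example minimal.
    #[pallet::config]
    pub trait Config: frame_system::Config {}

    /// A single storage value.
    #[pallet::storage]
    pub type Value<T> = StorageValue<_, u32>;

    #[pallet::call]
    impl<T: Config> Pallet<T> {
        /// Store `value` on-chain; any signed origin may call this.
        #[pallet::call_index(0)]
        #[pallet::weight(Weight::from_parts(10_000, 0))]
        pub fn set_value(origin: OriginFor<T>, value: u32) -> DispatchResult {
            ensure_signed(origin)?;
            Value::<T>::put(value);
            Ok(())
        }
    }
}
```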
diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index 606fd05803562abe7dc85b35c6a4c0b7c15a320c..d07c7b6dd9b5ebaf71d4c2cfaac8de2053a1463a 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -2,7 +2,7 @@ name = "minimal-template-node" description = "A minimal Substrate-based Substrate node, ready for hacking." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true @@ -10,9 +10,6 @@ edition.workspace = true publish = false build = "build.rs" -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/templates/minimal/node/README.md b/templates/minimal/node/README.md new file mode 100644 index 0000000000000000000000000000000000000000..04a916f5053b495ff6f1f5ff7320807c8565bd6c --- /dev/null +++ b/templates/minimal/node/README.md @@ -0,0 +1,14 @@ +# Node + +ℹ️ A node - in Polkadot - is a binary executable whose primary purpose is to execute the [runtime](../runtime/README.md). + +🔗 It communicates with other nodes in the network and aims for [consensus](https://wiki.polkadot.network/docs/learn-consensus) among them. + +⚙️ It acts as a remote procedure call (RPC) server, allowing interaction with the blockchain. + +👉 Learn more about the architecture, and the difference between a node and a runtime, [here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/wasm_meta_protocol/index.html). + +👇 Here are the most important files in this node template: + +- [`chain_spec.rs`](./src/chain_spec.rs): A chain specification is a source code file that defines the chain's initial (genesis) state. + [`service.rs`](./src/service.rs): This file defines the node implementation. It's a place to configure consensus-related topics. In favor of minimalism, this template has no consensus configured. diff --git a/templates/minimal/node/src/chain_spec.rs b/templates/minimal/node/src/chain_spec.rs index 6b721deb6d1df402d811ba40cd6c848c01a83754..7a3475bb167334e05f570ad4b46e8d471ec5a9a8 100644 --- a/templates/minimal/node/src/chain_spec.rs +++ b/templates/minimal/node/src/chain_spec.rs @@ -21,7 +21,7 @@ use serde_json::{json, Value}; use sp_keyring::AccountKeyring; /// This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = sc_service::GenericChainSpec<()>; +pub type ChainSpec = sc_service::GenericChainSpec; fn props() -> Properties { let mut properties = Properties::new(); diff --git a/templates/minimal/node/src/cli.rs b/templates/minimal/node/src/cli.rs index e464fa7d6caa36ce41b45ae79a3883713ef50aca..22726b7eb9a36c0254c2f8423086e5206b730570 100644 --- a/templates/minimal/node/src/cli.rs +++ b/templates/minimal/node/src/cli.rs @@ -32,7 +32,7 @@ impl std::str::FromStr for Consensus { } else if let Some(block_time) = s.strip_prefix("manual-seal-") { Consensus::ManualSeal(block_time.parse().map_err(|_| "invalid block time")?)
} else { - return Err("incorrect consensus identifier".into()) + return Err("incorrect consensus identifier".into()); }) } } diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs index d84df95dc1924edf8077783aed4f2ce80a803f14..5a92627621bfcba71667ed198786a8f5e5aa9b5a 100644 --- a/templates/minimal/node/src/service.rs +++ b/templates/minimal/node/src/service.rs @@ -61,7 +61,7 @@ pub fn new_partial(config: &Configuration) -> Result { }) .transpose()?; - let executor = sc_service::new_wasm_executor(&config); + let executor = sc_service::new_wasm_executor(config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( diff --git a/templates/minimal/pallets/README.md b/templates/minimal/pallets/README.md new file mode 100644 index 0000000000000000000000000000000000000000..26003638e9acbf3a45d6c0e676a78d2c5b4bde38 --- /dev/null +++ b/templates/minimal/pallets/README.md @@ -0,0 +1,9 @@ +# Pallets + +ℹ️ A pallet is a unit of encapsulated logic, with a clearly defined responsibility. A pallet is analogous to a module in the runtime. + +💁 In this template, there is a simple custom pallet based on the FRAME framework. + +👉 Learn more about FRAME [here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). + +🧑‍🏫 Please refer to [this guide](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/your_first_pallet/index.html) to learn how to write a basic pallet. diff --git a/templates/minimal/pallets/template/Cargo.toml b/templates/minimal/pallets/template/Cargo.toml index 909ba03445483e53fe108a096256d0df5d1cdc40..f0abe3c6942de634e603f939aaa5ad8c19bcd20d 100644 --- a/templates/minimal/pallets/template/Cargo.toml +++ b/templates/minimal/pallets/template/Cargo.toml @@ -2,21 +2,18 @@ name = "pallet-minimal-template" description = "A minimal pallet built with FRAME, part of Polkadot Sdk." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", features = [ "derive", ], default-features = false } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/templates/minimal/runtime/Cargo.toml b/templates/minimal/runtime/Cargo.toml index ceac8a49853369a6a882e32f13cc4728aa3c29ad..ab6a48b73f3cc9a2d4564f8e680072db097e24d4 100644 --- a/templates/minimal/runtime/Cargo.toml +++ b/templates/minimal/runtime/Cargo.toml @@ -2,18 +2,15 @@ name = "minimal-template-runtime" description = "A solochain runtime template built with Substrate, part of Polkadot Sdk." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false } +parity-scale-codec = { version = "3.6.12", default-features = false } scale-info = { version = "2.6.0", default-features = false } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
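The Cargo.toml hunk above notes that this runtime is frame-based. As a hedged illustration of how pallets like the ones described in the pallets README are composed into such a runtime (pallet names here are placeholders, and the template itself pulls these items in via the `frame` umbrella crate rather than `frame_support` directly):

```rust
// Illustrative composition only, not the template's actual runtime definition.
frame_support::construct_runtime!(
    pub enum Runtime {
        System: frame_system,
        Timestamp: pallet_timestamp,
        Balances: pallet_balances,
        Template: pallet_minimal_template,
    }
);
```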
diff --git a/templates/minimal/runtime/README.md b/templates/minimal/runtime/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2fdfef8bc35b10961da8e14748bb751cf7801904 --- /dev/null +++ b/templates/minimal/runtime/README.md @@ -0,0 +1,8 @@ +# Runtime + +ℹ️ The runtime (in other words, the state transition function) is the core logic of the blockchain, responsible for +validating blocks and executing the state changes they define. + +💁 The runtime in this template is constructed using ready-made FRAME pallets that ship with [Polkadot SDK](https://github.com/paritytech/polkadot-sdk), and a [template for a custom pallet](../pallets/README.md). + +👉 Learn more about FRAME [here](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). diff --git a/templates/parachain/.dockerignore b/templates/parachain/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..da6a8f2620d64f3761669047dfbda397b685493c --- /dev/null +++ b/templates/parachain/.dockerignore @@ -0,0 +1,3 @@ +target/ +Dockerfile +.dockerignore diff --git a/templates/parachain/Dockerfile b/templates/parachain/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..72a8f19fe79ae0ff870f19e90206c9aa7a4309fb --- /dev/null +++ b/templates/parachain/Dockerfile @@ -0,0 +1,28 @@ +FROM docker.io/paritytech/ci-unified:latest as builder + +WORKDIR /polkadot +COPY . /polkadot + +RUN cargo fetch +RUN cargo build --locked --release + +FROM docker.io/parity/base-bin:latest + +COPY --from=builder /polkadot/target/release/parachain-template-node /usr/local/bin + +USER root +RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/parachain-template-node --version + +USER polkadot + +EXPOSE 30333 9933 9944 9615 +VOLUME ["/data"] + +ENTRYPOINT ["/usr/local/bin/parachain-template-node"] diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index ed857b4e4b9ff4fa05a7f76f5e831be103bf7935..94873cf1faea6056fea8d333fdf893283eaf3379 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -2,7 +2,7 @@ name = "parachain-template-node" description = "A parachain node template built with Substrate and Cumulus, part of Polkadot Sdk."
version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true @@ -10,16 +10,13 @@ edition.workspace = true publish = false build = "build.rs" -[lints] -workspace = true - # [[bin]] # name = "parachain-template-node" [dependencies] clap = { version = "4.5.3", features = ["derive"] } log = { workspace = true, default-features = true } -codec = { package = "parity-scale-codec", version = "3.0.0" } +codec = { package = "parity-scale-codec", version = "3.6.12" } serde = { features = ["derive"], workspace = true, default-features = true } jsonrpsee = { version = "0.22", features = ["server"] } futures = "0.3.28" diff --git a/templates/parachain/node/src/chain_spec.rs b/templates/parachain/node/src/chain_spec.rs index 16c91865cdb4aa9ae3c5882652100f5ef56a988c..3fa91c0261622cd71dbf2b473eb70748f3a29b96 100644 --- a/templates/parachain/node/src/chain_spec.rs +++ b/templates/parachain/node/src/chain_spec.rs @@ -8,7 +8,7 @@ use sp_core::{sr25519, Pair, Public}; use sp_runtime::traits::{IdentifyAccount, Verify}; /// Specialized `ChainSpec` for the normal parachain runtime. -pub type ChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type ChainSpec = sc_service::GenericChainSpec; /// The default XCM version to set in genesis config. const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; @@ -22,11 +22,12 @@ pub fn get_from_seed(seed: &str) -> ::Pu /// The extensions for the [`ChainSpec`]. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] -#[serde(deny_unknown_fields)] pub struct Extensions { /// The relay chain of the Parachain. + #[serde(alias = "relayChain", alias = "RelayChain")] pub relay_chain: String, /// The id of the Parachain. + #[serde(alias = "paraId", alias = "ParaId")] pub para_id: u32, } diff --git a/templates/parachain/node/src/cli.rs b/templates/parachain/node/src/cli.rs index cffbfbc1db23f8b28d234e78f781977d7face5ab..f008e856d99b8ba4dceac5835681920713d2d3f9 100644 --- a/templates/parachain/node/src/cli.rs +++ b/templates/parachain/node/src/cli.rs @@ -1,6 +1,7 @@ use std::path::PathBuf; /// Sub-commands supported by the collator. +#[allow(clippy::large_enum_variant)] #[derive(Debug, clap::Subcommand)] pub enum Subcommand { /// Build a chain specification. diff --git a/templates/parachain/node/src/command.rs b/templates/parachain/node/src/command.rs index 56ae022cad2b22757af87cef08d179433e9db509..eba7fdcdae7185186f2ec12f27efec5b1fca720b 100644 --- a/templates/parachain/node/src/command.rs +++ b/templates/parachain/node/src/command.rs @@ -194,13 +194,11 @@ pub fn run() -> Result<()> { cmd.run(partials.client) }), #[cfg(not(feature = "runtime-benchmarks"))] - BenchmarkCmd::Storage(_) => - return Err(sc_cli::Error::Input( - "Compile with --features=runtime-benchmarks \ + BenchmarkCmd::Storage(_) => Err(sc_cli::Error::Input( + "Compile with --features=runtime-benchmarks \ to enable storage benchmarks." 
- .into(), - ) - .into()), + .into(), + )), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { let partials = new_partial(&config)?; diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index ad4689c6e55dc7d9bb96e0390182d7e7c8bf1591..ce630891587107445c57112f26315f2b8139fb34 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -160,6 +160,7 @@ fn build_import_queue( ) } +#[allow(clippy::too_many_arguments)] fn start_consensus( client: Arc, backend: Arc, diff --git a/templates/parachain/pallets/template/Cargo.toml b/templates/parachain/pallets/template/Cargo.toml index 199da2f12d2c691ba6d51d009c6b20734b1c4c46..6c549c2c4a9b64f7bffd07f27bde04bcdb32442d 100644 --- a/templates/parachain/pallets/template/Cargo.toml +++ b/templates/parachain/pallets/template/Cargo.toml @@ -2,21 +2,18 @@ name = "pallet-parachain-template" description = "FRAME pallet template for defining custom runtime logic." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/templates/parachain/pallets/template/src/benchmarking.rs b/templates/parachain/pallets/template/src/benchmarking.rs index 5a262417629c579c6ecf5ada30ae803217623766..d1a9554aed6dc0533f914e18661942ed56bcd514 100644 --- a/templates/parachain/pallets/template/src/benchmarking.rs +++ b/templates/parachain/pallets/template/src/benchmarking.rs @@ -13,7 +13,7 @@ mod benchmarks { #[benchmark] fn do_something() { - let value = 100u32.into(); + let value = 100u32; let caller: T::AccountId = whitelisted_caller(); #[extrinsic_call] do_something(RawOrigin::Signed(caller), value); diff --git a/templates/parachain/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs index 8a88be3e3e9f6d08137ae1bb16767145e2d84aa9..9a907f61660530c1788b555ac1d1475bbc08827a 100644 --- a/templates/parachain/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -18,7 +18,6 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const BlockHashCount: u64 = 250; pub const SS58Prefix: u8 = 42; } @@ -37,7 +36,6 @@ impl system::Config for Test { type Lookup = IdentityLookup; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml index d15ff2807a66569a447d774ea8b38e7c6ee3c0f8..059c793679694bd9c11b10384383f6e20301e5c5 100644 --- a/templates/parachain/runtime/Cargo.toml +++ b/templates/parachain/runtime/Cargo.toml @@ -2,24 +2,22 @@ name = "parachain-template-runtime" description = "A parachain runtime template built with Substrate and Cumulus, part of Polkadot Sdk." 
version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } +docify = "0.2.8" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } hex-literal = { version = "0.4.1", optional = true } @@ -36,6 +34,7 @@ pallet-parachain-template = { path = "../pallets/template", default-features = f # Substrate / FRAME frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-metadata-hash-extension = { path = "../../../substrate/frame/metadata-hash-extension", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true } @@ -77,9 +76,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm # Cumulus cumulus-pallet-aura-ext = { path = "../../../cumulus/pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-system", default-features = false, features = [ - "parameterized-consensus-hook", -] } +cumulus-pallet-parachain-system = { path = "../../../cumulus/pallets/parachain-system", default-features = false } cumulus-pallet-session-benchmarking = { path = "../../../cumulus/pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../cumulus/pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../cumulus/pallets/xcmp-queue", default-features = false } @@ -106,6 +103,7 @@ std = [ "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", + "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", @@ -197,3 +195,16 @@ try-runtime = [ "polkadot-runtime-common/try-runtime", "sp-runtime/try-runtime", ] + +# Enable the metadata hash generation. +# +# This is hidden behind a feature because it increases the compile time. +# The wasm binary needs to be compiled twice, once to fetch the metadata, +# generate the metadata hash and then a second time with the +# `RUNTIME_METADATA_HASH` environment variable set for the `CheckMetadataHash` +# extension. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + +# A convenience feature for enabling things when doing a build +# for an on-chain release. 
+on-chain-release-build = ["metadata-hash"] diff --git a/templates/parachain/runtime/build.rs b/templates/parachain/runtime/build.rs index bb05afe02b1fc526d1c7a2c64514e7f25f33c7be..4f33752ca6b2d47b31270ca885a1c42f10ed8410 100644 --- a/templates/parachain/runtime/build.rs +++ b/templates/parachain/runtime/build.rs @@ -1,4 +1,12 @@ -#[cfg(feature = "std")] +#[cfg(all(feature = "std", feature = "metadata-hash"))] +#[docify::export(template_enable_metadata_hash)] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("UNIT", 12) + .build(); +} + +#[cfg(all(feature = "std", not(feature = "metadata-hash")))] fn main() { substrate_wasm_builder::WasmBuilder::build_using_defaults(); } diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs index 0aec332feaf65ff7ee75f572513960519af29bc4..63e6a67a90638266820bce44e8aee75a544681bc 100644 --- a/templates/parachain/runtime/src/configs/mod.rs +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -221,7 +221,7 @@ impl pallet_message_queue::Config for Runtime { // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: type QueueChangeHandler = NarrowOriginToSibling; type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>; type MaxStale = sp_core::ConstU32<8>; type ServiceWeight = MessageQueueServiceWeight; type IdleMaxServiceWeight = (); @@ -236,6 +236,8 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { // Enqueue XCMP messages from siblings for later processing. type XcmpQueue = TransformOrigin; type MaxInboundSuspended = sp_core::ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + type MaxPageSize = ConstU32<{ 1 << 16 }>; type ControllerOrigin = EnsureRoot; type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; type WeightInfo = (); diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 179a425ca04165a53ab3c14caaeeb638adb358b0..987b88af8444dac73fa1b8972e78973f35ea869d 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -86,6 +86,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, + frame_metadata_hash_extension::CheckMetadataHash, ); /// Unchecked extrinsic type as expected by this runtime. diff --git a/templates/solochain/.dockerignore b/templates/solochain/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..da6a8f2620d64f3761669047dfbda397b685493c --- /dev/null +++ b/templates/solochain/.dockerignore @@ -0,0 +1,3 @@ +target/ +Dockerfile +.dockerignore diff --git a/templates/solochain/Dockerfile b/templates/solochain/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..97e6dd29107ace0fdcc2f83bb8c4b0dbc124523c --- /dev/null +++ b/templates/solochain/Dockerfile @@ -0,0 +1,28 @@ +FROM docker.io/paritytech/ci-unified:latest as builder + +WORKDIR /polkadot +COPY . 
/polkadot + +RUN cargo fetch +RUN cargo build --locked --release + +FROM docker.io/parity/base-bin:latest + +COPY --from=builder /polkadot/target/release/solochain-template-node /usr/local/bin + +USER root +RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \ + mkdir -p /data /polkadot/.local/share && \ + chown -R polkadot:polkadot /data && \ + ln -s /data /polkadot/.local/share/polkadot && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# check if executable works in this container + /usr/local/bin/solochain-template-node --version + +USER polkadot + +EXPOSE 30333 9933 9944 9615 +VOLUME ["/data"] + +ENTRYPOINT ["/usr/local/bin/solochain-template-node"] diff --git a/templates/solochain/LICENSE b/templates/solochain/LICENSE index ffa0b3f2df035abdd789f1f205357f7318bc5498..cf1ab25da0349f84a3fdd40032f0ce99db813b8b 100644 --- a/templates/solochain/LICENSE +++ b/templates/solochain/LICENSE @@ -1,16 +1,24 @@ -MIT No Attribution +This is free and unencumbered software released into the public domain. -Copyright Parity Technologies (UK) Ltd. +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so. +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to diff --git a/templates/solochain/README.md b/templates/solochain/README.md index 37c65797dcb00a8aed0a4f4566eaaacab37c8359..2e3b1146a8fdebbdb4488af29831662a951733c9 100644 --- a/templates/solochain/README.md +++ b/templates/solochain/README.md @@ -103,9 +103,8 @@ After you start the node template locally, you can interact with it using the hosted version of the [Polkadot/Substrate Portal](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944) front-end by connecting to the local node endpoint. A hosted version is also -available on [IPFS (redirect) here](https://dotapps.io/) or [IPNS (direct) -here](ipns://dotapps.io/?rpc=ws%3A%2F%2F127.0.0.1%3A9944#/explorer). You can -also find the source code and instructions for hosting your own instance on the +available on [IPFS](https://dotapps.io/). You can +also find the source code and instructions for hosting your own instance in the [`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository. ### Multi-Node Local Testnet @@ -131,7 +130,7 @@ capabilities: the network. Substrate makes it possible to supply custom consensus engines and also ships with several consensus mechanisms that have been built on top of [Web3 Foundation - research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). + research](https://research.web3.foundation/Polkadot/protocols/NPoS). - RPC Server: A remote procedure call (RPC) server is used to interact with Substrate nodes. diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index 9332da3a6549c90fe3eebfcda3fbb51dc49c57d6..4e8b81840900d3365f569ce96ecb642eb560d6d9 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -2,7 +2,7 @@ name = "solochain-template-node" description = "A solochain node template built with Substrate, part of Polkadot Sdk." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true @@ -11,9 +11,6 @@ publish = false build = "build.rs" -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/templates/solochain/node/src/chain_spec.rs b/templates/solochain/node/src/chain_spec.rs index be49f2c1fc731ee4e5ece7841151068abf0f0790..651025e68ded916485f03b377bb44482d89ac5ba 100644 --- a/templates/solochain/node/src/chain_spec.rs +++ b/templates/solochain/node/src/chain_spec.rs @@ -1,5 +1,5 @@ use sc_service::ChainType; -use solochain_template_runtime::{AccountId, RuntimeGenesisConfig, Signature, WASM_BINARY}; +use solochain_template_runtime::{AccountId, Signature, WASM_BINARY}; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{sr25519, Pair, Public}; @@ -9,7 +9,7 @@ use sp_runtime::traits::{IdentifyAccount, Verify}; // const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = sc_service::GenericChainSpec; +pub type ChainSpec = sc_service::GenericChainSpec; /// Generate a crypto pair from seed. 
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic as Pair>::Public { diff --git a/templates/solochain/node/src/command.rs b/templates/solochain/node/src/command.rs index e46fedc91f0e26fb9e4e36dbbc45a3cc0c0c7310..624ace1bf350a1d5b36d0f409d0ca3868fd127fc 100644 --- a/templates/solochain/node/src/command.rs +++ b/templates/solochain/node/src/command.rs @@ -114,7 +114,7 @@ pub fn run() -> sc_cli::Result<()> { "Runtime benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." .into(), - ) + ); } cmd.run_with_spec::<sp_runtime::traits::HashingFor<Block>, ()>(Some( diff --git a/templates/solochain/pallets/template/Cargo.toml b/templates/solochain/pallets/template/Cargo.toml index 24519f1d22e0d214e418974de65c7f77588498c4..5b8349b5d678cdcf72befebf77b8d4598d61b261 100644 --- a/templates/solochain/pallets/template/Cargo.toml +++ b/templates/solochain/pallets/template/Cargo.toml @@ -2,21 +2,18 @@ name = "pallet-template" description = "FRAME pallet template for defining custom runtime logic." version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/templates/solochain/pallets/template/src/benchmarking.rs b/templates/solochain/pallets/template/src/benchmarking.rs index 5a262417629c579c6ecf5ada30ae803217623766..d1a9554aed6dc0533f914e18661942ed56bcd514 100644 --- a/templates/solochain/pallets/template/src/benchmarking.rs +++ b/templates/solochain/pallets/template/src/benchmarking.rs @@ -13,7 +13,7 @@ mod benchmarks { #[benchmark] fn do_something() { - let value = 100u32.into(); + let value = 100u32; let caller: T::AccountId = whitelisted_caller(); #[extrinsic_call] do_something(RawOrigin::Signed(caller), value); diff --git a/templates/solochain/pallets/template/src/mock.rs b/templates/solochain/pallets/template/src/mock.rs index 3f1fd2dd6d4441c2177d3f44a0f377b3ab2edc72..09081dae0625c595b31967c2b36b244715fa3b0b 100644 --- a/templates/solochain/pallets/template/src/mock.rs +++ b/templates/solochain/pallets/template/src/mock.rs @@ -1,8 +1,5 @@ use crate as pallet_template; -use frame_support::{ - derive_impl, - traits::{ConstU16, ConstU64}, -}; +use frame_support::{derive_impl, traits::ConstU16}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -35,7 +32,6 @@ impl frame_system::Config for Test { type Lookup = IdentityLookup<Self::AccountId>; type Block = Block; type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; type AccountData = (); diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 7a81f192043f5bc516ca774e1cebf8c039f06ceb..0af3899a666995120738459be823a70debd23168 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -2,21 +2,18 @@ name = "solochain-template-runtime" description = "A solochain runtime template built with Substrate, part of Polkadot Sdk." 
version = "0.0.0" -license = "MIT-0" +license = "Unlicense" authors.workspace = true homepage.workspace = true repository.workspace = true edition.workspace = true publish = false -[lints] -workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ +codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = [ "derive", ] } scale-info = { version = "2.11.1", default-features = false, features = [ diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..d790b4f5949ca5259bc4749636b90f5f33f1412c --- /dev/null +++ b/umbrella/Cargo.toml @@ -0,0 +1,2454 @@ +[package] +name = "polkadot-sdk" +version = "0.1.0" +description = "Polkadot SDK umbrella crate." +license = "Apache-2.0" + +[features] +default = ["std"] +std = [ + "asset-test-utils?/std", + "assets-common?/std", + "binary-merkle-tree?/std", + "bp-asset-hub-rococo?/std", + "bp-asset-hub-westend?/std", + "bp-bridge-hub-cumulus?/std", + "bp-bridge-hub-kusama?/std", + "bp-bridge-hub-polkadot?/std", + "bp-bridge-hub-rococo?/std", + "bp-bridge-hub-westend?/std", + "bp-header-chain?/std", + "bp-kusama?/std", + "bp-messages?/std", + "bp-parachains?/std", + "bp-polkadot-bulletin?/std", + "bp-polkadot-core?/std", + "bp-polkadot?/std", + "bp-relayers?/std", + "bp-rococo?/std", + "bp-runtime?/std", + "bp-test-utils?/std", + "bp-westend?/std", + "bp-xcm-bridge-hub-router?/std", + "bp-xcm-bridge-hub?/std", + "bridge-hub-common?/std", + "bridge-hub-test-utils?/std", + "bridge-runtime-common?/std", + "cumulus-pallet-aura-ext?/std", + "cumulus-pallet-dmp-queue?/std", + "cumulus-pallet-parachain-system-proc-macro?/std", + "cumulus-pallet-parachain-system?/std", + "cumulus-pallet-session-benchmarking?/std", + "cumulus-pallet-solo-to-para?/std", + "cumulus-pallet-xcm?/std", + "cumulus-pallet-xcmp-queue?/std", + "cumulus-ping?/std", + "cumulus-primitives-aura?/std", + "cumulus-primitives-core?/std", + "cumulus-primitives-parachain-inherent?/std", + "cumulus-primitives-proof-size-hostfunction?/std", + "cumulus-primitives-storage-weight-reclaim?/std", + "cumulus-primitives-timestamp?/std", + "cumulus-primitives-utility?/std", + "cumulus-test-relay-sproof-builder?/std", + "frame-benchmarking-pallet-pov?/std", + "frame-benchmarking?/std", + "frame-election-provider-support?/std", + "frame-executive?/std", + "frame-metadata-hash-extension?/std", + "frame-support-procedural?/std", + "frame-support?/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api?/std", + "frame-system?/std", + "frame-try-runtime?/std", + "pallet-alliance?/std", + "pallet-asset-conversion-ops?/std", + "pallet-asset-conversion-tx-payment?/std", + "pallet-asset-conversion?/std", + "pallet-asset-rate?/std", + "pallet-asset-tx-payment?/std", + "pallet-assets?/std", + "pallet-atomic-swap?/std", + "pallet-aura?/std", + "pallet-authority-discovery?/std", + "pallet-authorship?/std", + "pallet-babe?/std", + "pallet-bags-list?/std", + "pallet-balances?/std", + "pallet-beefy-mmr?/std", + "pallet-beefy?/std", + "pallet-bounties?/std", + "pallet-bridge-grandpa?/std", + "pallet-bridge-messages?/std", + "pallet-bridge-parachains?/std", + "pallet-bridge-relayers?/std", + "pallet-broker?/std", + "pallet-child-bounties?/std", + "pallet-collator-selection?/std", + "pallet-collective-content?/std", + "pallet-collective?/std", + 
"pallet-contracts-mock-network?/std", + "pallet-contracts?/std", + "pallet-conviction-voting?/std", + "pallet-core-fellowship?/std", + "pallet-delegated-staking?/std", + "pallet-democracy?/std", + "pallet-dev-mode?/std", + "pallet-election-provider-multi-phase?/std", + "pallet-election-provider-support-benchmarking?/std", + "pallet-elections-phragmen?/std", + "pallet-fast-unstake?/std", + "pallet-glutton?/std", + "pallet-grandpa?/std", + "pallet-identity?/std", + "pallet-im-online?/std", + "pallet-indices?/std", + "pallet-insecure-randomness-collective-flip?/std", + "pallet-lottery?/std", + "pallet-membership?/std", + "pallet-message-queue?/std", + "pallet-migrations?/std", + "pallet-mixnet?/std", + "pallet-mmr?/std", + "pallet-multisig?/std", + "pallet-nft-fractionalization?/std", + "pallet-nfts-runtime-api?/std", + "pallet-nfts?/std", + "pallet-nis?/std", + "pallet-node-authorization?/std", + "pallet-nomination-pools-benchmarking?/std", + "pallet-nomination-pools-runtime-api?/std", + "pallet-nomination-pools?/std", + "pallet-offences-benchmarking?/std", + "pallet-offences?/std", + "pallet-paged-list?/std", + "pallet-parameters?/std", + "pallet-preimage?/std", + "pallet-proxy?/std", + "pallet-ranked-collective?/std", + "pallet-recovery?/std", + "pallet-referenda?/std", + "pallet-remark?/std", + "pallet-root-offences?/std", + "pallet-root-testing?/std", + "pallet-safe-mode?/std", + "pallet-salary?/std", + "pallet-scheduler?/std", + "pallet-scored-pool?/std", + "pallet-session-benchmarking?/std", + "pallet-session?/std", + "pallet-skip-feeless-payment?/std", + "pallet-society?/std", + "pallet-staking-reward-fn?/std", + "pallet-staking-runtime-api?/std", + "pallet-staking?/std", + "pallet-state-trie-migration?/std", + "pallet-statement?/std", + "pallet-sudo?/std", + "pallet-timestamp?/std", + "pallet-tips?/std", + "pallet-transaction-payment-rpc-runtime-api?/std", + "pallet-transaction-payment?/std", + "pallet-transaction-storage?/std", + "pallet-treasury?/std", + "pallet-tx-pause?/std", + "pallet-uniques?/std", + "pallet-utility?/std", + "pallet-vesting?/std", + "pallet-whitelist?/std", + "pallet-xcm-benchmarks?/std", + "pallet-xcm-bridge-hub-router?/std", + "pallet-xcm-bridge-hub?/std", + "pallet-xcm?/std", + "parachains-common?/std", + "parachains-runtimes-test-utils?/std", + "polkadot-core-primitives?/std", + "polkadot-parachain-primitives?/std", + "polkadot-primitives?/std", + "polkadot-runtime-common?/std", + "polkadot-runtime-metrics?/std", + "polkadot-runtime-parachains?/std", + "polkadot-sdk-frame?/std", + "rococo-runtime-constants?/std", + "sc-executor?/std", + "slot-range-helper?/std", + "snowbridge-beacon-primitives?/std", + "snowbridge-core?/std", + "snowbridge-ethereum?/std", + "snowbridge-outbound-queue-merkle-tree?/std", + "snowbridge-outbound-queue-runtime-api?/std", + "snowbridge-pallet-ethereum-client-fixtures?/std", + "snowbridge-pallet-ethereum-client?/std", + "snowbridge-pallet-inbound-queue-fixtures?/std", + "snowbridge-pallet-inbound-queue?/std", + "snowbridge-pallet-outbound-queue?/std", + "snowbridge-pallet-system?/std", + "snowbridge-router-primitives?/std", + "snowbridge-runtime-common?/std", + "snowbridge-runtime-test-common?/std", + "snowbridge-system-runtime-api?/std", + "sp-api-proc-macro?/std", + "sp-api?/std", + "sp-application-crypto?/std", + "sp-arithmetic?/std", + "sp-authority-discovery?/std", + "sp-block-builder?/std", + "sp-consensus-aura?/std", + "sp-consensus-babe?/std", + "sp-consensus-beefy?/std", + "sp-consensus-grandpa?/std", + 
"sp-consensus-pow?/std", + "sp-consensus-slots?/std", + "sp-core-hashing?/std", + "sp-core?/std", + "sp-crypto-ec-utils?/std", + "sp-crypto-hashing?/std", + "sp-debug-derive?/std", + "sp-externalities?/std", + "sp-genesis-builder?/std", + "sp-inherents?/std", + "sp-io?/std", + "sp-keyring?/std", + "sp-keystore?/std", + "sp-metadata-ir?/std", + "sp-mixnet?/std", + "sp-mmr-primitives?/std", + "sp-npos-elections?/std", + "sp-offchain?/std", + "sp-runtime-interface?/std", + "sp-runtime?/std", + "sp-session?/std", + "sp-staking?/std", + "sp-state-machine?/std", + "sp-statement-store?/std", + "sp-std?/std", + "sp-storage?/std", + "sp-timestamp?/std", + "sp-tracing?/std", + "sp-transaction-pool?/std", + "sp-transaction-storage-proof?/std", + "sp-trie?/std", + "sp-version?/std", + "sp-wasm-interface?/std", + "sp-weights?/std", + "staging-parachain-info?/std", + "staging-xcm-builder?/std", + "staging-xcm-executor?/std", + "staging-xcm?/std", + "substrate-bip39?/std", + "testnet-parachains-constants?/std", + "westend-runtime-constants?/std", + "xcm-fee-payment-runtime-api?/std", +] +runtime-benchmarks = [ + "assets-common?/runtime-benchmarks", + "bridge-hub-common?/runtime-benchmarks", + "bridge-runtime-common?/runtime-benchmarks", + "cumulus-pallet-dmp-queue?/runtime-benchmarks", + "cumulus-pallet-parachain-system?/runtime-benchmarks", + "cumulus-pallet-session-benchmarking?/runtime-benchmarks", + "cumulus-pallet-xcmp-queue?/runtime-benchmarks", + "cumulus-primitives-core?/runtime-benchmarks", + "cumulus-primitives-utility?/runtime-benchmarks", + "frame-benchmarking-cli?/runtime-benchmarks", + "frame-benchmarking-pallet-pov?/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support?/runtime-benchmarks", + "frame-support?/runtime-benchmarks", + "frame-system-benchmarking?/runtime-benchmarks", + "frame-system?/runtime-benchmarks", + "pallet-alliance?/runtime-benchmarks", + "pallet-asset-conversion-ops?/runtime-benchmarks", + "pallet-asset-conversion?/runtime-benchmarks", + "pallet-asset-rate?/runtime-benchmarks", + "pallet-asset-tx-payment?/runtime-benchmarks", + "pallet-assets?/runtime-benchmarks", + "pallet-babe?/runtime-benchmarks", + "pallet-bags-list?/runtime-benchmarks", + "pallet-balances?/runtime-benchmarks", + "pallet-bounties?/runtime-benchmarks", + "pallet-bridge-grandpa?/runtime-benchmarks", + "pallet-bridge-messages?/runtime-benchmarks", + "pallet-bridge-parachains?/runtime-benchmarks", + "pallet-bridge-relayers?/runtime-benchmarks", + "pallet-broker?/runtime-benchmarks", + "pallet-child-bounties?/runtime-benchmarks", + "pallet-collator-selection?/runtime-benchmarks", + "pallet-collective-content?/runtime-benchmarks", + "pallet-collective?/runtime-benchmarks", + "pallet-contracts-mock-network?/runtime-benchmarks", + "pallet-contracts?/runtime-benchmarks", + "pallet-conviction-voting?/runtime-benchmarks", + "pallet-core-fellowship?/runtime-benchmarks", + "pallet-delegated-staking?/runtime-benchmarks", + "pallet-democracy?/runtime-benchmarks", + "pallet-election-provider-multi-phase?/runtime-benchmarks", + "pallet-election-provider-support-benchmarking?/runtime-benchmarks", + "pallet-elections-phragmen?/runtime-benchmarks", + "pallet-fast-unstake?/runtime-benchmarks", + "pallet-glutton?/runtime-benchmarks", + "pallet-grandpa?/runtime-benchmarks", + "pallet-identity?/runtime-benchmarks", + "pallet-im-online?/runtime-benchmarks", + "pallet-indices?/runtime-benchmarks", + "pallet-lottery?/runtime-benchmarks", + "pallet-membership?/runtime-benchmarks", 
+ "pallet-message-queue?/runtime-benchmarks", + "pallet-migrations?/runtime-benchmarks", + "pallet-mixnet?/runtime-benchmarks", + "pallet-mmr?/runtime-benchmarks", + "pallet-multisig?/runtime-benchmarks", + "pallet-nft-fractionalization?/runtime-benchmarks", + "pallet-nfts?/runtime-benchmarks", + "pallet-nis?/runtime-benchmarks", + "pallet-nomination-pools-benchmarking?/runtime-benchmarks", + "pallet-nomination-pools?/runtime-benchmarks", + "pallet-offences-benchmarking?/runtime-benchmarks", + "pallet-offences?/runtime-benchmarks", + "pallet-paged-list?/runtime-benchmarks", + "pallet-parameters?/runtime-benchmarks", + "pallet-preimage?/runtime-benchmarks", + "pallet-proxy?/runtime-benchmarks", + "pallet-ranked-collective?/runtime-benchmarks", + "pallet-recovery?/runtime-benchmarks", + "pallet-referenda?/runtime-benchmarks", + "pallet-remark?/runtime-benchmarks", + "pallet-root-offences?/runtime-benchmarks", + "pallet-safe-mode?/runtime-benchmarks", + "pallet-salary?/runtime-benchmarks", + "pallet-scheduler?/runtime-benchmarks", + "pallet-session-benchmarking?/runtime-benchmarks", + "pallet-skip-feeless-payment?/runtime-benchmarks", + "pallet-society?/runtime-benchmarks", + "pallet-staking?/runtime-benchmarks", + "pallet-state-trie-migration?/runtime-benchmarks", + "pallet-sudo?/runtime-benchmarks", + "pallet-timestamp?/runtime-benchmarks", + "pallet-tips?/runtime-benchmarks", + "pallet-transaction-storage?/runtime-benchmarks", + "pallet-treasury?/runtime-benchmarks", + "pallet-tx-pause?/runtime-benchmarks", + "pallet-uniques?/runtime-benchmarks", + "pallet-utility?/runtime-benchmarks", + "pallet-vesting?/runtime-benchmarks", + "pallet-whitelist?/runtime-benchmarks", + "pallet-xcm-benchmarks?/runtime-benchmarks", + "pallet-xcm-bridge-hub-router?/runtime-benchmarks", + "pallet-xcm-bridge-hub?/runtime-benchmarks", + "pallet-xcm?/runtime-benchmarks", + "parachains-common?/runtime-benchmarks", + "polkadot-cli?/runtime-benchmarks", + "polkadot-node-metrics?/runtime-benchmarks", + "polkadot-parachain-primitives?/runtime-benchmarks", + "polkadot-primitives?/runtime-benchmarks", + "polkadot-runtime-common?/runtime-benchmarks", + "polkadot-runtime-parachains?/runtime-benchmarks", + "polkadot-sdk-frame?/runtime-benchmarks", + "polkadot-service?/runtime-benchmarks", + "sc-client-db?/runtime-benchmarks", + "sc-service?/runtime-benchmarks", + "snowbridge-core?/runtime-benchmarks", + "snowbridge-pallet-ethereum-client-fixtures?/runtime-benchmarks", + "snowbridge-pallet-ethereum-client?/runtime-benchmarks", + "snowbridge-pallet-inbound-queue-fixtures?/runtime-benchmarks", + "snowbridge-pallet-inbound-queue?/runtime-benchmarks", + "snowbridge-pallet-outbound-queue?/runtime-benchmarks", + "snowbridge-pallet-system?/runtime-benchmarks", + "snowbridge-router-primitives?/runtime-benchmarks", + "snowbridge-runtime-common?/runtime-benchmarks", + "snowbridge-runtime-test-common?/runtime-benchmarks", + "sp-runtime?/runtime-benchmarks", + "sp-staking?/runtime-benchmarks", + "staging-node-inspect?/runtime-benchmarks", + "staging-xcm-builder?/runtime-benchmarks", + "staging-xcm-executor?/runtime-benchmarks", + "xcm-fee-payment-runtime-api?/runtime-benchmarks", +] +try-runtime = [ + "cumulus-pallet-aura-ext?/try-runtime", + "cumulus-pallet-dmp-queue?/try-runtime", + "cumulus-pallet-parachain-system?/try-runtime", + "cumulus-pallet-solo-to-para?/try-runtime", + "cumulus-pallet-xcm?/try-runtime", + "cumulus-pallet-xcmp-queue?/try-runtime", + "cumulus-ping?/try-runtime", + "frame-benchmarking-pallet-pov?/try-runtime", + 
"frame-election-provider-support?/try-runtime", + "frame-executive?/try-runtime", + "frame-support?/try-runtime", + "frame-system?/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-alliance?/try-runtime", + "pallet-asset-conversion-ops?/try-runtime", + "pallet-asset-conversion-tx-payment?/try-runtime", + "pallet-asset-conversion?/try-runtime", + "pallet-asset-rate?/try-runtime", + "pallet-asset-tx-payment?/try-runtime", + "pallet-assets?/try-runtime", + "pallet-atomic-swap?/try-runtime", + "pallet-aura?/try-runtime", + "pallet-authority-discovery?/try-runtime", + "pallet-authorship?/try-runtime", + "pallet-babe?/try-runtime", + "pallet-bags-list?/try-runtime", + "pallet-balances?/try-runtime", + "pallet-beefy-mmr?/try-runtime", + "pallet-beefy?/try-runtime", + "pallet-bounties?/try-runtime", + "pallet-bridge-grandpa?/try-runtime", + "pallet-bridge-messages?/try-runtime", + "pallet-bridge-parachains?/try-runtime", + "pallet-bridge-relayers?/try-runtime", + "pallet-broker?/try-runtime", + "pallet-child-bounties?/try-runtime", + "pallet-collator-selection?/try-runtime", + "pallet-collective-content?/try-runtime", + "pallet-collective?/try-runtime", + "pallet-contracts?/try-runtime", + "pallet-conviction-voting?/try-runtime", + "pallet-core-fellowship?/try-runtime", + "pallet-delegated-staking?/try-runtime", + "pallet-democracy?/try-runtime", + "pallet-dev-mode?/try-runtime", + "pallet-election-provider-multi-phase?/try-runtime", + "pallet-elections-phragmen?/try-runtime", + "pallet-fast-unstake?/try-runtime", + "pallet-glutton?/try-runtime", + "pallet-grandpa?/try-runtime", + "pallet-identity?/try-runtime", + "pallet-im-online?/try-runtime", + "pallet-indices?/try-runtime", + "pallet-insecure-randomness-collective-flip?/try-runtime", + "pallet-lottery?/try-runtime", + "pallet-membership?/try-runtime", + "pallet-message-queue?/try-runtime", + "pallet-migrations?/try-runtime", + "pallet-mixnet?/try-runtime", + "pallet-mmr?/try-runtime", + "pallet-multisig?/try-runtime", + "pallet-nft-fractionalization?/try-runtime", + "pallet-nfts?/try-runtime", + "pallet-nis?/try-runtime", + "pallet-node-authorization?/try-runtime", + "pallet-nomination-pools?/try-runtime", + "pallet-offences?/try-runtime", + "pallet-paged-list?/try-runtime", + "pallet-parameters?/try-runtime", + "pallet-preimage?/try-runtime", + "pallet-proxy?/try-runtime", + "pallet-ranked-collective?/try-runtime", + "pallet-recovery?/try-runtime", + "pallet-referenda?/try-runtime", + "pallet-remark?/try-runtime", + "pallet-root-offences?/try-runtime", + "pallet-root-testing?/try-runtime", + "pallet-safe-mode?/try-runtime", + "pallet-salary?/try-runtime", + "pallet-scheduler?/try-runtime", + "pallet-scored-pool?/try-runtime", + "pallet-session?/try-runtime", + "pallet-skip-feeless-payment?/try-runtime", + "pallet-society?/try-runtime", + "pallet-staking?/try-runtime", + "pallet-state-trie-migration?/try-runtime", + "pallet-statement?/try-runtime", + "pallet-sudo?/try-runtime", + "pallet-timestamp?/try-runtime", + "pallet-tips?/try-runtime", + "pallet-transaction-payment?/try-runtime", + "pallet-transaction-storage?/try-runtime", + "pallet-treasury?/try-runtime", + "pallet-tx-pause?/try-runtime", + "pallet-uniques?/try-runtime", + "pallet-utility?/try-runtime", + "pallet-vesting?/try-runtime", + "pallet-whitelist?/try-runtime", + "pallet-xcm-bridge-hub-router?/try-runtime", + "pallet-xcm-bridge-hub?/try-runtime", + "pallet-xcm?/try-runtime", + "polkadot-cli?/try-runtime", + "polkadot-runtime-common?/try-runtime", + 
"polkadot-runtime-parachains?/try-runtime", + "polkadot-sdk-frame?/try-runtime", + "polkadot-service?/try-runtime", + "snowbridge-pallet-ethereum-client?/try-runtime", + "snowbridge-pallet-inbound-queue?/try-runtime", + "snowbridge-pallet-outbound-queue?/try-runtime", + "snowbridge-pallet-system?/try-runtime", + "sp-runtime?/try-runtime", + "staging-parachain-info?/try-runtime", +] +serde = [ + "bp-polkadot-core?/serde", + "frame-benchmarking?/serde", + "pallet-asset-tx-payment?/serde", + "pallet-beefy-mmr?/serde", + "pallet-beefy?/serde", + "pallet-contracts?/serde", + "pallet-conviction-voting?/serde", + "pallet-democracy?/serde", + "pallet-message-queue?/serde", + "pallet-offences?/serde", + "pallet-parameters?/serde", + "pallet-referenda?/serde", + "pallet-remark?/serde", + "pallet-state-trie-migration?/serde", + "pallet-tips?/serde", + "pallet-transaction-payment?/serde", + "pallet-transaction-storage?/serde", + "pallet-treasury?/serde", + "pallet-xcm?/serde", + "snowbridge-beacon-primitives?/serde", + "snowbridge-core?/serde", + "snowbridge-ethereum?/serde", + "snowbridge-pallet-ethereum-client?/serde", + "snowbridge-pallet-inbound-queue?/serde", + "sp-application-crypto?/serde", + "sp-arithmetic?/serde", + "sp-authority-discovery?/serde", + "sp-consensus-aura?/serde", + "sp-consensus-babe?/serde", + "sp-consensus-beefy?/serde", + "sp-consensus-grandpa?/serde", + "sp-consensus-slots?/serde", + "sp-core?/serde", + "sp-mmr-primitives?/serde", + "sp-npos-elections?/serde", + "sp-runtime?/serde", + "sp-staking?/serde", + "sp-statement-store?/serde", + "sp-storage?/serde", + "sp-version?/serde", + "sp-weights?/serde", +] +experimental = [ + "frame-support-procedural?/experimental", + "frame-support?/experimental", + "frame-system?/experimental", + "polkadot-sdk-frame?/experimental", +] +with-tracing = [ + "frame-executive?/with-tracing", + "frame-executive?/with-tracing", + "sp-io?/with-tracing", + "sp-io?/with-tracing", + "sp-tracing?/with-tracing", + "sp-tracing?/with-tracing", +] +runtime = ["assets-common", "binary-merkle-tree", "bp-asset-hub-rococo", "bp-asset-hub-westend", "bp-bridge-hub-cumulus", "bp-bridge-hub-kusama", "bp-bridge-hub-polkadot", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", "bp-header-chain", "bp-kusama", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-bulletin", "bp-polkadot-core", "bp-relayers", "bp-rococo", "bp-runtime", "bp-test-utils", "bp-westend", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", 
"pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "rococo-runtime-constants", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", 
"sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "westend-runtime-constants", "xcm-fee-payment-runtime-api", "xcm-procedural"] +node = ["asset-test-utils", "bridge-hub-test-utils", "cumulus-client-cli", "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", "cumulus-relay-chain-rpc-interface", "cumulus-test-relay-sproof-builder", "emulated-integration-tests-common", "fork-tree", "frame-benchmarking-cli", "frame-remote-externalities", "frame-support-procedural-tools", "generate-bags", "mmr-gadget", "mmr-rpc", "pallet-contracts-mock-network", "pallet-transaction-payment-rpc", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-availability-recovery", "polkadot-cli", "polkadot-collator-protocol", "polkadot-dispute-distribution", "polkadot-erasure-coding", "polkadot-gossip-support", "polkadot-network-bridge", "polkadot-node-collation-generation", "polkadot-node-core-approval-voting", "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-validation", "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", "polkadot-node-core-pvf-common", "polkadot-node-core-pvf-execute-worker", "polkadot-node-core-pvf-prepare-worker", "polkadot-node-core-runtime-api", "polkadot-node-jaeger", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-rpc", "polkadot-service", "polkadot-statement-distribution", "polkadot-statement-table", "sc-allocator", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", "sc-client-db", "sc-consensus", "sc-consensus-aura", "sc-consensus-babe", "sc-consensus-babe-rpc", "sc-consensus-beefy", "sc-consensus-beefy-rpc", "sc-consensus-epochs", "sc-consensus-grandpa", "sc-consensus-grandpa-rpc", "sc-consensus-manual-seal", "sc-consensus-pow", "sc-consensus-slots", "sc-executor", "sc-executor-common", "sc-executor-polkavm", "sc-executor-wasmtime", "sc-informant", "sc-keystore", "sc-mixnet", "sc-network", "sc-network-common", "sc-network-gossip", "sc-network-light", "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-network-types", "sc-offchain", "sc-proposer-metrics", "sc-rpc", "sc-rpc-api", 
"sc-rpc-server", "sc-rpc-spec-v2", "sc-service", "sc-state-db", "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", "snowbridge-runtime-test-common", "sp-blockchain", "sp-consensus", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-database", "sp-maybe-compressed-blob", "sp-panic-handler", "sp-rpc", "staging-chain-spec-builder", "staging-node-inspect", "staging-tracking-allocator", "std", "subkey", "substrate-build-script-utils", "substrate-frame-rpc-support", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-rpc-client", "substrate-state-trie-migration-rpc", "substrate-wasm-builder", "tracing-gum", "xcm-emulator", "xcm-simulator"] +tuples-96 = [ + "frame-support-procedural?/tuples-96", + "frame-support?/tuples-96", +] + +[package.edition] +workspace = true + +[package.authors] +workspace = true + +[dependencies.assets-common] +path = "../cumulus/parachains/runtimes/assets/common" +default-features = false +optional = true + +[dependencies.binary-merkle-tree] +path = "../substrate/utils/binary-merkle-tree" +default-features = false +optional = true + +[dependencies.bp-asset-hub-rococo] +path = "../bridges/chains/chain-asset-hub-rococo" +default-features = false +optional = true + +[dependencies.bp-asset-hub-westend] +path = "../bridges/chains/chain-asset-hub-westend" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-cumulus] +path = "../bridges/chains/chain-bridge-hub-cumulus" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-kusama] +path = "../bridges/chains/chain-bridge-hub-kusama" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-polkadot] +path = "../bridges/chains/chain-bridge-hub-polkadot" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-rococo] +path = "../bridges/chains/chain-bridge-hub-rococo" +default-features = false +optional = true + +[dependencies.bp-bridge-hub-westend] +path = "../bridges/chains/chain-bridge-hub-westend" +default-features = false +optional = true + +[dependencies.bp-header-chain] +path = "../bridges/primitives/header-chain" +default-features = false +optional = true + +[dependencies.bp-kusama] +path = "../bridges/chains/chain-kusama" +default-features = false +optional = true + +[dependencies.bp-messages] +path = "../bridges/primitives/messages" +default-features = false +optional = true + +[dependencies.bp-parachains] +path = "../bridges/primitives/parachains" +default-features = false +optional = true + +[dependencies.bp-polkadot] +path = "../bridges/chains/chain-polkadot" +default-features = false +optional = true + +[dependencies.bp-polkadot-bulletin] +path = "../bridges/chains/chain-polkadot-bulletin" +default-features = false +optional = true + +[dependencies.bp-polkadot-core] +path = "../bridges/primitives/polkadot-core" +default-features = false +optional = true + +[dependencies.bp-relayers] +path = "../bridges/primitives/relayers" +default-features = false +optional = true + +[dependencies.bp-rococo] +path = "../bridges/chains/chain-rococo" +default-features = false +optional = true + +[dependencies.bp-runtime] +path = "../bridges/primitives/runtime" +default-features = false +optional = true + +[dependencies.bp-test-utils] +path = "../bridges/primitives/test-utils" +default-features = false +optional = true + +[dependencies.bp-westend] +path = "../bridges/chains/chain-westend" +default-features = false 
+optional = true + +[dependencies.bp-xcm-bridge-hub] +path = "../bridges/primitives/xcm-bridge-hub" +default-features = false +optional = true + +[dependencies.bp-xcm-bridge-hub-router] +path = "../bridges/primitives/xcm-bridge-hub-router" +default-features = false +optional = true + +[dependencies.bridge-hub-common] +path = "../cumulus/parachains/runtimes/bridge-hubs/common" +default-features = false +optional = true + +[dependencies.bridge-runtime-common] +path = "../bridges/bin/runtime-common" +default-features = false +optional = true + +[dependencies.cumulus-pallet-aura-ext] +path = "../cumulus/pallets/aura-ext" +default-features = false +optional = true + +[dependencies.cumulus-pallet-dmp-queue] +path = "../cumulus/pallets/dmp-queue" +default-features = false +optional = true + +[dependencies.cumulus-pallet-parachain-system] +path = "../cumulus/pallets/parachain-system" +default-features = false +optional = true + +[dependencies.cumulus-pallet-parachain-system-proc-macro] +path = "../cumulus/pallets/parachain-system/proc-macro" +default-features = false +optional = true + +[dependencies.cumulus-pallet-session-benchmarking] +path = "../cumulus/pallets/session-benchmarking" +default-features = false +optional = true + +[dependencies.cumulus-pallet-solo-to-para] +path = "../cumulus/pallets/solo-to-para" +default-features = false +optional = true + +[dependencies.cumulus-pallet-xcm] +path = "../cumulus/pallets/xcm" +default-features = false +optional = true + +[dependencies.cumulus-pallet-xcmp-queue] +path = "../cumulus/pallets/xcmp-queue" +default-features = false +optional = true + +[dependencies.cumulus-ping] +path = "../cumulus/parachains/pallets/ping" +default-features = false +optional = true + +[dependencies.cumulus-primitives-aura] +path = "../cumulus/primitives/aura" +default-features = false +optional = true + +[dependencies.cumulus-primitives-core] +path = "../cumulus/primitives/core" +default-features = false +optional = true + +[dependencies.cumulus-primitives-parachain-inherent] +path = "../cumulus/primitives/parachain-inherent" +default-features = false +optional = true + +[dependencies.cumulus-primitives-proof-size-hostfunction] +path = "../cumulus/primitives/proof-size-hostfunction" +default-features = false +optional = true + +[dependencies.cumulus-primitives-storage-weight-reclaim] +path = "../cumulus/primitives/storage-weight-reclaim" +default-features = false +optional = true + +[dependencies.cumulus-primitives-timestamp] +path = "../cumulus/primitives/timestamp" +default-features = false +optional = true + +[dependencies.cumulus-primitives-utility] +path = "../cumulus/primitives/utility" +default-features = false +optional = true + +[dependencies.frame-benchmarking] +path = "../substrate/frame/benchmarking" +default-features = false +optional = true + +[dependencies.frame-benchmarking-pallet-pov] +path = "../substrate/frame/benchmarking/pov" +default-features = false +optional = true + +[dependencies.frame-election-provider-solution-type] +path = "../substrate/frame/election-provider-support/solution-type" +default-features = false +optional = true + +[dependencies.frame-election-provider-support] +path = "../substrate/frame/election-provider-support" +default-features = false +optional = true + +[dependencies.frame-executive] +path = "../substrate/frame/executive" +default-features = false +optional = true + +[dependencies.frame-metadata-hash-extension] +path = "../substrate/frame/metadata-hash-extension" +default-features = false +optional = true + 
+[dependencies.frame-support] +path = "../substrate/frame/support" +default-features = false +optional = true + +[dependencies.frame-support-procedural] +path = "../substrate/frame/support/procedural" +default-features = false +optional = true + +[dependencies.frame-support-procedural-tools-derive] +path = "../substrate/frame/support/procedural/tools/derive" +default-features = false +optional = true + +[dependencies.frame-system] +path = "../substrate/frame/system" +default-features = false +optional = true + +[dependencies.frame-system-benchmarking] +path = "../substrate/frame/system/benchmarking" +default-features = false +optional = true + +[dependencies.frame-system-rpc-runtime-api] +path = "../substrate/frame/system/rpc/runtime-api" +default-features = false +optional = true + +[dependencies.frame-try-runtime] +path = "../substrate/frame/try-runtime" +default-features = false +optional = true + +[dependencies.pallet-alliance] +path = "../substrate/frame/alliance" +default-features = false +optional = true + +[dependencies.pallet-asset-conversion] +path = "../substrate/frame/asset-conversion" +default-features = false +optional = true + +[dependencies.pallet-asset-conversion-ops] +path = "../substrate/frame/asset-conversion/ops" +default-features = false +optional = true + +[dependencies.pallet-asset-conversion-tx-payment] +path = "../substrate/frame/transaction-payment/asset-conversion-tx-payment" +default-features = false +optional = true + +[dependencies.pallet-asset-rate] +path = "../substrate/frame/asset-rate" +default-features = false +optional = true + +[dependencies.pallet-asset-tx-payment] +path = "../substrate/frame/transaction-payment/asset-tx-payment" +default-features = false +optional = true + +[dependencies.pallet-assets] +path = "../substrate/frame/assets" +default-features = false +optional = true + +[dependencies.pallet-atomic-swap] +path = "../substrate/frame/atomic-swap" +default-features = false +optional = true + +[dependencies.pallet-aura] +path = "../substrate/frame/aura" +default-features = false +optional = true + +[dependencies.pallet-authority-discovery] +path = "../substrate/frame/authority-discovery" +default-features = false +optional = true + +[dependencies.pallet-authorship] +path = "../substrate/frame/authorship" +default-features = false +optional = true + +[dependencies.pallet-babe] +path = "../substrate/frame/babe" +default-features = false +optional = true + +[dependencies.pallet-bags-list] +path = "../substrate/frame/bags-list" +default-features = false +optional = true + +[dependencies.pallet-balances] +path = "../substrate/frame/balances" +default-features = false +optional = true + +[dependencies.pallet-beefy] +path = "../substrate/frame/beefy" +default-features = false +optional = true + +[dependencies.pallet-beefy-mmr] +path = "../substrate/frame/beefy-mmr" +default-features = false +optional = true + +[dependencies.pallet-bounties] +path = "../substrate/frame/bounties" +default-features = false +optional = true + +[dependencies.pallet-bridge-grandpa] +path = "../bridges/modules/grandpa" +default-features = false +optional = true + +[dependencies.pallet-bridge-messages] +path = "../bridges/modules/messages" +default-features = false +optional = true + +[dependencies.pallet-bridge-parachains] +path = "../bridges/modules/parachains" +default-features = false +optional = true + +[dependencies.pallet-bridge-relayers] +path = "../bridges/modules/relayers" +default-features = false +optional = true + +[dependencies.pallet-broker] +path = 
"../substrate/frame/broker" +default-features = false +optional = true + +[dependencies.pallet-child-bounties] +path = "../substrate/frame/child-bounties" +default-features = false +optional = true + +[dependencies.pallet-collator-selection] +path = "../cumulus/pallets/collator-selection" +default-features = false +optional = true + +[dependencies.pallet-collective] +path = "../substrate/frame/collective" +default-features = false +optional = true + +[dependencies.pallet-collective-content] +path = "../cumulus/parachains/pallets/collective-content" +default-features = false +optional = true + +[dependencies.pallet-contracts] +path = "../substrate/frame/contracts" +default-features = false +optional = true + +[dependencies.pallet-contracts-proc-macro] +path = "../substrate/frame/contracts/proc-macro" +default-features = false +optional = true + +[dependencies.pallet-contracts-uapi] +path = "../substrate/frame/contracts/uapi" +default-features = false +optional = true + +[dependencies.pallet-conviction-voting] +path = "../substrate/frame/conviction-voting" +default-features = false +optional = true + +[dependencies.pallet-core-fellowship] +path = "../substrate/frame/core-fellowship" +default-features = false +optional = true + +[dependencies.pallet-delegated-staking] +path = "../substrate/frame/delegated-staking" +default-features = false +optional = true + +[dependencies.pallet-democracy] +path = "../substrate/frame/democracy" +default-features = false +optional = true + +[dependencies.pallet-dev-mode] +path = "../substrate/frame/examples/dev-mode" +default-features = false +optional = true + +[dependencies.pallet-election-provider-multi-phase] +path = "../substrate/frame/election-provider-multi-phase" +default-features = false +optional = true + +[dependencies.pallet-election-provider-support-benchmarking] +path = "../substrate/frame/election-provider-support/benchmarking" +default-features = false +optional = true + +[dependencies.pallet-elections-phragmen] +path = "../substrate/frame/elections-phragmen" +default-features = false +optional = true + +[dependencies.pallet-fast-unstake] +path = "../substrate/frame/fast-unstake" +default-features = false +optional = true + +[dependencies.pallet-glutton] +path = "../substrate/frame/glutton" +default-features = false +optional = true + +[dependencies.pallet-grandpa] +path = "../substrate/frame/grandpa" +default-features = false +optional = true + +[dependencies.pallet-identity] +path = "../substrate/frame/identity" +default-features = false +optional = true + +[dependencies.pallet-im-online] +path = "../substrate/frame/im-online" +default-features = false +optional = true + +[dependencies.pallet-indices] +path = "../substrate/frame/indices" +default-features = false +optional = true + +[dependencies.pallet-insecure-randomness-collective-flip] +path = "../substrate/frame/insecure-randomness-collective-flip" +default-features = false +optional = true + +[dependencies.pallet-lottery] +path = "../substrate/frame/lottery" +default-features = false +optional = true + +[dependencies.pallet-membership] +path = "../substrate/frame/membership" +default-features = false +optional = true + +[dependencies.pallet-message-queue] +path = "../substrate/frame/message-queue" +default-features = false +optional = true + +[dependencies.pallet-migrations] +path = "../substrate/frame/migrations" +default-features = false +optional = true + +[dependencies.pallet-mixnet] +path = "../substrate/frame/mixnet" +default-features = false +optional = true + 
+[dependencies.pallet-mmr] +path = "../substrate/frame/merkle-mountain-range" +default-features = false +optional = true + +[dependencies.pallet-multisig] +path = "../substrate/frame/multisig" +default-features = false +optional = true + +[dependencies.pallet-nft-fractionalization] +path = "../substrate/frame/nft-fractionalization" +default-features = false +optional = true + +[dependencies.pallet-nfts] +path = "../substrate/frame/nfts" +default-features = false +optional = true + +[dependencies.pallet-nfts-runtime-api] +path = "../substrate/frame/nfts/runtime-api" +default-features = false +optional = true + +[dependencies.pallet-nis] +path = "../substrate/frame/nis" +default-features = false +optional = true + +[dependencies.pallet-node-authorization] +path = "../substrate/frame/node-authorization" +default-features = false +optional = true + +[dependencies.pallet-nomination-pools] +path = "../substrate/frame/nomination-pools" +default-features = false +optional = true + +[dependencies.pallet-nomination-pools-benchmarking] +path = "../substrate/frame/nomination-pools/benchmarking" +default-features = false +optional = true + +[dependencies.pallet-nomination-pools-runtime-api] +path = "../substrate/frame/nomination-pools/runtime-api" +default-features = false +optional = true + +[dependencies.pallet-offences] +path = "../substrate/frame/offences" +default-features = false +optional = true + +[dependencies.pallet-offences-benchmarking] +path = "../substrate/frame/offences/benchmarking" +default-features = false +optional = true + +[dependencies.pallet-paged-list] +path = "../substrate/frame/paged-list" +default-features = false +optional = true + +[dependencies.pallet-parameters] +path = "../substrate/frame/parameters" +default-features = false +optional = true + +[dependencies.pallet-preimage] +path = "../substrate/frame/preimage" +default-features = false +optional = true + +[dependencies.pallet-proxy] +path = "../substrate/frame/proxy" +default-features = false +optional = true + +[dependencies.pallet-ranked-collective] +path = "../substrate/frame/ranked-collective" +default-features = false +optional = true + +[dependencies.pallet-recovery] +path = "../substrate/frame/recovery" +default-features = false +optional = true + +[dependencies.pallet-referenda] +path = "../substrate/frame/referenda" +default-features = false +optional = true + +[dependencies.pallet-remark] +path = "../substrate/frame/remark" +default-features = false +optional = true + +[dependencies.pallet-root-offences] +path = "../substrate/frame/root-offences" +default-features = false +optional = true + +[dependencies.pallet-root-testing] +path = "../substrate/frame/root-testing" +default-features = false +optional = true + +[dependencies.pallet-safe-mode] +path = "../substrate/frame/safe-mode" +default-features = false +optional = true + +[dependencies.pallet-salary] +path = "../substrate/frame/salary" +default-features = false +optional = true + +[dependencies.pallet-scheduler] +path = "../substrate/frame/scheduler" +default-features = false +optional = true + +[dependencies.pallet-scored-pool] +path = "../substrate/frame/scored-pool" +default-features = false +optional = true + +[dependencies.pallet-session] +path = "../substrate/frame/session" +default-features = false +optional = true + +[dependencies.pallet-session-benchmarking] +path = "../substrate/frame/session/benchmarking" +default-features = false +optional = true + +[dependencies.pallet-skip-feeless-payment] +path = 
"../substrate/frame/transaction-payment/skip-feeless-payment" +default-features = false +optional = true + +[dependencies.pallet-society] +path = "../substrate/frame/society" +default-features = false +optional = true + +[dependencies.pallet-staking] +path = "../substrate/frame/staking" +default-features = false +optional = true + +[dependencies.pallet-staking-reward-curve] +path = "../substrate/frame/staking/reward-curve" +default-features = false +optional = true + +[dependencies.pallet-staking-reward-fn] +path = "../substrate/frame/staking/reward-fn" +default-features = false +optional = true + +[dependencies.pallet-staking-runtime-api] +path = "../substrate/frame/staking/runtime-api" +default-features = false +optional = true + +[dependencies.pallet-state-trie-migration] +path = "../substrate/frame/state-trie-migration" +default-features = false +optional = true + +[dependencies.pallet-statement] +path = "../substrate/frame/statement" +default-features = false +optional = true + +[dependencies.pallet-sudo] +path = "../substrate/frame/sudo" +default-features = false +optional = true + +[dependencies.pallet-timestamp] +path = "../substrate/frame/timestamp" +default-features = false +optional = true + +[dependencies.pallet-tips] +path = "../substrate/frame/tips" +default-features = false +optional = true + +[dependencies.pallet-transaction-payment] +path = "../substrate/frame/transaction-payment" +default-features = false +optional = true + +[dependencies.pallet-transaction-payment-rpc-runtime-api] +path = "../substrate/frame/transaction-payment/rpc/runtime-api" +default-features = false +optional = true + +[dependencies.pallet-transaction-storage] +path = "../substrate/frame/transaction-storage" +default-features = false +optional = true + +[dependencies.pallet-treasury] +path = "../substrate/frame/treasury" +default-features = false +optional = true + +[dependencies.pallet-tx-pause] +path = "../substrate/frame/tx-pause" +default-features = false +optional = true + +[dependencies.pallet-uniques] +path = "../substrate/frame/uniques" +default-features = false +optional = true + +[dependencies.pallet-utility] +path = "../substrate/frame/utility" +default-features = false +optional = true + +[dependencies.pallet-vesting] +path = "../substrate/frame/vesting" +default-features = false +optional = true + +[dependencies.pallet-whitelist] +path = "../substrate/frame/whitelist" +default-features = false +optional = true + +[dependencies.pallet-xcm] +path = "../polkadot/xcm/pallet-xcm" +default-features = false +optional = true + +[dependencies.pallet-xcm-benchmarks] +path = "../polkadot/xcm/pallet-xcm-benchmarks" +default-features = false +optional = true + +[dependencies.pallet-xcm-bridge-hub] +path = "../bridges/modules/xcm-bridge-hub" +default-features = false +optional = true + +[dependencies.pallet-xcm-bridge-hub-router] +path = "../bridges/modules/xcm-bridge-hub-router" +default-features = false +optional = true + +[dependencies.parachains-common] +path = "../cumulus/parachains/common" +default-features = false +optional = true + +[dependencies.polkadot-core-primitives] +path = "../polkadot/core-primitives" +default-features = false +optional = true + +[dependencies.polkadot-parachain-primitives] +path = "../polkadot/parachain" +default-features = false +optional = true + +[dependencies.polkadot-primitives] +path = "../polkadot/primitives" +default-features = false +optional = true + +[dependencies.polkadot-runtime-common] +path = "../polkadot/runtime/common" +default-features = false 
+optional = true
+
+[dependencies.polkadot-runtime-metrics]
+path = "../polkadot/runtime/metrics"
+default-features = false
+optional = true
+
+[dependencies.polkadot-runtime-parachains]
+path = "../polkadot/runtime/parachains"
+default-features = false
+optional = true
+
+[dependencies.polkadot-sdk-frame]
+path = "../substrate/frame"
+default-features = false
+optional = true
+
+[dependencies.rococo-runtime-constants]
+path = "../polkadot/runtime/rococo/constants"
+default-features = false
+optional = true
+
+[dependencies.sc-chain-spec-derive]
+path = "../substrate/client/chain-spec/derive"
+default-features = false
+optional = true
+
+[dependencies.sc-tracing-proc-macro]
+path = "../substrate/client/tracing/proc-macro"
+default-features = false
+optional = true
+
+[dependencies.slot-range-helper]
+path = "../polkadot/runtime/common/slot_range_helper"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-beacon-primitives]
+path = "../bridges/snowbridge/primitives/beacon"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-core]
+path = "../bridges/snowbridge/primitives/core"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-ethereum]
+path = "../bridges/snowbridge/primitives/ethereum"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-outbound-queue-merkle-tree]
+path = "../bridges/snowbridge/pallets/outbound-queue/merkle-tree"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-outbound-queue-runtime-api]
+path = "../bridges/snowbridge/pallets/outbound-queue/runtime-api"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-pallet-ethereum-client]
+path = "../bridges/snowbridge/pallets/ethereum-client"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-pallet-ethereum-client-fixtures]
+path = "../bridges/snowbridge/pallets/ethereum-client/fixtures"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-pallet-inbound-queue]
+path = "../bridges/snowbridge/pallets/inbound-queue"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-pallet-inbound-queue-fixtures]
+path = "../bridges/snowbridge/pallets/inbound-queue/fixtures"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-pallet-outbound-queue]
+path = "../bridges/snowbridge/pallets/outbound-queue"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-pallet-system]
+path = "../bridges/snowbridge/pallets/system"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-router-primitives]
+path = "../bridges/snowbridge/primitives/router"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-runtime-common]
+path = "../bridges/snowbridge/runtime/runtime-common"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-system-runtime-api]
+path = "../bridges/snowbridge/pallets/system/runtime-api"
+default-features = false
+optional = true
+
+[dependencies.sp-api]
+path = "../substrate/primitives/api"
+default-features = false
+optional = true
+
+[dependencies.sp-api-proc-macro]
+path = "../substrate/primitives/api/proc-macro"
+default-features = false
+optional = true
+
+[dependencies.sp-application-crypto]
+path = "../substrate/primitives/application-crypto"
+default-features = false
+optional = true
+
+[dependencies.sp-arithmetic]
+path = "../substrate/primitives/arithmetic"
+default-features = false
+optional = true
+
+[dependencies.sp-authority-discovery]
+path = "../substrate/primitives/authority-discovery"
+default-features = false
+optional = true
+
+[dependencies.sp-block-builder]
+path = "../substrate/primitives/block-builder"
+default-features = false
+optional = true
+
+[dependencies.sp-consensus-aura]
+path = "../substrate/primitives/consensus/aura"
+default-features = false
+optional = true
+
+[dependencies.sp-consensus-babe]
+path = "../substrate/primitives/consensus/babe"
+default-features = false
+optional = true
+
+[dependencies.sp-consensus-beefy]
+path = "../substrate/primitives/consensus/beefy"
+default-features = false
+optional = true
+
+[dependencies.sp-consensus-grandpa]
+path = "../substrate/primitives/consensus/grandpa"
+default-features = false
+optional = true
+
+[dependencies.sp-consensus-pow]
+path = "../substrate/primitives/consensus/pow"
+default-features = false
+optional = true
+
+[dependencies.sp-consensus-slots]
+path = "../substrate/primitives/consensus/slots"
+default-features = false
+optional = true
+
+[dependencies.sp-core]
+path = "../substrate/primitives/core"
+default-features = false
+optional = true
+
+[dependencies.sp-crypto-ec-utils]
+path = "../substrate/primitives/crypto/ec-utils"
+default-features = false
+optional = true
+
+[dependencies.sp-crypto-hashing]
+path = "../substrate/primitives/crypto/hashing"
+default-features = false
+optional = true
+
+[dependencies.sp-crypto-hashing-proc-macro]
+path = "../substrate/primitives/crypto/hashing/proc-macro"
+default-features = false
+optional = true
+
+[dependencies.sp-debug-derive]
+path = "../substrate/primitives/debug-derive"
+default-features = false
+optional = true
+
+[dependencies.sp-externalities]
+path = "../substrate/primitives/externalities"
+default-features = false
+optional = true
+
+[dependencies.sp-genesis-builder]
+path = "../substrate/primitives/genesis-builder"
+default-features = false
+optional = true
+
+[dependencies.sp-inherents]
+path = "../substrate/primitives/inherents"
+default-features = false
+optional = true
+
+[dependencies.sp-io]
+path = "../substrate/primitives/io"
+default-features = false
+optional = true
+
+[dependencies.sp-keyring]
+path = "../substrate/primitives/keyring"
+default-features = false
+optional = true
+
+[dependencies.sp-keystore]
+path = "../substrate/primitives/keystore"
+default-features = false
+optional = true
+
+[dependencies.sp-metadata-ir]
+path = "../substrate/primitives/metadata-ir"
+default-features = false
+optional = true
+
+[dependencies.sp-mixnet]
+path = "../substrate/primitives/mixnet"
+default-features = false
+optional = true
+
+[dependencies.sp-mmr-primitives]
+path = "../substrate/primitives/merkle-mountain-range"
+default-features = false
+optional = true
+
+[dependencies.sp-npos-elections]
+path = "../substrate/primitives/npos-elections"
+default-features = false
+optional = true
+
+[dependencies.sp-offchain]
+path = "../substrate/primitives/offchain"
+default-features = false
+optional = true
+
+[dependencies.sp-runtime]
+path = "../substrate/primitives/runtime"
+default-features = false
+optional = true
+
+[dependencies.sp-runtime-interface]
+path = "../substrate/primitives/runtime-interface"
+default-features = false
+optional = true
+
+[dependencies.sp-runtime-interface-proc-macro]
+path = "../substrate/primitives/runtime-interface/proc-macro"
+default-features = false
+optional = true
+
+[dependencies.sp-session]
+path = "../substrate/primitives/session"
+default-features = false
+optional = true
+
+[dependencies.sp-staking]
+path = "../substrate/primitives/staking"
+default-features = false
+optional = true
+
+[dependencies.sp-state-machine]
+path = "../substrate/primitives/state-machine"
+default-features = false
+optional = true
+
+[dependencies.sp-statement-store]
+path = "../substrate/primitives/statement-store"
+default-features = false
+optional = true
+
+[dependencies.sp-std]
+path = "../substrate/primitives/std"
+default-features = false
+optional = true
+
+[dependencies.sp-storage]
+path = "../substrate/primitives/storage"
+default-features = false
+optional = true
+
+[dependencies.sp-timestamp]
+path = "../substrate/primitives/timestamp"
+default-features = false
+optional = true
+
+[dependencies.sp-tracing]
+path = "../substrate/primitives/tracing"
+default-features = false
+optional = true
+
+[dependencies.sp-transaction-pool]
+path = "../substrate/primitives/transaction-pool"
+default-features = false
+optional = true
+
+[dependencies.sp-transaction-storage-proof]
+path = "../substrate/primitives/transaction-storage-proof"
+default-features = false
+optional = true
+
+[dependencies.sp-trie]
+path = "../substrate/primitives/trie"
+default-features = false
+optional = true
+
+[dependencies.sp-version]
+path = "../substrate/primitives/version"
+default-features = false
+optional = true
+
+[dependencies.sp-version-proc-macro]
+path = "../substrate/primitives/version/proc-macro"
+default-features = false
+optional = true
+
+[dependencies.sp-wasm-interface]
+path = "../substrate/primitives/wasm-interface"
+default-features = false
+optional = true
+
+[dependencies.sp-weights]
+path = "../substrate/primitives/weights"
+default-features = false
+optional = true
+
+[dependencies.staging-parachain-info]
+path = "../cumulus/parachains/pallets/parachain-info"
+default-features = false
+optional = true
+
+[dependencies.staging-xcm]
+path = "../polkadot/xcm"
+default-features = false
+optional = true
+
+[dependencies.staging-xcm-builder]
+path = "../polkadot/xcm/xcm-builder"
+default-features = false
+optional = true
+
+[dependencies.staging-xcm-executor]
+path = "../polkadot/xcm/xcm-executor"
+default-features = false
+optional = true
+
+[dependencies.substrate-bip39]
+path = "../substrate/utils/substrate-bip39"
+default-features = false
+optional = true
+
+[dependencies.testnet-parachains-constants]
+path = "../cumulus/parachains/runtimes/constants"
+default-features = false
+optional = true
+
+[dependencies.tracing-gum-proc-macro]
+path = "../polkadot/node/gum/proc-macro"
+default-features = false
+optional = true
+
+[dependencies.westend-runtime-constants]
+path = "../polkadot/runtime/westend/constants"
+default-features = false
+optional = true
+
+[dependencies.xcm-fee-payment-runtime-api]
+path = "../polkadot/xcm/xcm-fee-payment-runtime-api"
+default-features = false
+optional = true
+
+[dependencies.xcm-procedural]
+path = "../polkadot/xcm/procedural"
+default-features = false
+optional = true
+
+[dependencies.asset-test-utils]
+path = "../cumulus/parachains/runtimes/assets/test-utils"
+default-features = false
+optional = true
+
+[dependencies.bridge-hub-test-utils]
+path = "../cumulus/parachains/runtimes/bridge-hubs/test-utils"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-cli]
+path = "../cumulus/client/cli"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-collator]
+path = "../cumulus/client/collator"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-consensus-aura]
+path = "../cumulus/client/consensus/aura"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-consensus-common]
+path = "../cumulus/client/consensus/common"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-consensus-proposer]
+path = "../cumulus/client/consensus/proposer"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-consensus-relay-chain]
+path = "../cumulus/client/consensus/relay-chain"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-network]
+path = "../cumulus/client/network"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-parachain-inherent]
+path = "../cumulus/client/parachain-inherent"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-pov-recovery]
+path = "../cumulus/client/pov-recovery"
+default-features = false
+optional = true
+
+[dependencies.cumulus-client-service]
+path = "../cumulus/client/service"
+default-features = false
+optional = true
+
+[dependencies.cumulus-relay-chain-inprocess-interface]
+path = "../cumulus/client/relay-chain-inprocess-interface"
+default-features = false
+optional = true
+
+[dependencies.cumulus-relay-chain-interface]
+path = "../cumulus/client/relay-chain-interface"
+default-features = false
+optional = true
+
+[dependencies.cumulus-relay-chain-minimal-node]
+path = "../cumulus/client/relay-chain-minimal-node"
+default-features = false
+optional = true
+
+[dependencies.cumulus-relay-chain-rpc-interface]
+path = "../cumulus/client/relay-chain-rpc-interface"
+default-features = false
+optional = true
+
+[dependencies.cumulus-test-relay-sproof-builder]
+path = "../cumulus/test/relay-sproof-builder"
+default-features = false
+optional = true
+
+[dependencies.emulated-integration-tests-common]
+path = "../cumulus/parachains/integration-tests/emulated/common"
+default-features = false
+optional = true
+
+[dependencies.fork-tree]
+path = "../substrate/utils/fork-tree"
+default-features = false
+optional = true
+
+[dependencies.frame-benchmarking-cli]
+path = "../substrate/utils/frame/benchmarking-cli"
+default-features = false
+optional = true
+
+[dependencies.frame-remote-externalities]
+path = "../substrate/utils/frame/remote-externalities"
+default-features = false
+optional = true
+
+[dependencies.frame-support-procedural-tools]
+path = "../substrate/frame/support/procedural/tools"
+default-features = false
+optional = true
+
+[dependencies.generate-bags]
+path = "../substrate/utils/frame/generate-bags"
+default-features = false
+optional = true
+
+[dependencies.mmr-gadget]
+path = "../substrate/client/merkle-mountain-range"
+default-features = false
+optional = true
+
+[dependencies.mmr-rpc]
+path = "../substrate/client/merkle-mountain-range/rpc"
+default-features = false
+optional = true
+
+[dependencies.pallet-contracts-mock-network]
+path = "../substrate/frame/contracts/mock-network"
+default-features = false
+optional = true
+
+[dependencies.pallet-transaction-payment-rpc]
+path = "../substrate/frame/transaction-payment/rpc"
+default-features = false
+optional = true
+
+[dependencies.parachains-runtimes-test-utils]
+path = "../cumulus/parachains/runtimes/test-utils"
+default-features = false
+optional = true
+
+[dependencies.polkadot-approval-distribution]
+path = "../polkadot/node/network/approval-distribution"
+default-features = false
+optional = true
+
+[dependencies.polkadot-availability-bitfield-distribution]
+path = "../polkadot/node/network/bitfield-distribution"
+default-features = false
+optional = true
+
+[dependencies.polkadot-availability-distribution]
+path = "../polkadot/node/network/availability-distribution"
+default-features = false
+optional = true
+
+[dependencies.polkadot-availability-recovery]
+path = "../polkadot/node/network/availability-recovery"
+default-features = false
+optional = true
+
+[dependencies.polkadot-cli]
+path = "../polkadot/cli"
+default-features = false
+optional = true
+
+[dependencies.polkadot-collator-protocol]
+path = "../polkadot/node/network/collator-protocol"
+default-features = false
+optional = true
+
+[dependencies.polkadot-dispute-distribution]
+path = "../polkadot/node/network/dispute-distribution"
+default-features = false
+optional = true
+
+[dependencies.polkadot-erasure-coding]
+path = "../polkadot/erasure-coding"
+default-features = false
+optional = true
+
+[dependencies.polkadot-gossip-support]
+path = "../polkadot/node/network/gossip-support"
+default-features = false
+optional = true
+
+[dependencies.polkadot-network-bridge]
+path = "../polkadot/node/network/bridge"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-collation-generation]
+path = "../polkadot/node/collation-generation"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-approval-voting]
+path = "../polkadot/node/core/approval-voting"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-av-store]
+path = "../polkadot/node/core/av-store"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-backing]
+path = "../polkadot/node/core/backing"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-bitfield-signing]
+path = "../polkadot/node/core/bitfield-signing"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-candidate-validation]
+path = "../polkadot/node/core/candidate-validation"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-chain-api]
+path = "../polkadot/node/core/chain-api"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-chain-selection]
+path = "../polkadot/node/core/chain-selection"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-dispute-coordinator]
+path = "../polkadot/node/core/dispute-coordinator"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-parachains-inherent]
+path = "../polkadot/node/core/parachains-inherent"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-prospective-parachains]
+path = "../polkadot/node/core/prospective-parachains"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-provisioner]
+path = "../polkadot/node/core/provisioner"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-pvf]
+path = "../polkadot/node/core/pvf"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-pvf-checker]
+path = "../polkadot/node/core/pvf-checker"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-pvf-common]
+path = "../polkadot/node/core/pvf/common"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-pvf-execute-worker]
+path = "../polkadot/node/core/pvf/execute-worker"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-pvf-prepare-worker]
+path = "../polkadot/node/core/pvf/prepare-worker"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-core-runtime-api]
+path = "../polkadot/node/core/runtime-api"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-jaeger]
+path = "../polkadot/node/jaeger"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-metrics]
+path = "../polkadot/node/metrics"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-network-protocol]
+path = "../polkadot/node/network/protocol"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-primitives]
+path = "../polkadot/node/primitives"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-subsystem]
+path = "../polkadot/node/subsystem"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-subsystem-types]
+path = "../polkadot/node/subsystem-types"
+default-features = false
+optional = true
+
+[dependencies.polkadot-node-subsystem-util]
+path = "../polkadot/node/subsystem-util"
+default-features = false
+optional = true
+
+[dependencies.polkadot-overseer]
+path = "../polkadot/node/overseer"
+default-features = false
+optional = true
+
+[dependencies.polkadot-rpc]
+path = "../polkadot/rpc"
+default-features = false
+optional = true
+
+[dependencies.polkadot-service]
+path = "../polkadot/node/service"
+default-features = false
+optional = true
+
+[dependencies.polkadot-statement-distribution]
+path = "../polkadot/node/network/statement-distribution"
+default-features = false
+optional = true
+
+[dependencies.polkadot-statement-table]
+path = "../polkadot/statement-table"
+default-features = false
+optional = true
+
+[dependencies.sc-allocator]
+path = "../substrate/client/allocator"
+default-features = false
+optional = true
+
+[dependencies.sc-authority-discovery]
+path = "../substrate/client/authority-discovery"
+default-features = false
+optional = true
+
+[dependencies.sc-basic-authorship]
+path = "../substrate/client/basic-authorship"
+default-features = false
+optional = true
+
+[dependencies.sc-block-builder]
+path = "../substrate/client/block-builder"
+default-features = false
+optional = true
+
+[dependencies.sc-chain-spec]
+path = "../substrate/client/chain-spec"
+default-features = false
+optional = true
+
+[dependencies.sc-cli]
+path = "../substrate/client/cli"
+default-features = false
+optional = true
+
+[dependencies.sc-client-api]
+path = "../substrate/client/api"
+default-features = false
+optional = true
+
+[dependencies.sc-client-db]
+path = "../substrate/client/db"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus]
+path = "../substrate/client/consensus/common"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-aura]
+path = "../substrate/client/consensus/aura"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-babe]
+path = "../substrate/client/consensus/babe"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-babe-rpc]
+path = "../substrate/client/consensus/babe/rpc"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-beefy]
+path = "../substrate/client/consensus/beefy"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-beefy-rpc]
+path = "../substrate/client/consensus/beefy/rpc"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-epochs]
+path = "../substrate/client/consensus/epochs"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-grandpa]
+path = "../substrate/client/consensus/grandpa"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-grandpa-rpc]
+path = "../substrate/client/consensus/grandpa/rpc"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-manual-seal]
+path = "../substrate/client/consensus/manual-seal"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-pow]
+path = "../substrate/client/consensus/pow"
+default-features = false
+optional = true
+
+[dependencies.sc-consensus-slots]
+path = "../substrate/client/consensus/slots"
+default-features = false
+optional = true
+
+[dependencies.sc-executor]
+path = "../substrate/client/executor"
+default-features = false
+optional = true
+
+[dependencies.sc-executor-common]
+path = "../substrate/client/executor/common"
+default-features = false
+optional = true
+
+[dependencies.sc-executor-polkavm]
+path = "../substrate/client/executor/polkavm"
+default-features = false
+optional = true
+
+[dependencies.sc-executor-wasmtime]
+path = "../substrate/client/executor/wasmtime"
+default-features = false
+optional = true
+
+[dependencies.sc-informant]
+path = "../substrate/client/informant"
+default-features = false
+optional = true
+
+[dependencies.sc-keystore]
+path = "../substrate/client/keystore"
+default-features = false
+optional = true
+
+[dependencies.sc-mixnet]
+path = "../substrate/client/mixnet"
+default-features = false
+optional = true
+
+[dependencies.sc-network]
+path = "../substrate/client/network"
+default-features = false
+optional = true
+
+[dependencies.sc-network-common]
+path = "../substrate/client/network/common"
+default-features = false
+optional = true
+
+[dependencies.sc-network-gossip]
+path = "../substrate/client/network-gossip"
+default-features = false
+optional = true
+
+[dependencies.sc-network-light]
+path = "../substrate/client/network/light"
+default-features = false
+optional = true
+
+[dependencies.sc-network-statement]
+path = "../substrate/client/network/statement"
+default-features = false
+optional = true
+
+[dependencies.sc-network-sync]
+path = "../substrate/client/network/sync"
+default-features = false
+optional = true
+
+[dependencies.sc-network-transactions]
+path = "../substrate/client/network/transactions"
+default-features = false
+optional = true
+
+[dependencies.sc-network-types]
+path = "../substrate/client/network/types"
+default-features = false
+optional = true
+
+[dependencies.sc-offchain]
+path = "../substrate/client/offchain"
+default-features = false
+optional = true
+
+[dependencies.sc-proposer-metrics]
+path = "../substrate/client/proposer-metrics"
+default-features = false
+optional = true
+
+[dependencies.sc-rpc]
+path = "../substrate/client/rpc"
+default-features = false
+optional = true
+
+[dependencies.sc-rpc-api]
+path = "../substrate/client/rpc-api"
+default-features = false
+optional = true
+
+[dependencies.sc-rpc-server]
+path = "../substrate/client/rpc-servers"
+default-features = false
+optional = true
+
+[dependencies.sc-rpc-spec-v2]
+path = "../substrate/client/rpc-spec-v2"
+default-features = false
+optional = true
+
+[dependencies.sc-service]
+path = "../substrate/client/service"
+default-features = false
+optional = true
+
+[dependencies.sc-state-db]
+path = "../substrate/client/state-db"
+default-features = false
+optional = true
+
+[dependencies.sc-statement-store]
+path = "../substrate/client/statement-store"
+default-features = false
+optional = true
+
+[dependencies.sc-storage-monitor]
+path = "../substrate/client/storage-monitor"
+default-features = false
+optional = true
+
+[dependencies.sc-sync-state-rpc]
+path = "../substrate/client/sync-state-rpc"
+default-features = false
+optional = true
+
+[dependencies.sc-sysinfo]
+path = "../substrate/client/sysinfo"
+default-features = false
+optional = true
+
+[dependencies.sc-telemetry]
+path = "../substrate/client/telemetry"
+default-features = false
+optional = true
+
+[dependencies.sc-tracing]
+path = "../substrate/client/tracing"
+default-features = false
+optional = true
+
+[dependencies.sc-transaction-pool]
+path = "../substrate/client/transaction-pool"
+default-features = false
+optional = true
+
+[dependencies.sc-transaction-pool-api]
+path = "../substrate/client/transaction-pool/api"
+default-features = false
+optional = true
+
+[dependencies.sc-utils]
+path = "../substrate/client/utils"
+default-features = false
+optional = true
+
+[dependencies.snowbridge-runtime-test-common]
+path = "../bridges/snowbridge/runtime/test-common"
+default-features = false
+optional = true
+
+[dependencies.sp-blockchain]
+path = "../substrate/primitives/blockchain"
+default-features = false
+optional = true
+
+[dependencies.sp-consensus]
+path = "../substrate/primitives/consensus/common"
+default-features = false
+optional = true
+
+[dependencies.sp-core-hashing]
+path = "../substrate/deprecated/hashing"
+default-features = false
+optional = true
+
+[dependencies.sp-core-hashing-proc-macro]
+path = "../substrate/deprecated/hashing/proc-macro"
+default-features = false
+optional = true
+
+[dependencies.sp-database]
+path = "../substrate/primitives/database"
+default-features = false
+optional = true
+
+[dependencies.sp-maybe-compressed-blob]
+path = "../substrate/primitives/maybe-compressed-blob"
+default-features = false
+optional = true
+
+[dependencies.sp-panic-handler]
+path = "../substrate/primitives/panic-handler"
+default-features = false
+optional = true
+
+[dependencies.sp-rpc]
+path = "../substrate/primitives/rpc"
+default-features = false
+optional = true
+
+[dependencies.staging-chain-spec-builder]
+path = "../substrate/bin/utils/chain-spec-builder"
+default-features = false
+optional = true
+
+[dependencies.staging-node-inspect]
+path = "../substrate/bin/node/inspect"
+default-features = false
+optional = true
+
+[dependencies.staging-tracking-allocator]
+path = "../polkadot/node/tracking-allocator"
+default-features = false
+optional = true
+
+[dependencies.subkey]
+path = "../substrate/bin/utils/subkey"
+default-features = false
+optional = true
+
+[dependencies.substrate-build-script-utils]
+path = "../substrate/utils/build-script-utils"
+default-features = false
+optional = true
+
+[dependencies.substrate-frame-rpc-support]
+path = "../substrate/utils/frame/rpc/support"
+default-features = false
+optional = true
+
+[dependencies.substrate-frame-rpc-system]
+path = "../substrate/utils/frame/rpc/system"
+default-features = false
+optional = true
+
+[dependencies.substrate-prometheus-endpoint]
+path = "../substrate/utils/prometheus"
+default-features = false
+optional = true
+
+[dependencies.substrate-rpc-client]
+path = "../substrate/utils/frame/rpc/client"
+default-features = false
+optional = true
+
+[dependencies.substrate-state-trie-migration-rpc]
+path = "../substrate/utils/frame/rpc/state-trie-migration-rpc"
+default-features = false
+optional = true
+
+[dependencies.substrate-wasm-builder]
+path = "../substrate/utils/wasm-builder"
+default-features = false
+optional = true
+
+[dependencies.tracing-gum]
+path = "../polkadot/node/gum"
+default-features = false
+optional = true
+
+[dependencies.xcm-emulator]
+path = "../cumulus/xcm/xcm-emulator"
+default-features = false
+optional = true
+
+[dependencies.xcm-simulator]
+path = "../polkadot/xcm/xcm-simulator"
+default-features = false
+optional = true
+
+[package.metadata.docs.rs]
+features = ["node", "runtime"]
+targets = ["x86_64-unknown-linux-gnu"]
diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..78b34ba179b7dd7fd7b08980d14e58d624c480dc
--- /dev/null
+++ b/umbrella/src/lib.rs
@@ -0,0 +1,1569 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+//! Polkadot SDK umbrella crate re-exporting all other published crates.
+//!
+//! This helps to set a single version number for all your dependencies. Docs are in the
+//! `polkadot-sdk-docs` crate.
+
+// This file is auto-generated and checked by the CI. You can edit it manually, but it must be
+// exactly the way that the CI expects it.
+
+/// Test utils for Asset Hub runtimes.
+#[cfg(feature = "asset-test-utils")]
+pub use asset_test_utils;
+
+/// Assets common utilities.
+#[cfg(feature = "assets-common")]
+pub use assets_common;
+
+/// A no-std/Substrate compatible library to construct binary Merkle trees.
+#[cfg(feature = "binary-merkle-tree")]
+pub use binary_merkle_tree;
+
+/// Primitives of AssetHubRococo parachain runtime.
+#[cfg(feature = "bp-asset-hub-rococo")]
+pub use bp_asset_hub_rococo;
+
+/// Primitives of AssetHubWestend parachain runtime.
+#[cfg(feature = "bp-asset-hub-westend")]
+pub use bp_asset_hub_westend;
+
+/// Primitives for BridgeHub parachain runtimes.
+#[cfg(feature = "bp-bridge-hub-cumulus")]
+pub use bp_bridge_hub_cumulus;
+
+/// Primitives of BridgeHubKusama parachain runtime.
+#[cfg(feature = "bp-bridge-hub-kusama")]
+pub use bp_bridge_hub_kusama;
+
+/// Primitives of BridgeHubPolkadot parachain runtime.
+#[cfg(feature = "bp-bridge-hub-polkadot")]
+pub use bp_bridge_hub_polkadot;
+
+/// Primitives of BridgeHubRococo parachain runtime.
+#[cfg(feature = "bp-bridge-hub-rococo")]
+pub use bp_bridge_hub_rococo;
+
+/// Primitives of BridgeHubWestend parachain runtime.
+#[cfg(feature = "bp-bridge-hub-westend")]
+pub use bp_bridge_hub_westend;
+
+/// A common interface for describing what a bridge pallet should be able to do.
+#[cfg(feature = "bp-header-chain")]
+pub use bp_header_chain;
+
+/// Primitives of Kusama runtime.
+#[cfg(feature = "bp-kusama")]
+pub use bp_kusama;
+
+/// Primitives of messages module.
+#[cfg(feature = "bp-messages")]
+pub use bp_messages;
+
+/// Primitives of parachains module.
+#[cfg(feature = "bp-parachains")]
+pub use bp_parachains;
+
+/// Primitives of Polkadot runtime.
+#[cfg(feature = "bp-polkadot")]
+pub use bp_polkadot;
+
+/// Primitives of Polkadot Bulletin chain runtime.
+#[cfg(feature = "bp-polkadot-bulletin")]
+pub use bp_polkadot_bulletin;
+
+/// Primitives of Polkadot-like runtime.
+#[cfg(feature = "bp-polkadot-core")]
+pub use bp_polkadot_core;
+
+/// Primitives of relayers module.
+#[cfg(feature = "bp-relayers")]
+pub use bp_relayers;
+
+/// Primitives of Rococo runtime.
+#[cfg(feature = "bp-rococo")]
+pub use bp_rococo;
+
+/// Primitives that may be used at (bridges) runtime level.
+#[cfg(feature = "bp-runtime")]
+pub use bp_runtime;
+
+/// Utilities for testing substrate-based runtime bridge code.
+#[cfg(feature = "bp-test-utils")]
+pub use bp_test_utils;
+
+/// Primitives of Westend runtime.
+#[cfg(feature = "bp-westend")]
+pub use bp_westend;
+
+/// Primitives of the xcm-bridge-hub pallet.
+#[cfg(feature = "bp-xcm-bridge-hub")]
+pub use bp_xcm_bridge_hub;
+
+/// Primitives of the xcm-bridge-hub fee pallet.
+#[cfg(feature = "bp-xcm-bridge-hub-router")]
+pub use bp_xcm_bridge_hub_router;
+
+/// Bridge hub common utilities.
+#[cfg(feature = "bridge-hub-common")]
+pub use bridge_hub_common;
+
+/// Utils for BridgeHub testing.
+#[cfg(feature = "bridge-hub-test-utils")]
+pub use bridge_hub_test_utils;
+
+/// Common types and functions that may be used by substrate-based runtimes of all bridged
+/// chains.
+#[cfg(feature = "bridge-runtime-common")]
+pub use bridge_runtime_common;
+
+/// Parachain node CLI utilities.
+#[cfg(feature = "cumulus-client-cli")]
+pub use cumulus_client_cli;
+
+/// Common node-side functionality and glue code to collate parachain blocks.
+#[cfg(feature = "cumulus-client-collator")]
+pub use cumulus_client_collator;
+
+/// AURA consensus algorithm for parachains.
+#[cfg(feature = "cumulus-client-consensus-aura")]
+pub use cumulus_client_consensus_aura;
+
+/// Cumulus specific common consensus implementations.
+#[cfg(feature = "cumulus-client-consensus-common")]
+pub use cumulus_client_consensus_common;
+
+/// A Substrate `Proposer` for building parachain blocks.
+#[cfg(feature = "cumulus-client-consensus-proposer")]
+pub use cumulus_client_consensus_proposer;
+
+/// The relay-chain provided consensus algorithm.
+#[cfg(feature = "cumulus-client-consensus-relay-chain")]
+pub use cumulus_client_consensus_relay_chain;
+
+/// Cumulus-specific networking protocol.
+#[cfg(feature = "cumulus-client-network")]
+pub use cumulus_client_network;
+
+/// Inherent that needs to be present in every parachain block. Contains messages and a relay
+/// chain storage-proof.
+#[cfg(feature = "cumulus-client-parachain-inherent")]
+pub use cumulus_client_parachain_inherent;
+
+/// Recovery of parachain PoV blocks from the relay chain's availability system.
+#[cfg(feature = "cumulus-client-pov-recovery")]
+pub use cumulus_client_pov_recovery;
+
+/// Common functions used to assemble the components of a parachain node.
+#[cfg(feature = "cumulus-client-service")]
+pub use cumulus_client_service;
+
+/// AURA consensus extension pallet for parachains.
+#[cfg(feature = "cumulus-pallet-aura-ext")]
+pub use cumulus_pallet_aura_ext;
+
+/// Migrates messages from the old DMP queue pallet.
+#[cfg(feature = "cumulus-pallet-dmp-queue")]
+pub use cumulus_pallet_dmp_queue;
+
+/// Base pallet for cumulus-based parachains.
+#[cfg(feature = "cumulus-pallet-parachain-system")]
+pub use cumulus_pallet_parachain_system;
+
+/// Proc macros provided by the parachain-system pallet.
+#[cfg(feature = "cumulus-pallet-parachain-system-proc-macro")]
+pub use cumulus_pallet_parachain_system_proc_macro;
+
+/// FRAME sessions pallet benchmarking.
+#[cfg(feature = "cumulus-pallet-session-benchmarking")]
+pub use cumulus_pallet_session_benchmarking;
+
+/// Adds functionality to migrate from a Solo to a Parachain.
+#[cfg(feature = "cumulus-pallet-solo-to-para")]
+pub use cumulus_pallet_solo_to_para;
+
+/// Pallet for stuff specific to parachains' usage of XCM.
+#[cfg(feature = "cumulus-pallet-xcm")]
+pub use cumulus_pallet_xcm;
+
+/// Pallet to queue outbound and inbound XCMP messages.
+#[cfg(feature = "cumulus-pallet-xcmp-queue")]
+pub use cumulus_pallet_xcmp_queue;
+
+/// Ping Pallet for Cumulus XCM/UMP testing.
+#[cfg(feature = "cumulus-ping")]
+pub use cumulus_ping;
+
+/// Core primitives for Aura in Cumulus.
+#[cfg(feature = "cumulus-primitives-aura")]
+pub use cumulus_primitives_aura;
+
+/// Cumulus related core primitive types and traits.
+#[cfg(feature = "cumulus-primitives-core")]
+pub use cumulus_primitives_core;
+
+/// Inherent that needs to be present in every parachain block. Contains messages and a relay
+/// chain storage-proof.
+#[cfg(feature = "cumulus-primitives-parachain-inherent")]
+pub use cumulus_primitives_parachain_inherent;
+
+/// Hostfunction exposing storage proof size to the runtime.
+#[cfg(feature = "cumulus-primitives-proof-size-hostfunction")]
+pub use cumulus_primitives_proof_size_hostfunction;
+
+/// Utilities to reclaim storage weight.
+#[cfg(feature = "cumulus-primitives-storage-weight-reclaim")]
+pub use cumulus_primitives_storage_weight_reclaim;
+
+/// Provides timestamp related functionality for parachains.
+#[cfg(feature = "cumulus-primitives-timestamp")]
+pub use cumulus_primitives_timestamp;
+
+/// Helper datatypes for Cumulus.
+#[cfg(feature = "cumulus-primitives-utility")]
+pub use cumulus_primitives_utility;
+
+/// Implementation of the RelayChainInterface trait for Polkadot full-nodes.
+#[cfg(feature = "cumulus-relay-chain-inprocess-interface")]
+pub use cumulus_relay_chain_inprocess_interface;
+
+/// Common interface for different relay chain datasources.
+#[cfg(feature = "cumulus-relay-chain-interface")]
+pub use cumulus_relay_chain_interface;
+
+/// Minimal node implementation to be used in tandem with RPC or light-client mode.
+#[cfg(feature = "cumulus-relay-chain-minimal-node")]
+pub use cumulus_relay_chain_minimal_node;
+
+/// Implementation of the RelayChainInterface trait that connects to a remote RPC-node.
+#[cfg(feature = "cumulus-relay-chain-rpc-interface")]
+pub use cumulus_relay_chain_rpc_interface;
+
+/// Mocked relay state proof builder for testing Cumulus.
+#[cfg(feature = "cumulus-test-relay-sproof-builder")]
+pub use cumulus_test_relay_sproof_builder;
+
+/// Common resources for integration testing with xcm-emulator.
+#[cfg(feature = "emulated-integration-tests-common")]
+pub use emulated_integration_tests_common;
+
+/// Utility library for managing tree-like ordered data with logic for pruning the tree while
+/// finalizing nodes.
+#[cfg(feature = "fork-tree")]
+pub use fork_tree;
+
+/// Macro for benchmarking a FRAME runtime.
+#[cfg(feature = "frame-benchmarking")]
+pub use frame_benchmarking;
+
+/// CLI for benchmarking FRAME.
+#[cfg(feature = "frame-benchmarking-cli")]
+pub use frame_benchmarking_cli;
+
+/// Pallet for testing FRAME PoV benchmarking.
+#[cfg(feature = "frame-benchmarking-pallet-pov")]
+pub use frame_benchmarking_pallet_pov;
+
+/// NPoS Solution Type.
+#[cfg(feature = "frame-election-provider-solution-type")]
+pub use frame_election_provider_solution_type;
+
+/// Election provider support traits.
+#[cfg(feature = "frame-election-provider-support")]
+pub use frame_election_provider_support;
+
+/// FRAME executive engine.
+#[cfg(feature = "frame-executive")]
+pub use frame_executive;
+
+/// FRAME signed extension for verifying the metadata hash.
+#[cfg(feature = "frame-metadata-hash-extension")]
+pub use frame_metadata_hash_extension;
+
+/// An externalities provided environment that can load itself from remote nodes or cached
+/// files.
+#[cfg(feature = "frame-remote-externalities")]
+pub use frame_remote_externalities;
+
+/// Support code for the runtime.
+#[cfg(feature = "frame-support")]
+pub use frame_support;
+
+/// Proc macros for the runtime support code.
+#[cfg(feature = "frame-support-procedural")]
+pub use frame_support_procedural;
+
+/// Proc macro helpers for procedural macros.
+#[cfg(feature = "frame-support-procedural-tools")]
+pub use frame_support_procedural_tools;
+
+/// Used to derive parsing for parsing structs.
+#[cfg(feature = "frame-support-procedural-tools-derive")]
+pub use frame_support_procedural_tools_derive;
+
+/// FRAME system module.
+#[cfg(feature = "frame-system")]
+pub use frame_system;
+
+/// FRAME System benchmarking.
+#[cfg(feature = "frame-system-benchmarking")]
+pub use frame_system_benchmarking;
+
+/// Runtime API definition required by System RPC extensions.
+#[cfg(feature = "frame-system-rpc-runtime-api")]
+pub use frame_system_rpc_runtime_api;
+
+/// Supporting types for try-runtime, testing and dry-running commands.
+#[cfg(feature = "frame-try-runtime")]
+pub use frame_try_runtime;
+
+/// Bag threshold generation script for pallet-bags-list.
+#[cfg(feature = "generate-bags")]
+pub use generate_bags;
+
+/// MMR Client gadget for Substrate.
+#[cfg(feature = "mmr-gadget")]
+pub use mmr_gadget;
+
+/// Node-specific RPC methods for interaction with Merkle Mountain Range pallet.
+#[cfg(feature = "mmr-rpc")]
+pub use mmr_rpc;
+
+/// The Alliance pallet provides a collective for standard-setting industry collaboration.
+#[cfg(feature = "pallet-alliance")]
+pub use pallet_alliance;
+
+/// FRAME asset conversion pallet.
+#[cfg(feature = "pallet-asset-conversion")]
+pub use pallet_asset_conversion;
+
+/// FRAME asset conversion pallet's operations suite.
+#[cfg(feature = "pallet-asset-conversion-ops")]
+pub use pallet_asset_conversion_ops;
+
+/// Pallet to manage transaction payments in assets by converting them to native assets.
+#[cfg(feature = "pallet-asset-conversion-tx-payment")]
+pub use pallet_asset_conversion_tx_payment;
+
+/// Whitelist non-native assets for treasury spending and provide conversion to native balance.
+#[cfg(feature = "pallet-asset-rate")]
+pub use pallet_asset_rate;
+
+/// Pallet to manage transaction payments in assets.
+#[cfg(feature = "pallet-asset-tx-payment")]
+pub use pallet_asset_tx_payment;
+
+/// FRAME asset management pallet.
+#[cfg(feature = "pallet-assets")]
+pub use pallet_assets;
+
+/// FRAME atomic swap pallet.
+#[cfg(feature = "pallet-atomic-swap")]
+pub use pallet_atomic_swap;
+
+/// FRAME AURA consensus pallet.
+#[cfg(feature = "pallet-aura")]
+pub use pallet_aura;
+
+/// FRAME pallet for authority discovery.
+#[cfg(feature = "pallet-authority-discovery")]
+pub use pallet_authority_discovery;
+
+/// Block and uncle author tracking for FRAME.
+#[cfg(feature = "pallet-authorship")]
+pub use pallet_authorship;
+
+/// Consensus extension module for BABE consensus. Collects on-chain randomness from VRF
+/// outputs and manages epoch transitions.
+#[cfg(feature = "pallet-babe")]
+pub use pallet_babe;
+
+/// FRAME pallet bags list.
+#[cfg(feature = "pallet-bags-list")]
+pub use pallet_bags_list;
+
+/// FRAME pallet to manage balances.
+#[cfg(feature = "pallet-balances")]
+pub use pallet_balances;
+
+/// BEEFY FRAME pallet.
+#[cfg(feature = "pallet-beefy")]
+pub use pallet_beefy;
+
+/// BEEFY + MMR runtime utilities.
+#[cfg(feature = "pallet-beefy-mmr")]
+pub use pallet_beefy_mmr;
+
+/// FRAME pallet to manage bounties.
+#[cfg(feature = "pallet-bounties")]
+pub use pallet_bounties;
+
+/// Module implementing GRANDPA on-chain light client used for bridging consensus of
+/// substrate-based chains.
+#[cfg(feature = "pallet-bridge-grandpa")]
+pub use pallet_bridge_grandpa;
+
+/// Module that allows bridged chains to exchange messages using lane concept.
+#[cfg(feature = "pallet-bridge-messages")]
+pub use pallet_bridge_messages;
+
+/// Module that allows bridged relay chains to exchange information on their parachains' heads.
+#[cfg(feature = "pallet-bridge-parachains")]
+pub use pallet_bridge_parachains;
+
+/// Module used to store relayer rewards and coordinate relayers set.
+#[cfg(feature = "pallet-bridge-relayers")]
+pub use pallet_bridge_relayers;
+
+/// Brokerage tool for managing Polkadot Core scheduling.
+#[cfg(feature = "pallet-broker")]
+pub use pallet_broker;
+
+/// FRAME pallet to manage child bounties.
+#[cfg(feature = "pallet-child-bounties")]
+pub use pallet_child_bounties;
+
+/// Simple pallet to select collators for a parachain.
+#[cfg(feature = "pallet-collator-selection")]
+pub use pallet_collator_selection;
+
+/// Collective system: Members of a set of account IDs can make their collective feelings known
+/// through dispatched calls from one of two specialized origins.
+#[cfg(feature = "pallet-collective")]
+pub use pallet_collective;
+
+/// Managed content.
+#[cfg(feature = "pallet-collective-content")]
+pub use pallet_collective_content;
+
+/// FRAME pallet for WASM contracts.
+#[cfg(feature = "pallet-contracts")]
+pub use pallet_contracts;
+
+/// A mock network for testing pallet-contracts.
+#[cfg(feature = "pallet-contracts-mock-network")]
+pub use pallet_contracts_mock_network;
+
+/// Procedural macros used in pallet_contracts.
+#[cfg(feature = "pallet-contracts-proc-macro")]
+pub use pallet_contracts_proc_macro;
+
+/// Exposes all the host functions that a contract can import.
+#[cfg(feature = "pallet-contracts-uapi")]
+pub use pallet_contracts_uapi;
+
+/// FRAME pallet for conviction voting in referenda.
+#[cfg(feature = "pallet-conviction-voting")]
+pub use pallet_conviction_voting;
+
+/// Logic as per the description of The Fellowship for core Polkadot technology.
+#[cfg(feature = "pallet-core-fellowship")]
+pub use pallet_core_fellowship;
+
+/// FRAME delegated staking pallet.
+#[cfg(feature = "pallet-delegated-staking")]
+pub use pallet_delegated_staking;
+
+/// FRAME pallet for democracy.
+#[cfg(feature = "pallet-democracy")]
+pub use pallet_democracy;
+
+/// FRAME example pallet.
+#[cfg(feature = "pallet-dev-mode")]
+pub use pallet_dev_mode;
+
+/// Two-phase election provider pallet.
+#[cfg(feature = "pallet-election-provider-multi-phase")]
+pub use pallet_election_provider_multi_phase;
+
+/// Benchmarking for election provider support onchain config trait.
+#[cfg(feature = "pallet-election-provider-support-benchmarking")]
+pub use pallet_election_provider_support_benchmarking;
+
+/// FRAME pallet based on seq-Phragmén election method.
+#[cfg(feature = "pallet-elections-phragmen")]
+pub use pallet_elections_phragmen;
+
+/// FRAME fast unstake pallet.
+#[cfg(feature = "pallet-fast-unstake")]
+pub use pallet_fast_unstake;
+
+/// FRAME pallet for pushing a chain to its weight limits.
+#[cfg(feature = "pallet-glutton")]
+pub use pallet_glutton;
+
+/// FRAME pallet for GRANDPA finality gadget.
+#[cfg(feature = "pallet-grandpa")]
+pub use pallet_grandpa;
+
+/// FRAME identity management pallet.
+#[cfg(feature = "pallet-identity")]
+pub use pallet_identity;
+
+/// FRAME's I'm online pallet.
+#[cfg(feature = "pallet-im-online")]
+pub use pallet_im_online;
+
+/// FRAME indices management pallet.
+#[cfg(feature = "pallet-indices")]
+pub use pallet_indices;
+
+/// Insecure (do not use in production): FRAME randomness collective flip pallet.
+#[cfg(feature = "pallet-insecure-randomness-collective-flip")]
+pub use pallet_insecure_randomness_collective_flip;
+
+/// FRAME Participation Lottery Pallet.
+#[cfg(feature = "pallet-lottery")]
+pub use pallet_lottery;
+
+/// FRAME membership management pallet.
+#[cfg(feature = "pallet-membership")]
+pub use pallet_membership;
+
+/// FRAME pallet to queue and process messages.
+#[cfg(feature = "pallet-message-queue")]
+pub use pallet_message_queue;
+
+/// FRAME pallet to execute multi-block migrations.
+#[cfg(feature = "pallet-migrations")]
+pub use pallet_migrations;
+
+/// FRAME's mixnet pallet.
+#[cfg(feature = "pallet-mixnet")]
+pub use pallet_mixnet;
+
+/// FRAME Merkle Mountain Range pallet.
+#[cfg(feature = "pallet-mmr")]
+pub use pallet_mmr;
+
+/// FRAME multi-signature dispatch pallet.
+#[cfg(feature = "pallet-multisig")]
+pub use pallet_multisig;
+
+/// FRAME pallet to convert non-fungible to fungible tokens.
+#[cfg(feature = "pallet-nft-fractionalization")]
+pub use pallet_nft_fractionalization;
+
+/// FRAME NFTs pallet.
+#[cfg(feature = "pallet-nfts")]
+pub use pallet_nfts;
+
+/// Runtime API for the FRAME NFTs pallet.
+#[cfg(feature = "pallet-nfts-runtime-api")]
+pub use pallet_nfts_runtime_api;
+
+/// FRAME pallet for rewarding account freezing.
+#[cfg(feature = "pallet-nis")]
+pub use pallet_nis;
+
+/// FRAME pallet for node authorization.
+#[cfg(feature = "pallet-node-authorization")]
+pub use pallet_node_authorization;
+
+/// FRAME nomination pools pallet.
+#[cfg(feature = "pallet-nomination-pools")]
+pub use pallet_nomination_pools;
+
+/// FRAME nomination pools pallet benchmarking.
+#[cfg(feature = "pallet-nomination-pools-benchmarking")]
+pub use pallet_nomination_pools_benchmarking;
+
+/// Runtime API for nomination-pools FRAME pallet.
+#[cfg(feature = "pallet-nomination-pools-runtime-api")]
+pub use pallet_nomination_pools_runtime_api;
+
+/// FRAME offences pallet.
+#[cfg(feature = "pallet-offences")]
+pub use pallet_offences;
+
+/// FRAME offences pallet benchmarking.
+#[cfg(feature = "pallet-offences-benchmarking")]
+pub use pallet_offences_benchmarking;
+
+/// FRAME pallet that provides a paged list data structure.
+#[cfg(feature = "pallet-paged-list")]
+pub use pallet_paged_list;
+
+/// Pallet to store and configure parameters.
+#[cfg(feature = "pallet-parameters")]
+pub use pallet_parameters;
+
+/// FRAME pallet for storing preimages of hashes.
+#[cfg(feature = "pallet-preimage")]
+pub use pallet_preimage;
+
+/// FRAME proxying pallet.
+#[cfg(feature = "pallet-proxy")]
+pub use pallet_proxy;
+
+/// Ranked collective system: Members of a set of account IDs can make their collective
+/// feelings known through dispatched calls from one of two specialized origins.
+#[cfg(feature = "pallet-ranked-collective")]
+pub use pallet_ranked_collective;
+
+/// FRAME account recovery pallet.
+#[cfg(feature = "pallet-recovery")]
+pub use pallet_recovery;
+
+/// FRAME pallet for inclusive on-chain decisions.
+#[cfg(feature = "pallet-referenda")]
+pub use pallet_referenda;
+
+/// Remark storage pallet.
+#[cfg(feature = "pallet-remark")]
+pub use pallet_remark;
+
+/// FRAME root offences pallet.
+#[cfg(feature = "pallet-root-offences")]
+pub use pallet_root_offences;
+
+/// FRAME root testing pallet.
+#[cfg(feature = "pallet-root-testing")]
+pub use pallet_root_testing;
+
+/// FRAME safe-mode pallet.
+#[cfg(feature = "pallet-safe-mode")]
+pub use pallet_safe_mode;
+
+/// Paymaster.
+#[cfg(feature = "pallet-salary")]
+pub use pallet_salary;
+
+/// FRAME Scheduler pallet.
+#[cfg(feature = "pallet-scheduler")]
+pub use pallet_scheduler;
+
+/// FRAME pallet for scored pools.
+#[cfg(feature = "pallet-scored-pool")]
+pub use pallet_scored_pool;
+
+/// FRAME sessions pallet.
+#[cfg(feature = "pallet-session")]
+pub use pallet_session;
+
+/// FRAME sessions pallet benchmarking.
+#[cfg(feature = "pallet-session-benchmarking")]
+pub use pallet_session_benchmarking;
+
+/// Pallet to skip payments for calls annotated with `feeless_if` if the respective conditions
+/// are satisfied.
+#[cfg(feature = "pallet-skip-feeless-payment")]
+pub use pallet_skip_feeless_payment;
+
+/// FRAME society pallet.
+#[cfg(feature = "pallet-society")]
+pub use pallet_society;
+
+/// FRAME pallet staking.
+#[cfg(feature = "pallet-staking")]
+pub use pallet_staking;
+
+/// Reward Curve for FRAME staking pallet.
+#[cfg(feature = "pallet-staking-reward-curve")]
+pub use pallet_staking_reward_curve;
+
+/// Reward function for FRAME staking pallet.
+#[cfg(feature = "pallet-staking-reward-fn")]
+pub use pallet_staking_reward_fn;
+
+/// RPC runtime API for the FRAME staking pallet.
+#[cfg(feature = "pallet-staking-runtime-api")]
+pub use pallet_staking_runtime_api;
+
+/// FRAME pallet migration of trie.
+#[cfg(feature = "pallet-state-trie-migration")]
+pub use pallet_state_trie_migration;
+
+/// FRAME pallet for statement store.
+#[cfg(feature = "pallet-statement")]
+pub use pallet_statement;
+
+/// FRAME pallet for sudo.
+#[cfg(feature = "pallet-sudo")]
+pub use pallet_sudo;
+
+/// FRAME Timestamp Module.
+#[cfg(feature = "pallet-timestamp")]
+pub use pallet_timestamp;
+
+/// FRAME pallet to manage tips.
+#[cfg(feature = "pallet-tips")]
+pub use pallet_tips;
+
+/// FRAME pallet to manage transaction payments.
+#[cfg(feature = "pallet-transaction-payment")]
+pub use pallet_transaction_payment;
+
+/// RPC interface for the transaction payment pallet.
+#[cfg(feature = "pallet-transaction-payment-rpc")]
+pub use pallet_transaction_payment_rpc;
+
+/// RPC runtime API for transaction payment FRAME pallet.
+#[cfg(feature = "pallet-transaction-payment-rpc-runtime-api")]
+pub use pallet_transaction_payment_rpc_runtime_api;
+
+/// Storage chain pallet.
+#[cfg(feature = "pallet-transaction-storage")]
+pub use pallet_transaction_storage;
+
+/// FRAME pallet to manage treasury.
+#[cfg(feature = "pallet-treasury")]
+pub use pallet_treasury;
+
+/// FRAME transaction pause pallet.
+#[cfg(feature = "pallet-tx-pause")]
+pub use pallet_tx_pause;
+
+/// FRAME NFT asset management pallet.
+#[cfg(feature = "pallet-uniques")]
+pub use pallet_uniques;
+
+/// FRAME utilities pallet.
+#[cfg(feature = "pallet-utility")]
+pub use pallet_utility;
+
+/// FRAME pallet for managing vesting.
+#[cfg(feature = "pallet-vesting")]
+pub use pallet_vesting;
+
+/// FRAME pallet for whitelisting calls and dispatching from a specific origin.
+#[cfg(feature = "pallet-whitelist")]
+pub use pallet_whitelist;
+
+/// A pallet for handling XCM programs.
+#[cfg(feature = "pallet-xcm")]
+pub use pallet_xcm;
+
+/// Benchmarks for the XCM pallet.
+#[cfg(feature = "pallet-xcm-benchmarks")]
+pub use pallet_xcm_benchmarks;
+
+/// Module that adds dynamic bridges/lanes support to XCM infrastructure at the bridge hub.
+#[cfg(feature = "pallet-xcm-bridge-hub")]
+pub use pallet_xcm_bridge_hub;
+
+/// Bridge hub interface for sibling/parent chains with dynamic fees support.
+#[cfg(feature = "pallet-xcm-bridge-hub-router")]
+pub use pallet_xcm_bridge_hub_router;
+
+/// Logic which is common to all parachain runtimes.
+#[cfg(feature = "parachains-common")]
+pub use parachains_common;
+
+/// Utils for Runtimes testing.
+#[cfg(feature = "parachains-runtimes-test-utils")]
+pub use parachains_runtimes_test_utils;
+
+/// Polkadot Approval Distribution subsystem for the distribution of assignments and approvals
+/// for approval checks on candidates over the network.
+#[cfg(feature = "polkadot-approval-distribution")]
+pub use polkadot_approval_distribution;
+
+/// Polkadot Bitfield Distribution subsystem, which gossips signed availability bitfields used
+/// to compactly determine which backed candidates are available or not based on a 2/3+ quorum.
+#[cfg(feature = "polkadot-availability-bitfield-distribution")]
+pub use polkadot_availability_bitfield_distribution;
+
+/// The Availability Distribution subsystem. Requests the required availability data. Also
+/// distributes availability data and chunks to requesters.
+#[cfg(feature = "polkadot-availability-distribution")]
+pub use polkadot_availability_distribution;
+
+/// The Availability Recovery subsystem. Handles requests for recovering the availability data
+/// of included candidates.
+#[cfg(feature = "polkadot-availability-recovery")]
+pub use polkadot_availability_recovery;
+
+/// Polkadot Relay-chain Client Node.
+#[cfg(feature = "polkadot-cli")]
+pub use polkadot_cli;
+
+/// Polkadot Collator Protocol subsystem. Allows collators and validators to talk to each
+/// other.
+#[cfg(feature = "polkadot-collator-protocol")]
+pub use polkadot_collator_protocol;
+
+/// Core Polkadot types used by Relay Chains and parachains.
+#[cfg(feature = "polkadot-core-primitives")]
+pub use polkadot_core_primitives;
+
+/// Polkadot Dispute Distribution subsystem, which ensures all concerned validators are aware
+/// of a dispute and have the relevant votes.
+#[cfg(feature = "polkadot-dispute-distribution")]
+pub use polkadot_dispute_distribution;
+
+/// Erasure coding used for Polkadot's availability system.
+#[cfg(feature = "polkadot-erasure-coding")]
+pub use polkadot_erasure_coding;
+
+/// Polkadot Gossip Support subsystem. Responsible for keeping track of session changes and
+/// issuing a connection request to the relevant validators on every new session.
+#[cfg(feature = "polkadot-gossip-support")]
+pub use polkadot_gossip_support;
+
+/// The Network Bridge Subsystem: protocol multiplexer for Polkadot.
+#[cfg(feature = "polkadot-network-bridge")]
+pub use polkadot_network_bridge;
+
+/// Collator-side subsystem that handles incoming candidate submissions from the parachain.
+#[cfg(feature = "polkadot-node-collation-generation")]
+pub use polkadot_node_collation_generation;
+
+/// Approval Voting Subsystem of the Polkadot node.
+#[cfg(feature = "polkadot-node-core-approval-voting")]
+pub use polkadot_node_core_approval_voting;
+
+/// The Availability Store subsystem. Wrapper over the DB that stores availability data and
+/// chunks.
+#[cfg(feature = "polkadot-node-core-av-store")]
+pub use polkadot_node_core_av_store;
+
+/// The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as
+/// the issuance of statements about candidates.
+#[cfg(feature = "polkadot-node-core-backing")]
+pub use polkadot_node_core_backing;
+
+/// Bitfield signing subsystem for the Polkadot node.
+#[cfg(feature = "polkadot-node-core-bitfield-signing")]
+pub use polkadot_node_core_bitfield_signing;
+
+/// Polkadot crate that implements the Candidate Validation subsystem. Handles requests to
+/// validate candidates according to a PVF.
+#[cfg(feature = "polkadot-node-core-candidate-validation")]
+pub use polkadot_node_core_candidate_validation;
+
+/// The Chain API subsystem provides access to chain related utility functions like block
+/// number to hash conversions.
+#[cfg(feature = "polkadot-node-core-chain-api")]
+pub use polkadot_node_core_chain_api;
+
+/// Chain Selection Subsystem.
+#[cfg(feature = "polkadot-node-core-chain-selection")]
+pub use polkadot_node_core_chain_selection;
+
+/// The node-side components that participate in disputes.
+#[cfg(feature = "polkadot-node-core-dispute-coordinator")]
+pub use polkadot_node_core_dispute_coordinator;
+
+/// Parachains inherent data provider for Polkadot node.
+#[cfg(feature = "polkadot-node-core-parachains-inherent")]
+pub use polkadot_node_core_parachains_inherent;
+
+/// The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments.
+#[cfg(feature = "polkadot-node-core-prospective-parachains")]
+pub use polkadot_node_core_prospective_parachains;
+
+/// Responsible for assembling a relay chain block from a set of available parachain
+/// candidates.
+#[cfg(feature = "polkadot-node-core-provisioner")]
+pub use polkadot_node_core_provisioner;
+
+/// Polkadot crate that implements the PVF validation host. Responsible for coordinating
+/// preparation and execution of PVFs.
+#[cfg(feature = "polkadot-node-core-pvf")]
+pub use polkadot_node_core_pvf;
+
+/// Polkadot crate that implements the PVF pre-checking subsystem. Responsible for checking and
+/// voting for PVFs that are pending approval.
+#[cfg(feature = "polkadot-node-core-pvf-checker")]
+pub use polkadot_node_core_pvf_checker;
+
+/// Polkadot crate that contains functionality related to PVFs that is shared by the PVF host
+/// and the PVF workers.
+#[cfg(feature = "polkadot-node-core-pvf-common")]
+pub use polkadot_node_core_pvf_common;
+
+/// Polkadot crate that contains the logic for executing PVFs. Used by the
+/// polkadot-execute-worker binary.
+#[cfg(feature = "polkadot-node-core-pvf-execute-worker")]
+pub use polkadot_node_core_pvf_execute_worker;
+
+/// Polkadot crate that contains the logic for preparing PVFs. Used by the
+/// polkadot-prepare-worker binary.
+#[cfg(feature = "polkadot-node-core-pvf-prepare-worker")]
+pub use polkadot_node_core_pvf_prepare_worker;
+
+/// Wrapper around the parachain-related runtime APIs.
+#[cfg(feature = "polkadot-node-core-runtime-api")]
+pub use polkadot_node_core_runtime_api;
+
+/// Polkadot Jaeger primitives, but equally useful for Grafana/Tempo.
+#[cfg(feature = "polkadot-node-jaeger")]
+pub use polkadot_node_jaeger;
+
+/// Subsystem metric helpers.
+#[cfg(feature = "polkadot-node-metrics")]
+pub use polkadot_node_metrics;
+
+/// Primitive types for the Node-side.
+#[cfg(feature = "polkadot-node-network-protocol")]
+pub use polkadot_node_network_protocol;
+
+/// Primitive types for the Node-side.
+#[cfg(feature = "polkadot-node-primitives")]
+pub use polkadot_node_primitives;
+
+/// Subsystem traits and message definitions and the generated overseer.
+#[cfg(feature = "polkadot-node-subsystem")]
+pub use polkadot_node_subsystem;
+
+/// Subsystem traits and message definitions.
+#[cfg(feature = "polkadot-node-subsystem-types")]
+pub use polkadot_node_subsystem_types;
+
+/// Subsystem traits and message definitions.
+#[cfg(feature = "polkadot-node-subsystem-util")]
+pub use polkadot_node_subsystem_util;
+
+/// System overseer of the Polkadot node.
+#[cfg(feature = "polkadot-overseer")]
+pub use polkadot_overseer;
+
+/// Types and utilities for creating and working with parachains.
+#[cfg(feature = "polkadot-parachain-primitives")]
+pub use polkadot_parachain_primitives;
+
+/// Shared primitives used by Polkadot runtime.
+#[cfg(feature = "polkadot-primitives")]
+pub use polkadot_primitives;
+
+/// Polkadot specific RPC functionality.
+#[cfg(feature = "polkadot-rpc")]
+pub use polkadot_rpc;
+
+/// Pallets and constants used in Relay Chain networks.
+#[cfg(feature = "polkadot-runtime-common")]
+pub use polkadot_runtime_common;
+
+/// Runtime metric interface for the Polkadot node.
+#[cfg(feature = "polkadot-runtime-metrics")]
+pub use polkadot_runtime_metrics;
+
+/// Relay Chain runtime code responsible for Parachains.
+#[cfg(feature = "polkadot-runtime-parachains")]
+pub use polkadot_runtime_parachains;
+
+/// Experimental: The single package to get you started with building FRAME pallets and
+/// runtimes.
+#[cfg(feature = "polkadot-sdk-frame")]
+pub use polkadot_sdk_frame;
+
+/// Utils to tie different Polkadot components together and allow instantiation of a node.
+#[cfg(feature = "polkadot-service")]
+pub use polkadot_service;
+
+/// Statement Distribution Subsystem.
+#[cfg(feature = "polkadot-statement-distribution")]
+pub use polkadot_statement_distribution;
+
+/// Stores messages other authorities issue about candidates in Polkadot.
+#[cfg(feature = "polkadot-statement-table")]
+pub use polkadot_statement_table;
+
+/// Constants used throughout the Rococo network.
+#[cfg(feature = "rococo-runtime-constants")]
+pub use rococo_runtime_constants;
+
+/// Collection of allocator implementations.
+#[cfg(feature = "sc-allocator")]
+pub use sc_allocator;
+
+/// Substrate authority discovery.
+#[cfg(feature = "sc-authority-discovery")]
+pub use sc_authority_discovery;
+
+/// Basic implementation of block-authoring logic.
+#[cfg(feature = "sc-basic-authorship")]
+pub use sc_basic_authorship;
+
+/// Substrate block builder.
+#[cfg(feature = "sc-block-builder")]
+pub use sc_block_builder;
+
+/// Substrate chain configurations.
+#[cfg(feature = "sc-chain-spec")]
+pub use sc_chain_spec;
+
+/// Macros to derive chain spec extension traits implementation.
+#[cfg(feature = "sc-chain-spec-derive")]
+pub use sc_chain_spec_derive;
+
+/// Substrate CLI interface.
+#[cfg(feature = "sc-cli")]
+pub use sc_cli;
+
+/// Substrate client interfaces.
+#[cfg(feature = "sc-client-api")]
+pub use sc_client_api;
+
+/// Client backend that uses RocksDB database as storage.
+#[cfg(feature = "sc-client-db")]
+pub use sc_client_db;
+
+/// Collection of common consensus specific implementations for Substrate (client).
+#[cfg(feature = "sc-consensus")]
+pub use sc_consensus;
+
+/// Aura consensus algorithm for Substrate.
+#[cfg(feature = "sc-consensus-aura")]
+pub use sc_consensus_aura;
+
+/// BABE consensus algorithm for Substrate.
+#[cfg(feature = "sc-consensus-babe")]
+pub use sc_consensus_babe;
+
+/// RPC extensions for the BABE consensus algorithm.
+#[cfg(feature = "sc-consensus-babe-rpc")]
+pub use sc_consensus_babe_rpc;
+
+/// BEEFY Client gadget for Substrate.
+#[cfg(feature = "sc-consensus-beefy")]
+pub use sc_consensus_beefy;
+
+/// RPC for the BEEFY Client gadget for Substrate.
+#[cfg(feature = "sc-consensus-beefy-rpc")]
+pub use sc_consensus_beefy_rpc;
+
+/// Generic epochs-based utilities for consensus.
+#[cfg(feature = "sc-consensus-epochs")]
+pub use sc_consensus_epochs;
+
+/// Integration of the GRANDPA finality gadget into Substrate.
+#[cfg(feature = "sc-consensus-grandpa")]
+pub use sc_consensus_grandpa;
+
+/// RPC extensions for the GRANDPA finality gadget.
+#[cfg(feature = "sc-consensus-grandpa-rpc")]
+pub use sc_consensus_grandpa_rpc;
+
+/// Manual sealing engine for Substrate.
+#[cfg(feature = "sc-consensus-manual-seal")]
+pub use sc_consensus_manual_seal;
+
+/// PoW consensus algorithm for Substrate.
+#[cfg(feature = "sc-consensus-pow")]
+pub use sc_consensus_pow;
+
+/// Generic slots-based utilities for consensus.
+#[cfg(feature = "sc-consensus-slots")]
+pub use sc_consensus_slots;
+
+/// A crate that provides means of executing/dispatching calls into the runtime.
+#[cfg(feature = "sc-executor")]
+pub use sc_executor;
+
+/// A set of common definitions that are needed for defining execution engines.
+#[cfg(feature = "sc-executor-common")]
+pub use sc_executor_common;
+
+/// PolkaVM executor for Substrate.
+#[cfg(feature = "sc-executor-polkavm")]
+pub use sc_executor_polkavm;
+
+/// Defines a `WasmRuntime` that uses the Wasmtime JIT to execute.
+#[cfg(feature = "sc-executor-wasmtime")]
+pub use sc_executor_wasmtime;
+
+/// Substrate informant.
+#[cfg(feature = "sc-informant")]
+pub use sc_informant;
+
+/// Keystore (and session key management) for ed25519 based chains like Polkadot.
+#[cfg(feature = "sc-keystore")]
+pub use sc_keystore;
+
+/// Substrate mixnet service.
+#[cfg(feature = "sc-mixnet")]
+pub use sc_mixnet;
+
+/// Substrate network protocol.
+#[cfg(feature = "sc-network")]
+pub use sc_network;
+
+/// Substrate network common.
+#[cfg(feature = "sc-network-common")]
+pub use sc_network_common;
+
+/// Gossiping for the Substrate network protocol.
+#[cfg(feature = "sc-network-gossip")]
+pub use sc_network_gossip;
+
+/// Substrate light network protocol.
+#[cfg(feature = "sc-network-light")]
+pub use sc_network_light;
+
+/// Substrate statement protocol.
+#[cfg(feature = "sc-network-statement")]
+pub use sc_network_statement;
+
+/// Substrate sync network protocol.
+#[cfg(feature = "sc-network-sync")]
+pub use sc_network_sync;
+
+/// Substrate transaction protocol.
+#[cfg(feature = "sc-network-transactions")]
+pub use sc_network_transactions;
+
+/// Substrate network types.
+#[cfg(feature = "sc-network-types")]
+pub use sc_network_types;
+
+/// Substrate offchain workers.
+#[cfg(feature = "sc-offchain")]
+pub use sc_offchain;
+
+/// Basic metrics for block production.
+#[cfg(feature = "sc-proposer-metrics")]
+pub use sc_proposer_metrics;
+
+/// Substrate Client RPC.
+#[cfg(feature = "sc-rpc")]
+pub use sc_rpc;
+
+/// Substrate RPC interfaces.
+#[cfg(feature = "sc-rpc-api")]
+pub use sc_rpc_api;
+
+/// Substrate RPC servers.
+#[cfg(feature = "sc-rpc-server")]
+pub use sc_rpc_server;
+
+/// Substrate RPC interface v2.
+#[cfg(feature = "sc-rpc-spec-v2")]
+pub use sc_rpc_spec_v2;
+
+/// Substrate service. Starts a thread that spins up the network, client, and extrinsic pool.
+/// Manages communication between them.
+#[cfg(feature = "sc-service")]
+pub use sc_service;
+
+/// State database maintenance. Handles canonicalization and pruning in the database.
+#[cfg(feature = "sc-state-db")]
+pub use sc_state_db;
+
+/// Substrate statement store.
+#[cfg(feature = "sc-statement-store")]
+pub use sc_statement_store;
+
+/// Storage monitor service for Substrate.
+#[cfg(feature = "sc-storage-monitor")]
+pub use sc_storage_monitor;
+
+/// An RPC handler to create sync states for light clients.
+#[cfg(feature = "sc-sync-state-rpc")] +pub use sc_sync_state_rpc; + +/// A crate that provides basic hardware and software telemetry information. +#[cfg(feature = "sc-sysinfo")] +pub use sc_sysinfo; + +/// Telemetry utils. +#[cfg(feature = "sc-telemetry")] +pub use sc_telemetry; + +/// Instrumentation implementation for substrate. +#[cfg(feature = "sc-tracing")] +pub use sc_tracing; + +/// Helper macros for Substrate's client CLI. +#[cfg(feature = "sc-tracing-proc-macro")] +pub use sc_tracing_proc_macro; + +/// Substrate transaction pool implementation. +#[cfg(feature = "sc-transaction-pool")] +pub use sc_transaction_pool; + +/// Transaction pool client facing API. +#[cfg(feature = "sc-transaction-pool-api")] +pub use sc_transaction_pool_api; + +/// I/O for Substrate runtimes. +#[cfg(feature = "sc-utils")] +pub use sc_utils; + +/// Helper crate for generating slot ranges for the Polkadot runtime. +#[cfg(feature = "slot-range-helper")] +pub use slot_range_helper; + +/// Snowbridge Beacon Primitives. +#[cfg(feature = "snowbridge-beacon-primitives")] +pub use snowbridge_beacon_primitives; + +/// Snowbridge Core. +#[cfg(feature = "snowbridge-core")] +pub use snowbridge_core; + +/// Snowbridge Ethereum. +#[cfg(feature = "snowbridge-ethereum")] +pub use snowbridge_ethereum; + +/// Snowbridge Outbound Queue Merkle Tree. +#[cfg(feature = "snowbridge-outbound-queue-merkle-tree")] +pub use snowbridge_outbound_queue_merkle_tree; + +/// Snowbridge Outbound Queue Runtime API. +#[cfg(feature = "snowbridge-outbound-queue-runtime-api")] +pub use snowbridge_outbound_queue_runtime_api; + +/// Snowbridge Ethereum Client Pallet. +#[cfg(feature = "snowbridge-pallet-ethereum-client")] +pub use snowbridge_pallet_ethereum_client; + +/// Snowbridge Ethereum Client Test Fixtures. +#[cfg(feature = "snowbridge-pallet-ethereum-client-fixtures")] +pub use snowbridge_pallet_ethereum_client_fixtures; + +/// Snowbridge Inbound Queue Pallet. +#[cfg(feature = "snowbridge-pallet-inbound-queue")] +pub use snowbridge_pallet_inbound_queue; + +/// Snowbridge Inbound Queue Test Fixtures. +#[cfg(feature = "snowbridge-pallet-inbound-queue-fixtures")] +pub use snowbridge_pallet_inbound_queue_fixtures; + +/// Snowbridge Outbound Queue Pallet. +#[cfg(feature = "snowbridge-pallet-outbound-queue")] +pub use snowbridge_pallet_outbound_queue; + +/// Snowbridge System Pallet. +#[cfg(feature = "snowbridge-pallet-system")] +pub use snowbridge_pallet_system; + +/// Snowbridge Router Primitives. +#[cfg(feature = "snowbridge-router-primitives")] +pub use snowbridge_router_primitives; + +/// Snowbridge Runtime Common. +#[cfg(feature = "snowbridge-runtime-common")] +pub use snowbridge_runtime_common; + +/// Snowbridge Runtime Tests. +#[cfg(feature = "snowbridge-runtime-test-common")] +pub use snowbridge_runtime_test_common; + +/// Snowbridge System Runtime API. +#[cfg(feature = "snowbridge-system-runtime-api")] +pub use snowbridge_system_runtime_api; + +/// Substrate runtime api primitives. +#[cfg(feature = "sp-api")] +pub use sp_api; + +/// Macros for declaring and implementing runtime apis. +#[cfg(feature = "sp-api-proc-macro")] +pub use sp_api_proc_macro; + +/// Provides facilities for generating application specific crypto wrapper types. +#[cfg(feature = "sp-application-crypto")] +pub use sp_application_crypto; + +/// Minimal fixed point arithmetic primitives and types for runtime. +#[cfg(feature = "sp-arithmetic")] +pub use sp_arithmetic; + +/// Authority discovery primitives. 
+#[cfg(feature = "sp-authority-discovery")] +pub use sp_authority_discovery; + +/// The block builder runtime api. +#[cfg(feature = "sp-block-builder")] +pub use sp_block_builder; + +/// Substrate blockchain traits and primitives. +#[cfg(feature = "sp-blockchain")] +pub use sp_blockchain; + +/// Common utilities for building and using consensus engines in substrate. +#[cfg(feature = "sp-consensus")] +pub use sp_consensus; + +/// Primitives for Aura consensus. +#[cfg(feature = "sp-consensus-aura")] +pub use sp_consensus_aura; + +/// Primitives for BABE consensus. +#[cfg(feature = "sp-consensus-babe")] +pub use sp_consensus_babe; + +/// Primitives for BEEFY protocol. +#[cfg(feature = "sp-consensus-beefy")] +pub use sp_consensus_beefy; + +/// Primitives for GRANDPA integration, suitable for WASM compilation. +#[cfg(feature = "sp-consensus-grandpa")] +pub use sp_consensus_grandpa; + +/// Primitives for Aura consensus. +#[cfg(feature = "sp-consensus-pow")] +pub use sp_consensus_pow; + +/// Primitives for slots-based consensus. +#[cfg(feature = "sp-consensus-slots")] +pub use sp_consensus_slots; + +/// Shareable Substrate types. +#[cfg(feature = "sp-core")] +pub use sp_core; + +/// Hashing primitives (deprecated: use sp-crypto-hashing for new applications). +#[cfg(feature = "sp-core-hashing")] +pub use sp_core_hashing; + +/// Procedural macros for calculating static hashes (deprecated in favor of +/// `sp-crypto-hashing-proc-macro`). +#[cfg(feature = "sp-core-hashing-proc-macro")] +pub use sp_core_hashing_proc_macro; + +/// Host functions for common Arkworks elliptic curve operations. +#[cfg(feature = "sp-crypto-ec-utils")] +pub use sp_crypto_ec_utils; + +/// Hashing primitives. +#[cfg(feature = "sp-crypto-hashing")] +pub use sp_crypto_hashing; + +/// Procedural macros for calculating static hashes. +#[cfg(feature = "sp-crypto-hashing-proc-macro")] +pub use sp_crypto_hashing_proc_macro; + +/// Substrate database trait. +#[cfg(feature = "sp-database")] +pub use sp_database; + +/// Macros to derive runtime debug implementation. +#[cfg(feature = "sp-debug-derive")] +pub use sp_debug_derive; + +/// Substrate externalities abstraction. +#[cfg(feature = "sp-externalities")] +pub use sp_externalities; + +/// Substrate RuntimeGenesisConfig builder API. +#[cfg(feature = "sp-genesis-builder")] +pub use sp_genesis_builder; + +/// Provides types and traits for creating and checking inherents. +#[cfg(feature = "sp-inherents")] +pub use sp_inherents; + +/// I/O for Substrate runtimes. +#[cfg(feature = "sp-io")] +pub use sp_io; + +/// Keyring support code for the runtime. A set of test accounts. +#[cfg(feature = "sp-keyring")] +pub use sp_keyring; + +/// Keystore primitives. +#[cfg(feature = "sp-keystore")] +pub use sp_keystore; + +/// Handling of blobs, usually Wasm code, which may be compressed. +#[cfg(feature = "sp-maybe-compressed-blob")] +pub use sp_maybe_compressed_blob; + +/// Intermediate representation of the runtime metadata. +#[cfg(feature = "sp-metadata-ir")] +pub use sp_metadata_ir; + +/// Substrate mixnet types and runtime interface. +#[cfg(feature = "sp-mixnet")] +pub use sp_mixnet; + +/// Merkle Mountain Range primitives. +#[cfg(feature = "sp-mmr-primitives")] +pub use sp_mmr_primitives; + +/// NPoS election algorithm primitives. +#[cfg(feature = "sp-npos-elections")] +pub use sp_npos_elections; + +/// Substrate offchain workers primitives. +#[cfg(feature = "sp-offchain")] +pub use sp_offchain; + +/// Custom panic hook with bug report link. 
+#[cfg(feature = "sp-panic-handler")] +pub use sp_panic_handler; + +/// Substrate RPC primitives and utilities. +#[cfg(feature = "sp-rpc")] +pub use sp_rpc; + +/// Runtime Modules shared primitive types. +#[cfg(feature = "sp-runtime")] +pub use sp_runtime; + +/// Substrate runtime interface. +#[cfg(feature = "sp-runtime-interface")] +pub use sp_runtime_interface; + +/// This crate provides procedural macros for usage within the context of the Substrate runtime +/// interface. +#[cfg(feature = "sp-runtime-interface-proc-macro")] +pub use sp_runtime_interface_proc_macro; + +/// Primitives for sessions. +#[cfg(feature = "sp-session")] +pub use sp_session; + +/// A crate which contains primitives that are useful for implementation that uses staking +/// approaches in general. Definitions related to sessions, slashing, etc go here. +#[cfg(feature = "sp-staking")] +pub use sp_staking; + +/// Substrate State Machine. +#[cfg(feature = "sp-state-machine")] +pub use sp_state_machine; + +/// A crate which contains primitives related to the statement store. +#[cfg(feature = "sp-statement-store")] +pub use sp_statement_store; + +/// Lowest-abstraction level for the Substrate runtime: just exports useful primitives from std +/// or client/alloc to be used with any code that depends on the runtime. +#[cfg(feature = "sp-std")] +pub use sp_std; + +/// Storage related primitives. +#[cfg(feature = "sp-storage")] +pub use sp_storage; + +/// Substrate core types and inherents for timestamps. +#[cfg(feature = "sp-timestamp")] +pub use sp_timestamp; + +/// Instrumentation primitives and macros for Substrate. +#[cfg(feature = "sp-tracing")] +pub use sp_tracing; + +/// Transaction pool runtime facing API. +#[cfg(feature = "sp-transaction-pool")] +pub use sp_transaction_pool; + +/// Transaction storage proof primitives. +#[cfg(feature = "sp-transaction-storage-proof")] +pub use sp_transaction_storage_proof; + +/// Patricia trie stuff using a parity-scale-codec node format. +#[cfg(feature = "sp-trie")] +pub use sp_trie; + +/// Version module for the Substrate runtime; Provides a function that returns the runtime +/// version. +#[cfg(feature = "sp-version")] +pub use sp_version; + +/// Macro for defining a runtime version. +#[cfg(feature = "sp-version-proc-macro")] +pub use sp_version_proc_macro; + +/// Types and traits for interfacing between the host and the wasm runtime. +#[cfg(feature = "sp-wasm-interface")] +pub use sp_wasm_interface; + +/// Types and traits for interfacing between the host and the wasm runtime. +#[cfg(feature = "sp-weights")] +pub use sp_weights; + +/// Utility for building chain-specification files for Substrate-based runtimes based on +/// `sp-genesis-builder`. +#[cfg(feature = "staging-chain-spec-builder")] +pub use staging_chain_spec_builder; + +/// Substrate node block inspection tool. +#[cfg(feature = "staging-node-inspect")] +pub use staging_node_inspect; + +/// Pallet to store the parachain ID. +#[cfg(feature = "staging-parachain-info")] +pub use staging_parachain_info; + +/// Tracking allocator to control the amount of memory consumed by the process. +#[cfg(feature = "staging-tracking-allocator")] +pub use staging_tracking_allocator; + +/// The basic XCM datastructures. +#[cfg(feature = "staging-xcm")] +pub use staging_xcm; + +/// Tools & types for building with XCM and its executor. +#[cfg(feature = "staging-xcm-builder")] +pub use staging_xcm_builder; + +/// An abstract and configurable XCM message executor. 
+#[cfg(feature = "staging-xcm-executor")] +pub use staging_xcm_executor; + +/// Generate and restore keys for Substrate based chains such as Polkadot, Kusama and a growing +/// number of parachains and Substrate based projects. +#[cfg(feature = "subkey")] +pub use subkey; + +/// Converting BIP39 entropy to valid Substrate (sr25519) SecretKeys. +#[cfg(feature = "substrate-bip39")] +pub use substrate_bip39; + +/// Crate with utility functions for `build.rs` scripts. +#[cfg(feature = "substrate-build-script-utils")] +pub use substrate_build_script_utils; + +/// Substrate RPC for FRAME's support. +#[cfg(feature = "substrate-frame-rpc-support")] +pub use substrate_frame_rpc_support; + +/// FRAME's system exposed over Substrate RPC. +#[cfg(feature = "substrate-frame-rpc-system")] +pub use substrate_frame_rpc_system; + +/// Endpoint to expose Prometheus metrics. +#[cfg(feature = "substrate-prometheus-endpoint")] +pub use substrate_prometheus_endpoint; + +/// Shared JSON-RPC client. +#[cfg(feature = "substrate-rpc-client")] +pub use substrate_rpc_client; + +/// Node-specific RPC methods for interaction with state trie migration. +#[cfg(feature = "substrate-state-trie-migration-rpc")] +pub use substrate_state_trie_migration_rpc; + +/// Utility for building WASM binaries. +#[cfg(feature = "substrate-wasm-builder")] +pub use substrate_wasm_builder; + +/// Common constants for Testnet Parachains runtimes. +#[cfg(feature = "testnet-parachains-constants")] +pub use testnet_parachains_constants; + +/// Stick logs together with the TraceID as provided by tempo. +#[cfg(feature = "tracing-gum")] +pub use tracing_gum; + +/// Generate an overseer including builder pattern and message wrapper from a single annotated +/// struct definition. +#[cfg(feature = "tracing-gum-proc-macro")] +pub use tracing_gum_proc_macro; + +/// Constants used throughout the Westend network. +#[cfg(feature = "westend-runtime-constants")] +pub use westend_runtime_constants; + +/// Test kit to emulate XCM program execution. +#[cfg(feature = "xcm-emulator")] +pub use xcm_emulator; + +/// XCM fee payment runtime API. +#[cfg(feature = "xcm-fee-payment-runtime-api")] +pub use xcm_fee_payment_runtime_api; + +/// Procedural macros for XCM. +#[cfg(feature = "xcm-procedural")] +pub use xcm_procedural; + +/// Test kit to simulate cross-chain message passing and XCM execution. +#[cfg(feature = "xcm-simulator")] +pub use xcm_simulator;