diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh
index d2a4baf12fa739bb21f9978255919ef6ec552ad5..56d0371d678ec42182e6cb84940303759ff38984 100755
--- a/.github/scripts/common/lib.sh
+++ b/.github/scripts/common/lib.sh
@@ -405,14 +405,10 @@ function find_runtimes() {
 # output: none
 filter_version_from_input() {
   version=$1
-  regex="(^v[0-9]+\.[0-9]+\.[0-9]+)$|(^v[0-9]+\.[0-9]+\.[0-9]+-rc[0-9]+)$"
+  regex="^(v)?[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$"

   if [[ $version =~ $regex ]]; then
-    if [ -n "${BASH_REMATCH[1]}" ]; then
-      echo "${BASH_REMATCH[1]}"
-    elif [ -n "${BASH_REMATCH[2]}" ]; then
-      echo "${BASH_REMATCH[2]}"
-    fi
+    echo $version
   else
     echo "Invalid version: $version"
     exit 1
@@ -462,7 +458,7 @@ function get_polkadot_node_version_from_code() {
 validate_stable_tag() {
   tag="$1"
-  pattern="^stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)?$"
+  pattern="^(polkadot-)?stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)?$"

   if [[ $tag =~ $pattern ]]; then
     echo $tag
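A quick illustrative check of the two relaxed patterns above. The sample tags are hypothetical and this snippet is not part of the patch; it is only a sketch of how the new expressions behave under bash's =~ operator:

    #!/usr/bin/env bash
    # New lib.sh patterns: the "v" prefix is now optional for versions, and the
    # "polkadot-" prefix is accepted for stable tags.
    version_regex="^(v)?[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$"
    stable_regex="^(polkadot-)?stable[0-9]{4}(-[0-9]+)?(-rc[0-9]+)?$"
    for v in v1.16.0 1.16.0 1.16.0-rc2; do
      [[ $v =~ $version_regex ]] && echo "version ok: $v"
    done
    for t in stable2409 stable2409-1 polkadot-stable2409-rc1; do
      [[ $t =~ $stable_regex ]] && echo "stable tag ok: $t"
    done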
diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py
index edcdb82cd22ea4e0aeb25114defd2cdbcc2b15b7..780fa0012976681adf1d2f75480ccc4b50f4ae4b 100644
--- a/.github/scripts/generate-prdoc.py
+++ b/.github/scripts/generate-prdoc.py
@@ -122,7 +122,7 @@ def setup_parser(parser=None, pr_required=True):
     return parser

 def snake_to_title(s):
-    return ' '.join(word.capitalize() for word in s.split('_'))
+    return ' '.join(word.capitalize() for word in s.split('_'))

 def main(args):
     print(f"Args: {args}, force: {args.force}")
@@ -130,6 +130,8 @@ def main(args):
     try:
         # Convert snake_case audience arguments to title case
         mapped_audiences = [snake_to_title(a) for a in args.audience]
+        if len(mapped_audiences) == 1:
+            mapped_audiences = mapped_audiences[0]
         from_pr_number(args.pr, mapped_audiences, args.bump, args.force)
         return 0
     except Exception as e:
diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml
index baf8cd5b155587b5797c6a7da65f29aa8b14b8f2..3bbb9baba46eb7c19b19c9855a70745fe63183a8 100644
--- a/.github/workflows/check-links.yml
+++ b/.github/workflows/check-links.yml
@@ -33,7 +33,7 @@ jobs:
       - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.0 (22. Sep 2023)

       - name: Lychee link checker
-        uses: lycheeverse/lychee-action@2b973e86fc7b1f6b36a93795fe2c9c6ae1118621 # for v1.9.1 (10. Jan 2024)
+        uses: lycheeverse/lychee-action@2bb232618be239862e31382c5c0eaeba12e5e966 # for v1.9.1 (10. Jan 2024)
         with:
           args: >-
             --config .config/lychee.toml
diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml
index 24050d9e0981bbb57bb8c067932bfa62e6e4098b..65f3339b7ac71ad3790b69734bc0540d98b0bbc8 100644
--- a/.github/workflows/check-semver.yml
+++ b/.github/workflows/check-semver.yml
@@ -62,7 +62,7 @@ jobs:
           echo "PRDOC_EXTRA_ARGS=--max-bump minor" >> $GITHUB_ENV

       - name: Rust Cache
-        uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
+        uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
         with:
           cache-on-failure: true
diff --git a/.github/workflows/fork-sync-action.yml b/.github/workflows/fork-sync-action.yml
index 065226764be680e47549b18026fbfb64556d5611..50774e910527433a5ced86b86fe586de4e2ec9af 100644
--- a/.github/workflows/fork-sync-action.yml
+++ b/.github/workflows/fork-sync-action.yml
@@ -12,7 +12,7 @@ on:

 jobs:
   job_sync_branches:
-    uses: paritytech-release/sync-workflows/.github/workflows/sync-with-upstream.yml@latest
+    uses: paritytech-release/sync-workflows/.github/workflows/sync-with-upstream.yml@main
     with:
       fork_writer_app_id: ${{ vars.UPSTREAM_CONTENT_SYNC_APP_ID}}
       fork_owner: ${{ vars.RELEASE_ORG}}
diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml
index a5af041185728a3bba5bb9379d105a44efe46b29..3fad3b64147422bb842ab40985d224458f283f34 100644
--- a/.github/workflows/publish-check-crates.yml
+++ b/.github/workflows/publish-check-crates.yml
@@ -19,7 +19,7 @@ jobs:
       - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7

       - name: Rust Cache
-        uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
+        uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
         with:
           cache-on-failure: true
diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml
index f9bc6ce4daeaa8b3c2f4ce9341fe03d5f9247188..37bf06bb82d86e82f33108ecb1da73083e09794d 100644
--- a/.github/workflows/publish-claim-crates.yml
+++ b/.github/workflows/publish-claim-crates.yml
@@ -13,7 +13,7 @@ jobs:
       - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7

       - name: Rust Cache
-        uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3
+        uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
         with:
           cache-on-failure: true
diff --git a/.github/workflows/release-10_rc-automation.yml b/.github/workflows/release-10_rc-automation.yml
index 195c14dbd5ab9a102787221f19cecc6319bf46af..41783f6cc721b10cf69d66210223e2e746d2ab40 100644
--- a/.github/workflows/release-10_rc-automation.yml
+++ b/.github/workflows/release-10_rc-automation.yml
@@ -12,7 +12,7 @@ on:
   workflow_dispatch:
     inputs:
       version:
-        description: Current release/rc version in format vX.X.X
+        description: Current release/rc version in format polkadot-stableYYMM

 jobs:
   tag_rc:
@@ -41,7 +41,7 @@ jobs:
           if [[ -z "${{ inputs.version }}" ]]; then
             version=v$(get_polkadot_node_version_from_code)
           else
-            version=$(filter_version_from_input ${{ inputs.version }})
+            version=$(validate_stable_tag ${{ inputs.version }})
           fi
           echo "$version"
           echo "version=$version" >> $GITHUB_OUTPUT
diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml
index dd6a111d67e815077c0764368a63d29caf93a46b..2a5d92ed167ae4f7be75802c4d8b6c0d3aa8893f 100644
--- a/.github/workflows/release-30_publish_release_draft.yml
+++ b/.github/workflows/release-30_publish_release_draft.yml
@@ -5,6 +5,7 @@ on:
     tags:
       # Catches v1.2.3 and v1.2.3-rc1
       - v[0-9]+.[0-9]+.[0-9]+*
+      # - polkadot-stable[0-9]+* Activate when the release process from release org is setteled

   workflow_dispatch:
     inputs:
@@ -25,7 +26,7 @@ jobs:
   build-runtimes:
     uses: "./.github/workflows/release-srtool.yml"
     with:
-      excluded_runtimes: "substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template"
+      excluded_runtimes: "asset-hub-rococo bridge-hub-rococo contracts-rococo coretime-rococo people-rococo rococo rococo-parachain substrate-test bp cumulus-test kitchensink minimal-template parachain-template penpal polkadot-test seedling shell frame-try sp solochain-template"
       build_opts: "--features on-chain-release-build"

   build-binaries:
@@ -79,30 +80,27 @@
        env:
          RUSTC_STABLE: ${{ needs.get-rust-versions.outputs.rustc-stable }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-         ASSET_HUB_ROCOCO_DIGEST: ${{ github.workspace}}/asset-hub-rococo-runtime/asset-hub-rococo-srtool-digest.json
          ASSET_HUB_WESTEND_DIGEST: ${{ github.workspace}}/asset-hub-westend-runtime/asset-hub-westend-srtool-digest.json
-         BRIDGE_HUB_ROCOCO_DIGEST: ${{ github.workspace}}/bridge-hub-rococo-runtime/bridge-hub-rococo-srtool-digest.json
          BRIDGE_HUB_WESTEND_DIGEST: ${{ github.workspace}}/bridge-hub-westend-runtime/bridge-hub-westend-srtool-digest.json
          COLLECTIVES_WESTEND_DIGEST: ${{ github.workspace}}/collectives-westend-runtime/collectives-westend-srtool-digest.json
-         CONTRACTS_ROCOCO_DIGEST: ${{ github.workspace}}/contracts-rococo-runtime/contracts-rococo-srtool-digest.json
-         CORETIME_ROCOCO_DIGEST: ${{ github.workspace}}/coretime-rococo-runtime/coretime-rococo-srtool-digest.json
          CORETIME_WESTEND_DIGEST: ${{ github.workspace}}/coretime-westend-runtime/coretime-westend-srtool-digest.json
          GLUTTON_WESTEND_DIGEST: ${{ github.workspace}}/glutton-westend-runtime/glutton-westend-srtool-digest.json
-         PEOPLE_ROCOCO_DIGEST: ${{ github.workspace}}/people-rococo-runtime/people-rococo-srtool-digest.json
          PEOPLE_WESTEND_DIGEST: ${{ github.workspace}}/people-westend-runtime/people-westend-srtool-digest.json
-         ROCOCO_DIGEST: ${{ github.workspace}}/rococo-runtime/rococo-srtool-digest.json
          WESTEND_DIGEST: ${{ github.workspace}}/westend-runtime/westend-srtool-digest.json
+       shell: bash
        run: |
          . ./.github/scripts/common/lib.sh

          export REF1=$(get_latest_release_tag)
          if [[ -z "${{ inputs.version }}" ]]; then
            export REF2="${{ github.ref_name }}"
+           echo "REF2: ${REF2}"
          else
            export REF2="${{ inputs.version }}"
+           echo "REF2: ${REF2}"
          fi
          echo "REL_TAG=$REF2" >> $GITHUB_ENV
-         export VERSION=$(echo "$REF2" | sed -E 's/^v([0-9]+\.[0-9]+\.[0-9]+).*$/\1/')
+         export VERSION=$(echo "$REF2" | sed -E 's/.*(stable[0-9]+).*$/\1/')

          ./scripts/release/build-changelogs.sh
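For context on the changed VERSION extraction above, a small hedged sketch (the tag values are made up, not taken from the patch): the old sed pulled X.Y.Z out of v-prefixed tags, while the new one pulls the stableYYMM component out of whichever tag triggered the draft.

    # Assumed example tags, shown only to illustrate the new sed expression:
    echo "polkadot-stable2409-rc1" | sed -E 's/.*(stable[0-9]+).*$/\1/'   # prints: stable2409
    echo "polkadot-stable2409-1"   | sed -E 's/.*(stable[0-9]+).*$/\1/'   # prints: stable2409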
diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml
index dacfcc5995b742f4233ac7150cd5f581dd4fc88c..6e0e8f20aa5ee3281c9e6e15368edbb88ad7d767 100644
--- a/.github/workflows/release-50_publish-docker.yml
+++ b/.github/workflows/release-50_publish-docker.yml
@@ -194,14 +194,12 @@ jobs:
        run: |
          . ./.github/scripts/common/lib.sh

-         release="${{ needs.validate-inputs.outputs.stable_tag }}" && \
-         echo "release=${release}" >> $GITHUB_OUTPUT
+         echo "release=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT

          commit=$(git rev-parse --short HEAD) && \
          echo "commit=${commit}" >> $GITHUB_OUTPUT

-         tag="${{ needs.validate-inputs.outputs.version }}" && \
-         echo "tag=${tag}" >> $GITHUB_OUTPUT
+         echo "tag=${{ needs.validate-inputs.outputs.version }}" >> $GITHUB_OUTPUT

      - name: Fetch release tags
        working-directory: release-artifacts
@@ -324,7 +322,7 @@ jobs:
        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0

      - name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
+       uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1

      - name: Cache Docker layers
        uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
@@ -348,7 +346,7 @@ jobs:

      - name: Build and push
        id: docker_build
-       uses: docker/build-push-action@32945a339266b759abcbdc89316275140b0fc960 # v6.8.0
+       uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0
        with:
          push: true
          file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile
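The step-output simplification above leans on standard GitHub Actions behaviour; a minimal sketch with a hypothetical value:

    # Both the old two-step form and the new direct echo append the same
    # key=value line to the $GITHUB_OUTPUT file, which later steps read
    # back as steps.<step-id>.outputs.release.
    release="stable2409-rc1" && \
    echo "release=${release}" >> "$GITHUB_OUTPUT"      # previous form
    echo "release=stable2409-rc1" >> "$GITHUB_OUTPUT"  # new direct form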
diff --git a/.github/workflows/release-branchoff-stable.yml b/.github/workflows/release-branchoff-stable.yml
index c4c50f5398e819fde478f333d00b10b176a7d507..3086c0d21f42fe3006d10bd4265be5e366278215 100644
--- a/.github/workflows/release-branchoff-stable.yml
+++ b/.github/workflows/release-branchoff-stable.yml
@@ -9,16 +9,17 @@ on:
         type: string

       node_version:
-        description: Version of the polkadot node in the format vX.XX.X (e.g. 1.15.0)
+        description: Version of the polkadot node in the format X.XX.X (e.g. 1.15.0)
         required: true

 jobs:
-  # TODO: Activate this job when the pipeline is moved to the fork in the `paritytech-release` org
-  # check-workflow-can-run:
-  #   uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@latest
+  check-workflow-can-run:
+    uses: paritytech-release/sync-workflows/.github/workflows/check-syncronization.yml@main

   prepare-tooling:
+    needs: [check-workflow-can-run]
+    if: needs.check-workflow-can-run.outputs.checks_passed == 'true'
     runs-on: ubuntu-latest
     outputs:
       node_version: ${{ steps.validate_inputs.outputs.node_version }}
@@ -40,11 +41,9 @@ jobs:
           echo "stable_version=${stable_version}" >> $GITHUB_OUTPUT

   create-stable-branch:
-    # needs: [check-workflow-can-run, prepare-tooling]
     needs: [prepare-tooling]
-    # if: needs.check-workflow-can-run.outputs.checks_passed == 'true'
     runs-on: ubuntu-latest
-
+    environment: release
     env:
       PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }}
       PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }}
@@ -100,6 +99,6 @@ jobs:
           # Set new version for polkadot-parachain binary to match the polkadot node binary
           # set_polkadot_parachain_binary_version $NODE_VERSION "cumulus/polkadot-parachain/Cargo.toml"

-          reorder_prdocs $NODE_VERSION
+          reorder_prdocs $STABLE_BRANCH_NAME

           git push origin "$STABLE_BRANCH_NAME"
diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml
index 2aee9dc995b18eac0b2025b7e45da8b814910bd2..ae6c430b6d376d6d5e57d577ad77123ae156050e 100644
--- a/.github/workflows/release-reusable-rc-buid.yml
+++ b/.github/workflows/release-reusable-rc-buid.yml
@@ -183,7 +183,7 @@ jobs:
     needs: [build-rc]
     uses: ./.github/workflows/release-reusable-s3-upload.yml
     with:
-      package: ${{ inputs.binary }}
+      package: polkadot-parachain
       release_tag: ${{ inputs.release_tag }}
     secrets:
       AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
diff --git a/.github/workflows/release-srtool.yml b/.github/workflows/release-srtool.yml
index 83119dd4ed24363172c547b394f8a7d8e305e32b..9a29b46d2fc3290495226ec09d3320ed49a07f53 100644
--- a/.github/workflows/release-srtool.yml
+++ b/.github/workflows/release-srtool.yml
@@ -39,7 +39,8 @@ jobs:
           sudo dpkg -i toml.deb
           toml --version; jq --version

-      - name: Scan runtimes
+      - name: Scan and get runtimes list
+        id: get_runtimes_list
        env:
          EXCLUDED_RUNTIMES: ${{ inputs.excluded_runtimes }}:"substrate-test"
        run: |
@@ -51,13 +52,6 @@ jobs:

           MATRIX=$(find_runtimes | tee runtimes_list.json)
           echo $MATRIX
-
-      - name: Get runtimes list
-        id: get_runtimes_list
-        run: |
-          ls -al
-          MATRIX=$(cat runtimes_list.json)
-          echo $MATRIX
           echo "runtime=$MATRIX" >> $GITHUB_OUTPUT

   srtool:
diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml
index b51f2c249d848d6a460deb6c878b6018eb560476..cca32650b106056527297d594cd118ceeece74ca 100644
--- a/.github/workflows/tests-misc.yml
+++ b/.github/workflows/tests-misc.yml
@@ -348,7 +348,7 @@ jobs:
      - name: Set up Homebrew
        uses: Homebrew/actions/setup-homebrew@1ccc07ccd54b6048295516a3eb89b192c35057dc # master from 12.09.2024
      - name: Install rust ${{ env.RUST_VERSION }}
-       uses: actions-rust-lang/setup-rust-toolchain@4d1965c9142484e48d40c19de54b5cba84953a06 # v1.10.0
+       uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1
        with:
          cache: false
          toolchain: ${{ env.RUST_VERSION }}
diff --git a/.gitignore b/.gitignore
index 0263626d832df2633a3c5fc9db18268c2cc9953b..afa9ed33f4a0369b47c7a5c519965249cb3f6859 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,3 +40,5 @@ rls*.log
 runtime/wasm/target/
 substrate.code-workspace
 target/
+*.scale
+justfile
diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml
index 1589fccc972efac9a2c19da88dd34bd2a708c480..08bfed2e24ce903cb265be034bce6b51040d193b 100644
--- a/.gitlab/pipeline/zombienet.yml
+++ b/.gitlab/pipeline/zombienet.yml
@@ -1,7 +1,7 @@
 .zombienet-refs:
   extends: .build-refs
   variables:
-    ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.115"
+    ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.116"
     PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics"
     DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs"
     ZOMBIE_PROVIDER: "k8s"
diff --git a/.gitlab/pipeline/zombienet/bridges.yml b/.gitlab/pipeline/zombienet/bridges.yml
index 070bfc8472d5ab797141862bd0d0066d420bc922..07711e32a9a3fc99275a586740a5d90ea8c31b44 100644
--- a/.gitlab/pipeline/zombienet/bridges.yml
+++ b/.gitlab/pipeline/zombienet/bridges.yml
@@ -11,7 +11,7 @@
     - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/
       variables:
         DOCKER_IMAGES_VERSION: ${CI_COMMIT_SHORT_SHA}
-    - !reference [.build-refs, rules]
+    - !reference [ .build-refs, rules ]
   before_script:
     - echo "Zombienet Tests Config"
     - echo "${ZOMBIENET_IMAGE}"
diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index d17380839942a1243dbc9b8d90768b07b05c5415..9a907d8d99462705730f5f1ccb91e5264479e357 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -158,7 +158,7 @@ zombienet-polkadot-functional-0010-validator-disabling:
       --local-dir="${LOCAL_DIR}/functional"
       --test="0010-validator-disabling.zndsl"

-zombienet-polkadot-functional-0011-async-backing-6-seconds-rate:
+.zombienet-polkadot-functional-0011-async-backing-6-seconds-rate:
   extends:
     - .zombienet-polkadot-common
   script:
@@ -233,6 +233,25 @@ zombienet-polkadot-functional-0016-approval-voting-parallel:
       --local-dir="${LOCAL_DIR}/functional"
       --test="0016-approval-voting-parallel.zndsl"

+zombienet-polkadot-functional-0017-sync-backing:
+  extends:
+    - .zombienet-polkadot-common
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --local-dir="${LOCAL_DIR}/functional"
+      --test="0017-sync-backing.zndsl"
+
+zombienet-polkadot-functional-0018-shared-core-idle-parachain:
+  extends:
+    - .zombienet-polkadot-common
+  before_script:
+    - !reference [ .zombienet-polkadot-common, before_script ]
+    - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional
+  script:
+    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+      --local-dir="${LOCAL_DIR}/functional"
+      --test="0018-shared-core-idle-parachain.zndsl"
+
 zombienet-polkadot-smoke-0001-parachains-smoke-test:
   extends:
     - .zombienet-polkadot-common
diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml
index 030a0a3f50a9f5bb1587e71df6d3a57c4b9b8ee8..52118307e6a03696c6a972ff44c483c398e254ee 100644
--- a/.gitlab/pipeline/zombienet/substrate.yml
+++ b/.gitlab/pipeline/zombienet/substrate.yml
@@ -70,11 +70,11 @@ zombienet-substrate-0001-basic-warp-sync:
       --local-dir="${LOCAL_DIR}/0001-basic-warp-sync"
       --test="test-warp-sync.zndsl"

-.zombienet-substrate-0002-validators-warp-sync:
+zombienet-substrate-0002-validators-warp-sync:
   extends:
     - .zombienet-substrate-warp-sync-common
   before_script:
-    - !reference [.zombienet-substrate-warp-sync-common, before_script]
+    - !reference [ .zombienet-substrate-warp-sync-common, before_script ]
     - cp --remove-destination ${LOCAL_DIR}/0001-basic-warp-sync/chain-spec.json ${LOCAL_DIR}/0002-validators-warp-sync
   script:
     - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
@@ -85,7 +85,7 @@ zombienet-substrate-0003-block-building-warp-sync:
   extends:
     - .zombienet-substrate-warp-sync-common
   before_script:
-    - !reference [.zombienet-substrate-warp-sync-common, before_script]
+    - !reference [ .zombienet-substrate-warp-sync-common, before_script ]
     - cp --remove-destination ${LOCAL_DIR}/0001-basic-warp-sync/chain-spec.json ${LOCAL_DIR}/0003-block-building-warp-sync
   script:
     - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
diff --git a/Cargo.lock b/Cargo.lock
index 74559ed32593da9801948edc0c185d77d8e0ac26..1b3869258a9d9351599a2f21e439f0f0d0c9266f 100644
--- a/Cargo.lock
+++ b/Cargo.lock @@ -168,7 +168,7 @@ dependencies = [ "proc-macro-error", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", "syn-solidity", "tiny-keccak", ] @@ -295,7 +295,7 @@ dependencies = [ "proc-macro-error", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -753,7 +753,7 @@ checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", "synstructure 0.13.1", ] @@ -776,7 +776,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -1022,6 +1022,7 @@ dependencies = [ "pallet-nfts", "pallet-nfts-runtime-api", "pallet-proxy", + "pallet-revive", "pallet-session", "pallet-state-trie-migration", "pallet-timestamp", @@ -1234,7 +1235,7 @@ dependencies = [ "futures-lite 2.3.0", "parking", "polling 3.4.0", - "rustix 0.38.21", + "rustix 0.38.25", "slab", "tracing", "windows-sys 0.52.0", @@ -1316,7 +1317,7 @@ dependencies = [ "cfg-if", "event-listener 5.2.0", "futures-lite 2.3.0", - "rustix 0.38.21", + "rustix 0.38.25", "tracing", ] @@ -1332,7 +1333,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.21", + "rustix 0.38.25", "signal-hook-registry", "slab", "windows-sys 0.52.0", @@ -1384,7 +1385,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -1401,7 +1402,7 @@ checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -1616,7 +1617,23 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.79", + "syn 2.0.82", +] + +[[package]] +name = "bip32" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa13fae8b6255872fd86f7faf4b41168661d7d78609f7bfe6771b85c6739a15b" +dependencies = [ + "bs58", + "hmac 0.12.1", + "k256", + "rand_core 0.6.4", + "ripemd", + "sha2 0.10.8", + "subtle 2.5.0", + "zeroize", ] [[package]] @@ -2559,6 +2576,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-trie 29.0.0", + "sp-weights 27.0.0", "staging-xcm", "static_assertions", "tuplex", @@ -2570,6 +2588,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ + "sha2 0.10.8", "tinyvec", ] @@ -2805,6 +2824,7 @@ name = "chain-spec-guide-runtime" version = "0.0.0" dependencies = [ "docify", + "frame-support", "pallet-balances", "pallet-sudo", "pallet-timestamp", @@ -3039,7 +3059,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -4397,7 +4417,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -4554,6 +4574,7 @@ dependencies = [ "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", "docify", + "frame-benchmarking", "frame-support", "frame-system", "log", @@ -4949,7 +4970,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -4989,7 +5010,7 @@ dependencies = [ "proc-macro2 
1.0.86", "quote 1.0.37", "scratch", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5006,7 +5027,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5054,7 +5075,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "strsim 0.11.1", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5076,7 +5097,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5193,7 +5214,7 @@ checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5204,7 +5225,7 @@ checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5215,7 +5236,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5323,7 +5344,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5384,7 +5405,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "regex", - "syn 2.0.79", + "syn 2.0.82", "termcolor", "toml 0.8.12", "walkdir", @@ -5616,7 +5637,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5636,7 +5657,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5647,7 +5668,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5862,7 +5883,7 @@ dependencies = [ "prettyplease", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -5934,7 +5955,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -6277,7 +6298,7 @@ dependencies = [ "quote 1.0.37", "scale-info", "sp-arithmetic 23.0.0", - "syn 2.0.79", + "syn 2.0.82", "trybuild", ] @@ -6365,6 +6386,7 @@ name = "frame-metadata-hash-extension" version = "0.1.0" dependencies = [ "array-bytes", + "const-hex", "docify", "frame-metadata 16.0.0", "frame-support", @@ -6421,6 +6443,7 @@ dependencies = [ name = "frame-support" version = "28.0.0" dependencies = [ + "Inflector", "aquamarine", "array-bytes", "assert_matches", @@ -6491,7 +6514,7 @@ dependencies = [ "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "static_assertions", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -6502,7 +6525,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -6511,7 +6534,7 @@ version = "11.0.0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -6654,7 +6677,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ - "rustix 0.38.21", + 
"rustix 0.38.25", "windows-sys 0.48.0", ] @@ -6765,7 +6788,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -6795,6 +6818,10 @@ name = "futures-timer" version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" +dependencies = [ + "gloo-timers", + "send_wrapper 0.4.0", +] [[package]] name = "futures-util" @@ -6930,6 +6957,27 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9985c9503b412198aa4197559e9a318524ebc4519c229bfa05a535828c950b9d" +[[package]] +name = "gloo-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06f627b1a58ca3d42b45d6104bf1e1a03799df472df00988b6ba21accc10580" +dependencies = [ + "futures-channel", + "futures-core", + "futures-sink", + "gloo-utils", + "http 1.1.0", + "js-sys", + "pin-project", + "serde", + "serde_json", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "gloo-timers" version = "0.2.6" @@ -6942,6 +6990,19 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gloo-utils" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5555354113b18c547c1d3a98fbf7fb32a9ff4f6fa112ce823a21641a0ba3aa" +dependencies = [ + "js-sys", + "serde", + "serde_json", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "glutton-westend-runtime" version = "3.0.0" @@ -7440,16 +7501,17 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.3.1", "hyper-util", "log", - "rustls 0.23.10", + "rustls 0.23.14", + "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -7845,7 +7907,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.9", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -8043,11 +8105,13 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ec465b607a36dc5dd45d48b7689bc83f679f66a3ac6b6b21cc787a11e0f8685" dependencies = [ + "jsonrpsee-client-transport 0.24.3", "jsonrpsee-core 0.24.3", "jsonrpsee-http-client 0.24.3", "jsonrpsee-proc-macros", "jsonrpsee-server", "jsonrpsee-types 0.24.3", + "jsonrpsee-wasm-client", "jsonrpsee-ws-client 0.24.3", "tokio", "tracing", @@ -8085,7 +8149,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core 0.23.2", "pin-project", - "rustls 0.23.10", + "rustls 0.23.14", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -8104,11 +8168,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90f0977f9c15694371b8024c35ab58ca043dbbf4b51ccb03db8858a021241df1" dependencies = [ "base64 0.22.1", + "futures-channel", "futures-util", + "gloo-net", "http 1.1.0", "jsonrpsee-core 0.24.3", "pin-project", - "rustls 0.23.10", + "rustls 0.23.14", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -8189,6 +8255,7 @@ dependencies = [ "tokio", 
"tokio-stream", "tracing", + "wasm-bindgen-futures", ] [[package]] @@ -8221,11 +8288,11 @@ dependencies = [ "base64 0.22.1", "http-body 1.0.0", "hyper 1.3.1", - "hyper-rustls 0.27.2", + "hyper-rustls 0.27.3", "hyper-util", "jsonrpsee-core 0.24.3", "jsonrpsee-types 0.24.3", - "rustls 0.23.10", + "rustls 0.23.14", "rustls-platform-verifier", "serde", "serde_json", @@ -8246,7 +8313,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -8314,6 +8381,17 @@ dependencies = [ "thiserror", ] +[[package]] +name = "jsonrpsee-wasm-client" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0470d0ae043ffcb0cd323797a631e637fb4b55fe3eaa6002934819458bba62a7" +dependencies = [ + "jsonrpsee-client-transport 0.24.3", + "jsonrpsee-core 0.24.3", + "jsonrpsee-types 0.24.3", +] + [[package]] name = "jsonrpsee-ws-client" version = "0.23.2" @@ -8377,6 +8455,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-hash" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b286e6b663fb926e1eeb68528e69cb70ed46c6d65871a21b2215ae8154c6d3c" +dependencies = [ + "primitive-types 0.12.2", + "tiny-keccak", +] + [[package]] name = "keccak-hasher" version = "0.16.0" @@ -8407,6 +8495,7 @@ dependencies = [ "primitive-types 0.13.1", "scale-info", "serde_json", + "sp-debug-derive 14.0.0", "static_assertions", "substrate-wasm-builder", ] @@ -8978,7 +9067,7 @@ dependencies = [ "proc-macro-warning 0.4.2", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -9042,7 +9131,7 @@ dependencies = [ "futures", "js-sys", "libp2p-core", - "send_wrapper", + "send_wrapper 0.6.0", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -9203,9 +9292,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lioness" @@ -9262,7 +9351,7 @@ dependencies = [ "parking_lot 0.12.3", "pin-project", "prost 0.12.6", - "prost-build 0.13.2", + "prost-build", "rand", "rcgen", "ring 0.16.20", @@ -9386,7 +9475,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -9400,7 +9489,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -9411,7 +9500,7 @@ checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -9422,7 +9511,7 @@ checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -9759,7 +9848,7 @@ dependencies = [ "cfg-if", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -10232,6 +10321,7 @@ dependencies = [ "pallet-asset-conversion-tx-payment", "pallet-asset-tx-payment", "pallet-assets", + "pallet-revive", "pallet-skip-feeless-payment", "parity-scale-codec", "sc-block-builder", @@ -10363,7 +10453,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" 
dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -10539,7 +10629,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -10690,6 +10780,7 @@ dependencies = [ name = "pallet-asset-conversion-tx-payment" version = "10.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "pallet-asset-conversion", @@ -11310,7 +11401,7 @@ version = "18.0.0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -11511,6 +11602,24 @@ dependencies = [ "substrate-test-utils", ] +[[package]] +name = "pallet-example-authorization-tx-extension" +version = "1.0.0" +dependencies = [ + "docify", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-verify-signature", + "parity-scale-codec", + "scale-info", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-keyring", + "sp-runtime 31.0.1", +] + [[package]] name = "pallet-example-basic" version = "27.0.0" @@ -11636,6 +11745,7 @@ version = "4.0.0-dev" dependencies = [ "pallet-default-config-example", "pallet-dev-mode", + "pallet-example-authorization-tx-extension", "pallet-example-basic", "pallet-example-frame-crate", "pallet-example-kitchensink", @@ -12317,11 +12427,16 @@ dependencies = [ "array-bytes", "assert_matches", "bitflags 1.3.2", + "derive_more", "environmental", + "ethereum-types", "frame-benchmarking", "frame-support", "frame-system", + "hex", + "hex-literal", "impl-trait-for-tuples", + "jsonrpsee 0.24.3", "log", "pallet-assets", "pallet-balances", @@ -12331,25 +12446,30 @@ dependencies = [ "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-timestamp", + "pallet-transaction-payment", "pallet-utility", "parity-scale-codec", "paste", - "polkavm 0.12.0", - "polkavm-common 0.12.0", + "polkavm 0.13.0", + "polkavm-common 0.13.0", "pretty_assertions", "rlp 0.6.1", "scale-info", + "secp256k1", "serde", + "serde_json", "sp-api 26.0.0", + "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-keystore 0.34.0", "sp-runtime 31.0.1", "sp-std 14.0.0", "sp-tracing 16.0.0", + "sp-weights 27.0.0", "staging-xcm", "staging-xcm-builder", - "wat", + "subxt-signer", ] [[package]] @@ -12360,7 +12480,7 @@ dependencies = [ "frame-system", "log", "parity-wasm", - "polkavm-linker 0.12.0", + "polkavm-linker 0.13.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", @@ -12410,7 +12530,7 @@ version = "0.1.0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -12420,7 +12540,7 @@ dependencies = [ "bitflags 1.3.2", "parity-scale-codec", "paste", - "polkavm-derive 0.12.0", + "polkavm-derive 0.13.0", "scale-info", ] @@ -12655,7 +12775,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "sp-runtime 31.0.1", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -12787,6 +12907,7 @@ dependencies = [ name = "pallet-transaction-payment" version = "28.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", @@ -12918,6 +13039,25 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-verify-signature" +version = "1.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-collective", + "pallet-root-testing", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-runtime 31.0.1", + 
"sp-weights 27.0.0", +] + [[package]] name = "pallet-vesting" version = "28.0.0" @@ -13650,7 +13790,7 @@ dependencies = [ "pest_meta", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -13691,7 +13831,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -15288,6 +15428,7 @@ dependencies = [ "pallet-tx-pause", "pallet-uniques", "pallet-utility", + "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", @@ -15497,6 +15638,7 @@ dependencies = [ name = "polkadot-sdk-docs" version = "0.0.1" dependencies = [ + "assert_cmd", "chain-spec-guide-runtime", "cumulus-client-service", "cumulus-pallet-aura-ext", @@ -15504,6 +15646,7 @@ dependencies = [ "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "docify", + "frame-benchmarking", "frame-executive", "frame-metadata-hash-extension", "frame-support", @@ -15523,9 +15666,11 @@ dependencies = [ "pallet-contracts", "pallet-default-config-example", "pallet-democracy", + "pallet-example-authorization-tx-extension", "pallet-example-offchain-worker", "pallet-example-single-block-migrations", "pallet-examples", + "pallet-grandpa", "pallet-multisig", "pallet-nfts", "pallet-preimage", @@ -15540,8 +15685,12 @@ dependencies = [ "pallet-xcm", "parachain-template-runtime", "parity-scale-codec", + "polkadot-omni-node-lib", "polkadot-sdk", + "polkadot-sdk-docs-first-pallet", + "polkadot-sdk-docs-first-runtime", "polkadot-sdk-frame", + "rand", "sc-chain-spec", "sc-cli", "sc-client-db", @@ -15557,6 +15706,7 @@ dependencies = [ "sc-rpc-api", "sc-service", "scale-info", + "serde_json", "simple-mermaid 0.1.1", "solochain-template-runtime", "sp-api 26.0.0", @@ -15571,6 +15721,7 @@ dependencies = [ "sp-std 14.0.0", "sp-tracing 16.0.0", "sp-version 29.0.0", + "sp-weights 27.0.0", "staging-chain-spec-builder", "staging-node-cli", "staging-parachain-info", @@ -15583,6 +15734,35 @@ dependencies = [ "xcm-simulator", ] +[[package]] +name = "polkadot-sdk-docs-first-pallet" +version = "0.0.0" +dependencies = [ + "docify", + "parity-scale-codec", + "polkadot-sdk-frame", + "scale-info", +] + +[[package]] +name = "polkadot-sdk-docs-first-runtime" +version = "0.0.0" +dependencies = [ + "docify", + "pallet-balances", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "polkadot-sdk-docs-first-pallet", + "polkadot-sdk-frame", + "scale-info", + "serde_json", + "sp-keyring", + "substrate-wasm-builder", +] + [[package]] name = "polkadot-sdk-frame" version = "0.1.0" @@ -15605,8 +15785,10 @@ dependencies = [ "sp-consensus-aura", "sp-consensus-grandpa", "sp-core 28.0.0", + "sp-genesis-builder", "sp-inherents", "sp-io 30.0.0", + "sp-keyring", "sp-offchain", "sp-runtime 31.0.1", "sp-session", @@ -16068,15 +16250,15 @@ dependencies = [ [[package]] name = "polkavm" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27910c5061e4cea6be6c66684b49d0f42b6a05900c9b0da9e7f3dd2d587a8d4" +checksum = "57e79a14b15ed38cb5b9a1e38d02e933f19e3d180ae5b325fed606c5e5b9177e" dependencies = [ "libc", "log", - "polkavm-assembler 0.12.0", - "polkavm-common 0.12.0", - "polkavm-linux-raw 0.12.0", + "polkavm-assembler 0.13.0", + "polkavm-common 0.13.0", + "polkavm-linux-raw 0.13.0", ] [[package]] @@ -16090,9 +16272,9 @@ dependencies = 
[ [[package]] name = "polkavm-assembler" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82f0e374fa043f31459b30d629d7e866247ac4b6c7662ac72e4e5bf50d052b92" +checksum = "4e8da55465000feb0a61bbf556ed03024db58f3420eca37721fc726b3b2136bf" dependencies = [ "log", ] @@ -16114,13 +16296,13 @@ dependencies = [ [[package]] name = "polkavm-common" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4e42e082c3d89da2346555baf4d951fe07dcb9208e42a02c272e6d5d0326f9a" +checksum = "084b4339aae7dfdaaa5aa7d634110afd95970e0737b6fb2a0cb10db8b56b753c" dependencies = [ "blake3", "log", - "polkavm-assembler 0.12.0", + "polkavm-assembler 0.13.0", ] [[package]] @@ -16143,11 +16325,11 @@ dependencies = [ [[package]] name = "polkavm-derive" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "540b798393e68a890202d5dc9f86a985b7ea83611e3406d90dc1043e7997b4d1" +checksum = "f4456b9657b2abd04ac41a61c99e206b7410f93daf0e9b42b49089508d836c40" dependencies = [ - "polkavm-derive-impl-macro 0.12.0", + "polkavm-derive-impl-macro 0.13.0", ] [[package]] @@ -16159,7 +16341,7 @@ dependencies = [ "polkavm-common 0.8.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -16171,19 +16353,19 @@ dependencies = [ "polkavm-common 0.9.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] name = "polkavm-derive-impl" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d179eddaaef62ce5960faaa2ec9e8f131c81661c8b9365c4d55b275011688534" +checksum = "5e4f2c19e7ccc53d8e21429e83b6589bd4139d15481e455a90ba4335a4decb5a" dependencies = [ - "polkavm-common 0.12.0", + "polkavm-common 0.13.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -16193,7 +16375,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15e85319a0d5129dc9f021c62607e0804f5fb777a05cdda44d750ac0732def66" dependencies = [ "polkavm-derive-impl 0.8.0", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -16203,17 +16385,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl 0.9.0", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] name = "polkavm-derive-impl-macro" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd35472599d35d90e24afe9eb39ae6ee6cb1b924f0c03b277ef8b5f174a63853" +checksum = "e6f3ad876ca1855038c21d48cbe35164552208a54b21f8295a7d76bc33ef1e38" dependencies = [ - "polkavm-derive-impl 0.12.0", - "syn 2.0.79", + "polkavm-derive-impl 0.13.0", + "syn 2.0.82", ] [[package]] @@ -16233,15 +16415,15 @@ dependencies = [ [[package]] name = "polkavm-linker" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f917b16db9ab13819a738a321b48a2d0d20d9e32dedcff75054148676afbec4" +checksum = "4aa6e5a396abf195289d6d63d70182e59a7c27b9b06d0b7361317df05c07c8a8" dependencies = [ "gimli 0.28.0", "hashbrown 0.14.5", "log", "object 0.36.1", - "polkavm-common 0.12.0", + "polkavm-common 0.13.0", "regalloc2 0.9.3", "rustc-demangle", ] @@ -16254,9 +16436,9 @@ checksum = "26e85d3456948e650dff0cfc85603915847faf893ed1e66b020bb82ef4557120" [[package]] name = 
"polkavm-linux-raw" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d280301d5b5a321c732173c969058f4b5726f3a0046f6802f396df2599f3753d" +checksum = "686c4dd9c9c16cc22565b51bdbb269792318d0fd2e6b966b5f6c788534cad0e9" [[package]] name = "polling" @@ -16283,7 +16465,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.21", + "rustix 0.38.25", "tracing", "windows-sys 0.52.0", ] @@ -16430,7 +16612,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2 1.0.86", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -16535,7 +16717,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -16546,7 +16728,7 @@ checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -16579,7 +16761,7 @@ dependencies = [ "hex", "lazy_static", "procfs-core", - "rustix 0.38.21", + "rustix 0.38.25", ] [[package]] @@ -16627,7 +16809,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -16692,27 +16874,6 @@ dependencies = [ "prost-derive 0.13.2", ] -[[package]] -name = "prost-build" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" -dependencies = [ - "bytes", - "heck 0.5.0", - "itertools 0.12.1", - "log", - "multimap", - "once_cell", - "petgraph", - "prettyplease", - "prost 0.12.6", - "prost-types 0.12.4", - "regex", - "syn 2.0.79", - "tempfile", -] - [[package]] name = "prost-build" version = "0.13.2" @@ -16720,17 +16881,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" dependencies = [ "bytes", - "heck 0.5.0", - "itertools 0.12.1", + "heck 0.4.1", + "itertools 0.10.5", "log", "multimap", "once_cell", "petgraph", "prettyplease", "prost 0.13.2", - "prost-types 0.13.2", + "prost-types", "regex", - "syn 2.0.79", + "syn 2.0.82", "tempfile", ] @@ -16754,10 +16915,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.10.5", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -16767,19 +16928,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.10.5", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", -] - -[[package]] -name = "prost-types" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" -dependencies = [ - "prost 0.12.6", + "syn 2.0.82", ] [[package]] @@ -16934,7 +17086,7 @@ dependencies = [ "quinn-proto 0.11.8", "quinn-udp 0.5.4", "rustc-hash 2.0.0", - "rustls 0.23.10", + "rustls 0.23.14", "socket2 0.5.7", "thiserror", "tokio", @@ -16968,7 +17120,7 @@ 
dependencies = [ "rand", "ring 0.17.7", "rustc-hash 2.0.0", - "rustls 0.23.10", + "rustls 0.23.14", "slab", "thiserror", "tinyvec", @@ -17239,7 +17391,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -17461,7 +17613,7 @@ dependencies = [ "http-body 1.0.0", "http-body-util", "hyper 1.3.1", - "hyper-rustls 0.27.2", + "hyper-rustls 0.27.3", "hyper-util", "ipnet", "js-sys", @@ -17471,7 +17623,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn 0.11.5", - "rustls 0.23.10", + "rustls 0.23.14", "rustls-pemfile 2.0.0", "rustls-pki-types", "serde", @@ -17555,6 +17707,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -17849,7 +18010,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.79", + "syn 2.0.82", "unicode-ident", ] @@ -17998,14 +18159,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.10", + "linux-raw-sys 0.4.11", "windows-sys 0.48.0", ] @@ -18041,22 +18202,22 @@ dependencies = [ "log", "ring 0.17.7", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "subtle 2.5.0", "zeroize", ] [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "log", "once_cell", "ring 0.17.7", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "subtle 2.5.0", "zeroize", ] @@ -18086,6 +18247,19 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.0.0", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "1.0.3" @@ -18107,9 +18281,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-platform-verifier" @@ -18122,10 +18296,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.10", + "rustls 0.23.14", "rustls-native-certs 0.7.0", "rustls-platform-verifier-android", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "security-framework", "security-framework-sys", "webpki-roots 0.26.3", @@ -18150,9 +18324,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.7", "rustls-pki-types", @@ -18279,7 +18453,7 @@ dependencies = [ "multihash 0.19.1", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build", "quickcheck", "rand", "sc-client-api", @@ -18377,7 +18551,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -19095,7 +19269,7 @@ dependencies = [ "partial_sort", "pin-project", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build", "rand", "sc-block-builder", "sc-client-api", @@ -19140,7 +19314,7 @@ dependencies = [ "futures", "libp2p-identity", "parity-scale-codec", - "prost-build 0.12.4", + "prost-build", "sc-consensus", "sc-network-types", "sp-consensus", @@ -19182,7 +19356,7 @@ dependencies = [ "log", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build", "sc-client-api", "sc-network", "sc-network-types", @@ -19225,7 +19399,7 @@ dependencies = [ "mockall 0.11.4", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build", "quickcheck", "sc-block-builder", "sc-client-api", @@ -19327,14 +19501,17 @@ dependencies = [ "fnv", "futures", "futures-timer", - "hyper 0.14.29", - "hyper-rustls 0.24.2", + "http-body-util", + "hyper 1.3.1", + "hyper-rustls 0.27.3", + "hyper-util", "log", "num_cpus", "once_cell", "parity-scale-codec", "parking_lot 0.12.3", "rand", + "rustls 0.23.14", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -19739,7 +19916,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -19922,7 +20099,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "scale-info", - "syn 2.0.79", + "syn 2.0.82", "thiserror", ] @@ -20181,6 +20358,12 @@ dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" + [[package]] name = "send_wrapper" version = "0.6.0" @@ -20238,7 +20421,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -20263,9 +20446,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "indexmap 2.2.3", "itoa", @@ -20340,7 +20523,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -21311,7 +21494,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -21326,7 +21509,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -21665,7 +21848,7 @@ dependencies = [ "futures", "hash-db", "hash256-std-hasher", - "impl-serde 0.4.0", + "impl-serde 0.5.0", "itertools 0.11.0", "k256", "libsecp256k1", @@ -21865,7 
+22048,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -21936,7 +22119,7 @@ version = "0.1.0" dependencies = [ "quote 1.0.37", "sp-crypto-hashing 0.1.0", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -21947,7 +22130,7 @@ checksum = "b85d0f1f1e44bd8617eb2a48203ee854981229e3e79e6f468c7175d5fd37489b" dependencies = [ "quote 1.0.37", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -21965,7 +22148,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -21974,7 +22157,7 @@ version = "14.0.0" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -21985,7 +22168,7 @@ checksum = "48d09fa0a5f7299fb81ee25ae3853d26200f7a348148aed6de76be905c007dbe" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -22376,6 +22559,7 @@ dependencies = [ "sp-weights 27.0.0", "substrate-test-runtime-client", "tracing", + "tuplex", "zstd 0.12.4", ] @@ -22540,13 +22724,13 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "Inflector", "proc-macro-crate 1.3.1", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -22558,7 +22742,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -22572,7 +22756,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -22780,7 +22964,7 @@ dependencies = [ name = "sp-storage" version = "19.0.0" dependencies = [ - "impl-serde 0.4.0", + "impl-serde 0.5.0", "parity-scale-codec", "ref-cast", "serde", @@ -23008,7 +23192,7 @@ dependencies = [ name = "sp-version" version = "29.0.0" dependencies = [ - "impl-serde 0.4.0", + "impl-serde 0.5.0", "parity-scale-codec", "parity-wasm", "scale-info", @@ -23047,7 +23231,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "sp-version 29.0.0", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -23059,7 +23243,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -23279,6 +23463,7 @@ dependencies = [ "sp-keyring", "staging-node-inspect", "substrate-cli-test-utils", + "subxt-signer", "tempfile", "tokio", "tokio-util", @@ -23525,7 +23710,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -23538,7 +23723,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "rustversion", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -24027,7 +24212,7 @@ dependencies = [ "scale-info", "scale-typegen", "subxt-metadata", - "syn 2.0.79", + "syn 2.0.82", "thiserror", "tokio", ] @@ -24090,7 +24275,7 @@ dependencies = [ "quote 1.0.37", "scale-typegen", "subxt-codegen", - "syn 
2.0.79", + "syn 2.0.82", ] [[package]] @@ -24112,10 +24297,12 @@ version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f49888ae6ae90fe01b471193528eea5bd4ed52d8eecd2d13f4a2333b87388850" dependencies = [ + "bip32", "bip39", "cfg-if", "hex", "hmac 0.12.1", + "keccak-hash", "parity-scale-codec", "pbkdf2", "regex", @@ -24243,9 +24430,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "83540f837a8afc019423a8edb95b52a8effe46957ee402287f4292fae35be021" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", @@ -24261,7 +24448,7 @@ dependencies = [ "paste", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -24290,7 +24477,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -24361,7 +24548,7 @@ dependencies = [ "cfg-if", "fastrand 2.1.0", "redox_syscall 0.4.1", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -24391,7 +24578,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -24420,7 +24607,7 @@ checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -24560,9 +24747,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] @@ -24589,13 +24776,13 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -24757,7 +24944,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -24808,7 +24995,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.10", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -25033,7 +25220,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -25075,7 +25262,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -25532,7 +25719,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.10", + "rustls 0.23.14", "rustls-pki-types", "serde", 
"serde_json", @@ -25727,7 +25914,7 @@ dependencies = [ "once_cell", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", "wasm-bindgen-shared", ] @@ -25761,7 +25948,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -26896,7 +27083,7 @@ dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", "staging-xcm", - "syn 2.0.79", + "syn 2.0.82", "trybuild", ] @@ -27067,7 +27254,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] @@ -27087,7 +27274,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.79", + "syn 2.0.82", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 40d3615fed1d82e76ca76df333a24a2bfc3167fa..dd1fa40e146a902edf4e6600080698e5c37d313e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -149,6 +149,8 @@ members = [ "cumulus/test/service", "cumulus/xcm/xcm-emulator", "docs/sdk", + "docs/sdk/packages/guides/first-pallet", + "docs/sdk/packages/guides/first-runtime", "docs/sdk/src/reference_docs/chain_spec_runtime", "polkadot", "polkadot/cli", @@ -347,6 +349,7 @@ members = [ "substrate/frame/election-provider-support/solution-type/fuzzer", "substrate/frame/elections-phragmen", "substrate/frame/examples", + "substrate/frame/examples/authorization-tx-extension", "substrate/frame/examples/basic", "substrate/frame/examples/default-config", "substrate/frame/examples/dev-mode", @@ -442,6 +445,7 @@ members = [ "substrate/frame/tx-pause", "substrate/frame/uniques", "substrate/frame/utility", + "substrate/frame/verify-signature", "substrate/frame/vesting", "substrate/frame/whitelist", "substrate/primitives/api", @@ -681,6 +685,7 @@ color-print = { version = "0.3.4" } colored = { version = "2.0.4" } comfy-table = { version = "7.1.0", default-features = false } console = { version = "0.15.8" } +const-hex = { version = "1.10.0", default-features = false } contracts-rococo-runtime = { path = "cumulus/parachains/runtimes/contracts/contracts-rococo" } coretime-rococo-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo" } coretime-rococo-runtime = { path = "cumulus/parachains/runtimes/coretime/coretime-rococo" } @@ -752,6 +757,8 @@ femme = { version = "2.2.1" } filetime = { version = "0.2.16" } finality-grandpa = { version = "0.16.2", default-features = false } finality-relay = { path = "bridges/relays/finality" } +first-pallet = { package = "polkadot-sdk-docs-first-pallet", path = "docs/sdk/packages/guides/first-pallet", default-features = false } +first-runtime = { package = "polkadot-sdk-docs-first-runtime", path = "docs/sdk/packages/guides/first-runtime", default-features = false } flate2 = { version = "1.0" } fnv = { version = "1.0.6" } fork-tree = { path = "substrate/utils/fork-tree", default-features = false } @@ -800,11 +807,9 @@ http = { version = "1.1" } http-body = { version = "1", default-features = false } http-body-util = { version = "0.1.2", default-features = false } hyper = { version = "1.3.1", default-features = false } -hyper-rustls = { version = "0.24.2" } +hyper-rustls = { version = "0.27.3", default-features = false, features = ["http1", "http2", "logging", "ring", "rustls-native-certs", "tls12"] } hyper-util = 
{ version = "0.1.5", default-features = false } -# TODO: remove hyper v0.14 https://github.com/paritytech/polkadot-sdk/issues/4896 -hyperv14 = { package = "hyper", version = "0.14.29", default-features = false } -impl-serde = { version = "0.4.0", default-features = false } +impl-serde = { version = "0.5.0", default-features = false } impl-trait-for-tuples = { version = "0.2.2" } indexmap = { version = "2.0.0" } indicatif = { version = "0.17.7" } @@ -915,6 +920,7 @@ pallet-dev-mode = { path = "substrate/frame/examples/dev-mode", default-features pallet-election-provider-multi-phase = { path = "substrate/frame/election-provider-multi-phase", default-features = false } pallet-election-provider-support-benchmarking = { path = "substrate/frame/election-provider-support/benchmarking", default-features = false } pallet-elections-phragmen = { path = "substrate/frame/elections-phragmen", default-features = false } +pallet-example-authorization-tx-extension = { path = "substrate/frame/examples/authorization-tx-extension", default-features = false } pallet-example-basic = { path = "substrate/frame/examples/basic", default-features = false } pallet-example-frame-crate = { path = "substrate/frame/examples/frame-crate", default-features = false } pallet-example-kitchensink = { path = "substrate/frame/examples/kitchensink", default-features = false } @@ -991,6 +997,7 @@ pallet-treasury = { path = "substrate/frame/treasury", default-features = false pallet-tx-pause = { default-features = false, path = "substrate/frame/tx-pause" } pallet-uniques = { path = "substrate/frame/uniques", default-features = false } pallet-utility = { path = "substrate/frame/utility", default-features = false } +pallet-verify-signature = { path = "substrate/frame/verify-signature", default-features = false } pallet-vesting = { path = "substrate/frame/vesting", default-features = false } pallet-whitelist = { path = "substrate/frame/whitelist", default-features = false } pallet-xcm = { path = "polkadot/xcm/pallet-xcm", default-features = false } @@ -1087,7 +1094,7 @@ prometheus = { version = "0.13.0", default-features = false } prometheus-endpoint = { path = "substrate/utils/prometheus", default-features = false, package = "substrate-prometheus-endpoint" } prometheus-parse = { version = "0.2.2" } prost = { version = "0.12.4" } -prost-build = { version = "0.12.4" } +prost-build = { version = "0.13.2" } pyroscope = { version = "0.5.7" } pyroscope_pprofrs = { version = "0.2.7" } quick_cache = { version = "0.3" } @@ -1118,6 +1125,7 @@ rstest = { version = "0.18.2" } rustc-hash = { version = "1.1.0" } rustc-hex = { version = "2.1.0", default-features = false } rustix = { version = "0.36.7", default-features = false } +rustls = { version = "0.23.14", default-features = false, features = ["logging", "ring", "std", "tls12"] } rustversion = { version = "1.0.17" } rusty-fork = { version = "0.3.0", default-features = false } safe-mix = { version = "1.0", default-features = false } @@ -1189,7 +1197,7 @@ separator = { version = "0.4.1" } serde = { version = "1.0.210", default-features = false } serde-big-array = { version = "0.3.2" } serde_derive = { version = "1.0.117" } -serde_json = { version = "1.0.128", default-features = false } +serde_json = { version = "1.0.132", default-features = false } serde_yaml = { version = "0.9" } serial_test = { version = "2.0.0" } sha1 = { version = "0.10.6" } @@ -1300,7 +1308,7 @@ substrate-test-utils = { path = "substrate/test-utils" } substrate-wasm-builder = { path = "substrate/utils/wasm-builder", 
default-features = false } subxt = { version = "0.37", default-features = false } subxt-signer = { version = "0.37" } -syn = { version = "2.0.79" } +syn = { version = "2.0.82" } sysinfo = { version = "0.30" } tar = { version = "0.4" } tempfile = { version = "3.8.1" } @@ -1311,7 +1319,7 @@ test-parachain-halt = { path = "polkadot/parachain/test-parachains/halt" } test-parachain-undying = { path = "polkadot/parachain/test-parachains/undying" } test-runtime-constants = { path = "polkadot/runtime/test-runtime/constants", default-features = false } testnet-parachains-constants = { path = "cumulus/parachains/runtimes/constants", default-features = false } -thiserror = { version = "1.0.48" } +thiserror = { version = "1.0.64" } thousands = { version = "0.2.0" } threadpool = { version = "1.7" } tikv-jemalloc-ctl = { version = "0.5.0" } diff --git a/README.md b/README.md index 8016b6b37301a32a9ccf5dd7d80827625620d760..6c0dfbb2e7e4255efb7bd925789bbc6d5fd7862a 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,7 @@ forks](https://img.shields.io/github/forks/paritytech/polkadot-sdk) ## ⚡ Quickstart If you want to get an example node running quickly you can execute the following getting started script: + ``` curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/scripts/getting-started.sh | bash ``` @@ -31,9 +32,8 @@ curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/paritytec * [Guides](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/index.html), namely how to build your first FRAME pallet * [Templates](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/templates/index.html) - for starting a new project -* Other resources: - * [Polkadot Wiki -> Build](https://wiki.polkadot.network/docs/build-guide) + for starting a new project. + * [External Resources](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/external_resources/index.html) ## 🚀 Releases diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index b8835d55f0da2ad9a88a94f10539f4312950600a..37b56140c289e2a82d8e3e2d1566df420185bbde 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -39,6 +39,7 @@ sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } sp-trie = { optional = true, workspace = true } +sp-weights = { workspace = true } # Polkadot dependencies xcm = { workspace = true } @@ -80,6 +81,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-trie/std", + "sp-weights/std", "tuplex/std", "xcm/std", ] @@ -93,6 +95,7 @@ runtime-benchmarks = [ "pallet-bridge-messages/test-helpers", "pallet-bridge-parachains/runtime-benchmarks", "pallet-bridge-relayers/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "sp-trie", diff --git a/bridges/bin/runtime-common/src/extensions.rs b/bridges/bin/runtime-common/src/extensions.rs index dced502394717411654d270b335d57b94feda31a..19d1554c668b6ea19007b55303c96661f03f8094 100644 --- a/bridges/bin/runtime-common/src/extensions.rs +++ b/bridges/bin/runtime-common/src/extensions.rs @@ -47,8 +47,7 @@ pub trait BridgeRuntimeFilterCall { /// Data that may be passed from the validate to `post_dispatch`. type ToPostDispatch; /// Called during validation. Needs to checks whether a runtime call, submitted - /// by the `who` is valid. 
`who` may be `None` if transaction is not signed - /// by a regular account. + /// by the `who` is valid. Transactions not signed are not validated. fn validate(who: &AccountId, call: &Call) -> (Self::ToPostDispatch, TransactionValidity); /// Called after transaction is dispatched. fn post_dispatch(_who: &AccountId, _has_failed: bool, _to_post_dispatch: Self::ToPostDispatch) { @@ -274,12 +273,10 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { ($call:ty, $account_id:ty, $($filter_call:ty),*) => { #[derive(Clone, codec::Decode, Default, codec::Encode, Eq, PartialEq, sp_runtime::RuntimeDebug, scale_info::TypeInfo)] pub struct BridgeRejectObsoleteHeadersAndMessages; - impl sp_runtime::traits::SignedExtension for BridgeRejectObsoleteHeadersAndMessages { + impl sp_runtime::traits::TransactionExtension<$call> for BridgeRejectObsoleteHeadersAndMessages { const IDENTIFIER: &'static str = "BridgeRejectObsoleteHeadersAndMessages"; - type AccountId = $account_id; - type Call = $call; - type AdditionalSigned = (); - type Pre = ( + type Implicit = (); + type Val = Option<( $account_id, ( $( <$filter_call as $crate::extensions::BridgeRuntimeFilterCall< @@ -287,72 +284,75 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { $call, >>::ToPostDispatch, )* ), - ); + )>; + type Pre = Self::Val; - fn additional_signed(&self) -> sp_std::result::Result< - (), - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(()) + fn weight(&self, _: &$call) -> frame_support::pallet_prelude::Weight { + frame_support::pallet_prelude::Weight::zero() } - #[allow(unused_variables)] fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + origin: <$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + call: &$call, + _info: &sp_runtime::traits::DispatchInfoOf<$call>, _len: usize, - ) -> sp_runtime::transaction_validity::TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl codec::Encode, + ) -> Result< + ( + sp_runtime::transaction_validity::ValidTransaction, + Self::Val, + <$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + ), sp_runtime::transaction_validity::TransactionValidityError + > { + use $crate::extensions::__private::tuplex::PushBack; + use sp_runtime::traits::AsSystemOriginSigner; + + let Some(who) = origin.as_system_origin_signer() else { + return Ok((Default::default(), None, origin)); + }; + + let to_post_dispatch = (); let tx_validity = sp_runtime::transaction_validity::ValidTransaction::default(); - let to_prepare = (); $( let (from_validate, call_filter_validity) = < $filter_call as $crate::extensions::BridgeRuntimeFilterCall< - Self::AccountId, + $account_id, $call, - >>::validate(&who, call); + >>::validate(who, call); + let to_post_dispatch = to_post_dispatch.push_back(from_validate); let tx_validity = tx_validity.combine_with(call_filter_validity?); )* - Ok(tx_validity) + Ok((tx_validity, Some((who.clone(), to_post_dispatch)), origin)) } - #[allow(unused_variables)] - fn pre_dispatch( + fn prepare( self, - relayer: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &<$call as sp_runtime::traits::Dispatchable>::RuntimeOrigin, + _call: &$call, + _info: &sp_runtime::traits::DispatchInfoOf<$call>, + _len: usize, ) -> Result { - use $crate::extensions::__private::tuplex::PushBack; - - let to_post_dispatch = (); - $( - let (from_validate, call_filter_validity) = < - 
$filter_call as - $crate::extensions::BridgeRuntimeFilterCall< - $account_id, - $call, - >>::validate(&relayer, call); - let _ = call_filter_validity?; - let to_post_dispatch = to_post_dispatch.push_back(from_validate); - )* - Ok((relayer.clone(), to_post_dispatch)) + Ok(val) } #[allow(unused_variables)] - fn post_dispatch( - to_post_dispatch: Option, - info: &sp_runtime::traits::DispatchInfoOf, - post_info: &sp_runtime::traits::PostDispatchInfoOf, + fn post_dispatch_details( + to_post_dispatch: Self::Pre, + info: &sp_runtime::traits::DispatchInfoOf<$call>, + post_info: &sp_runtime::traits::PostDispatchInfoOf<$call>, len: usize, result: &sp_runtime::DispatchResult, - ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> { + ) -> Result { use $crate::extensions::__private::tuplex::PopFront; - let Some((relayer, to_post_dispatch)) = to_post_dispatch else { return Ok(()) }; + let Some((relayer, to_post_dispatch)) = to_post_dispatch else { + return Ok(frame_support::pallet_prelude::Weight::zero()) + }; + let has_failed = result.is_err(); $( let (item, to_post_dispatch) = to_post_dispatch.pop_front(); @@ -363,7 +363,7 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { $call, >>::post_dispatch(&relayer, has_failed, item); )* - Ok(()) + Ok(frame_support::pallet_prelude::Weight::zero()) } } }; @@ -380,11 +380,16 @@ mod tests { use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::HeaderId; use bp_test_utils::{make_default_justification, test_keyring, TEST_GRANDPA_SET_ID}; + use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{assert_err, assert_ok, traits::fungible::Mutate}; use pallet_bridge_grandpa::{Call as GrandpaCall, StoredAuthoritySet}; use pallet_bridge_parachains::Call as ParachainsCall; + use scale_info::TypeInfo; use sp_runtime::{ - traits::{parameter_types, ConstU64, Header as _, SignedExtension}, + traits::{ + parameter_types, AsSystemOriginSigner, AsTransactionAuthorizedOrigin, ConstU64, + DispatchTransaction, Header as _, TransactionExtension, + }, transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, DispatchError, }; @@ -402,12 +407,34 @@ mod tests { ); } + #[derive(Debug, Clone, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct MockCall { data: u32, } + #[derive(Debug, Clone, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub struct MockOrigin(pub u64); + + impl AsSystemOriginSigner for MockOrigin { + fn as_system_origin_signer(&self) -> Option<&u64> { + Some(&self.0) + } + } + + impl AsTransactionAuthorizedOrigin for MockOrigin { + fn is_transaction_authorized(&self) -> bool { + true + } + } + + impl From for MockOrigin { + fn from(o: u64) -> Self { + Self(o) + } + } + impl sp_runtime::traits::Dispatchable for MockCall { - type RuntimeOrigin = u64; + type RuntimeOrigin = MockOrigin; type Config = (); type Info = (); type PostInfo = (); @@ -579,12 +606,17 @@ mod tests { run_test(|| { assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 1 }, &(), 0), + BridgeRejectObsoleteHeadersAndMessages.validate_only( + 42u64.into(), + &MockCall { data: 1 }, + &(), + 0 + ), InvalidTransaction::Custom(1) ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.pre_dispatch( - &42, + BridgeRejectObsoleteHeadersAndMessages.validate_and_prepare( + 42u64.into(), &MockCall { data: 1 }, &(), 0 @@ -593,12 +625,17 @@ mod tests { ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 2 }, &(), 0), + 
BridgeRejectObsoleteHeadersAndMessages.validate_only( + 42u64.into(), + &MockCall { data: 2 }, + &(), + 0 + ), InvalidTransaction::Custom(2) ); assert_err!( - BridgeRejectObsoleteHeadersAndMessages.pre_dispatch( - &42, + BridgeRejectObsoleteHeadersAndMessages.validate_and_prepare( + 42u64.into(), &MockCall { data: 2 }, &(), 0 @@ -608,37 +645,40 @@ mod tests { assert_eq!( BridgeRejectObsoleteHeadersAndMessages - .validate(&42, &MockCall { data: 3 }, &(), 0) - .unwrap(), + .validate_only(42u64.into(), &MockCall { data: 3 }, &(), 0) + .unwrap() + .0, ValidTransaction { priority: 3, ..Default::default() }, ); assert_eq!( BridgeRejectObsoleteHeadersAndMessages - .pre_dispatch(&42, &MockCall { data: 3 }, &(), 0) + .validate_and_prepare(42u64.into(), &MockCall { data: 3 }, &(), 0) + .unwrap() + .0 .unwrap(), (42, (1, 2)), ); // when post_dispatch is called with `Ok(())`, it is propagated to all "nested" // extensions - assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch( + assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch_details( Some((0, (1, 2))), &(), &(), 0, - &Ok(()) + &Ok(()), )); FirstFilterCall::verify_post_dispatch_called_with(true); SecondFilterCall::verify_post_dispatch_called_with(true); // when post_dispatch is called with `Err(())`, it is propagated to all "nested" // extensions - assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch( + assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch_details( Some((0, (1, 2))), &(), &(), 0, - &Err(DispatchError::BadOrigin) + &Err(DispatchError::BadOrigin), )); FirstFilterCall::verify_post_dispatch_called_with(false); SecondFilterCall::verify_post_dispatch_called_with(false); diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index 1d4043fc4b61b257ca559b3454eeed63b882ca62..6cf04b452da71264df7459939065d819f29f967f 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -162,7 +162,6 @@ impl pallet_transaction_payment::Config for TestRuntime { MinimumMultiplier, MaximumMultiplier, >; - type RuntimeEvent = RuntimeEvent; } impl pallet_bridge_grandpa::Config for TestRuntime { diff --git a/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs index a5c90ceba111e0c8a095f7e96e6d4a8dba92d183..f626fa6df010b96290ca2980d1fc8b4c44623bd5 100644 --- a/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs @@ -26,7 +26,7 @@ pub use bp_polkadot_core::{ }; use bp_messages::*; -use bp_polkadot_core::SuffixedCommonSignedExtension; +use bp_polkadot_core::SuffixedCommonTransactionExtension; use bp_runtime::extensions::{ BridgeRejectObsoleteHeadersAndMessages, RefundBridgedParachainMessagesSchema, }; @@ -167,7 +167,7 @@ pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; /// Signed extension that is used by all bridge hubs. 
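The macro rewrite above moves `BridgeRejectObsoleteHeadersAndMessages` from `SignedExtension` to `TransactionExtension`: `validate` now takes the dispatch origin and returns the validity together with a `Val` value and the origin, `prepare` replaces `pre_dispatch` and simply forwards that value, and `post_dispatch_details` returns a `Weight` refund instead of `()`. Below is a minimal sketch, not part of this patch, of that lifecycle for a hypothetical `TrackLenExtension`; the signatures mirror the generated impl above and may need small adjustments against the exact sp-runtime release in use.

```rust
use codec::{Decode, Encode};
use frame_support::pallet_prelude::Weight;
use scale_info::TypeInfo;
use sp_runtime::{
    traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension},
    transaction_validity::{TransactionValidityError, ValidTransaction},
    DispatchResult,
};

/// Hypothetical extension that remembers the encoded transaction length.
#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)]
pub struct TrackLenExtension;

impl<Call: Dispatchable> TransactionExtension<Call> for TrackLenExtension {
    const IDENTIFIER: &'static str = "TrackLenExtension";
    // Nothing is implicitly added to the signed payload.
    type Implicit = ();
    // `Val` is produced by `validate` and handed to `prepare`;
    // `Pre` is produced by `prepare` and handed to `post_dispatch_details`.
    type Val = Option<usize>;
    type Pre = Option<usize>;

    fn weight(&self, _call: &Call) -> Weight {
        Weight::zero()
    }

    fn validate(
        &self,
        origin: <Call as Dispatchable>::RuntimeOrigin,
        _call: &Call,
        _info: &DispatchInfoOf<Call>,
        len: usize,
        _self_implicit: Self::Implicit,
        _inherited_implication: &impl Encode,
    ) -> Result<
        (ValidTransaction, Self::Val, <Call as Dispatchable>::RuntimeOrigin),
        TransactionValidityError,
    > {
        // Whatever is computed here is passed on to `prepare` as `Val`.
        Ok((ValidTransaction::default(), Some(len), origin))
    }

    fn prepare(
        self,
        val: Self::Val,
        _origin: &<Call as Dispatchable>::RuntimeOrigin,
        _call: &Call,
        _info: &DispatchInfoOf<Call>,
        _len: usize,
    ) -> Result<Self::Pre, TransactionValidityError> {
        // No re-parsing: just forward what `validate` already computed.
        Ok(val)
    }

    fn post_dispatch_details(
        pre: Self::Pre,
        _info: &DispatchInfoOf<Call>,
        _post_info: &PostDispatchInfoOf<Call>,
        _len: usize,
        _result: &DispatchResult,
    ) -> Result<Weight, TransactionValidityError> {
        // `pre` is exactly what `prepare` returned; consume it here.
        let _tracked_len = pre;
        // The returned weight is refunded from the extension's declared weight.
        Ok(Weight::zero())
    }
}
```

The point of the `Val`/`Pre` split is that anything parsed during validation is carried forward, so `prepare` never has to re-decode the call; the relayer extension later in this diff relies on exactly that.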
-pub type SignedExtension = SuffixedCommonSignedExtension<( +pub type TransactionExtension = SuffixedCommonTransactionExtension<( BridgeRejectObsoleteHeadersAndMessages, RefundBridgedParachainMessagesSchema, )>; diff --git a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs index 538bc44019f5226f56b2f07b8cca47451b93e678..fda6a5f0b722e90f8c2501fda15e8274e43c07f7 100644 --- a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs @@ -109,11 +109,11 @@ frame_support::parameter_types! { /// Transaction fee that is paid at the Rococo BridgeHub for delivering single inbound message. /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_standalone_message_delivery_transaction` + `33%`) - pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 297_644_174; + pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 297_685_840; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_standalone_message_confirmation_transaction` + `33%`) - pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 56_740_432; + pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 56_782_099; } /// Wrapper over `BridgeHubRococo`'s `RuntimeCall` that can be used without a runtime. diff --git a/bridges/chains/chain-bridge-hub-westend/src/lib.rs b/bridges/chains/chain-bridge-hub-westend/src/lib.rs index 7a213fdb28c82df5212a86669b480fe371cf8837..e941b584023822e0342c93cd965a38d07f060206 100644 --- a/bridges/chains/chain-bridge-hub-westend/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-westend/src/lib.rs @@ -98,11 +98,11 @@ frame_support::parameter_types! { /// Transaction fee that is paid at the Westend BridgeHub for delivering single inbound message. /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_standalone_message_delivery_transaction` + `33%`) - pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 89_293_427_116; + pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 89_305_927_116; /// Transaction fee that is paid at the Westend BridgeHub for delivering single outbound message confirmation. /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_standalone_message_confirmation_transaction` + `33%`) - pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 17_022_177_116; + pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 17_034_677_116; } /// Wrapper over `BridgeHubWestend`'s `RuntimeCall` that can be used without a runtime. diff --git a/bridges/chains/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs index dcd0b23abbbefa2dfba741d6934b5d5510c93017..f1f30c4484ebbfe9af600b4fff161853dbd337e3 100644 --- a/bridges/chains/chain-kusama/src/lib.rs +++ b/bridges/chains/chain-kusama/src/lib.rs @@ -61,8 +61,8 @@ impl ChainWithGrandpa for Kusama { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Kusama. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Kusama. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Kusama runtime. 
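Both the generated bridge extension above and the relayer extension later in this diff no longer receive the account id directly: `validate` gets the dispatch origin and recovers the signer through `AsSystemOriginSigner`, bailing out (or rejecting with `BadSigner`) when there is none. A self-contained sketch of that trait for a hypothetical origin type:

```rust
use sp_runtime::traits::AsSystemOriginSigner;

/// Hypothetical runtime origin: either signed by an account or unsigned.
#[derive(Clone, Debug, PartialEq)]
pub enum ExampleOrigin {
    Signed(u64),
    None,
}

impl AsSystemOriginSigner<u64> for ExampleOrigin {
    fn as_system_origin_signer(&self) -> Option<&u64> {
        // Extensions call this to recover the signer; `None` means the
        // transaction was not signed by a regular account, and the bridge
        // extensions above then skip their checks or reject the transaction.
        match self {
            ExampleOrigin::Signed(who) => Some(who),
            ExampleOrigin::None => None,
        }
    }
}
```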
pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/chains/chain-polkadot-bulletin/src/lib.rs b/bridges/chains/chain-polkadot-bulletin/src/lib.rs index d009369197213da84aca80b9263f82ae4407ca0f..c5c18beb2cadc81ae6b7502ef0b194cdee4be535 100644 --- a/bridges/chains/chain-polkadot-bulletin/src/lib.rs +++ b/bridges/chains/chain-polkadot-bulletin/src/lib.rs @@ -25,7 +25,7 @@ use bp_runtime::{ decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, extensions::{ CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, CheckSpecVersion, CheckTxVersion, - CheckWeight, GenericSignedExtension, GenericSignedExtensionSchema, + CheckWeight, GenericTransactionExtension, GenericTransactionExtensionSchema, }, Chain, ChainId, TransactionEra, }; @@ -38,7 +38,8 @@ use frame_support::{ use frame_system::limits; use scale_info::TypeInfo; use sp_runtime::{ - traits::DispatchInfoOf, transaction_validity::TransactionValidityError, Perbill, StateVersion, + impl_tx_ext_default, traits::Dispatchable, transaction_validity::TransactionValidityError, + Perbill, StateVersion, }; // This chain reuses most of Polkadot primitives. @@ -73,10 +74,10 @@ pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; /// This signed extension is used to ensure that the chain transactions are signed by proper -pub type ValidateSigned = GenericSignedExtensionSchema<(), ()>; +pub type ValidateSigned = GenericTransactionExtensionSchema<(), ()>; /// Signed extension schema, used by Polkadot Bulletin. -pub type SignedExtensionSchema = GenericSignedExtension<( +pub type TransactionExtensionSchema = GenericTransactionExtension<( ( CheckNonZeroSender, CheckSpecVersion, @@ -89,34 +90,30 @@ pub type SignedExtensionSchema = GenericSignedExtension<( ValidateSigned, )>; -/// Signed extension, used by Polkadot Bulletin. +/// Transaction extension, used by Polkadot Bulletin. #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct SignedExtension(SignedExtensionSchema); +pub struct TransactionExtension(TransactionExtensionSchema); -impl sp_runtime::traits::SignedExtension for SignedExtension { +impl sp_runtime::traits::TransactionExtension for TransactionExtension +where + C: Dispatchable, +{ const IDENTIFIER: &'static str = "Not needed."; - type AccountId = (); - type Call = (); - type AdditionalSigned = - ::AdditionalSigned; - type Pre = (); + type Implicit = + >::Implicit; - fn additional_signed(&self) -> Result { - self.0.additional_signed() + fn implicit(&self) -> Result { + >::implicit( + &self.0, + ) } + type Pre = (); + type Val = (); - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } + impl_tx_ext_default!(C; weight validate prepare); } -impl SignedExtension { +impl TransactionExtension { /// Create signed extension from its components. 
pub fn from_params( spec_version: u32, @@ -125,7 +122,7 @@ impl SignedExtension { genesis_hash: Hash, nonce: Nonce, ) -> Self { - Self(GenericSignedExtension::new( + Self(GenericTransactionExtension::new( ( ( (), // non-zero sender diff --git a/bridges/chains/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs index f4b262d40735d7470a4d7e289f24bc1d4556d039..5d2f9e4aa9e06d297c1152f226c283a442ad86e0 100644 --- a/bridges/chains/chain-polkadot/src/lib.rs +++ b/bridges/chains/chain-polkadot/src/lib.rs @@ -63,8 +63,8 @@ impl ChainWithGrandpa for Polkadot { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -/// The SignedExtension used by Polkadot. -pub type SignedExtension = SuffixedCommonSignedExtension; +/// The TransactionExtension used by Polkadot. +pub type TransactionExtension = SuffixedCommonTransactionExtension; /// Name of the parachains pallet in the Polkadot runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/chains/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs index bfcafdf41ea2e629c9a58f2545016b2e776375b8..2827d1f137b0f7cd593848ff6e9cdf4f45836744 100644 --- a/bridges/chains/chain-rococo/src/lib.rs +++ b/bridges/chains/chain-rococo/src/lib.rs @@ -61,8 +61,8 @@ impl ChainWithGrandpa for Rococo { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Rococo. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Rococo. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Rococo runtime. pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/chains/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs index 2a247e03e59d666d3c5dd54d74e3a4f852a60bd3..2b0a609115bc3e40de6343a469d701ffae1e5d4e 100644 --- a/bridges/chains/chain-westend/src/lib.rs +++ b/bridges/chains/chain-westend/src/lib.rs @@ -61,8 +61,8 @@ impl ChainWithGrandpa for Westend { const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; } -// The SignedExtension used by Westend. -pub use bp_polkadot_core::CommonSignedExtension as SignedExtension; +// The TransactionExtension used by Westend. +pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; /// Name of the parachains pallet in the Rococo runtime. 
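The Polkadot Bulletin wrapper above only forwards `implicit()`; weight, validation and preparation fall back to the defaults generated by `impl_tx_ext_default!`. A minimal sketch of that pattern, using a hypothetical no-op extension (names are illustrative, not from the patch):

```rust
use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{
    impl_tx_ext_default,
    traits::{Dispatchable, TransactionExtension},
};

/// Hypothetical extension that performs no checks at all.
#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)]
pub struct NoopExtension;

impl<Call: Dispatchable> TransactionExtension<Call> for NoopExtension {
    const IDENTIFIER: &'static str = "NoopExtension";
    // Nothing is implicitly included in the signed payload.
    type Implicit = ();
    // Nothing flows from `validate` to `prepare` or on to `post_dispatch`.
    type Val = ();
    type Pre = ();

    // Generate default `weight`, `validate` and `prepare`, exactly as the
    // Bulletin extension above does.
    impl_tx_ext_default!(Call; weight validate prepare);
}
```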
pub const PARAS_PALLET_NAME: &str = "Paras"; diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 0bf889bcca0ef674b68b0f10dbbf01eb9a7bc046..04e7b52ed86c4dbeebc2f6b26e7ddb54ebba155a 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -79,6 +79,7 @@ runtime-benchmarks = [ "pallet-bridge-grandpa/runtime-benchmarks", "pallet-bridge-messages/runtime-benchmarks", "pallet-bridge-parachains/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/bridges/modules/relayers/src/extension/mod.rs b/bridges/modules/relayers/src/extension/mod.rs index 9a248eb8e7983c09934396e7ccae824dd83b296c..710533c223a0b9575a579a44ebe5ce4e62d5702f 100644 --- a/bridges/modules/relayers/src/extension/mod.rs +++ b/bridges/modules/relayers/src/extension/mod.rs @@ -33,6 +33,7 @@ use bp_runtime::{Chain, RangeInclusiveExt, StaticStrProvider}; use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, + weights::Weight, CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use frame_system::Config as SystemConfig; @@ -44,10 +45,11 @@ use pallet_transaction_payment::{ }; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - TransactionValidity, TransactionValidityError, ValidTransactionBuilder, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, + TransactionExtension, ValidateResult, Zero, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransactionBuilder}, DispatchResult, RuntimeDebug, }; use sp_std::{fmt::Debug, marker::PhantomData}; @@ -62,7 +64,7 @@ mod messages_adapter; mod parachain_adapter; mod priority; -/// Data that is crafted in `pre_dispatch` method and used at `post_dispatch`. +/// Data that is crafted in `validate`, passed to `prepare` and used at `post_dispatch` method. #[cfg_attr(test, derive(Debug, PartialEq))] pub struct PreDispatchData< AccountId, @@ -78,7 +80,7 @@ pub struct PreDispatchData< impl PreDispatchData { - /// Returns mutable reference to pre-dispatch `finality_target` sent to the + /// Returns mutable reference to `finality_target` sent to the /// `SubmitFinalityProof` call. #[cfg(test)] pub fn submit_finality_proof_info_mut( @@ -119,11 +121,11 @@ pub enum RelayerAccountAction { TypeInfo, )] #[scale_info(skip_type_params(Runtime, Config, LaneId))] -pub struct BridgeRelayersSignedExtension( +pub struct BridgeRelayersTransactionExtension( PhantomData<(Runtime, Config, LaneId)>, ); -impl BridgeRelayersSignedExtension +impl BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, R: RelayersConfig @@ -131,6 +133,7 @@ where + TransactionPaymentConfig, C: ExtensionConfig, R::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, ::OnChargeTransaction: OnChargeTransaction, LaneId: Clone + Copy + Decode + Encode + Debug + TypeInfo, @@ -145,13 +148,12 @@ where /// virtually boosted. The relayer registration (we only boost priority for registered /// relayer transactions) must be checked outside. 
fn bundled_messages_for_priority_boost( - call_info: Option<&ExtensionCallInfo>, + parsed_call: &ExtensionCallInfo, ) -> Option { // we only boost priority of message delivery transactions - let parsed_call = match call_info { - Some(parsed_call) if parsed_call.is_receive_messages_proof_call() => parsed_call, - _ => return None, - }; + if !parsed_call.is_receive_messages_proof_call() { + return None; + } // compute total number of messages in transaction let bundled_messages = parsed_call.messages_call_info().bundled_messages().saturating_len(); @@ -169,9 +171,7 @@ where /// Given post-dispatch information, analyze the outcome of relayer call and return /// actions that need to be performed on relayer account. fn analyze_call_result( - pre: Option< - Option>, - >, + pre: Option>, info: &DispatchInfo, post_info: &PostDispatchInfo, len: usize, @@ -179,7 +179,7 @@ where ) -> RelayerAccountAction { // We don't refund anything for transactions that we don't support. let (relayer, call_info) = match pre { - Some(Some(pre)) => (pre.relayer, pre.call_info), + Some(pre) => (pre.relayer, pre.call_info), _ => return RelayerAccountAction::None, }; @@ -201,15 +201,14 @@ where // // we are not checking if relayer is registered here - it happens during the slash attempt // - // there are couple of edge cases here: + // there are a couple of edge cases here: // // - when the relayer becomes registered during message dispatch: this is unlikely + relayer // should be ready for slashing after registration; // // - when relayer is registered after `validate` is called and priority is not boosted: // relayer should be ready for slashing after registration. - let may_slash_relayer = - Self::bundled_messages_for_priority_boost(Some(&call_info)).is_some(); + let may_slash_relayer = Self::bundled_messages_for_priority_boost(&call_info).is_some(); let slash_relayer_if_delivery_result = may_slash_relayer .then(|| RelayerAccountAction::Slash(relayer.clone(), reward_account_params)) .unwrap_or(RelayerAccountAction::None); @@ -244,7 +243,7 @@ where let post_info_len = len.saturating_sub(call_data.extra_size as usize); let mut post_info_weight = post_info .actual_weight - .unwrap_or(info.weight) + .unwrap_or(info.total_weight()) .saturating_sub(call_data.extra_weight); // let's also replace the weight of slashing relayer with the weight of rewarding relayer @@ -274,7 +273,8 @@ where } } -impl SignedExtension for BridgeRelayersSignedExtension +impl TransactionExtension + for BridgeRelayersTransactionExtension where Self: 'static + Send + Sync, R: RelayersConfig @@ -282,46 +282,50 @@ where + TransactionPaymentConfig, C: ExtensionConfig, R::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, ::OnChargeTransaction: OnChargeTransaction, LaneId: Clone + Copy + Decode + Encode + Debug + TypeInfo, { const IDENTIFIER: &'static str = C::IdProvider::STR; - type AccountId = R::AccountId; - type Call = R::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); type Pre = Option>; + type Val = Self::Pre; - fn additional_signed(&self) -> Result<(), TransactionValidityError> { - Ok(()) + fn weight(&self, _call: &R::RuntimeCall) -> Weight { + Weight::zero() } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &R::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { - // this is the only relevant line of code for the `pre_dispatch` - // - // we're not calling `validate` from `pre_dispatch` directly 
because of performance - // reasons, so if you're adding some code that may fail here, please check if it needs - // to be added to the `pre_dispatch` as well - let parsed_call = C::parse_and_check_for_obsolete_call(call)?; + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + // Prepare relevant data for `prepare` + let parsed_call = match C::parse_and_check_for_obsolete_call(call)? { + Some(parsed_call) => parsed_call, + None => return Ok((Default::default(), None, origin)), + }; + // Those calls are only for signed transactions. + let relayer = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + + let data = PreDispatchData { relayer: relayer.clone(), call_info: parsed_call }; - // the following code just plays with transaction priority and never returns an error + // the following code just plays with transaction priority // we only boost priority of presumably correct message delivery transactions - let bundled_messages = match Self::bundled_messages_for_priority_boost(parsed_call.as_ref()) - { + let bundled_messages = match Self::bundled_messages_for_priority_boost(&data.call_info) { Some(bundled_messages) => bundled_messages, - None => return Ok(Default::default()), + None => return Ok((Default::default(), Some(data), origin)), }; // we only boost priority if relayer has staked required balance - if !RelayersPallet::::is_registration_active(who) { - return Ok(Default::default()) + if !RelayersPallet::::is_registration_active(&data.relayer) { + return Ok((Default::default(), Some(data), origin)) } // compute priority boost @@ -334,48 +338,43 @@ where "{}.{:?}: has boosted priority of message delivery transaction \ of relayer {:?}: {} messages -> {} priority", Self::IDENTIFIER, - parsed_call.as_ref().map(|p| p.messages_call_info().lane_id()), - who, + data.call_info.messages_call_info().lane_id(), + data.relayer, bundled_messages, priority_boost, ); - valid_transaction.build() + let validity = valid_transaction.build()?; + Ok((validity, Some(data), origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + val: Self::Val, + _origin: &::RuntimeOrigin, + _call: &R::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, ) -> Result { - // this is a relevant piece of `validate` that we need here (in `pre_dispatch`) - let parsed_call = C::parse_and_check_for_obsolete_call(call)?; - - Ok(parsed_call.map(|call_info| { + Ok(val.inspect(|data| { log::trace!( target: LOG_TARGET, - "{}.{:?}: parsed bridge transaction in pre-dispatch: {:?}", + "{}.{:?}: parsed bridge transaction in prepare: {:?}", Self::IDENTIFIER, - call_info.messages_call_info().lane_id(), - call_info, + data.call_info.messages_call_info().lane_id(), + data.call_info, ); - PreDispatchData { relayer: who.clone(), call_info } })) } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - let lane_id = pre - .as_ref() - .and_then(|p| p.as_ref()) - .map(|p| p.call_info.messages_call_info().lane_id()); + ) -> Result { + let lane_id = pre.as_ref().map(|p| p.call_info.messages_call_info().lane_id()); let call_result = Self::analyze_call_result(pre, info, post_info, len, result); match call_result { @@ -399,7 +398,7 @@ where ), } - Ok(()) + Ok(Weight::zero()) } } @@ -463,8 +462,8 @@ 
mod tests { use pallet_bridge_parachains::{Call as ParachainsCall, Pallet as ParachainsPallet}; use pallet_utility::Call as UtilityCall; use sp_runtime::{ - traits::{ConstU64, Header as HeaderT}, - transaction_validity::{InvalidTransaction, ValidTransaction}, + traits::{ConstU64, DispatchTransaction, Header as HeaderT}, + transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, DispatchError, }; @@ -496,7 +495,7 @@ mod tests { ConstU64<1>, >; type TestGrandpaExtension = - BridgeRelayersSignedExtension; + BridgeRelayersTransactionExtension; type TestExtensionConfig = parachain_adapter::WithParachainExtensionConfig< StrTestExtension, TestRuntime, @@ -507,7 +506,7 @@ mod tests { ConstU64<1>, >; type TestExtension = - BridgeRelayersSignedExtension; + BridgeRelayersTransactionExtension; type TestMessagesExtensionConfig = messages_adapter::WithMessagesExtensionConfig< StrTestMessagesExtension, TestRuntime, @@ -515,8 +514,11 @@ mod tests { (), ConstU64<1>, >; - type TestMessagesExtension = - BridgeRelayersSignedExtension; + type TestMessagesExtension = BridgeRelayersTransactionExtension< + TestRuntime, + TestMessagesExtensionConfig, + TestLaneIdType, + >; fn initial_balance_of_relayer_account_at_this_chain() -> ThisChainBalance { let test_stake: ThisChainBalance = Stake::get(); @@ -1067,18 +1069,39 @@ mod tests { } fn run_validate(call: RuntimeCall) -> TransactionValidity { - let extension: TestExtension = BridgeRelayersSignedExtension(PhantomData); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|t| t.0) } fn run_grandpa_validate(call: RuntimeCall) -> TransactionValidity { - let extension: TestGrandpaExtension = BridgeRelayersSignedExtension(PhantomData); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestGrandpaExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|t| t.0) } fn run_messages_validate(call: RuntimeCall) -> TransactionValidity { - let extension: TestMessagesExtension = BridgeRelayersSignedExtension(PhantomData); - extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestMessagesExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_only( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|t| t.0) } fn ignore_priority(tx: TransactionValidity) -> TransactionValidity { @@ -1095,8 +1118,15 @@ mod tests { TransactionValidityError, > { sp_tracing::try_init_simple(); - let extension: TestExtension = BridgeRelayersSignedExtension(PhantomData); - extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|(pre, _)| pre) } fn run_grandpa_pre_dispatch( @@ -1105,8 +1135,15 @@ mod tests { Option>, TransactionValidityError, > { - let extension: TestGrandpaExtension = BridgeRelayersSignedExtension(PhantomData); - 
extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestGrandpaExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|(pre, _)| pre) } fn run_messages_pre_dispatch( @@ -1115,16 +1152,24 @@ mod tests { Option>, TransactionValidityError, > { - let extension: TestMessagesExtension = BridgeRelayersSignedExtension(PhantomData); - extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + let extension: TestMessagesExtension = BridgeRelayersTransactionExtension(PhantomData); + extension + .validate_and_prepare( + Some(relayer_account_at_this_chain()).into(), + &call, + &DispatchInfo::default(), + 0, + ) + .map(|(pre, _)| pre) } fn dispatch_info() -> DispatchInfo { DispatchInfo { - weight: Weight::from_parts( + call_weight: Weight::from_parts( frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, 0, ), + extension_weight: Weight::zero(), class: frame_support::dispatch::DispatchClass::Normal, pays_fee: frame_support::dispatch::Pays::Yes, } @@ -1140,21 +1185,21 @@ mod tests { >, dispatch_result: DispatchResult, ) { - let post_dispatch_result = TestExtension::post_dispatch( - Some(pre_dispatch_data), + let post_dispatch_result = TestExtension::post_dispatch_details( + pre_dispatch_data, &dispatch_info(), &post_dispatch_info(), 1024, &dispatch_result, ); - assert_eq!(post_dispatch_result, Ok(())); + assert_eq!(post_dispatch_result, Ok(Weight::zero())); } fn expected_delivery_reward() -> ThisChainBalance { let mut post_dispatch_info = post_dispatch_info(); let extra_weight = ::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(); post_dispatch_info.actual_weight = - Some(dispatch_info().weight.saturating_sub(extra_weight)); + Some(dispatch_info().call_weight.saturating_sub(extra_weight)); pallet_transaction_payment::Pallet::::compute_actual_fee( 1024, &dispatch_info(), @@ -1714,7 +1759,7 @@ mod tests { initialize_environment(200, 200, 200); let mut dispatch_info = dispatch_info(); - dispatch_info.weight = Weight::from_parts( + dispatch_info.call_weight = Weight::from_parts( frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND * 2, 0, ); @@ -1918,7 +1963,7 @@ mod tests { dispatch_result: DispatchResult, ) -> RelayerAccountAction { TestExtension::analyze_call_result( - Some(Some(pre_dispatch_data)), + Some(pre_dispatch_data), &dispatch_info(), &post_dispatch_info(), 1024, diff --git a/bridges/modules/relayers/src/extension/priority.rs b/bridges/modules/relayers/src/extension/priority.rs index da188eaf5bdd4a270368c8dad867e77e591ed12c..e09e8627c6730c443441e6bceec1d3dd40a08744 100644 --- a/bridges/modules/relayers/src/extension/priority.rs +++ b/bridges/modules/relayers/src/extension/priority.rs @@ -206,14 +206,17 @@ mod integrity_tests { // finally we are able to estimate transaction size and weight let transaction_size = base_tx_size.saturating_add(tx_call_size); - let transaction_weight = Runtime::WeightInfo::submit_finality_proof_weight( + let transaction_weight = >::WeightInfo::submit_finality_proof_weight( Runtime::BridgedChain::MAX_AUTHORITIES_COUNT * 2 / 3 + 1, Runtime::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, ); pallet_transaction_payment::ChargeTransactionPayment::::get_priority( &DispatchInfo { - weight: transaction_weight, + call_weight: transaction_weight, + extension_weight: Default::default(), class: 
DispatchClass::Normal, pays_fee: Pays::Yes, }, @@ -315,7 +318,8 @@ mod integrity_tests { pallet_transaction_payment::ChargeTransactionPayment::::get_priority( &DispatchInfo { - weight: transaction_weight, + call_weight: transaction_weight, + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }, @@ -385,20 +389,27 @@ mod integrity_tests { // trie nodes to the proof (x0.5 because we expect some nodes to be reused) let estimated_message_size = 512; // let's say all our messages have the same dispatch weight - let estimated_message_dispatch_weight = - Runtime::WeightInfo::message_dispatch_weight(estimated_message_size); + let estimated_message_dispatch_weight = >::WeightInfo::message_dispatch_weight( + estimated_message_size + ); // messages proof argument size is (for every message) messages size + some additional // trie nodes. Some of them are reused by different messages, so let's take 2/3 of // default "overhead" constant - let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size() - .saturating_mul(2) - .saturating_div(3) - .saturating_add(estimated_message_size) - .saturating_mul(messages as _); + let messages_proof_size = >::WeightInfo::expected_extra_storage_proof_size() + .saturating_mul(2) + .saturating_div(3) + .saturating_add(estimated_message_size) + .saturating_mul(messages as _); // finally we are able to estimate transaction size and weight let transaction_size = base_tx_size.saturating_add(messages_proof_size); - let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight( + let transaction_weight = >::WeightInfo::receive_messages_proof_weight( &PreComputedSize(transaction_size as _), messages as _, estimated_message_dispatch_weight.saturating_mul(messages), @@ -406,7 +417,8 @@ mod integrity_tests { pallet_transaction_payment::ChargeTransactionPayment::::get_priority( &DispatchInfo { - weight: transaction_weight, + call_weight: transaction_weight, + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }, diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs index e83be59b23890036905ae1abb441c454c2ce29a5..a8abdb59bea3356dfeb7027aefe3bd17e987fcbb 100644 --- a/bridges/primitives/polkadot-core/src/lib.rs +++ b/bridges/primitives/polkadot-core/src/lib.rs @@ -24,8 +24,8 @@ use bp_runtime::{ self, extensions::{ ChargeTransactionPayment, CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, - CheckSpecVersion, CheckTxVersion, CheckWeight, GenericSignedExtension, - SignedExtensionSchema, + CheckSpecVersion, CheckTxVersion, CheckWeight, GenericTransactionExtension, + TransactionExtensionSchema, }, EncodedOrDecodedCall, StorageMapKeyProvider, TransactionEra, }; @@ -229,8 +229,12 @@ pub type SignedBlock = generic::SignedBlock; pub type Balance = u128; /// Unchecked Extrinsic type. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic, Signature, SignedExt>; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic< + AccountAddress, + EncodedOrDecodedCall, + Signature, + TransactionExt, +>; /// Account address, used by the Polkadot-like chain. pub type Address = MultiAddress; @@ -275,7 +279,7 @@ impl AccountInfoStorageMapKeyProvider { } /// Extra signed extension data that is used by most chains. 
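The priority and fee hunks above switch `DispatchInfo` from a single `weight` field to `call_weight` plus `extension_weight`, and read the combined value through `total_weight()`. A small sketch of the new shape, with arbitrary numbers:

```rust
use frame_support::dispatch::{DispatchClass, DispatchInfo, Pays};
use frame_support::weights::Weight;

fn example_dispatch_info() -> DispatchInfo {
    let info = DispatchInfo {
        // Weight of the call itself, formerly the single `weight` field.
        call_weight: Weight::from_parts(1_000_000, 0),
        // Weight charged for the transaction extension pipeline.
        extension_weight: Weight::from_parts(50_000, 0),
        class: DispatchClass::Normal,
        pays_fee: Pays::Yes,
    };
    // Code that previously read `info.weight` now uses the sum of both parts.
    assert_eq!(info.total_weight(), Weight::from_parts(1_050_000, 0));
    info
}
```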
-pub type CommonSignedExtra = ( +pub type CommonTransactionExtra = ( CheckNonZeroSender, CheckSpecVersion, CheckTxVersion, @@ -286,12 +290,12 @@ pub type CommonSignedExtra = ( ChargeTransactionPayment, ); -/// Extra signed extension data that starts with `CommonSignedExtra`. -pub type SuffixedCommonSignedExtension = - GenericSignedExtension<(CommonSignedExtra, Suffix)>; +/// Extra transaction extension data that starts with `CommonTransactionExtra`. +pub type SuffixedCommonTransactionExtension = + GenericTransactionExtension<(CommonTransactionExtra, Suffix)>; -/// Helper trait to define some extra methods on `SuffixedCommonSignedExtension`. -pub trait SuffixedCommonSignedExtensionExt { +/// Helper trait to define some extra methods on `SuffixedCommonTransactionExtension`. +pub trait SuffixedCommonTransactionExtensionExt { /// Create signed extension from its components. fn from_params( spec_version: u32, @@ -300,7 +304,7 @@ pub trait SuffixedCommonSignedExtensionExt { genesis_hash: Hash, nonce: Nonce, tip: Balance, - extra: (Suffix::Payload, Suffix::AdditionalSigned), + extra: (Suffix::Payload, Suffix::Implicit), ) -> Self; /// Return transaction nonce. @@ -310,9 +314,10 @@ pub trait SuffixedCommonSignedExtensionExt { fn tip(&self) -> Balance; } -impl SuffixedCommonSignedExtensionExt for SuffixedCommonSignedExtension +impl SuffixedCommonTransactionExtensionExt + for SuffixedCommonTransactionExtension where - Suffix: SignedExtensionSchema, + Suffix: TransactionExtensionSchema, { fn from_params( spec_version: u32, @@ -321,9 +326,9 @@ where genesis_hash: Hash, nonce: Nonce, tip: Balance, - extra: (Suffix::Payload, Suffix::AdditionalSigned), + extra: (Suffix::Payload, Suffix::Implicit), ) -> Self { - GenericSignedExtension::new( + GenericTransactionExtension::new( ( ( (), // non-zero sender @@ -365,7 +370,7 @@ where } /// Signed extension that is used by most chains. -pub type CommonSignedExtension = SuffixedCommonSignedExtension<()>; +pub type CommonTransactionExtension = SuffixedCommonTransactionExtension<()>; #[cfg(test)] mod tests { diff --git a/bridges/primitives/relayers/src/extension.rs b/bridges/primitives/relayers/src/extension.rs index a9fa4df27ead317d6a34babe36dd289b3db568b0..8fd0f151e2a5e6f5660967da11f3ff73c0fae991 100644 --- a/bridges/primitives/relayers/src/extension.rs +++ b/bridges/primitives/relayers/src/extension.rs @@ -138,7 +138,7 @@ pub trait ExtensionConfig { /// Lane identifier type. type LaneId: Clone + Copy + Decode + Encode + Debug; - /// Given runtime call, check if it is supported by the signed extension. Additionally, + /// Given runtime call, check if it is supported by the transaction extension. Additionally, /// check if call (or any of batched calls) are obsolete. 
fn parse_and_check_for_obsolete_call( call: &::RuntimeCall, diff --git a/bridges/primitives/runtime/src/extensions.rs b/bridges/primitives/runtime/src/extensions.rs index d896bc92efffc4e8fcb427ffa7057dece6f17241..25553f9c7b2e884511409b3804ea26b3a968cce0 100644 --- a/bridges/primitives/runtime/src/extensions.rs +++ b/bridges/primitives/runtime/src/extensions.rs @@ -20,135 +20,131 @@ use codec::{Compact, Decode, Encode}; use impl_trait_for_tuples::impl_for_tuples; use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtension}, transaction_validity::TransactionValidityError, }; use sp_std::{fmt::Debug, marker::PhantomData}; -/// Trait that describes some properties of a `SignedExtension` that are needed in order to send a -/// transaction to the chain. -pub trait SignedExtensionSchema: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo { +/// Trait that describes some properties of a `TransactionExtension` that are needed in order to +/// send a transaction to the chain. +pub trait TransactionExtensionSchema: + Encode + Decode + Debug + Eq + Clone + StaticTypeInfo +{ /// A type of the data encoded as part of the transaction. type Payload: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; /// Parameters which are part of the payload used to produce transaction signature, /// but don't end up in the transaction itself (i.e. inherent part of the runtime). - type AdditionalSigned: Encode + Debug + Eq + Clone + StaticTypeInfo; + type Implicit: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; } -impl SignedExtensionSchema for () { +impl TransactionExtensionSchema for () { type Payload = (); - type AdditionalSigned = (); + type Implicit = (); } -/// An implementation of `SignedExtensionSchema` using generic params. +/// An implementation of `TransactionExtensionSchema` using generic params. #[derive(Encode, Decode, Clone, Debug, PartialEq, Eq, TypeInfo)] -pub struct GenericSignedExtensionSchema(PhantomData<(P, S)>); +pub struct GenericTransactionExtensionSchema(PhantomData<(P, S)>); -impl SignedExtensionSchema for GenericSignedExtensionSchema +impl TransactionExtensionSchema for GenericTransactionExtensionSchema where P: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, - S: Encode + Debug + Eq + Clone + StaticTypeInfo, + S: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, { type Payload = P; - type AdditionalSigned = S; + type Implicit = S; } -/// The `SignedExtensionSchema` for `frame_system::CheckNonZeroSender`. -pub type CheckNonZeroSender = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckNonZeroSender`. +pub type CheckNonZeroSender = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `frame_system::CheckSpecVersion`. -pub type CheckSpecVersion = GenericSignedExtensionSchema<(), u32>; +/// The `TransactionExtensionSchema` for `frame_system::CheckSpecVersion`. +pub type CheckSpecVersion = GenericTransactionExtensionSchema<(), u32>; -/// The `SignedExtensionSchema` for `frame_system::CheckTxVersion`. -pub type CheckTxVersion = GenericSignedExtensionSchema<(), u32>; +/// The `TransactionExtensionSchema` for `frame_system::CheckTxVersion`. +pub type CheckTxVersion = GenericTransactionExtensionSchema<(), u32>; -/// The `SignedExtensionSchema` for `frame_system::CheckGenesis`. 
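The hunk above splits every schema into an explicit `Payload`, which is encoded into the transaction, and an `Implicit` part, which is signed over but never transmitted (the old `AdditionalSigned`). Defining a new schema stays a one-liner; the alias below is a hypothetical example, not something added by this patch:

```rust
use bp_runtime::extensions::GenericTransactionExtensionSchema;
use sp_core::H256;

/// Hypothetical schema: the explicit payload is a `u64`, while the implicit
/// part (signed over but never transmitted) is a genesis hash.
pub type ExampleExtensionSchema = GenericTransactionExtensionSchema<u64, H256>;
```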
-pub type CheckGenesis = GenericSignedExtensionSchema<(), Hash>; +/// The `TransactionExtensionSchema` for `frame_system::CheckGenesis`. +pub type CheckGenesis = GenericTransactionExtensionSchema<(), Hash>; -/// The `SignedExtensionSchema` for `frame_system::CheckEra`. -pub type CheckEra = GenericSignedExtensionSchema; +/// The `TransactionExtensionSchema` for `frame_system::CheckEra`. +pub type CheckEra = GenericTransactionExtensionSchema; -/// The `SignedExtensionSchema` for `frame_system::CheckNonce`. -pub type CheckNonce = GenericSignedExtensionSchema, ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckNonce`. +pub type CheckNonce = GenericTransactionExtensionSchema, ()>; -/// The `SignedExtensionSchema` for `frame_system::CheckWeight`. -pub type CheckWeight = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `frame_system::CheckWeight`. +pub type CheckWeight = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`. -pub type ChargeTransactionPayment = GenericSignedExtensionSchema, ()>; +/// The `TransactionExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`. +pub type ChargeTransactionPayment = + GenericTransactionExtensionSchema, ()>; -/// The `SignedExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`. -pub type PrevalidateAttests = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`. +pub type PrevalidateAttests = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`. -pub type BridgeRejectObsoleteHeadersAndMessages = GenericSignedExtensionSchema<(), ()>; +/// The `TransactionExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`. +pub type BridgeRejectObsoleteHeadersAndMessages = GenericTransactionExtensionSchema<(), ()>; -/// The `SignedExtensionSchema` for `RefundBridgedParachainMessages`. +/// The `TransactionExtensionSchema` for `RefundBridgedParachainMessages`. /// This schema is dedicated to the `RefundBridgedParachainMessages` signed extension as a /// wildcard/placeholder, which relies on the fact that the SCALE encoding of `()`, `((), ())`, or `((), (), /// ())` is the same. So the runtime can contain any kind of tuple: /// `(BridgeRefundBridgeHubRococoMessages)` /// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)` /// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` -pub type RefundBridgedParachainMessagesSchema = GenericSignedExtensionSchema<(), ()>; +pub type RefundBridgedParachainMessagesSchema = GenericTransactionExtensionSchema<(), ()>; #[impl_for_tuples(1, 12)] -impl SignedExtensionSchema for Tuple { +impl TransactionExtensionSchema for Tuple { for_tuples!( type Payload = ( #( Tuple::Payload ),* ); ); - for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); + for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); ); } /// A simplified version of signed extensions meant for producing signed transactions /// and signed payloads in the client code. #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct GenericSignedExtension { +pub struct GenericTransactionExtension { /// A payload that is included in the transaction. pub payload: S::Payload, #[codec(skip)] // It may be set to `None` if extensions are decoded.
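The wildcard/placeholder comment above relies on a SCALE property that can be checked in isolation. A minimal sketch (assuming the parity-scale-codec crate under its usual `codec` alias):

use codec::Encode; // parity-scale-codec

fn unit_tuples_encode_identically() {
    // `()` encodes to zero bytes and tuples encode as the concatenation of their fields,
    // so every all-unit tuple has the same, empty encoding.
    assert!(().encode().is_empty());
    assert_eq!(((), ()).encode(), ().encode());
    assert_eq!(((), (), ()).encode(), ().encode());
}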
We are never reconstructing transactions - // (and it makes no sense to do that) => decoded version of `SignedExtensions` is only used to - // read fields of the `payload`. And when resigning transaction, we're reconstructing - // `SignedExtensions` from scratch. - additional_signed: Option, + // (and it makes no sense to do that) => decoded version of `TransactionExtensions` is only + // used to read fields of the `payload`. And when resigning transaction, we're reconstructing + // `TransactionExtensions` from scratch. + implicit: Option, } -impl GenericSignedExtension { - /// Create new `GenericSignedExtension` object. - pub fn new(payload: S::Payload, additional_signed: Option) -> Self { - Self { payload, additional_signed } +impl GenericTransactionExtension { + /// Create new `GenericTransactionExtension` object. + pub fn new(payload: S::Payload, implicit: Option) -> Self { + Self { payload, implicit } } } -impl SignedExtension for GenericSignedExtension +impl TransactionExtension for GenericTransactionExtension where - S: SignedExtensionSchema, + C: Dispatchable, + S: TransactionExtensionSchema, S::Payload: Send + Sync, - S::AdditionalSigned: Send + Sync, + S::Implicit: Send + Sync, { const IDENTIFIER: &'static str = "Not needed."; - type AccountId = (); - type Call = (); - type AdditionalSigned = S::AdditionalSigned; - type Pre = (); + type Implicit = S::Implicit; - fn additional_signed(&self) -> Result { + fn implicit(&self) -> Result { // we shall not ever see this error in relay, because we are never signing decoded // transactions. Instead we're constructing and signing new transactions. So the error code // is kinda random here - self.additional_signed.clone().ok_or( - frame_support::unsigned::TransactionValidityError::Unknown( + self.implicit + .clone() + .ok_or(frame_support::unsigned::TransactionValidityError::Unknown( frame_support::unsigned::UnknownTransaction::Custom(0xFF), - ), - ) + )) } + type Pre = (); + type Val = (); - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } + impl_tx_ext_default!(C; weight validate prepare); } diff --git a/bridges/relays/lib-substrate-relay/src/messages/mod.rs b/bridges/relays/lib-substrate-relay/src/messages/mod.rs index f7031648bc35d3b0dc249ae25888409eb368d0d9..b4ee57ed7742e684a3c04d133d28940d21d81e84 100644 --- a/bridges/relays/lib-substrate-relay/src/messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/messages/mod.rs @@ -428,7 +428,7 @@ where "Prepared {} -> {} messages delivery call. Weight: {}/{}, size: {}/{}", P::SourceChain::NAME, P::TargetChain::NAME, - call.get_dispatch_info().weight, + call.get_dispatch_info().call_weight, P::TargetChain::max_extrinsic_weight(), call.encode().len(), P::TargetChain::max_extrinsic_size(), @@ -521,7 +521,7 @@ where "Prepared {} -> {} delivery confirmation transaction. 
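Because the field above is marked `#[codec(skip)]`, the implicit data never reaches the wire and decoding always yields `None`, which is why `implicit()` has to error for decoded transactions. A self-contained illustration (the `Demo` type is made up for this sketch, not part of the bridge code):

use codec::{Decode, Encode};

#[derive(Encode, Decode)]
struct Demo {
    payload: u32,
    #[codec(skip)] // not encoded; filled with `Default::default()` when decoding
    implicit: Option<u64>,
}

fn skipped_field_never_hits_the_wire() {
    let tx = Demo { payload: 7, implicit: Some(42) };
    assert_eq!(tx.encode(), Demo { payload: 7, implicit: None }.encode());
    let decoded = Demo::decode(&mut &tx.encode()[..]).unwrap();
    assert_eq!(decoded.implicit, None);
}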
Weight: {}/{}, size: {}/{}", P::TargetChain::NAME, P::SourceChain::NAME, - call.get_dispatch_info().weight, + call.get_dispatch_info().call_weight, P::SourceChain::max_extrinsic_weight(), call.encode().len(), P::SourceChain::max_extrinsic_size(), diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index 666ac3fbc8a2ab32b485c088c37b536e751cdd46..262d9a7f380ded22b3a280b791e2525b3a75400c 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -49,13 +49,7 @@ serde = { workspace = true, default-features = true } [features] default = ["std"] -fuzzing = [ - "hex-literal", - "pallet-timestamp", - "serde", - "serde_json", - "sp-io", -] +fuzzing = ["hex-literal", "pallet-timestamp", "serde", "serde_json", "sp-io"] std = [ "codec/std", "frame-support/std", diff --git a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml index bd4176875733f64f61026e165a2a03b221193bad..87f0cf9a551308d58e5eb080a9057f844d2543af 100644 --- a/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/fixtures/Cargo.toml @@ -29,6 +29,4 @@ std = [ "sp-core/std", "sp-std/std", ] -runtime-benchmarks = [ - "snowbridge-core/runtime-benchmarks", -] +runtime-benchmarks = ["snowbridge-core/runtime-benchmarks"] diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml index b66b57c3620ad5488d58b90a8f337ecd88f07e5d..6162a17728b61e55da68af95bba5800d6e0e47cc 100644 --- a/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/Cargo.toml @@ -29,6 +29,4 @@ std = [ "sp-core/std", "sp-std/std", ] -runtime-benchmarks = [ - "snowbridge-core/runtime-benchmarks", -] +runtime-benchmarks = ["snowbridge-core/runtime-benchmarks"] diff --git a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml index 9d4cffc98d789532ef13ed2a59f9f8d28d330ebb..16241428df80871fae21ade130bf0f72c7f2f1ba 100644 --- a/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/merkle-tree/Cargo.toml @@ -30,9 +30,4 @@ sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] -std = [ - "codec/std", - "scale-info/std", - "sp-core/std", - "sp-runtime/std", -] +std = ["codec/std", "scale-info/std", "sp-core/std", "sp-runtime/std"] diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs index fbfc52d01c8373019054723fca8f05d90f33012d..a9324ac42470bf627e586e22c5f8984b8e3bfc5d 100644 --- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs @@ -416,6 +416,8 @@ where // Final destination is a 32-byte account on AssetHub Destination::AccountId32 { id } => Ok(Location::new(0, [AccountId32 { network: None, id }])), + // Forwarding to a destination parachain is not allowed for PNA and is validated on the + // Ethereum side. 
https://github.com/Snowfork/snowbridge/blob/e87ddb2215b513455c844463a25323bb9c01ff36/contracts/src/Assets.sol#L216-L224 _ => Err(ConvertMessageError::InvalidDestination), }?; diff --git a/bridges/snowbridge/primitives/router/src/outbound/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/mod.rs index efc1ef56f30479f2f238ebccbdae0303bc99e07c..3b5dbdb77c89227d053548375b0c30b47792cea9 100644 --- a/bridges/snowbridge/primitives/router/src/outbound/mod.rs +++ b/bridges/snowbridge/primitives/router/src/outbound/mod.rs @@ -207,9 +207,9 @@ where fn convert(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { let result = match self.peek() { - Ok(ReserveAssetDeposited { .. }) => self.send_native_tokens_message(), + Ok(ReserveAssetDeposited { .. }) => self.make_mint_foreign_token_command(), // Get withdraw/deposit and make native tokens create message. - Ok(WithdrawAsset { .. }) => self.send_tokens_message(), + Ok(WithdrawAsset { .. }) => self.make_unlock_native_token_command(), Err(e) => Err(e), _ => return Err(XcmConverterError::UnexpectedInstruction), }?; @@ -222,7 +222,9 @@ where Ok(result) } - fn send_tokens_message(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { + fn make_unlock_native_token_command( + &mut self, + ) -> Result<(Command, [u8; 32]), XcmConverterError> { use XcmConverterError::*; // Get the reserve assets from WithdrawAsset. @@ -271,7 +273,12 @@ where ensure!(reserve_assets.len() == 1, TooManyAssets); let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; - // If there was a fee specified verify it. + // Fees are collected on AH, up front and directly from the user, to cover the + // complete cost of the transfer. Any additional fees provided in the XCM program are + // refunded to the beneficiary. We only validate the fee here if it's provided to make sure + // the XCM program is well formed. Another way to think about this from an XCM perspective + // would be that the user offered to pay X amount in fees, but we charge 0 of that X amount + // (no fee) and refund X to the user. if let Some(fee_asset) = fee_asset { // The fee asset must be the same as the reserve asset. if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun { @@ -328,7 +335,9 @@ where /// # BuyExecution /// # DepositAsset /// # SetTopic - fn send_native_tokens_message(&mut self) -> Result<(Command, [u8; 32]), XcmConverterError> { + fn make_mint_foreign_token_command( + &mut self, + ) -> Result<(Command, [u8; 32]), XcmConverterError> { use XcmConverterError::*; // Get the reserve assets. @@ -377,7 +386,12 @@ where ensure!(reserve_assets.len() == 1, TooManyAssets); let reserve_asset = reserve_assets.get(0).ok_or(AssetResolutionFailed)?; - // If there was a fee specified verify it. + // Fees are collected on AH, up front and directly from the user, to cover the + // complete cost of the transfer. Any additional fees provided in the XCM program are + // refunded to the beneficiary. We only validate the fee here if it's provided to make sure + // the XCM program is well formed. Another way to think about this from an XCM perspective + // would be that the user offered to pay X amount in fees, but we charge 0 of that X amount + // (no fee) and refund X to the user. if let Some(fee_asset) = fee_asset { // The fee asset must be the same as the reserve asset.
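The new comments above describe the intent; stripped of the XCM types, the check that follows amounts to this (a sketch with simplified stand-in types, not the real `Asset`):

// `id` stands in for the asset id, `fun` for the fungible amount.
struct AssetLite { id: u8, fun: u128 }

fn fee_is_well_formed(fee: Option<&AssetLite>, reserve: &AssetLite) -> bool {
    match fee {
        // No fee in the XCM program: nothing to validate, the real fee was already paid on AH.
        None => true,
        // A provided fee must be the same asset as the reserve and must not exceed it;
        // it is not charged here and ends up refunded to the beneficiary.
        Some(fee) => fee.id == reserve.id && fee.fun <= reserve.fun,
    }
}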
if fee_asset.id != reserve_asset.id || fee_asset.fun > reserve_asset.fun { diff --git a/bridges/testing/environments/rococo-westend/rococo.zndsl b/bridges/testing/environments/rococo-westend/rococo-bridge.zndsl similarity index 100% rename from bridges/testing/environments/rococo-westend/rococo.zndsl rename to bridges/testing/environments/rococo-westend/rococo-bridge.zndsl diff --git a/bridges/testing/environments/rococo-westend/rococo-start.zndsl b/bridges/testing/environments/rococo-westend/rococo-start.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..8c719b010df684aac128b2d64bca887ddc2d505e --- /dev/null +++ b/bridges/testing/environments/rococo-westend/rococo-start.zndsl @@ -0,0 +1,8 @@ +Description: Check if the Rococo parachains started producing blocks reliably +Network: ./bridge_hub_westend_local_network.toml +Creds: config + +# ensure that initialization has completed +asset-hub-rococo-collator1: reports block height is at least 10 within 180 seconds +bridge-hub-rococo-collator1: reports block height is at least 10 within 180 seconds + diff --git a/bridges/testing/environments/rococo-westend/spawn.sh b/bridges/testing/environments/rococo-westend/spawn.sh index a0ab00be14448f92bf31f2eea2eba91c2ac5240e..83b3b0720bb823c97bf1fb55e833f2623837f5c2 100755 --- a/bridges/testing/environments/rococo-westend/spawn.sh +++ b/bridges/testing/environments/rococo-westend/spawn.sh @@ -35,9 +35,11 @@ start_zombienet $TEST_DIR $westend_def westend_dir westend_pid echo if [[ $init -eq 1 ]]; then + run_zndsl ${BASH_SOURCE%/*}/rococo-start.zndsl $rococo_dir + run_zndsl ${BASH_SOURCE%/*}/westend-start.zndsl $westend_dir + rococo_init_log=$logs_dir/rococo-init.log echo -e "Setting up the rococo side of the bridge. Logs available at: $rococo_init_log\n" - westend_init_log=$logs_dir/westend-init.log echo -e "Setting up the westend side of the bridge. Logs available at: $westend_init_log\n" @@ -47,7 +49,6 @@ if [[ $init -eq 1 ]]; then westend_init_pid=$! wait -n $rococo_init_pid $westend_init_pid - $helper_script init-bridge-hub-rococo-local >> $rococo_init_log 2>&1 & rococo_init_pid=$! $helper_script init-bridge-hub-westend-local >> $westend_init_log 2>&1 & diff --git a/bridges/testing/environments/rococo-westend/start_relayer.sh b/bridges/testing/environments/rococo-westend/start_relayer.sh index 9c57e4a6ab6e198e10e8c233c9c9e64a3499a0f4..150fce035071d9bc1d24699b97ac31df357fbf2b 100755 --- a/bridges/testing/environments/rococo-westend/start_relayer.sh +++ b/bridges/testing/environments/rococo-westend/start_relayer.sh @@ -29,8 +29,8 @@ messages_relayer_log=$logs_dir/relayer_messages.log echo -e "Starting rococo-westend messages relayer. 
Logs available at: $messages_relayer_log\n" start_background_process "$helper_script run-messages-relay" $messages_relayer_log messages_relayer_pid -run_zndsl ${BASH_SOURCE%/*}/rococo.zndsl $rococo_dir -run_zndsl ${BASH_SOURCE%/*}/westend.zndsl $westend_dir +run_zndsl ${BASH_SOURCE%/*}/rococo-bridge.zndsl $rococo_dir +run_zndsl ${BASH_SOURCE%/*}/westend-bridge.zndsl $westend_dir eval $__finality_relayer_pid="'$finality_relayer_pid'" eval $__parachains_relayer_pid="'$parachains_relayer_pid'" diff --git a/bridges/testing/environments/rococo-westend/westend.zndsl b/bridges/testing/environments/rococo-westend/westend-bridge.zndsl similarity index 100% rename from bridges/testing/environments/rococo-westend/westend.zndsl rename to bridges/testing/environments/rococo-westend/westend-bridge.zndsl diff --git a/bridges/testing/environments/rococo-westend/westend-start.zndsl b/bridges/testing/environments/rococo-westend/westend-start.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..fe587322edb6bda8d4a02cbb067c190bce495fec --- /dev/null +++ b/bridges/testing/environments/rococo-westend/westend-start.zndsl @@ -0,0 +1,8 @@ +Description: Check if the Westend parachains started producing blocks reliably +Network: ./bridge_hub_westend_local_network.toml +Creds: config + +# ensure that initialization has completed +asset-hub-westend-collator1: reports block height is at least 10 within 180 seconds +bridge-hub-westend-collator1: reports block height is at least 10 within 180 seconds + diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index c4c8440e5187dde760b686aae877403cdd00d579..2c531c39accd6058842190f65f2454bdb0a6b003 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -33,7 +33,7 @@ use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; -use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, ExtrinsicLike, HashingFor, Header as HeaderT}; use sp_trie::{MemoryDB, ProofSizeProvider}; use trie_recorder::SizeOnlyRecorderProvider; @@ -96,7 +96,7 @@ pub fn validate_block< ) -> ValidationResult where B::Extrinsic: ExtrinsicCall, - ::Call: IsSubType>, + ::Call: IsSubType>, { let block_data = codec::decode_from_bytes::>(block_data) .expect("Invalid parachain block data"); @@ -240,16 +240,13 @@ fn extract_parachain_inherent_data( ) -> &ParachainInherentData where B::Extrinsic: ExtrinsicCall, - ::Call: IsSubType>, + ::Call: IsSubType>, { block .extrinsics() .iter() // Inherents are at the front of the block and are unsigned. - // - // If `is_signed` is returning `None`, we keep it safe and assume that it is "signed". - // We are searching for unsigned transactions anyway. 
- .take_while(|e| !e.is_signed().unwrap_or(true)) + .take_while(|e| e.is_bare()) .filter_map(|e| e.call().is_sub_type()) .find_map(|c| match c { crate::Call::set_validation_data { data: validation_data } => Some(validation_data), diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index 68926b04bfe6d8c86396ede5dce0dbd8d8b4396b..3ff5ed388a39d93cd088cca879b8b24914184497 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -451,3 +451,71 @@ macro_rules! test_dry_run_transfer_across_pk_bridge { } }; } + +#[macro_export] +macro_rules! test_xcm_fee_querying_apis_work_for_asset_hub { + ( $asset_hub:ty ) => { + $crate::macros::paste::paste! { + use emulated_integration_tests_common::USDT_ID; + use xcm_runtime_apis::fees::{Error as XcmPaymentApiError, runtime_decl_for_xcm_payment_api::XcmPaymentApiV1}; + + $asset_hub::execute_with(|| { + // Setup a pool between USDT and WND. + type RuntimeOrigin = <$asset_hub as Chain>::RuntimeOrigin; + type Assets = <$asset_hub as [<$asset_hub Pallet>]>::Assets; + type AssetConversion = <$asset_hub as [<$asset_hub Pallet>]>::AssetConversion; + let wnd = Location::new(1, []); + let usdt = Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(USDT_ID.into())]); + let sender = [<$asset_hub Sender>]::get(); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(sender.clone()), + Box::new(wnd.clone()), + Box::new(usdt.clone()), + )); + + type Runtime = <$asset_hub as Chain>::Runtime; + let acceptable_payment_assets = Runtime::query_acceptable_payment_assets(4).unwrap(); + assert_eq!(acceptable_payment_assets, vec![ + VersionedAssetId::from(AssetId(wnd.clone())), + VersionedAssetId::from(AssetId(usdt.clone())), + ]); + + let program = Xcm::<()>::builder() + .withdraw_asset((Parent, 100u128)) + .buy_execution((Parent, 10u128), Unlimited) + .deposit_asset(All, [0u8; 32]) + .build(); + let weight = Runtime::query_xcm_weight(VersionedXcm::from(program)).unwrap(); + let fee_in_wnd = Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(wnd.clone()))).unwrap(); + // Assets not in a pool don't work. + assert!(Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(Location::new(0, [PalletInstance(ASSETS_PALLET_ID), GeneralIndex(1)])))).is_err()); + let fee_in_usdt_fail = Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(usdt.clone()))); + // Weight to asset fee fails because there's not enough asset in the pool. + // We just created it, there's none. + assert_eq!(fee_in_usdt_fail, Err(XcmPaymentApiError::AssetNotFound)); + // We add some. + assert_ok!(Assets::mint( + RuntimeOrigin::signed(sender.clone()), + USDT_ID.into(), + sender.clone().into(), + 5_000_000_000_000 + )); + // We make 1 WND = 4 USDT. + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(sender.clone()), + Box::new(wnd), + Box::new(usdt.clone()), + 1_000_000_000_000, + 4_000_000_000_000, + 0, + 0, + sender.into() + )); + // Now it works. 
+ let fee_in_usdt = Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(usdt))); + assert_ok!(fee_in_usdt); + assert!(fee_in_usdt.unwrap() > fee_in_wnd); + }); + } + }; +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 12f440fdefee130bd7a0fe030c49991ea8ce5458..1184b5fd7c4a4ba7909ca4c349777d3fbaaf0108 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -37,7 +37,7 @@ mod imports { pub use emulated_integration_tests_common::{ accounts::DUMMY_EMPTY, test_parachain_is_trusted_teleporter, test_parachain_is_trusted_teleporter_for_relay, - test_relay_is_trusted_teleporter, + test_relay_is_trusted_teleporter, test_xcm_fee_querying_apis_work_for_asset_hub, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, Test, TestArgs, TestContext, TestExt, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index ac0c90ba198d0542f55412a7763efc6c2f0ee199..d9b32eaa357edc042497a39dc90d13e3ed21508a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -386,3 +386,8 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { ); }); } + +#[test] +fn xcm_fee_querying_apis_work() { + test_xcm_fee_querying_apis_work_for_asset_hub!(AssetHubRococo); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 906768b19b79f20810ab19c1854aecea23280475..179a44e14aa1c4818ac39360a7209f60feee7afd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -34,7 +34,7 @@ mod imports { pub use emulated_integration_tests_common::{ accounts::DUMMY_EMPTY, test_parachain_is_trusted_teleporter, test_parachain_is_trusted_teleporter_for_relay, - test_relay_is_trusted_teleporter, + test_relay_is_trusted_teleporter, test_xcm_fee_querying_apis_work_for_asset_hub, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, Test, TestArgs, TestContext, TestExt, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index 1a2821452155db77eca049f490a7ce6cacd873d0..4535fd431990d6e2d1eb03f945ad003933aeb502 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -389,3 +389,8 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { ); }); } + +#[test] +fn xcm_fee_querying_apis_work() { + test_xcm_fee_querying_apis_work_for_asset_hub!(AssetHubWestend); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs 
b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs index 584bce8f1df76e3298bacf731986b84c9b55ff64..9915b1753ef6d2af0e5d4c49fa7b166ee68c959b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-rococo/src/tests/coretime_interface.rs @@ -46,7 +46,7 @@ fn transact_hardcoded_weights_are_sane() { // Create and populate schedule with the worst case assignment on this core. let mut schedule = Vec::new(); - for i in 0..27 { + for i in 0..80 { schedule.push(ScheduleItem { mask: CoreMask::void().set(i), assignment: CoreAssignment::Task(2000 + i), diff --git a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs index f61bc4285a0cf24a2184d301da64ea48670961f0..00530f80b958d25f3af36ea17a63059d239831e7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/coretime/coretime-westend/src/tests/coretime_interface.rs @@ -46,7 +46,7 @@ fn transact_hardcoded_weights_are_sane() { // Create and populate schedule with the worst case assignment on this core. let mut schedule = Vec::new(); - for i in 0..27 { + for i in 0..80 { schedule.push(ScheduleItem { mask: CoreMask::void().set(i), assignment: CoreAssignment::Task(2000 + i), diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 00f2cf8f636f2c5e4e5c62a1377b65e2ce61f7e5..42adaba7a27c50f1e3157c6b9a70d662e627ae90 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -117,6 +117,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-asset-conversion-ops/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets-freezer/runtime-benchmarks", "pallet-assets/runtime-benchmarks", @@ -128,6 +129,7 @@ runtime-benchmarks = [ "pallet-nfts/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/genesis_config_presets.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/genesis_config_presets.rs index dc98d00f8f63a2d02a058869e750173322016ce3..d15dd971a8194812f6120858d35b3e0261088d34 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/genesis_config_presets.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/genesis_config_presets.rs @@ -18,6 +18,7 @@ use crate::*; use alloc::{vec, vec::Vec}; use cumulus_primitives_core::ParaId; +use frame_support::build_struct_json_patch; use hex_literal::hex; use parachains_common::{AccountId, AuraId}; use sp_core::crypto::UncheckedInto; @@ -33,15 +34,14 @@ fn asset_hub_rococo_genesis( endowment: Balance, id: ParaId, ) -> serde_json::Value { - let config = RuntimeGenesisConfig { + 
build_struct_json_patch!(RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, endowment)).collect(), }, - parachain_info: ParachainInfoConfig { parachain_id: id, ..Default::default() }, + parachain_info: ParachainInfoConfig { parachain_id: id }, collator_selection: CollatorSelectionConfig { invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), candidacy_bond: ASSET_HUB_ROCOCO_ED * 16, - ..Default::default() }, session: SessionConfig { keys: invulnerables @@ -54,16 +54,9 @@ fn asset_hub_rococo_genesis( ) }) .collect(), - ..Default::default() }, - polkadot_xcm: PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - ..Default::default() - }; - - serde_json::to_value(config).expect("Could not build genesis config.") + polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, + }) } /// Encapsulates names of predefined presets. diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 67f810e1eb682566d92c64401cc054c9d253298e..420fa67f41f3824bbf3397dec7fd0dd972237831 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -82,7 +82,7 @@ use parachains_common::{ use sp_runtime::{Perbill, RuntimeDebug}; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; use xcm_config::{ - ForeignAssetsConvertedConcreteId, ForeignCreatorsSovereignAccountOf, GovernanceLocation, + ForeignAssetsConvertedConcreteId, GovernanceLocation, LocationToAccountId, PoolAssetsConvertedConcreteId, TokenLocation, TrustBackedAssetsConvertedConcreteId, TrustBackedAssetsPalletLocation, }; @@ -103,7 +103,7 @@ use xcm::latest::prelude::{ }; use xcm::{ latest::prelude::{AssetId, BodyId}, - VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, + VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, @@ -123,7 +123,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_016_001, + spec_version: 1_016_002, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -175,6 +175,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -229,6 +230,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! 
{ @@ -422,7 +424,7 @@ impl pallet_assets::Config for Runtime { FromNetwork, xcm_config::bridging::to_westend::WestendOrEthereumAssetFromAssetHubWestend, ), - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, AccountId, xcm::v4::Location, >; @@ -818,6 +820,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { AssetConversion, ResolveAssetTo, >; + type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } parameter_types! { @@ -998,8 +1003,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1013,7 +1018,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( InitStorageVersions, @@ -1098,14 +1103,81 @@ pub type Executive = frame_executive::Executive< Migrations, >; +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + cumulus_primitives_core::Location, + cumulus_primitives_core::Location, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (Location, Location) { + // Use a different parachain's foreign assets pallet so that the asset is indeed foreign.
+ let asset_id = Location::new( + 1, + [ + cumulus_primitives_core::Junction::Parachain(3000), + cumulus_primitives_core::Junction::PalletInstance(53), + cumulus_primitives_core::Junction::GeneralIndex(seed.into()), + ], + ); + (asset_id.clone(), asset_id) + } + + fn setup_balances_and_pool(asset_id: cumulus_primitives_core::Location, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(ForeignAssets::force_create( + RuntimeOrigin::root(), + asset_id.clone().into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into()); + assert_ok!(ForeignAssets::mint_into( + asset_id.clone().into(), + &lp_provider, + u64::MAX.into() + )); + + let token_native = alloc::boxed::Box::new(TokenLocation::get()); + let token_second = alloc::boxed::Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + (u32::MAX / 8).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] + [pallet_asset_conversion_tx_payment, AssetTxPayment] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1116,6 +1188,7 @@ mod benches { [pallet_uniques, Uniques] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -1339,31 +1412,41 @@ impl_runtime_apis! { // We accept the native token to pay fees. let mut acceptable_assets = vec![AssetId(native_token.clone())]; // We also accept all assets in a pool with the native token. 
- acceptable_assets.extend( - pallet_asset_conversion::Pools::::iter_keys().filter_map( - |(asset_1, asset_2)| { - if asset_1 == native_token { - Some(asset_2.clone().into()) - } else if asset_2 == native_token { - Some(asset_1.clone().into()) - } else { - None - } - }, - ), - ); + let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::< + Runtime, + xcm::v4::Location + >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter(); + acceptable_assets.extend(assets_in_pool_with_native); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + let native_asset = xcm_config::TokenLocation::get(); + let fee_in_native = WeightToFee::weight_to_fee(&weight); match asset.try_as::() { - Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { + Ok(asset_id) if asset_id.0 == native_asset => { // for native token - Ok(WeightToFee::weight_to_fee(&weight)) + Ok(fee_in_native) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); - Err(XcmPaymentApiError::AssetNotFound) + let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::< + Runtime, + xcm::v4::Location + >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?; + if assets_in_pool_with_this_asset + .into_iter() + .map(|asset_id| asset_id.0) + .any(|location| location == native_asset) { + pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( + asset_id.clone().0, + native_asset, + fee_in_native, + true, // We include the fee. + ).ok_or(XcmPaymentApiError::AssetNotFound) + } else { + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + } }, Err(_) => { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); @@ -1443,6 +1526,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; @@ -1477,6 +1561,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1798,6 +1883,15 @@ impl_runtime_apis! 
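The rewritten `query_weight_to_asset_fee` above now prices the weight in the native token first and then asks the asset-conversion pool for a quote. Reduced to its control flow (a sketch; the closures stand in for `WeightToFee` and the pallet's quoting call):

fn weight_to_asset_fee(
    weight: u64,
    weight_to_native_fee: impl Fn(u64) -> u128,    // e.g. WeightToFee::weight_to_fee
    quote_in_asset: impl Fn(u128) -> Option<u128>, // pool quote; `None` if there is no usable pool
) -> Option<u128> {
    // 1) price the weight in the native token
    let fee_in_native = weight_to_native_fee(weight);
    // 2) convert that amount into the requested asset via its pool with the native token
    quote_in_asset(fee_in_native)
}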
{ genesis_config_presets::preset_names() } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..182410f20fffe19f74f887fa7858c7072d855172 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. 
+ Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. 
+ Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs index f20790cde39ced67a7d51c8678441ad423ee888f..33f111009ed0fb3e4be1e5277ed3d3e960a0dd09 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -19,8 +19,10 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_conversion; pub mod pallet_asset_conversion_ops; +pub mod pallet_asset_conversion_tx_payment; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; @@ -33,6 +35,7 @@ pub mod pallet_nfts; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..0a639b368af2248bdaf1efa7538d58a935dbe2e0 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion_tx_payment.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-04, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Georges-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_tx_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_tx_payment::WeightInfo for WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 209_000_000 picoseconds. + Weight::from_parts(212_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `631` + // Estimated: `7404` + // Minimum execution time: 1_228_000_000 picoseconds. + Weight::from_parts(1_268_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..035f9a6dbe5167cb651736f705b817d4ba9027c4 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --chain=asset-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 32fbfb6d0199f92db2564c332ae160235b02328e..637c5900f7da42030e552db9192b16a2dad1deaa 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -512,14 +512,6 @@ impl cumulus_pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; } -pub type ForeignCreatorsSovereignAccountOf = ( - SiblingParachainConvertsVia, - AccountId32Aliases, - ParentIsPreset, - GlobalConsensusEthereumConvertsFor, - GlobalConsensusParachainConvertsFor, -); - /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. 
pub struct XcmBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index 6e10f9168990114d6518ebeaf832c3a57801b308..814323135325f4efab62b205545a76dafa577f1f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -21,8 +21,8 @@ use asset_hub_rococo_runtime::{ xcm_config, xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, - ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, StakingPot, TokenLocation, TrustBackedAssetsPalletLocation, XcmConfig, + ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot, + TokenLocation, TrustBackedAssetsPalletLocation, XcmConfig, }, AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, CollatorSelection, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, @@ -941,7 +941,7 @@ asset_test_utils::include_teleports_for_foreign_assets_works!( CheckingAccount, WeightToFee, ParachainSystem, - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, ForeignAssetsInstance, collator_session_keys(), slot_durations(), @@ -1015,7 +1015,7 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p Runtime, XcmConfig, WeightToFee, - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, ForeignAssetsInstance, Location, WithLatestLocationConverter, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 72a125cee2ad51feecff841fb9789fb2d0aae77c..d5eaa43ab83449254962b295d2b6a4a413257582 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -44,6 +44,7 @@ pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-uniques = { workspace = true } +pallet-revive = { workspace = true } pallet-utility = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } @@ -118,6 +119,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-asset-conversion-ops/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "pallet-assets-freezer/runtime-benchmarks", "pallet-assets/runtime-benchmarks", @@ -128,8 +130,10 @@ runtime-benchmarks = [ "pallet-nft-fractionalization/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", + "pallet-revive/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", @@ -167,6 +171,7 @@ try-runtime = [ "pallet-nft-fractionalization/try-runtime", "pallet-nfts/try-runtime", "pallet-proxy/try-runtime", + "pallet-revive/try-runtime", "pallet-session/try-runtime", "pallet-state-trie-migration/try-runtime", "pallet-timestamp/try-runtime", @@ -219,6 +224,7 @@ std = [ "pallet-nfts-runtime-api/std", "pallet-nfts/std", 
"pallet-proxy/std", + "pallet-revive/std", "pallet-session/std", "pallet-state-trie-migration/std", "pallet-timestamp/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index aaeb3919987171d462bbcc5c8a6a0f2299186c3c..066a1c2bdfdcccf06b4528e51fc6738e80f7b4dd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -47,7 +47,7 @@ use frame_support::{ fungible, fungibles, tokens::{imbalance::ResolveAssetTo, nonfungibles_v2::Inspect}, AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, InstanceFilter, - TransformOrigin, + Nothing, TransformOrigin, }, weights::{ConstantMultiplier, Weight, WeightToFee as _}, BoundedVec, PalletId, @@ -58,13 +58,14 @@ use frame_system::{ }; use pallet_asset_conversion_tx_payment::SwapAssetAdapter; use pallet_nfts::{DestroyWitness, PalletFeatures}; +use pallet_revive::evm::runtime::EthExtra; use parachains_common::{ impls::DealWithFees, message_queue::*, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, CollectionId, Hash, Header, ItemId, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H160}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, Verify}, @@ -78,9 +79,9 @@ use testnet_parachains_constants::westend::{ consensus::*, currency::*, fee::WeightToFee, snowbridge::EthereumNetwork, time::*, }; use xcm_config::{ - ForeignAssetsConvertedConcreteId, ForeignCreatorsSovereignAccountOf, - PoolAssetsConvertedConcreteId, TrustBackedAssetsConvertedConcreteId, - TrustBackedAssetsPalletLocation, WestendLocation, XcmOriginToTransactDispatchOrigin, + ForeignAssetsConvertedConcreteId, LocationToAccountId, PoolAssetsConvertedConcreteId, + TrustBackedAssetsConvertedConcreteId, TrustBackedAssetsPalletLocation, WestendLocation, + XcmOriginToTransactDispatchOrigin, }; #[cfg(any(feature = "std", test))] @@ -93,7 +94,7 @@ use assets_common::{ use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use xcm::{ latest::prelude::AssetId, - prelude::{VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}, + prelude::{VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}, }; #[cfg(feature = "runtime-benchmarks")] @@ -123,7 +124,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_016_001, + spec_version: 1_016_002, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, @@ -175,6 +176,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -229,6 +231,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = 
weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -420,7 +423,7 @@ impl pallet_assets::Config for Runtime { FromNetwork, xcm_config::bridging::to_rococo::RococoAssetFromAssetHubRococo, ), - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, AccountId, xcm::v4::Location, >; @@ -811,6 +814,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { AssetConversion, ResolveAssetTo, >; + type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } parameter_types! { @@ -929,6 +935,52 @@ impl pallet_xcm_bridge_hub_router::Config for Runtime type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId; } +parameter_types! { + pub const DepositPerItem: Balance = deposit(1, 0); + pub const DepositPerByte: Balance = deposit(0, 1); + pub CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(30); +} + +type EventRecord = frame_system::EventRecord< + ::RuntimeEvent, + ::Hash, +>; + +impl pallet_revive::Config for Runtime { + type Time = Timestamp; + type Currency = Balances; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type CallFilter = Nothing; + type DepositPerItem = DepositPerItem; + type DepositPerByte = DepositPerByte; + type WeightPrice = pallet_transaction_payment::Pallet; + type WeightInfo = pallet_revive::weights::SubstrateWeight; + type ChainExtension = (); + type AddressMapper = pallet_revive::AccountId32Mapper; + type RuntimeMemory = ConstU32<{ 128 * 1024 * 1024 }>; + type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>; + type UnsafeUnstableInterface = ConstBool; + type UploadOrigin = EnsureSigned; + type InstantiateOrigin = EnsureSigned; + type RuntimeHoldReason = RuntimeHoldReason; + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + type Debug = (); + type Xcm = pallet_xcm::Pallet; + type ChainId = ConstU64<420_420_421>; +} + +impl TryFrom for pallet_revive::Call { + type Error = (); + + fn try_from(value: RuntimeCall) -> Result { + match value { + RuntimeCall::Revive(call) => Ok(call), + _ => Err(()), + } + } +} + // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime @@ -977,6 +1029,7 @@ construct_runtime!( AssetsFreezer: pallet_assets_freezer:: = 57, ForeignAssetsFreezer: pallet_assets_freezer:: = 58, PoolAssetsFreezer: pallet_assets_freezer:: = 59, + Revive: pallet_revive = 60, StateTrieMigration: pallet_state_trie_migration = 70, @@ -994,8 +1047,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1007,9 +1060,34 @@ pub type SignedExtra = ( cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, frame_metadata_hash_extension::CheckMetadataHash, ); + +/// Default extensions applied to Ethereum transactions. 
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct EthExtraImpl; + +impl EthExtra for EthExtraImpl { + type Config = Runtime; + type Extension = TxExtension; + + fn get_eth_extension(nonce: u32, tip: Balance) -> Self::Extension { + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckMortality::from(generic::Era::Immortal), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from(tip, None), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), + frame_metadata_hash_extension::CheckMetadataHash::::new(false), + ) + } +} + /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + pallet_revive::evm::runtime::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -1148,14 +1226,86 @@ pub type Executive = frame_executive::Executive< Migrations, >; +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + cumulus_primitives_core::Location, + cumulus_primitives_core::Location, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter( + seed: u32, + ) -> (cumulus_primitives_core::Location, cumulus_primitives_core::Location) { + // Use a different parachain' foreign assets pallet so that the asset is indeed foreign. + let asset_id = cumulus_primitives_core::Location::new( + 1, + [ + cumulus_primitives_core::Junction::Parachain(3000), + cumulus_primitives_core::Junction::PalletInstance(53), + cumulus_primitives_core::Junction::GeneralIndex(seed.into()), + ], + ); + (asset_id.clone(), asset_id) + } + + fn setup_balances_and_pool(asset_id: cumulus_primitives_core::Location, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(ForeignAssets::force_create( + RuntimeOrigin::root(), + asset_id.clone().into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into()); + assert_ok!(ForeignAssets::mint_into( + asset_id.clone().into(), + &lp_provider, + u64::MAX.into() + )); + + let token_native = alloc::boxed::Box::new(cumulus_primitives_core::Location::new( + 1, + cumulus_primitives_core::Junctions::Here, + )); + let token_second = alloc::boxed::Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + (u32::MAX / 2).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_assets, Local] [pallet_assets, Foreign] [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] + [pallet_asset_conversion_tx_payment, AssetTxPayment] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -1166,11 +1316,13 @@ 
mod benches { [pallet_uniques, Uniques] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] [pallet_asset_conversion_ops, AssetConversionMigration] + [pallet_revive, Revive] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. @@ -1372,31 +1524,42 @@ impl_runtime_apis! { // We accept the native token to pay fees. let mut acceptable_assets = vec![AssetId(native_token.clone())]; // We also accept all assets in a pool with the native token. - acceptable_assets.extend( - pallet_asset_conversion::Pools::::iter_keys().filter_map( - |(asset_1, asset_2)| { - if asset_1 == native_token { - Some(asset_2.clone().into()) - } else if asset_2 == native_token { - Some(asset_1.clone().into()) - } else { - None - } - }, - ), - ); + let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::< + Runtime, + xcm::v4::Location + >(&native_token).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?.into_iter(); + acceptable_assets.extend(assets_in_pool_with_native); PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) } fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + let native_asset = xcm_config::WestendLocation::get(); + let fee_in_native = WeightToFee::weight_to_fee(&weight); match asset.try_as::() { - Ok(asset_id) if asset_id.0 == xcm_config::WestendLocation::get() => { - // for native token - Ok(WeightToFee::weight_to_fee(&weight)) + Ok(asset_id) if asset_id.0 == native_asset => { + // for native asset + Ok(fee_in_native) }, Ok(asset_id) => { - log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); - Err(XcmPaymentApiError::AssetNotFound) + // We recognize assets in a pool with the native one. + let assets_in_pool_with_this_asset: Vec<_> = assets_common::get_assets_in_pool_with::< + Runtime, + xcm::v4::Location + >(&asset_id.0).map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?; + if assets_in_pool_with_this_asset + .into_iter() + .map(|asset_id| asset_id.0) + .any(|location| location == native_asset) { + pallet_asset_conversion::Pallet::::quote_price_tokens_for_exact_tokens( + asset_id.clone().0, + native_asset, + fee_in_native, + true, // We include the fee. + ).ok_or(XcmPaymentApiError::AssetNotFound) + } else { + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + } }, Err(_) => { log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); @@ -1539,6 +1702,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; @@ -1573,6 +1737,7 @@ impl_runtime_apis! 
{ use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1894,6 +2059,118 @@ impl_runtime_apis! { genesis_config_presets::preset_names() } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } + + impl pallet_revive::ReviveApi for Runtime + { + fn eth_transact( + from: H160, + dest: Option, + value: Balance, + input: Vec, + gas_limit: Option, + storage_deposit_limit: Option, + ) -> pallet_revive::EthContractResult + { + use pallet_revive::AddressMapper; + let blockweights = ::BlockWeights::get(); + let origin = ::AddressMapper::to_account_id(&from); + + let encoded_size = |pallet_call| { + let call = RuntimeCall::Revive(pallet_call); + let uxt: UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); + uxt.encoded_size() as u32 + }; + + Revive::bare_eth_transact( + origin, + dest, + value, + input, + gas_limit.unwrap_or(blockweights.max_block), + storage_deposit_limit.unwrap_or(u128::MAX), + encoded_size, + pallet_revive::DebugInfo::UnsafeDebug, + pallet_revive::CollectEvents::UnsafeCollect, + ) + } + + fn call( + origin: AccountId, + dest: H160, + value: Balance, + gas_limit: Option, + storage_deposit_limit: Option, + input_data: Vec, + ) -> pallet_revive::ContractResult { + let blockweights= ::BlockWeights::get(); + Revive::bare_call( + RuntimeOrigin::signed(origin), + dest, + value, + gas_limit.unwrap_or(blockweights.max_block), + storage_deposit_limit.unwrap_or(u128::MAX), + input_data, + pallet_revive::DebugInfo::UnsafeDebug, + pallet_revive::CollectEvents::UnsafeCollect, + ) + } + + fn instantiate( + origin: AccountId, + value: Balance, + gas_limit: Option, + storage_deposit_limit: Option, + code: pallet_revive::Code, + data: Vec, + salt: Option<[u8; 32]>, + ) -> pallet_revive::ContractResult + { + let blockweights= ::BlockWeights::get(); + Revive::bare_instantiate( + RuntimeOrigin::signed(origin), + value, + gas_limit.unwrap_or(blockweights.max_block), + storage_deposit_limit.unwrap_or(u128::MAX), + code, + data, + salt, + pallet_revive::DebugInfo::UnsafeDebug, + pallet_revive::CollectEvents::UnsafeCollect, + ) + } + + fn upload_code( + origin: AccountId, + code: Vec, + storage_deposit_limit: Option, + ) -> pallet_revive::CodeUploadResult + { + Revive::bare_upload_code( + RuntimeOrigin::signed(origin), + code, + storage_deposit_limit.unwrap_or(u128::MAX), + ) + } + + fn get_storage( + address: H160, + key: [u8; 32], + ) -> pallet_revive::GetStorageResult { + Revive::get_storage( + address, + key + ) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..e8dd9763c28261c19928d05719ccd3a4f5492416 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_206_000 picoseconds. + Weight::from_parts(6_212_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_851_000 picoseconds. 
+ Weight::from_parts(8_847_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_851_000 picoseconds. + Weight::from_parts(8_847_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 631_000 picoseconds. + Weight::from_parts(3_086_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_446_000 picoseconds. + Weight::from_parts(5_911_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 481_000 picoseconds. + Weight::from_parts(2_916_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_927_000 picoseconds. 
+ Weight::from_parts(6_613_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 4eebb1f8d78678b9f6a037e6fd3e4c5decfd8513..b0f986768f408fac1a31877f9f86839d4fffb921 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -18,8 +18,10 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_conversion; pub mod pallet_asset_conversion_ops; +pub mod pallet_asset_conversion_tx_payment; pub mod pallet_assets_foreign; pub mod pallet_assets_local; pub mod pallet_assets_pool; @@ -32,6 +34,7 @@ pub mod pallet_nfts; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..8fe302630fb9510d0e1c01ca9de37a1bd0be3a4f --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion_tx_payment.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-04, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Georges-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_tx_payment`. 
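
// NOTE (editorial sketch, not part of this diff): the autogenerated extension weights above
// only carry the benchmarked ref-time and proof size; database costs are layered on through
// `T::DbWeight`. Assuming the `RocksDbWeight` defaults these runtimes use (25 µs per read,
// 100 µs per write), `check_weight()` above resolves roughly as follows:

use frame_support::weights::{constants::RocksDbWeight, Weight};

fn resolved_check_weight() -> Weight {
	// 6_613_000 ps ref-time and 1533 bytes proof size, plus 2 reads and 2 writes:
	// ref_time = 6_613_000 + 2 * 25_000_000 + 2 * 100_000_000 = 256_613_000 ps.
	Weight::from_parts(6_613_000, 1533)
		.saturating_add(RocksDbWeight::get().reads(2))
		.saturating_add(RocksDbWeight::get().writes(2))
}
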
+pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_tx_payment::WeightInfo for WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 214_000_000 picoseconds. + Weight::from_parts(219_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `631` + // Estimated: `7404` + // Minimum execution time: 1_211_000_000 picoseconds. + Weight::from_parts(1_243_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..b4c78a78b489073648156133730bd39cd3c68497 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --chain=asset-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 40_847_000 picoseconds. + Weight::from_parts(49_674_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index cfd9fd2fd463e37fd6a7dd2fdf08a397db26eff1..b554d00508be18f107b2e5c7c2c10b2dbd9e5b4c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -535,14 +535,6 @@ impl cumulus_pallet_xcm::Config for Runtime { type XcmExecutor = XcmExecutor; } -pub type ForeignCreatorsSovereignAccountOf = ( - SiblingParachainConvertsVia, - AccountId32Aliases, - ParentIsPreset, - GlobalConsensusEthereumConvertsFor, - GlobalConsensusParachainConvertsFor, -); - /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. 
pub struct XcmBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index ff84bdea69f42aa8868cd9d700a6448d51d9b574..6a48b19ce9468f8f9de9dab54e7be9938ad4f714 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -21,9 +21,8 @@ use asset_hub_westend_runtime::{ xcm_config, xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, - ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, StakingPot, TrustBackedAssetsPalletLocation, WestendLocation, - XcmConfig, + ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, LocationToAccountId, StakingPot, + TrustBackedAssetsPalletLocation, WestendLocation, XcmConfig, }, AllPalletsWithoutSystem, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, @@ -966,7 +965,7 @@ asset_test_utils::include_teleports_for_foreign_assets_works!( CheckingAccount, WeightToFee, ParachainSystem, - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, ForeignAssetsInstance, collator_session_keys(), slot_durations(), @@ -1044,7 +1043,7 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p Runtime, XcmConfig, WeightToFee, - ForeignCreatorsSovereignAccountOf, + LocationToAccountId, ForeignAssetsInstance, xcm::v4::Location, WithLatestLocationConverter, diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index deda5fa4ab9c43d10101032eda49d975a85614df..26046e5974b5b6c16cc19e8726cf9aa5c89d978b 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -26,6 +26,9 @@ pub mod runtime_api; extern crate alloc; use crate::matching::{LocalLocationPattern, ParentLocation}; +use alloc::vec::Vec; +use codec::{Decode, EncodeLike}; +use core::cmp::PartialEq; use frame_support::traits::{Equals, EverythingBut}; use parachains_common::{AssetIdForTrustBackedAssets, CollectionId, ItemId}; use sp_runtime::traits::TryConvertInto; @@ -134,6 +137,36 @@ pub type PoolAssetsConvertedConcreteId = TryConvertInto, >; +/// Returns an iterator of all assets in a pool with `asset`. +/// +/// Should only be used in runtime APIs since it iterates over the whole +/// `pallet_asset_conversion::Pools` map. +/// +/// It takes in any version of an XCM Location but always returns the latest one. +/// This is to allow some margin of migrating the pools when updating the XCM version. +/// +/// An error of type `()` is returned if the version conversion fails for XCM locations. +/// This error should be mapped by the caller to a more descriptive one. 
+pub fn get_assets_in_pool_with<
+	Runtime: pallet_asset_conversion::Config<PoolId = (L, L)>,
+	L: TryInto<Location> + Clone + Decode + EncodeLike + PartialEq,
+>(
+	asset: &L,
+) -> Result<Vec<AssetId>, ()> {
+	pallet_asset_conversion::Pools::<Runtime>::iter_keys()
+		.filter_map(|(asset_1, asset_2)| {
+			if asset_1 == *asset {
+				Some(asset_2)
+			} else if asset_2 == *asset {
+				Some(asset_1)
+			} else {
+				None
+			}
+		})
+		.map(|location| location.try_into().map_err(|_| ()).map(AssetId))
+		.collect::<Result<Vec<_>, _>>()
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
index fd5782d68e434c23a4868e0ca39dfd35e3000bba..4af8a9f4385047ef547d4cb58368e0b9a57d9cf6 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
@@ -241,6 +241,7 @@ runtime-benchmarks = [
 	"pallet-message-queue/runtime-benchmarks",
 	"pallet-multisig/runtime-benchmarks",
 	"pallet-timestamp/runtime-benchmarks",
+	"pallet-transaction-payment/runtime-benchmarks",
 	"pallet-utility/runtime-benchmarks",
 	"pallet-xcm-benchmarks/runtime-benchmarks",
 	"pallet-xcm-bridge-hub/runtime-benchmarks",
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
index c971fa59c68d7d4a3b6558274d61ad631246d1f6..c226ed9c4fa06ca6e80702e604f2194319452cff 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
@@ -38,7 +38,7 @@ use frame_support::{
 use frame_system::{EnsureNever, EnsureRoot};
 use pallet_bridge_messages::LaneIdOf;
 use pallet_bridge_relayers::extension::{
-	BridgeRelayersSignedExtension, WithMessagesExtensionConfig,
+	BridgeRelayersTransactionExtension, WithMessagesExtensionConfig,
 };
 use pallet_xcm_bridge_hub::XcmAsPlainPayload;
 use polkadot_parachain_primitives::primitives::Sibling;
@@ -92,9 +92,9 @@ type FromRococoBulletinMessageBlobDispatcher = BridgeBlobDispatcher<
 	BridgeRococoToRococoBulletinMessagesPalletInstance,
 >;
 
-/// Signed extension that refunds relayers that are delivering messages from the Rococo Bulletin
-/// chain.
-pub type OnBridgeHubRococoRefundRococoBulletinMessages = BridgeRelayersSignedExtension<
+/// Transaction extension that refunds relayers that are delivering messages from the Rococo
+/// Bulletin chain.
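
// NOTE (editorial sketch, not part of this diff): this helper is what the XcmPaymentApi
// implementations earlier in this diff call when listing acceptable fee assets. A condensed,
// assumed form of that call site (names as used in the asset-hub-westend runtime):

fn acceptable_payment_assets_example() -> Result<Vec<AssetId>, XcmPaymentApiError> {
	let native_token = xcm_config::WestendLocation::get();
	// The native token is always accepted...
	let mut acceptable_assets = vec![AssetId(native_token.clone())];
	// ...plus every asset that shares a liquidity pool with it.
	let assets_in_pool_with_native = assets_common::get_assets_in_pool_with::<
		Runtime,
		xcm::v4::Location,
	>(&native_token)
	.map_err(|()| XcmPaymentApiError::VersionedConversionFailed)?;
	acceptable_assets.extend(assets_in_pool_with_native);
	Ok(acceptable_assets)
}
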
+pub type OnBridgeHubRococoRefundRococoBulletinMessages = BridgeRelayersTransactionExtension< Runtime, WithMessagesExtensionConfig< StrOnBridgeHubRococoRefundRococoBulletinMessages, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 8fe045723107c58974c696ad9edf18a424b090b4..29ea4e05f29ee396df1aa351b796113e5b009468 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -37,7 +37,7 @@ use frame_support::{parameter_types, traits::PalletInfoAccess}; use frame_system::{EnsureNever, EnsureRoot}; use pallet_bridge_messages::LaneIdOf; use pallet_bridge_relayers::extension::{ - BridgeRelayersSignedExtension, WithMessagesExtensionConfig, + BridgeRelayersTransactionExtension, WithMessagesExtensionConfig, }; use parachains_common::xcm_config::{AllSiblingSystemParachains, RelayOrOtherSystemParachains}; use polkadot_parachain_primitives::primitives::Sibling; @@ -84,8 +84,9 @@ pub type ToWestendBridgeHubMessagesDeliveryProof = type FromWestendMessageBlobDispatcher = BridgeBlobDispatcher; -/// Signed extension that refunds relayers that are delivering messages from the Westend parachain. -pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = BridgeRelayersSignedExtension< +/// Transaction extension that refunds relayers that are delivering messages from the Westend +/// parachain. +pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = BridgeRelayersTransactionExtension< Runtime, WithMessagesExtensionConfig< StrOnBridgeHubRococoRefundBridgeHubWestendMessages, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index e3435f35f1788a5bfc49a9ceb1bc4271807f7eff..f63e1f8fcf65bd95c2ac6dcb5f37bf1aae17aee3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -120,8 +120,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -132,13 +132,13 @@ pub type SignedExtra = ( pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages,), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, frame_metadata_hash_extension::CheckMetadataHash, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -298,6 +298,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. 
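
// NOTE (editorial sketch, not part of this diff): moving `CheckMetadataHash` ahead of
// `StorageWeightReclaim` in `TxExtension` above changes the signed payload, because both the
// explicit payload and the implicit ("additional signed") data of a tuple extension are
// SCALE-encoded in tuple order; the updated
// `ensure_transaction_extension_definition_is_compatible_with_relay` test below asserts the
// new layout against the relay's expectation. A toy illustration of that order sensitivity:

use codec::Encode;

fn tuple_encoding_is_order_sensitive() {
	// (1u8, 2u32) encodes to 0x0102000000, while (2u32, 1u8) encodes to 0x0200000001.
	assert_ne!((1u8, 2u32).encode(), (2u32, 1u8).encode());
}
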
+ type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -358,6 +360,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -650,12 +653,14 @@ bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -1037,6 +1042,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -1069,6 +1075,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1521,6 +1528,15 @@ impl_runtime_apis! 
{ genesis_config_presets::preset_names() } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } #[cfg(test)] @@ -1529,16 +1545,16 @@ mod tests { use codec::Encode; use sp_runtime::{ generic::Era, - traits::{SignedExtension, Zero}, + traits::{TransactionExtension, Zero}, }; #[test] - fn ensure_signed_extension_definition_is_compatible_with_relay() { - use bp_polkadot_core::SuffixedCommonSignedExtensionExt; + fn ensure_transaction_extension_definition_is_compatible_with_relay() { + use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; sp_io::TestExternalities::default().execute_with(|| { frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( + let payload: TxExtension = ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), frame_system::CheckTxVersion::new(), @@ -1551,13 +1567,13 @@ mod tests { ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), ), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::new(false), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); // for BridgeHubRococo { - let bhr_indirect_payload = bp_bridge_hub_rococo::SignedExtension::from_params( + let bhr_indirect_payload = bp_bridge_hub_rococo::TransactionExtension::from_params( VERSION.spec_version, VERSION.transaction_version, bp_runtime::TransactionEra::Immortal, @@ -1568,8 +1584,8 @@ mod tests { ); assert_eq!(payload.encode().split_last().unwrap().1, bhr_indirect_payload.encode()); assert_eq!( - payload.additional_signed().unwrap().encode().split_last().unwrap().1, - bhr_indirect_payload.additional_signed().unwrap().encode() + TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1, + sp_runtime::traits::TransactionExtension::::implicit(&bhr_indirect_payload).unwrap().encode() ) } }); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..64eef1b4f7405abe40327241fe538ed1373edeab --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --chain=bridge-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_136_000 picoseconds. + Weight::from_parts(5_842_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_771_000 picoseconds. + Weight::from_parts(8_857_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_771_000 picoseconds. + Weight::from_parts(8_857_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 732_000 picoseconds. + Weight::from_parts(2_875_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_627_000 picoseconds. + Weight::from_parts(6_322_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(2_455_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. 
+ Weight::from_parts(2_916_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_798_000 picoseconds. + Weight::from_parts(6_272_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index 517b3eb69fc8020f0b22b182227eb3a8528d4152..74796e626a2ec24b9eec32e5a336819fdb6bc507 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -27,6 +27,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_bridge_grandpa; pub mod pallet_bridge_messages_rococo_to_rococo_bulletin; @@ -38,6 +39,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..71d17e7259f72c023be3d75f37300c1a45c41d6f --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --chain=bridge-hub-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `3` + // Estimated: `3593` + // Minimum execution time: 34_956_000 picoseconds. + Weight::from_parts(40_788_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index 7a0f1462e7a7d150c1a4bb5c016bd2af410c9d55..8be2993c68f4f3d4a5034062dcf710c345b2ee5d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -21,7 +21,7 @@ use bridge_hub_rococo_runtime::{ bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, xcm_config::XcmConfig, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, MessageQueueServiceWeight, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, - SignedExtra, UncheckedExtrinsic, + TxExtension, UncheckedExtrinsic, }; use codec::{Decode, Encode}; use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; @@ -170,7 +170,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -183,12 +183,12 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::::new(false), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); - UncheckedExtrinsic::new_signed(call, account_id.into(), 
Signature::Sr25519(signature), extra) + UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), tx_ext) } fn construct_and_apply_extrinsic( diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 20bd5d1291384d0ea6aee8b5adf8ba7c5aa42927..01674287fde19b6ea116aad19b136b14ad95a39e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -18,13 +18,11 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ - bridge_common_config, bridge_to_bulletin_config, - bridge_to_ethereum_config::EthereumGatewayAddress, - bridge_to_westend_config, - xcm_config::{LocationToAccountId, RelayNetwork, TokenLocation, XcmConfig}, + bridge_common_config, bridge_to_bulletin_config, bridge_to_westend_config, + xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - SignedExtra, TransactionPayment, UncheckedExtrinsic, + TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_hub_test_utils::SlotDurations; use codec::{Decode, Encode}; @@ -51,7 +49,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -64,12 +62,13 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::new(false), - ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), + ) + .into(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); - UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra) + UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), tx_ext) } fn construct_and_apply_extrinsic( @@ -128,6 +127,9 @@ mod bridge_hub_westend_tests { BridgeGrandpaWestendInstance, BridgeParachainWestendInstance, DeliveryRewardInBalance, RelayersForLegacyLaneIdsMessagesInstance, }; + use bridge_hub_rococo_runtime::{ + bridge_to_ethereum_config::EthereumGatewayAddress, xcm_config::LocationToAccountId, + }; use bridge_hub_test_utils::test_cases::from_parachain; use bridge_to_westend_config::{ BridgeHubWestendLocation, WestendGlobalConsensusNetwork, @@ -498,7 +500,10 @@ mod bridge_hub_bulletin_tests { use super::*; use bp_messages::{HashedLaneId, LaneIdType}; use bridge_common_config::BridgeGrandpaRococoBulletinInstance; - use bridge_hub_rococo_runtime::bridge_common_config::RelayersForPermissionlessLanesInstance; + use bridge_hub_rococo_runtime::{ + bridge_common_config::RelayersForPermissionlessLanesInstance, + xcm_config::LocationToAccountId, + }; use bridge_hub_test_utils::test_cases::from_grandpa_chain; use 
bridge_to_bulletin_config::{ RococoBulletinGlobalConsensusNetwork, RococoBulletinGlobalConsensusNetworkLocation, @@ -818,9 +823,10 @@ fn location_conversion_works() { let expected = AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); - let got = LocationToAccountHelper::::convert_location( - tc.location.into(), - ) + let got = LocationToAccountHelper::< + AccountId, + bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, + >::convert_location(tc.location.into()) .unwrap(); assert_eq!(got, expected, "{}", tc.description); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 471158d9645d717469215d5c09ed49129463906b..637e7c71064091c4f2736bf2dfd9c2b25e7201bd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -238,6 +238,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm-bridge-hub/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index e45654bc62bd23f32f03c342ee3eb37f4c8ad1a5..aca51b320e9beadf61ee8c42c1eca80b09aad7e7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -38,7 +38,7 @@ use frame_support::{ use frame_system::{EnsureNever, EnsureRoot}; use pallet_bridge_messages::LaneIdOf; use pallet_bridge_relayers::extension::{ - BridgeRelayersSignedExtension, WithMessagesExtensionConfig, + BridgeRelayersTransactionExtension, WithMessagesExtensionConfig, }; use parachains_common::xcm_config::{AllSiblingSystemParachains, RelayOrOtherSystemParachains}; use polkadot_parachain_primitives::primitives::Sibling; @@ -91,8 +91,9 @@ pub type ToRococoBridgeHubMessagesDeliveryProof = type FromRococoMessageBlobDispatcher = BridgeBlobDispatcher; -/// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. -pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = BridgeRelayersSignedExtension< +/// Transaction extension that refunds relayers that are delivering messages from the Rococo +/// parachain. +pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = BridgeRelayersTransactionExtension< Runtime, WithMessagesExtensionConfig< StrOnBridgeHubWestendRefundBridgeHubRococoMessages, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index b73d8e5c67faef52bd0a8cf2f64e3d4ecc77fe92..1d7cd5de40eb45f3d901b7a4f3c434381a8b950c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -116,8 +116,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. 
+pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -128,13 +128,13 @@ pub type SignedExtra = ( pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, frame_metadata_hash_extension::CheckMetadataHash, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -283,6 +283,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the transaction extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -343,6 +345,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -589,12 +592,14 @@ bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -925,6 +930,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -954,6 +960,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1337,6 +1344,15 @@ impl_runtime_apis! 
{ genesis_config_presets::preset_names() } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { @@ -1350,16 +1366,16 @@ mod tests { use codec::Encode; use sp_runtime::{ generic::Era, - traits::{SignedExtension, Zero}, + traits::{TransactionExtension, Zero}, }; #[test] - fn ensure_signed_extension_definition_is_compatible_with_relay() { - use bp_polkadot_core::SuffixedCommonSignedExtensionExt; + fn ensure_transaction_extension_definition_is_compatible_with_relay() { + use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; sp_io::TestExternalities::default().execute_with(|| { frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( + let payload: TxExtension = ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), frame_system::CheckTxVersion::new(), @@ -1372,12 +1388,12 @@ mod tests { ( bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), - frame_metadata_hash_extension::CheckMetadataHash::new(false), ); { - let bh_indirect_payload = bp_bridge_hub_westend::SignedExtension::from_params( + let bh_indirect_payload = bp_bridge_hub_westend::TransactionExtension::from_params( VERSION.spec_version, VERSION.transaction_version, bp_runtime::TransactionEra::Immortal, @@ -1388,8 +1404,8 @@ mod tests { ); assert_eq!(payload.encode().split_last().unwrap().1, bh_indirect_payload.encode()); assert_eq!( - payload.additional_signed().unwrap().encode().split_last().unwrap().1, - bh_indirect_payload.additional_signed().unwrap().encode() + TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1, + sp_runtime::traits::TransactionExtension::::implicit(&bh_indirect_payload).unwrap().encode() ) } }); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..459b137d3b8419765362e4501db525db31135a46 --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! 
Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ +// --chain=bridge-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_166_000 picoseconds. + Weight::from_parts(6_021_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_651_000 picoseconds. + Weight::from_parts(9_177_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_651_000 picoseconds. + Weight::from_parts(9_177_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 601_000 picoseconds. + Weight::from_parts(2_805_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_727_000 picoseconds. + Weight::from_parts(6_051_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(2_494_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 521_000 picoseconds. 
+ Weight::from_parts(2_655_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_808_000 picoseconds. + Weight::from_parts(6_402_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index d60529f9a23727d1e80d6a6885a301f7e7082c9a..c1c5c337aca8900e7b6314b0e1aa56ed9a5fdec2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -27,6 +27,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_bridge_grandpa; pub mod pallet_bridge_messages; @@ -37,6 +38,7 @@ pub mod pallet_message_queue; pub mod pallet_multisig; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..92c53b918792257ddf932148fa10a4ba8016653f --- /dev/null +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/ +// --chain=bridge-hub-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `3` + // Estimated: `3593` + // Minimum execution time: 40_286_000 picoseconds. + Weight::from_parts(45_816_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs index c5f3871c07905e079e72dea86f521b65e1904196..1a1ce2a28ea35a62b3cd2d1fb90059be37c32b18 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs @@ -22,7 +22,7 @@ use bp_polkadot_core::Signature; use bridge_hub_westend_runtime::{ bridge_to_rococo_config, xcm_config::XcmConfig, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, MessageQueueServiceWeight, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, UncheckedExtrinsic, + RuntimeCall, RuntimeEvent, SessionKeys, TxExtension, UncheckedExtrinsic, }; use codec::{Decode, Encode}; use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; @@ -171,7 +171,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let extra: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -184,8 +184,8 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::::new(false), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index c5a9b8c53a93e694e6efb1df7f03cc401b6e302c..e5b67353c0f3f1a39ecd8025447fcee83e589f9f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -29,7 +29,7 @@ use bridge_hub_westend_runtime::{ xcm_config::{LocationToAccountId, RelayNetwork, WestendLocation, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, SessionKeys, - SignedExtra, TransactionPayment, UncheckedExtrinsic, + TransactionPayment, TxExtension, UncheckedExtrinsic, }; use bridge_to_rococo_config::{ BridgeGrandpaRococoInstance, BridgeHubRococoLocation, BridgeParachainRococoInstance, @@ -81,7 +81,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -94,12 +94,13 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::new(false), - ); - let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), + ) + .into(); + let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); - UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra) + UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), tx_ext) } fn construct_and_apply_extrinsic( diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs index 9c5d6269dc0e799ef6fe89fb18fa5389c5cceca0..24372f57ae7d829bfc4f60552cde13537c489a6e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs @@ -131,7 +131,7 @@ pub fn initialize_bridge_by_governance_works( // execute XCM with Transacts to `initialize bridge` as governance does assert_ok!(RuntimeHelper::::execute_as_governance( initialize_call.encode(), - initialize_call.get_dispatch_info().weight, + initialize_call.get_dispatch_info().call_weight, ) .ensure_complete()); @@ -172,7 +172,7 @@ pub fn change_bridge_grandpa_pallet_mode_by_governance_works::execute_as_governance( set_operating_mode_call.encode(), - set_operating_mode_call.get_dispatch_info().weight, + set_operating_mode_call.get_dispatch_info().call_weight, ) .ensure_complete()); @@ -225,7 +225,7 @@ pub fn change_bridge_parachains_pallet_mode_by_governance_works::execute_as_governance( set_operating_mode_call.encode(), - set_operating_mode_call.get_dispatch_info().weight, + set_operating_mode_call.get_dispatch_info().call_weight, ) .ensure_complete()); @@ -278,7 +278,7 @@ pub fn change_bridge_messages_pallet_mode_by_governance_works::execute_as_governance( 
set_operating_mode_call.encode(), - set_operating_mode_call.get_dispatch_info().weight, + set_operating_mode_call.get_dispatch_info().call_weight, ) .ensure_complete()); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 8a47af18524d9a6a77ccac51f7b02c365d228323..e03fc934ceaf62e6174a6f91c5040ba384549ec4 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -124,6 +124,7 @@ runtime-benchmarks = [ "pallet-scheduler/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 7f87d4701f9629deb9edaeeb438fc6cde5e3bef7..8cb2e42cb31bda1bf494762f306eda48760e0dcc 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -185,6 +185,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; @@ -239,6 +240,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -730,8 +732,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -743,7 +745,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// All migrations executed on runtime upgrade as a nested tuple of types implementing /// `OnRuntimeUpgrade`. Included migrations must be idempotent. type Migrations = ( @@ -774,6 +776,7 @@ pub type Executive = frame_executive::Executive< mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -781,6 +784,7 @@ mod benches { [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] @@ -1044,6 +1048,7 @@ impl_runtime_apis! 
{ use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -1061,6 +1066,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -1165,6 +1171,15 @@ impl_runtime_apis! { genesis_config_presets::preset_names() } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..f32f2730313570cad4a759e64b2156f30110f60b --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ +// --chain=collectives-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_497_000 picoseconds. + Weight::from_parts(5_961_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_240_000 picoseconds. + Weight::from_parts(8_175_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_240_000 picoseconds. + Weight::from_parts(8_175_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 671_000 picoseconds. + Weight::from_parts(3_005_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_426_000 picoseconds. + Weight::from_parts(6_131_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_715_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. 
+ Weight::from_parts(2_635_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_958_000 picoseconds. + Weight::from_parts(6_753_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs index a9a298e547edb49738b9a0612d04d4141301d4ba..00b3bd92d5ef9ce0081ca9e95e43bd35098c6aa0 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -18,6 +18,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_alliance; pub mod pallet_asset_rate; pub mod pallet_balances; @@ -39,6 +40,7 @@ pub mod pallet_salary_fellowship_salary; pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_xcm; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..5d077b89d56421a05cd64bec99ecdad445049528 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ +// --chain=collectives-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 39_815_000 picoseconds. + Weight::from_parts(46_067_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index dfa75b8d3cf3acf1bdf5e04adaf7c369c2ff3983..c98ca7ba3e74ec9ab9c493b7cf6dc0f4ef0b495c 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -161,6 +161,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 3377802e91de1ab6c9bccb7ec9408776d59c0114..2fc3fe4f31415cf6fd060626cf7eea169f4c8014 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -87,8 +87,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -101,7 +101,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( @@ -250,6 +250,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } parameter_types! { @@ -434,6 +435,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] @@ -757,6 +759,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -774,6 +777,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); @@ -879,6 +883,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml index 80417ea00362c69ca738ee02c982695fa6a946fb..a38b7400cfa3e792517019039dc2430a364e7d3d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml @@ -163,6 +163,7 @@ runtime-benchmarks = [ "pallet-proxy/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs index 76ee06a87e8d82dea17914e4f03647da2a5f307a..3910a747e9bb2d6026ec32406a8ccb4b1a21b3af 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs @@ -218,6 +218,36 @@ impl CoretimeInterface for CoretimeAllocator { end_hint: Option>, ) { use crate::coretime::CoretimeProviderCalls::AssignCore; + + // The relay chain currently only allows `assign_core` to be called with a complete mask + // and only ever with increasing `begin`. The assignments must be truncated to avoid + // dropping that core's assignment completely. 
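+ // Illustrative arithmetic for the truncation below (hypothetical figures, not part
+ // of the original patch): a core's schedule totals 57_600 parts. If 30 non-idle
+ // assignments of 1_920 parts each arrive, only the first 27 are kept
+ // (27 * 1_920 = 51_840 parts) and the `Idle` entry injected at the front receives
+ // 57_600 - 51_840 = 5_760 parts, so the mask sent to the relay chain stays complete.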
+ + // This shadowing of `assignment` is temporary and can be removed when the relay can accept + // multiple messages to assign a single core. + let assignment = if assignment.len() > 28 { + let mut total_parts = 0u16; + // Account for missing parts with a new `Idle` assignment at the start as + // `assign_core` on the relay assumes this is sorted. We'll add the rest of the + // assignments and sum the parts in one pass, so this is just initialized to 0. + let mut assignment_truncated = vec![(CoreAssignment::Idle, 0)]; + // Truncate to first 27 non-idle assignments. + assignment_truncated.extend( + assignment + .into_iter() + .filter(|(a, _)| *a != CoreAssignment::Idle) + .take(27) + .inspect(|(_, parts)| total_parts += *parts) + .collect::>(), + ); + + // Set the parts of the `Idle` assignment we injected at the start of the vec above. + assignment_truncated[0].1 = 57_600u16.saturating_sub(total_parts); + assignment_truncated + } else { + assignment + }; + let assign_core_call = RelayRuntimePallets::Coretime(AssignCore(core, begin, assignment, end_hint)); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index f16dae04f217c819b28f488278833a53929a66a6..f2ccb9c552e3375cce1e578998cd22b6465f6467 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -98,8 +98,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -114,7 +114,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -208,6 +208,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -265,6 +267,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -1148,6 +1151,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..a4d09696a1a116ea5add5d9a1d41b86169bfe272 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --chain=coretime-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. 
+ Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. 
+ Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs index 216f41a5a666fd0e3f1d9c2b2e05866a1196402a..24c4f50e6ab8b8c46ae1bfa177c8afada04a795e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs @@ -22,6 +22,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_broker; pub mod pallet_collator_selection; @@ -30,6 +31,7 @@ pub mod pallet_multisig; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..29d48abab8956c79724b9544b99f06ef9303abaa --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --chain=coretime-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml index 25bf777047d0978e430f5cb4aacc97d3223261cf..149fa5d0b045c22da61def379263bad9d2182b53 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml @@ -161,6 +161,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs index 865ff68d4c659fd8c324595eadf790be655f9655..86769cb2da112d1c7b37c36306216f09b90f034d 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs @@ -224,8 +224,6 @@ impl CoretimeInterface for CoretimeAllocator { end_hint: Option>, ) { use crate::coretime::CoretimeProviderCalls::AssignCore; - let assign_core_call = - RelayRuntimePallets::Coretime(AssignCore(core, begin, assignment, end_hint)); // Weight for `assign_core` from westend benchmarks: // `ref_time` = 10177115 + (1 * 25000000) + (2 * 100000000) + (57600 * 13932) = 937660315 @@ -233,6 +231,38 @@ impl CoretimeInterface for CoretimeAllocator { // Add 5% to each component and round to 2 significant figures. let call_weight = Weight::from_parts(980_000_000, 3800); + // The relay chain currently only allows `assign_core` to be called with a complete mask + // and only ever with increasing `begin`. The assignments must be truncated to avoid + // dropping that core's assignment completely. + + // This shadowing of `assignment` is temporary and can be removed when the relay can accept + // multiple messages to assign a single core. + let assignment = if assignment.len() > 28 { + let mut total_parts = 0u16; + // Account for missing parts with a new `Idle` assignment at the start as + // `assign_core` on the relay assumes this is sorted. We'll add the rest of the + // assignments and sum the parts in one pass, so this is just initialized to 0. + let mut assignment_truncated = vec![(CoreAssignment::Idle, 0)]; + // Truncate to first 27 non-idle assignments. 
+ assignment_truncated.extend( + assignment + .into_iter() + .filter(|(a, _)| *a != CoreAssignment::Idle) + .take(27) + .inspect(|(_, parts)| total_parts += *parts) + .collect::>(), + ); + + // Set the parts of the `Idle` assignment we injected at the start of the vec above. + assignment_truncated[0].1 = 57_600u16.saturating_sub(total_parts); + assignment_truncated + } else { + assignment + }; + + let assign_core_call = + RelayRuntimePallets::Coretime(AssignCore(core, begin, assignment, end_hint)); + let message = Xcm(vec![ Instruction::UnpaidExecution { weight_limit: WeightLimit::Unlimited, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 187856b6c6122a3aa0e8c6411251ad2b8cc4c2da..2f944e79fe00e21899e66cd71dfcbf6a9aad0d8c 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -98,8 +98,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -114,7 +114,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -208,6 +208,8 @@ impl frame_system::Config for Runtime { type DbWeight = RocksDbWeight; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). @@ -266,6 +268,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -1141,6 +1144,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..d928b73613a3f110dc373ea91070377b644c4bbb --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
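Editor's note on the `assign_core` truncation in the coretime-westend `coretime.rs` hunk above: the rule is easier to see outside the runtime. The sketch below re-implements it over a plain tuple vector, with a stand-in `Kind` enum for `CoreAssignment` and the 57_600 total parts / 28-entry limit taken from the comments in the diff; it is an illustration under those assumptions, not the runtime code itself.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Kind {
    // Stand-in for the runtime's `CoreAssignment`.
    Idle,
    Task(u32),
}

/// Total number of parts a core's mask is divided into (from the diff's comments).
const TOTAL_PARTS: u16 = 57_600;

/// If the schedule has more than 28 entries, keep the first 27 non-idle ones and
/// prepend a single `Idle` entry that absorbs the leftover parts, so the mask stays complete.
fn truncate_assignment(assignment: Vec<(Kind, u16)>) -> Vec<(Kind, u16)> {
    if assignment.len() <= 28 {
        return assignment;
    }
    let mut total_parts = 0u16;
    // Reserve slot 0 for the `Idle` entry that will absorb whatever parts are left over.
    let mut truncated = vec![(Kind::Idle, 0)];
    truncated.extend(
        assignment
            .into_iter()
            .filter(|(kind, _)| *kind != Kind::Idle)
            .take(27)
            .inspect(|(_, parts)| total_parts += *parts),
    );
    // Keep the mask complete: the parts must still sum to 57_600.
    truncated[0].1 = TOTAL_PARTS.saturating_sub(total_parts);
    truncated
}

fn main() {
    // 30 tasks of 1_000 parts each exceed the 28-entry limit.
    let schedule: Vec<(Kind, u16)> = (0u32..30).map(|i| (Kind::Task(i), 1_000)).collect();
    let truncated = truncate_assignment(schedule);
    assert_eq!(truncated.len(), 28);
    // 27 kept tasks account for 27_000 parts, so the injected `Idle` entry gets 30_600.
    assert_eq!(truncated[0], (Kind::Idle, 30_600));
    println!("{truncated:?}");
}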
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --chain=coretime-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. 
+ Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. + Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs index 216f41a5a666fd0e3f1d9c2b2e05866a1196402a..24c4f50e6ab8b8c46ae1bfa177c8afada04a795e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs @@ -22,6 +22,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_broker; pub mod pallet_collator_selection; @@ -30,6 +31,7 @@ pub mod pallet_multisig; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..f159f877afe77d7b7b98524d726e3867c6f80588 --- /dev/null +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! 
Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --chain=coretime-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index bc76f174b50752a6b10cda99b3451f7374bed6fd..ad656cdbb83a89dd74f50092dd0208c2a0c1348f 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -290,8 +290,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( pallet_sudo::CheckOnlySudoAccount, frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, @@ -303,7 +303,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -318,6 +318,7 @@ mod benches { frame_benchmarking::define_benchmarks!( [cumulus_pallet_parachain_system, ParachainSystem] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_glutton, Glutton] [pallet_message_queue, MessageQueue] [pallet_timestamp, Timestamp] @@ -447,6 +448,7 @@ impl_runtime_apis! 
{ use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -463,6 +465,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime { fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..4fbbb8d6f781d97aeedfa7d84779190464ff70a2 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,130 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ +// --chain=glutton-westend-dev-1300 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_908_000 picoseconds. 
+ Weight::from_parts(4_007_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_510_000 picoseconds. + Weight::from_parts(6_332_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_510_000 picoseconds. + Weight::from_parts(6_332_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 651_000 picoseconds. + Weight::from_parts(851_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_387_000 picoseconds. + Weight::from_parts(3_646_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 491_000 picoseconds. + Weight::from_parts(651_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 451_000 picoseconds. + Weight::from_parts(662_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_537_000 picoseconds. 
+ Weight::from_parts(4_208_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml index c969bb2985bd239781f15fe92acfe4893e14bf89..373b82639def6c234565a7fabceafa259679610c 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml @@ -157,6 +157,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index aa04d01cf037267868afe0a5c934dd3afdf240e2..f9499f9d1ebe9799159f3314814ecd1ab2dcf579 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -91,8 +91,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The TransactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -106,7 +106,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -189,6 +189,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; @@ -241,6 +242,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -587,6 +589,7 @@ mod benches { [pallet_session, SessionBench::] [pallet_utility, Utility] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] // Polkadot [polkadot_runtime_common::identity_migrator, IdentityMigrator] // Cumulus @@ -1059,6 +1062,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..fb2b69e23e82b690e225517377aafd54fc72a240 --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ +// --chain=people-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. + Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. 
+ Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. 
+ Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs index dce959e817becee6c2d568fed290b8ff96477652..58480231f06856d4af2ffa94c9c8e2a50a8a7bc7 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_collator_selection; pub mod pallet_identity; @@ -28,6 +29,7 @@ pub mod pallet_multisig; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..555fd5a32fa874f3f046cbf819a2a4e938b4926b --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/ +// --chain=people-rococo-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml index 64e956d8b6b5a506e72f725e6edebc69ef7bdef2..efb67adba49d5f663acd0c663cafaff81d0c5361 100644 --- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml @@ -157,6 +157,7 @@ runtime-benchmarks = [ "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index c8131c94f15b93f58c4d5cea4bf2e986bf658532..7e3cd1670fe506b3756feaa060cfd2db7b336fae 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -91,8 +91,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The transactionExtension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -106,7 +106,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. pub type Migrations = ( @@ -188,6 +188,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = ConstU32<16>; @@ -240,6 +241,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -1058,6 +1060,15 @@ impl_runtime_apis! 
{ vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..0a4b9e8e26812bfe82aaaccd3dc19221d7327e37 --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ +// --chain=people-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_637_000 picoseconds. 
+ Weight::from_parts(6_382_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_841_000 picoseconds. + Weight::from_parts(8_776_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(2_705_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_316_000 picoseconds. + Weight::from_parts(5_771_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 511_000 picoseconds. + Weight::from_parts(2_575_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 501_000 picoseconds. + Weight::from_parts(2_595_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 3_687_000 picoseconds. 
+ Weight::from_parts(6_192_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs index dce959e817becee6c2d568fed290b8ff96477652..58480231f06856d4af2ffa94c9c8e2a50a8a7bc7 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_balances; pub mod pallet_collator_selection; pub mod pallet_identity; @@ -28,6 +29,7 @@ pub mod pallet_multisig; pub mod pallet_proxy; pub mod pallet_session; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_utility; pub mod pallet_xcm; pub mod paritydb_weights; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..30e4524e586e24247f604e82c0ea974d43ff060c --- /dev/null +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/ +// --chain=people-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. 
+pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 33_363_000 picoseconds. + Weight::from_parts(38_793_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index 3b38eee244f10931bdd7703cf5a59f88a363efaa..36cf2bf4f83b504315146fa9e4a9091370c76e5f 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -475,7 +475,7 @@ impl< BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, Transact { origin_kind: OriginKind::Xcm, - require_weight_at_most: call.get_dispatch_info().weight, + require_weight_at_most: call.get_dispatch_info().call_weight, call: call.encode().into(), }, ExpectTransactStatus(MaybeErrorCode::Success), diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index 96338b64558106d865aa251a651eecfda6b401ec..14c4fe5203847e8789a7432d323d51abfaae8a89 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -162,6 +162,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 05773846164f66c800612fc9037c4c596c7d1a55..136592c5602601072e843f9d28c0bf60c7bd5bd7 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -92,7 +92,7 @@ use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use xcm::{ latest::prelude::{AssetId as AssetLocationId, BodyId}, - VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, + VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, }; use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, @@ -129,8 +129,8 @@ pub type BlockId = generic::BlockId; // Id used for identifying assets. pub type AssetId = u32; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -143,7 +143,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Migrations = ( pallet_balances::migration::MigrateToTrackInactive, @@ -435,6 +435,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } parameter_types! { @@ -745,6 +746,19 @@ impl pallet_collator_selection::Config for Runtime { type WeightInfo = (); } +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl pallet_asset_tx_payment::BenchmarkHelperTrait for AssetTxHelper { + fn create_asset_id_parameter(_id: u32) -> (u32, u32) { + unimplemented!("Penpal uses default weights"); + } + fn setup_balances_and_pool(_asset_id: u32, _account: AccountId) { + unimplemented!("Penpal uses default weights"); + } +} + impl pallet_asset_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; @@ -757,6 +771,9 @@ impl pallet_asset_tx_payment::Config for Runtime { >, AssetsToBlockAuthor, >; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetTxHelper; } impl pallet_sudo::Config for Runtime { @@ -807,6 +824,7 @@ construct_runtime!( mod benches { frame_benchmarking::define_benchmarks!( [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_message_queue, MessageQueue] [pallet_session, SessionBench::] @@ -1087,6 +1105,7 @@ impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; let mut list = Vec::::new(); @@ -1103,6 +1122,7 @@ impl_runtime_apis! { use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; impl frame_system_benchmarking::Config for Runtime {} use cumulus_pallet_session_benchmarking::Pallet as SessionBench; @@ -1143,6 +1163,15 @@ impl_runtime_apis! { vec![] } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 9c905c8762778447809acb3f7592585223aca865..bbc1185db0d87cd263888197809ade39c63100dc 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -126,6 +126,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 5abdc995c00dbfcc89af8ef0a68c9ceb0903815d..34bd45b6ef990b85ede27c56f9291dcdee59dc80 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -267,6 +267,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } impl pallet_sudo::Config for Runtime { @@ -655,8 +656,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -669,7 +670,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, diff --git a/cumulus/polkadot-omni-node/lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml index 3dd482f4adafb236e7e12678323776dd7ec83bee..a690229f16959d464ffb385743e12dce0cbf1702 100644 --- a/cumulus/polkadot-omni-node/lib/Cargo.toml +++ b/cumulus/polkadot-omni-node/lib/Cargo.toml @@ -105,6 +105,7 @@ runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", diff --git a/cumulus/polkadot-omni-node/lib/src/cli.rs b/cumulus/polkadot-omni-node/lib/src/cli.rs index 6ca328912bba1fc447252f753fb4f125a80e04a5..dc59c185d90998ec96b0811a3ef32e241e63933d 100644 --- a/cumulus/polkadot-omni-node/lib/src/cli.rs +++ b/cumulus/polkadot-omni-node/lib/src/cli.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +//! CLI options of the omni-node. See [`Command`]. + use crate::{ chain_spec::DiskChainSpecLoader, common::{ @@ -103,6 +105,7 @@ pub enum Subcommand { Benchmark(frame_benchmarking_cli::BenchmarkCmd), } +/// CLI Options shipped with `polkadot-omni-node`. 
#[derive(clap::Parser)] #[command( propagate_version = true, @@ -113,9 +116,11 @@ pub struct Cli { #[arg(skip)] pub(crate) chain_spec_loader: Option>, + /// Possible subcommands. See [`Subcommand`]. #[command(subcommand)] pub subcommand: Option, + /// The shared parameters with all cumulus-based parachain nodes. #[command(flatten)] pub run: cumulus_client_cli::RunCmd, @@ -200,6 +205,7 @@ impl SubstrateCli for Cli { } } +/// The relay chain CLI flags. These are passed in after a `--` at the end. #[derive(Debug)] pub struct RelayChainCli { /// The actual relay chain cli object. diff --git a/cumulus/polkadot-omni-node/lib/src/command.rs b/cumulus/polkadot-omni-node/lib/src/command.rs index 350dcfee1cdb7aaebf28259f952d9d95ded9fb6c..fe935a03cc95862dc2db4cf85b7c8c34a0532f6b 100644 --- a/cumulus/polkadot-omni-node/lib/src/command.rs +++ b/cumulus/polkadot-omni-node/lib/src/command.rs @@ -266,13 +266,15 @@ pub fn run(cmd_config: RunConfig) -> Result<() } let hwbench = (!cli.no_hardware_benchmarks) - .then_some(config.database.path().map(|database_path| { - let _ = std::fs::create_dir_all(database_path); - sc_sysinfo::gather_hwbench( - Some(database_path), - &SUBSTRATE_REFERENCE_HARDWARE, - ) - })) + .then(|| { + config.database.path().map(|database_path| { + let _ = std::fs::create_dir_all(database_path); + sc_sysinfo::gather_hwbench( + Some(database_path), + &SUBSTRATE_REFERENCE_HARDWARE, + ) + }) + }) .flatten(); let parachain_account = diff --git a/cumulus/polkadot-omni-node/lib/src/lib.rs b/cumulus/polkadot-omni-node/lib/src/lib.rs index a293ab225c6f575d0fee14c6663391f7d59692b5..3f01f42111444342ef85e68996570df34e30a649 100644 --- a/cumulus/polkadot-omni-node/lib/src/lib.rs +++ b/cumulus/polkadot-omni-node/lib/src/lib.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +//! # Polkadot Omni Node Library +//! //! Helper library that can be used to run a parachain node. //! //! ## Overview @@ -37,11 +39,18 @@ //! //! ## Examples //! -//! For an example, see the `polkadot-parachain-bin` crate. +//! For an example, see the [`polkadot-parachain-bin`](https://crates.io/crates/polkadot-parachain-bin) crate. +//! +//! ## Binary +//! +//! It can be used to start a parachain node from a provided chain spec file. +//! It is only compatible with runtimes that use block number `u32` and `Aura` consensus. +//! +//! 
Example: `polkadot-omni-node --chain <chain_spec.json>` #![deny(missing_docs)] -mod cli; +pub mod cli; mod command; mod common; mod fake_runtime_api; diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs index d00d7adf27e1d35e71f4a6b122dc34e223b981ae..e8043bd7b2aac6c8104262b4e1e5dfd3a76de577 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs @@ -28,7 +28,6 @@ use sc_network::NetworkBackend; use sc_service::{build_polkadot_syncing_strategy, Configuration, PartialComponents, TaskManager}; use sc_telemetry::TelemetryHandle; use sp_runtime::traits::Header; -use sp_timestamp::Timestamp; use std::{marker::PhantomData, sync::Arc}; pub struct ManualSealNode(PhantomData); @@ -182,7 +181,10 @@ impl ManualSealNode { additional_key_values: None, }; Ok(( - sp_timestamp::InherentDataProvider::new(Timestamp::new(0)), + // This is intentional, as the runtime that we expect to run against this + // will never receive the aura-related inherents/digests, and providing + // real timestamps would cause aura <> timestamp checking to fail. + sp_timestamp::InherentDataProvider::new(sp_timestamp::Timestamp::new(0)), mocked_parachain, )) } diff --git a/cumulus/polkadot-omni-node/src/main.rs b/cumulus/polkadot-omni-node/src/main.rs index a86ec6f6fde61ae07faf07985dfcb19ef5ecf255..5bca81e2e78b39bba86aa428c9084e04d302f5a6 100644 --- a/cumulus/polkadot-omni-node/src/main.rs +++ b/cumulus/polkadot-omni-node/src/main.rs @@ -14,12 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! Basic polkadot omni-node. +//! White labeled polkadot omni-node. //! -//! It can be used to start a parachain node from a provided chain spec file. -//! It is only compatible with runtimes that use block number `u32` and `Aura` consensus. -//! -//! Example: `polkadot-omni-node --chain [chain_spec.json]` +//! For documentation, see [`polkadot_omni_node_lib`].
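Editor's note on the `.then_some(...)` to `.then(|| ...)` switch in the `command.rs` hunk above: the change is about evaluation order, not about the resulting `Option`. `bool::then_some` evaluates its argument unconditionally, while `bool::then` only runs the closure when the flag is true, so the hardware-benchmark gathering (and the directory creation next to it) no longer happens when benchmarks are disabled. A minimal standalone sketch of the difference, with a hypothetical `gather` standing in for the expensive call:

fn gather() -> u32 {
    println!("expensive side effect runs");
    42
}

fn main() {
    let enabled = false;

    // `then_some` takes a value, so `gather()` runs (and prints) even though
    // `enabled` is false and the result is thrown away.
    let eager = enabled.then_some(gather());

    // `then` takes a closure, so `gather()` is never called here.
    let lazy = enabled.then(|| gather());

    assert_eq!(eager, None);
    assert_eq!(lazy, None);
}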
#![warn(missing_docs)] #![warn(unused_extern_crates)] diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml index 3a98fdd017aef9b7e411ef59d56fecc89bf0c897..e1ae6743335abac6be8de4ef41db7c7940764029 100644 --- a/cumulus/primitives/storage-weight-reclaim/Cargo.toml +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -14,6 +14,7 @@ codec = { features = ["derive"], workspace = true } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } @@ -24,8 +25,8 @@ cumulus-primitives-proof-size-hostfunction = { workspace = true } docify = { workspace = true } [dev-dependencies] -sp-trie = { workspace = true } sp-io = { workspace = true } +sp-trie = { workspace = true } cumulus-test-runtime = { workspace = true } [features] @@ -34,6 +35,7 @@ std = [ "codec/std", "cumulus-primitives-core/std", "cumulus-primitives-proof-size-hostfunction/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index 2529297691e8bcc15e31d070e20975c1eeaa1282..5471640695ca2939628af0c19bb0606926ebf177 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -30,11 +30,15 @@ use frame_support::{ use frame_system::Config; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension}, transaction_validity::TransactionValidityError, DispatchResult, }; +#[cfg(test)] +mod tests; + const LOG_TARGET: &'static str = "runtime::storage_reclaim"; /// `StorageWeightReclaimer` is a mechanism for manually reclaiming storage weight. @@ -43,7 +47,7 @@ const LOG_TARGET: &'static str = "runtime::storage_reclaim"; /// reclaim it computes the real consumed storage weight and refunds excess weight. 
/// /// # Example -#[doc = docify::embed!("src/lib.rs", simple_reclaimer_example)] +#[doc = docify::embed!("src/tests.rs", simple_reclaimer_example)] pub struct StorageWeightReclaimer { previous_remaining_proof_size: u64, previous_reported_proof_size: Option, @@ -119,43 +123,35 @@ impl core::fmt::Debug for StorageWeightReclaim { } } -impl SignedExtension for StorageWeightReclaim +impl TransactionExtension for StorageWeightReclaim where T::RuntimeCall: Dispatchable, { const IDENTIFIER: &'static str = "StorageWeightReclaim"; - - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); + type Val = (); type Pre = Option; - fn additional_signed( - &self, - ) -> Result - { - Ok(()) - } - - fn pre_dispatch( + fn prepare( self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, + _val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> Result { + ) -> Result { Ok(get_proof_size()) } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - let Some(Some(pre_dispatch_proof_size)) = pre else { - return Ok(()); + ) -> Result { + let Some(pre_dispatch_proof_size) = pre else { + return Ok(Weight::zero()); }; let Some(post_dispatch_proof_size) = get_proof_size() else { @@ -163,13 +159,13 @@ where target: LOG_TARGET, "Proof recording enabled during pre-dispatch, now disabled. This should not happen." ); - return Ok(()) + return Ok(Weight::zero()) }; // Unspent weight according to the `actual_weight` from `PostDispatchInfo` // This unspent weight will be refunded by the `CheckWeight` extension, so we need to // account for that. 
let unspent = post_info.calc_unspent(info).proof_size(); - let benchmarked_weight = info.weight.proof_size().saturating_sub(unspent); + let benchmarked_weight = info.total_weight().proof_size().saturating_sub(unspent); let consumed_weight = post_dispatch_proof_size.saturating_sub(pre_dispatch_proof_size); let storage_size_diff = benchmarked_weight.abs_diff(consumed_weight as u64); @@ -209,678 +205,8 @@ where current.accrue(Weight::from_parts(0, missing_from_node), info.class); } }); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use core::marker::PhantomData; - use frame_support::{ - assert_ok, - dispatch::{DispatchClass, PerDispatchClass}, - weights::{Weight, WeightMeter}, - }; - use frame_system::{BlockWeight, CheckWeight}; - use sp_runtime::{AccountId32, BuildStorage}; - use sp_trie::proof_size_extension::ProofSizeExt; - - type Test = cumulus_test_runtime::Runtime; - const CALL: &::RuntimeCall = - &cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages { - pages: 0u64, - }); - const ALICE: AccountId32 = AccountId32::new([1u8; 32]); - const LEN: usize = 150; - - pub fn new_test_ext() -> sp_io::TestExternalities { - let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() - .build_storage() - .unwrap() - .into(); - ext - } - - struct TestRecorder { - return_values: Box<[usize]>, - counter: std::sync::atomic::AtomicUsize, - } - - impl TestRecorder { - fn new(values: &[usize]) -> Self { - TestRecorder { return_values: values.into(), counter: Default::default() } - } - } - - impl sp_trie::ProofSizeProvider for TestRecorder { - fn estimate_encoded_size(&self) -> usize { - let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed); - self.return_values[counter] - } - } - - fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities { - let mut test_ext = new_test_ext(); - let test_recorder = TestRecorder::new(proof_values); - test_ext.register_extension(ProofSizeExt::new(test_recorder)); - test_ext - } - - fn set_current_storage_weight(new_weight: u64) { - BlockWeight::::mutate(|current_weight| { - current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal); - }); - } - - fn get_storage_weight() -> PerDispatchClass { - BlockWeight::::get() - } - - #[test] - fn basic_refund() { - // The real cost will be 100 bytes of storage size - let mut test_ext = setup_test_externalities(&[0, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // Should add 500 + 150 (len) to weight. - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // We expect a refund of 400 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1250); - }) - } - - #[test] - fn underestimating_refund() { - // We fixed a bug where `pre dispatch info weight > consumed weight > post info weight` - // resulted in error. 
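As a plain-Rust recap of the `post_dispatch_details` arithmetic above (bare `u64`s stand in for `Weight`, and the node-side floor correction applied afterwards is omitted):

/// Signed adjustment to apply to the block's proof size after dispatch:
/// positive means the benchmark underestimated and we accrue, negative means
/// it overestimated and we refund.
fn proof_size_adjustment(benchmarked: u64, unspent: u64, pre: u64, post: u64) -> i128 {
    // What was charged up front, minus what `CheckWeight` already refunded.
    let benchmarked = benchmarked.saturating_sub(unspent);
    // What the node-side proof recorder says the extrinsic really consumed.
    let consumed = post.saturating_sub(pre);
    let diff = benchmarked.abs_diff(consumed) as i128;
    if consumed > benchmarked { diff } else { -diff }
}

#[test]
fn matches_refund_test_numbers() {
    // `basic_refund`: benchmarked 500, nothing unspent, recorder goes 0 -> 100,
    // so 400 bytes of proof size are handed back.
    assert_eq!(proof_size_adjustment(500, 0, 0, 100), -400);
    // `underestimating_refund`: benchmarked 101, 2 unspent, recorder 0 -> 100,
    // so 1 byte is accrued on top.
    assert_eq!(proof_size_adjustment(101, 2, 0, 100), 1);
}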
- - // The real cost will be 100 bytes of storage size - let mut test_ext = setup_test_externalities(&[0, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 101), ..Default::default() }; - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(0, 99)), - pays_fee: Default::default(), - }; - - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // We expect an accrue of 1 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1250); - }) - } - - #[test] - fn sets_to_node_storage_proof_if_higher() { - // The storage proof reported by the proof recorder is higher than what is stored on - // the runtime side. - { - let mut test_ext = setup_test_externalities(&[1000, 1005]); - - test_ext.execute_with(|| { - // Stored in BlockWeight is 5 - set_current_storage_weight(5); - - // Benchmarked storage weight: 10 - let info = DispatchInfo { weight: Weight::from_parts(0, 10), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(1000)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // We expect that the storage weight was set to the node-side proof size (1005) + - // extrinsics length (150) - assert_eq!(get_storage_weight().total().proof_size(), 1155); - }) - } - - // In this second scenario the proof size on the node side is only lower - // after reclaim happened. - { - let mut test_ext = setup_test_externalities(&[175, 180]); - test_ext.execute_with(|| { - set_current_storage_weight(85); - - // Benchmarked storage weight: 100 - let info = - DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // After this pre_dispatch, the BlockWeight proof size will be - // 85 (initial) + 100 (benched) + 150 (tx length) = 335 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(175)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - // First we will reclaim 95, which leaves us with 240 BlockWeight. This is lower - // than 180 (proof size hf) + 150 (length), so we expect it to be set to 330. 
- assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // We expect that the storage weight was set to the node-side proof weight - assert_eq!(get_storage_weight().total().proof_size(), 330); - }) - } - } - - #[test] - fn does_nothing_without_extension() { - let mut test_ext = new_test_ext(); - - // Proof size extension not registered - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 500 - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // Adds 500 + 150 (len) weight - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, None); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1650); - }) - } - - #[test] - fn negative_refund_is_added_to_weight() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 100 - let info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // Weight added should be 100 + 150 (len) - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // We expect no refund - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!( - get_storage_weight().total().proof_size(), - 1100 + LEN as u64 + info.weight.proof_size() - ); - }) - } - - #[test] - fn test_zero_proof_size() { - let mut test_ext = setup_test_externalities(&[0, 0]); - - test_ext.execute_with(|| { - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(0)); - - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // Proof size should be exactly equal to extrinsic length - assert_eq!(get_storage_weight().total().proof_size(), LEN as u64); - }); - } - - #[test] - fn test_larger_pre_dispatch_proof_size() { - let mut test_ext = setup_test_externalities(&[300, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(1300); - - let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; - let post_info = PostDispatchInfo::default(); - - // Adds 500 + 150 (len) weight, total weight is 1950 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(300)); - - // Refund 500 unspent weight according to `post_info`, total weight is now 1650 - 
assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // Recorded proof size is negative -200, total weight is now 1450 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1450); - }); - } - - #[test] - fn test_incorporates_check_weight_unspent_weight() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 300 - let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; - - // Actual weight is 50 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 250)), - pays_fee: Default::default(), - }; - - // Should add 300 + 150 (len) of weight - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // Reclaimed 100 - assert_eq!(get_storage_weight().total().proof_size(), 1350); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_on_negative() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 50 - let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; - - // Actual weight is 25 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 25)), - pays_fee: Default::default(), - }; - - // Adds 50 + 150 (len) weight, total weight 1200 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. 
- - // Refunds unspent 25 weight according to `post_info`, 1175 - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - // Adds 200 - 25 (unspent) == 175 weight, total weight 1350 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - assert_eq!(get_storage_weight().total().proof_size(), 1350); - }) - } - - #[test] - fn test_nothing_relcaimed() { - let mut test_ext = setup_test_externalities(&[0, 100]); - - test_ext.execute_with(|| { - set_current_storage_weight(0); - // Benchmarked storage weight: 100 - let info = DispatchInfo { weight: Weight::from_parts(100, 100), ..Default::default() }; - - // Actual proof size is 100 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 100)), - pays_fee: Default::default(), - }; - - // Adds benchmarked weight 100 + 150 (len), total weight is now 250 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - // Weight should go up by 150 len + 100 proof size weight, total weight 250 - assert_eq!(get_storage_weight().total().proof_size(), 250); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - // Should return `setup_test_externalities` proof recorder value: 100. - assert_eq!(pre, Some(0)); - - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - // Nothing to refund, unspent is 0, total weight 250 - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, LEN, &Ok(()))); - // `setup_test_externalities` proof recorder value: 200, so this means the extrinsic - // actually used 100 proof size. - // Nothing to refund or add, weight matches proof recorder - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - - // Check block len weight was not reclaimed: - // 100 weight + 150 extrinsic len == 250 proof size - assert_eq!(get_storage_weight().total().proof_size(), 250); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_reverse_order() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - - // Benchmarked storage weight: 300 - let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() }; - - // Actual weight is 50 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 250)), - pays_fee: Default::default(), - }; - - // Adds 300 + 150 (len) weight, total weight 1450 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // This refunds 100 - 50(unspent), total weight is now 1400 - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. 
- assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - // Above call refunds 50 (unspent), total weight is 1350 now - assert_eq!(get_storage_weight().total().proof_size(), 1350); - }) - } - - #[test] - fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { - let mut test_ext = setup_test_externalities(&[100, 300]); - - test_ext.execute_with(|| { - set_current_storage_weight(1000); - // Benchmarked storage weight: 50 - let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() }; - - // Actual weight is 25 - let post_info = PostDispatchInfo { - actual_weight: Some(Weight::from_parts(50, 25)), - pays_fee: Default::default(), - }; - - // Adds 50 + 150 (len) weight, total weight is 1200 - assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); - - let pre = StorageWeightReclaim::(PhantomData) - .pre_dispatch(&ALICE, CALL, &info, LEN) - .unwrap(); - assert_eq!(pre, Some(100)); - - // Adds additional 150 weight recorded - assert_ok!(StorageWeightReclaim::::post_dispatch( - Some(pre), - &info, - &post_info, - LEN, - &Ok(()) - )); - // `CheckWeight` gets called after `StorageWeightReclaim` this time. - // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` - // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); - - assert_eq!(get_storage_weight().total().proof_size(), 1350); - }) - } - - #[test] - fn storage_size_reported_correctly() { - let mut test_ext = setup_test_externalities(&[1000]); - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), Some(1000)); - }); - - let mut test_ext = new_test_ext(); - - let test_recorder = TestRecorder::new(&[0]); - - test_ext.register_extension(ProofSizeExt::new(test_recorder)); - - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), Some(0)); - }); - } - - #[test] - fn storage_size_disabled_reported_correctly() { - let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]); - - test_ext.execute_with(|| { - assert_eq!(get_proof_size(), None); - }); - } - - #[test] - fn test_reclaim_helper() { - let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - remaining_weight_meter.consume(Weight::from_parts(0, 500)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 200))); - - remaining_weight_meter.consume(Weight::from_parts(0, 800)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - assert_eq!(reclaimed, Some(Weight::from_parts(0, 300))); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200)); - }); + Ok(Weight::zero()) } - #[test] - fn test_reclaim_helper_does_not_reclaim_negative() { - // Benchmarked weight does not change at all - let mut test_ext = setup_test_externalities(&[1000, 1300]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); - 
assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000)); - }); - - // Benchmarked weight increases less than storage proof consumes - let mut test_ext = setup_test_externalities(&[1000, 1300]); - - test_ext.execute_with(|| { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - remaining_weight_meter.consume(Weight::from_parts(0, 0)); - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); - }); - } - - /// Just here for doc purposes - fn get_benched_weight() -> Weight { - Weight::from_parts(0, 5) - } - - /// Just here for doc purposes - fn do_work() {} - - #[docify::export_content(simple_reclaimer_example)] - fn reclaim_with_weight_meter() { - let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10)); - - let benched_weight = get_benched_weight(); - - // It is important to instantiate the `StorageWeightReclaimer` before we consume the weight - // for a piece of work from the weight meter. - let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); - - if remaining_weight_meter.try_consume(benched_weight).is_ok() { - // Perform some work that takes has `benched_weight` storage weight. - do_work(); - - // Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed. - let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); - - // We reclaimed 3 bytes of storage size! - assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); - assert_eq!(get_storage_weight().total().proof_size(), 10); - assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); - } - } - - #[test] - fn test_reclaim_helper_works_with_meter() { - // The node will report 12 - 10 = 2 consumed storage size between the calls. - let mut test_ext = setup_test_externalities(&[10, 12]); - - test_ext.execute_with(|| { - // Initial storage size is 10. - set_current_storage_weight(10); - reclaim_with_weight_meter(); - }); - } + impl_tx_ext_default!(T::RuntimeCall; weight validate); } diff --git a/cumulus/primitives/storage-weight-reclaim/src/tests.rs b/cumulus/primitives/storage-weight-reclaim/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..c5552b0f0a33e60929e17e324f283a9c7ab826dc --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/src/tests.rs @@ -0,0 +1,706 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
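The relocated tests that follow work by scripting the node-side proof recorder: the `TestRecorder` defined just below implements `sp_trie::ProofSizeProvider` and is registered through `ProofSizeExt`. Stripped of the trait and externalities plumbing, the pattern is an atomic cursor over pre-seeded readings; a std-only sketch (not the real trait impl):

use std::sync::atomic::{AtomicUsize, Ordering};

struct ScriptedRecorder {
    return_values: Box<[usize]>,
    counter: AtomicUsize,
}

impl ScriptedRecorder {
    fn new(values: &[usize]) -> Self {
        Self { return_values: values.into(), counter: AtomicUsize::new(0) }
    }

    // Each call hands out the next scripted proof-size reading, so a test can
    // dictate exactly what the recorder reports before and after dispatch.
    fn estimate_encoded_size(&self) -> usize {
        let idx = self.counter.fetch_add(1, Ordering::Relaxed);
        self.return_values[idx]
    }
}

#[test]
fn scripted_recorder_replays_readings_in_order() {
    let recorder = ScriptedRecorder::new(&[0, 100]);
    assert_eq!(recorder.estimate_encoded_size(), 0);   // pre-dispatch reading
    assert_eq!(recorder.estimate_encoded_size(), 100); // post-dispatch reading
}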
+ +use super::*; +use core::marker::PhantomData; +use frame_support::{ + assert_ok, + dispatch::{DispatchClass, PerDispatchClass}, + weights::{Weight, WeightMeter}, +}; +use frame_system::{BlockWeight, CheckWeight}; +use sp_runtime::{traits::DispatchTransaction, AccountId32, BuildStorage}; +use sp_trie::proof_size_extension::ProofSizeExt; + +type Test = cumulus_test_runtime::Runtime; +const CALL: &::RuntimeCall = + &cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); +const ALICE: AccountId32 = AccountId32::new([1u8; 32]); +const LEN: usize = 150; + +fn new_test_ext() -> sp_io::TestExternalities { + let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() + .build_storage() + .unwrap() + .into(); + ext +} + +struct TestRecorder { + return_values: Box<[usize]>, + counter: core::sync::atomic::AtomicUsize, +} + +impl TestRecorder { + fn new(values: &[usize]) -> Self { + TestRecorder { return_values: values.into(), counter: Default::default() } + } +} + +impl sp_trie::ProofSizeProvider for TestRecorder { + fn estimate_encoded_size(&self) -> usize { + let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed); + self.return_values[counter] + } +} + +fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities { + let mut test_ext = new_test_ext(); + let test_recorder = TestRecorder::new(proof_values); + test_ext.register_extension(ProofSizeExt::new(test_recorder)); + test_ext +} + +fn set_current_storage_weight(new_weight: u64) { + BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal); + }); +} + +fn get_storage_weight() -> PerDispatchClass { + BlockWeight::::get() +} + +#[test] +fn basic_refund() { + // The real cost will be 100 bytes of storage size + let mut test_ext = setup_test_externalities(&[0, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // Should add 500 + 150 (len) to weight. + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + // We expect a refund of 400 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1250); + }) +} + +#[test] +fn underestimating_refund() { + // We fixed a bug where `pre dispatch info weight > consumed weight > post info weight` + // resulted in error. 
+ + // The real cost will be 100 bytes of storage size + let mut test_ext = setup_test_externalities(&[0, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { call_weight: Weight::from_parts(0, 101), ..Default::default() }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(0, 99)), + pays_fee: Default::default(), + }; + + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()))); + // We expect an accrue of 1 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()) + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1250); + }) +} + +#[test] +fn sets_to_node_storage_proof_if_higher() { + // The storage proof reported by the proof recorder is higher than what is stored on + // the runtime side. + { + let mut test_ext = setup_test_externalities(&[1000, 1005]); + + test_ext.execute_with(|| { + // Stored in BlockWeight is 5 + set_current_storage_weight(5); + + // Benchmarked storage weight: 10 + let info = + DispatchInfo { call_weight: Weight::from_parts(0, 10), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(1000)); + + assert_ok!(CheckWeight::::post_dispatch_details( + (), + &info, + &post_info, + 0, + &Ok(()) + )); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()) + )); + + // We expect that the storage weight was set to the node-side proof size (1005) + + // extrinsics length (150) + assert_eq!(get_storage_weight().total().proof_size(), 1155); + }) + } + + // In this second scenario the proof size on the node side is only lower + // after reclaim happened. + { + let mut test_ext = setup_test_externalities(&[175, 180]); + test_ext.execute_with(|| { + set_current_storage_weight(85); + + // Benchmarked storage weight: 100 + let info = + DispatchInfo { call_weight: Weight::from_parts(0, 100), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // After this pre_dispatch, the BlockWeight proof size will be + // 85 (initial) + 100 (benched) + 150 (tx length) = 335 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(175)); + + assert_ok!(CheckWeight::::post_dispatch_details( + (), + &info, + &post_info, + 0, + &Ok(()) + )); + + // First we will reclaim 95, which leaves us with 240 BlockWeight. This is lower + // than 180 (proof size hf) + 150 (length), so we expect it to be set to 330. 
+ assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()) + )); + + // We expect that the storage weight was set to the node-side proof weight + assert_eq!(get_storage_weight().total().proof_size(), 330); + }) + } +} + +#[test] +fn does_nothing_without_extension() { + let mut test_ext = new_test_ext(); + + // Proof size extension not registered + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // Adds 500 + 150 (len) weight + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, None); + + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1650); + }) +} + +#[test] +fn negative_refund_is_added_to_weight() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 100 + let info = DispatchInfo { call_weight: Weight::from_parts(0, 100), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // Weight added should be 100 + 150 (len) + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // We expect no refund + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!( + get_storage_weight().total().proof_size(), + 1100 + LEN as u64 + info.total_weight().proof_size() + ); + }) +} + +#[test] +fn test_zero_proof_size() { + let mut test_ext = setup_test_externalities(&[0, 0]); + + test_ext.execute_with(|| { + let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + // Proof size should be exactly equal to extrinsic length + assert_eq!(get_storage_weight().total().proof_size(), LEN as u64); + }); +} + +#[test] +fn test_larger_pre_dispatch_proof_size() { + let mut test_ext = setup_test_externalities(&[300, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1300); + + let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + 
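One detail worth spelling out from the `sets_to_node_storage_proof_if_higher` comments above: after reclaiming, the runtime-tracked proof size is never left below what the node actually recorded plus the extrinsic length. The sketch below follows the arithmetic in those test comments, not the exact implementation:

fn apply_node_side_floor(runtime_proof_size: u64, node_proof_size: u64, extrinsic_len: u64) -> u64 {
    runtime_proof_size.max(node_proof_size.saturating_add(extrinsic_len))
}

#[test]
fn floor_matches_both_scenarios() {
    // Scenario 1: 5 + 10 + 150 = 165, refund 5 -> 160; floor is 1005 + 150 = 1155.
    assert_eq!(apply_node_side_floor(160, 1005, 150), 1155);
    // Scenario 2: 85 + 100 + 150 = 335, reclaim 95 -> 240; floor is 180 + 150 = 330.
    assert_eq!(apply_node_side_floor(240, 180, 150), 330);
}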
// Adds 500 + 150 (len) weight, total weight is 1950 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(300)); + + // Refund 500 unspent weight according to `post_info`, total weight is now 1650 + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + // Recorded proof size is negative -200, total weight is now 1450 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1450); + }); +} + +#[test] +fn test_incorporates_check_weight_unspent_weight() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 300 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 300), ..Default::default() }; + + // Actual weight is 50 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 250)), + pays_fee: Default::default(), + }; + + // Should add 300 + 150 (len) of weight + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + // Reclaimed 100 + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_on_negative() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 50 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 50), ..Default::default() }; + + // Actual weight is 25 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 25)), + pays_fee: Default::default(), + }; + + // Adds 50 + 150 (len) weight, total weight 1200 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. 
+ // Refunds unspent 25 weight according to `post_info`, 1175 + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + // Adds 200 - 25 (unspent) == 175 weight, total weight 1350 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) +} + +#[test] +fn test_nothing_relcaimed() { + let mut test_ext = setup_test_externalities(&[0, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(0); + // Benchmarked storage weight: 100 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 100), ..Default::default() }; + + // Actual proof size is 100 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 100)), + pays_fee: Default::default(), + }; + + // Adds benchmarked weight 100 + 150 (len), total weight is now 250 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + // Weight should go up by 150 len + 100 proof size weight, total weight 250 + assert_eq!(get_storage_weight().total().proof_size(), 250); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + // Should return `setup_test_externalities` proof recorder value: 100. + assert_eq!(pre, Some(0)); + + // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + // Nothing to refund, unspent is 0, total weight 250 + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, LEN, &Ok(()))); + // `setup_test_externalities` proof recorder value: 200, so this means the extrinsic + // actually used 100 proof size. + // Nothing to refund or add, weight matches proof recorder + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()) + )); + + // Check block len weight was not reclaimed: + // 100 weight + 150 extrinsic len == 250 proof size + assert_eq!(get_storage_weight().total().proof_size(), 250); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_reverse_order() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 300 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 300), ..Default::default() }; + + // Actual weight is 50 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 250)), + pays_fee: Default::default(), + }; + + // Adds 300 + 150 (len) weight, total weight 1450 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // This refunds 100 - 50(unspent), total weight is now 1400 + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + // `CheckWeight` gets called after `StorageWeightReclaim` this time. + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. 
+ assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + + // Above call refunds 50 (unspent), total weight is 1350 now + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) +} + +#[test] +fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { + let mut test_ext = setup_test_externalities(&[100, 300]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + // Benchmarked storage weight: 50 + let info = DispatchInfo { call_weight: Weight::from_parts(100, 50), ..Default::default() }; + + // Actual weight is 25 + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(50, 25)), + pays_fee: Default::default(), + }; + + // Adds 50 + 150 (len) weight, total weight is 1200 + let (_, next_len) = CheckWeight::::do_validate(&info, LEN).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&info, LEN, next_len)); + + let (pre, _) = StorageWeightReclaim::(PhantomData) + .validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(100)); + + // Adds additional 150 weight recorded + assert_ok!(StorageWeightReclaim::::post_dispatch_details( + pre, + &info, + &post_info, + LEN, + &Ok(()), + )); + // `CheckWeight` gets called after `StorageWeightReclaim` this time. + // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` + // we always need to call `post_dispatch` to verify that they interoperate correctly. + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); + + assert_eq!(get_storage_weight().total().proof_size(), 1350); + }) +} + +#[test] +fn storage_size_reported_correctly() { + let mut test_ext = setup_test_externalities(&[1000]); + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), Some(1000)); + }); + + let mut test_ext = new_test_ext(); + + let test_recorder = TestRecorder::new(&[0]); + + test_ext.register_extension(ProofSizeExt::new(test_recorder)); + + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), Some(0)); + }); +} + +#[test] +fn storage_size_disabled_reported_correctly() { + let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]); + + test_ext.execute_with(|| { + assert_eq!(get_proof_size(), None); + }); +} + +#[test] +fn test_reclaim_helper() { + let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + remaining_weight_meter.consume(Weight::from_parts(0, 500)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + assert_eq!(reclaimed, Some(Weight::from_parts(0, 200))); + + remaining_weight_meter.consume(Weight::from_parts(0, 800)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + assert_eq!(reclaimed, Some(Weight::from_parts(0, 300))); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200)); + }); +} + +#[test] +fn test_reclaim_helper_does_not_reclaim_negative() { + // Benchmarked weight does not change at all + let mut test_ext = setup_test_externalities(&[1000, 1300]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + 
+ assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000)); + }); + + // Benchmarked weight increases less than storage proof consumes + let mut test_ext = setup_test_externalities(&[1000, 1300]); + + test_ext.execute_with(|| { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000)); + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + remaining_weight_meter.consume(Weight::from_parts(0, 0)); + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + assert_eq!(reclaimed, Some(Weight::from_parts(0, 0))); + }); +} + +/// Just here for doc purposes +fn get_benched_weight() -> Weight { + Weight::from_parts(0, 5) +} + +/// Just here for doc purposes +fn do_work() {} + +#[docify::export_content(simple_reclaimer_example)] +fn reclaim_with_weight_meter() { + let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10)); + + let benched_weight = get_benched_weight(); + + // It is important to instantiate the `StorageWeightReclaimer` before we consume the weight + // for a piece of work from the weight meter. + let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter); + + if remaining_weight_meter.try_consume(benched_weight).is_ok() { + // Perform some work that takes has `benched_weight` storage weight. + do_work(); + + // Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed. + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + // We reclaimed 3 bytes of storage size! + assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); + assert_eq!(get_storage_weight().total().proof_size(), 10); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); + } +} + +#[test] +fn test_reclaim_helper_works_with_meter() { + // The node will report 12 - 10 = 2 consumed storage size between the calls. + let mut test_ext = setup_test_externalities(&[10, 12]); + + test_ext.execute_with(|| { + // Initial storage size is 10. 
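The `StorageWeightReclaimer` helper exercised in `test_reclaim_helper` above snapshots both the weight meter and the proof recorder, then hands back whatever was charged but not actually recorded. A std-only toy that reproduces the numbers from that test (plain `u64`s in place of `Weight`/`WeightMeter`, so a model consistent with the test rather than the real API):

struct ToyMeter { remaining_proof_size: u64 }

struct ToyReclaimer { prev_remaining: u64, prev_reported: u64 }

impl ToyReclaimer {
    fn new(meter: &ToyMeter, reported: u64) -> Self {
        Self { prev_remaining: meter.remaining_proof_size, prev_reported: reported }
    }

    // Give back `charged since last snapshot - recorded since last snapshot`,
    // then re-snapshot both sides.
    fn reclaim(&mut self, meter: &mut ToyMeter, reported: u64) -> u64 {
        let charged = self.prev_remaining - meter.remaining_proof_size;
        let recorded = reported - self.prev_reported;
        let reclaimed = charged.saturating_sub(recorded);
        meter.remaining_proof_size += reclaimed;
        self.prev_remaining = meter.remaining_proof_size;
        self.prev_reported = reported;
        reclaimed
    }
}

#[test]
fn toy_reclaimer_matches_test_reclaim_helper() {
    // Recorder readings scripted as 1000, 1300, 1800, like in the test above.
    let mut meter = ToyMeter { remaining_proof_size: 2000 };
    let mut helper = ToyReclaimer::new(&meter, 1000);

    meter.remaining_proof_size -= 500;                 // consume 500 (benchmarked)
    assert_eq!(helper.reclaim(&mut meter, 1300), 200); // only 300 really used

    meter.remaining_proof_size -= 800;                 // consume 800 (benchmarked)
    assert_eq!(helper.reclaim(&mut meter, 1800), 300); // only 500 really used
    assert_eq!(meter.remaining_proof_size, 1200);
}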
+ set_current_storage_weight(10); + reclaim_with_weight_meter(); + }); +} diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index fbbaab73ce7695c62cfbe1e6ce62d67ba5a68509..33023816c718cd0a41335e8481fb6428ea6b6bdb 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -53,6 +53,7 @@ runtime-benchmarks = [ "cumulus-test-service/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "sc-service/runtime-benchmarks", diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index f26413e441e72b7fca558043f0507de91db4746f..eaf81699f6d7437138c0a991115cff13e9dda78f 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -25,7 +25,7 @@ pub use polkadot_parachain_primitives::primitives::{ BlockData, HeadData, ValidationParams, ValidationResult, }; use runtime::{ - Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedExtra, SignedPayload, + Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedPayload, TxExtension, UncheckedExtrinsic, VERSION, }; use sc_consensus_aura::standalone::{seal, slot_author}; @@ -117,7 +117,7 @@ impl DefaultTestClientBuilderExt for TestClientBuilder { /// Create an unsigned extrinsic from a runtime call. pub fn generate_unsigned(function: impl Into) -> UncheckedExtrinsic { - UncheckedExtrinsic::new_unsigned(function.into()) + UncheckedExtrinsic::new_bare(function.into()) } /// Create a signed extrinsic from a runtime call and sign @@ -135,7 +135,7 @@ pub fn generate_extrinsic_with_pair( let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckGenesis::::new(), @@ -144,13 +144,14 @@ pub fn generate_extrinsic_with_pair( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), - ); + ) + .into(); let function = function.into(); let raw_payload = SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ((), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| origin.sign(e)); @@ -159,7 +160,7 @@ pub fn generate_extrinsic_with_pair( function, origin.public().into(), Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 92a6ab73a2ebe3dbc33a970a0ca368373de2a767..5443bb5f526b3f86564bf391d8732b8f0858111c 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -279,6 +279,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_sudo::Config for Runtime { @@ -371,8 +372,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. 
-pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckGenesis, @@ -384,7 +385,7 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -395,7 +396,7 @@ pub type Executive = frame_executive::Executive< TestOnRuntimeUpgrade, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; pub struct TestOnRuntimeUpgrade; diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index a1b70c5239525a641da43fdf328897a727d26d1f..3ef9424b9ed6b953337cef5a505eeaab41fb59dc 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -110,6 +110,7 @@ runtime-benchmarks = [ "cumulus-test-client/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "parachains-common/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs index 67ffbdd1d2129921782626300292d2267f26a8a0..76717b4136faae328b5a80b193d87f9302ee0076 100644 --- a/cumulus/test/service/src/bench_utils.rs +++ b/cumulus/test/service/src/bench_utils.rs @@ -68,12 +68,11 @@ pub fn extrinsic_set_time(client: &TestClient) -> OpaqueExtrinsic { let best_number = client.usage_info().chain.best_number; let timestamp = best_number as u64 * cumulus_test_runtime::MinimumPeriod::get(); - cumulus_test_runtime::UncheckedExtrinsic { - signature: None, - function: cumulus_test_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { + cumulus_test_runtime::UncheckedExtrinsic::new_bare( + cumulus_test_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: timestamp, }), - } + ) .into() } @@ -101,12 +100,11 @@ pub fn extrinsic_set_validation_data( horizontal_messages: Default::default(), }; - cumulus_test_runtime::UncheckedExtrinsic { - signature: None, - function: cumulus_test_runtime::RuntimeCall::ParachainSystem( + cumulus_test_runtime::UncheckedExtrinsic::new_bare( + cumulus_test_runtime::RuntimeCall::ParachainSystem( cumulus_pallet_parachain_system::Call::set_validation_data { data }, ), - } + ) .into() } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index a13399d3a40e61cb5c556ff8de2b3bd810a7697b..a3e519a68b91c74491550852ef71d65f4c734020 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -969,7 +969,7 @@ pub fn construct_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckGenesis::::new(), @@ -981,10 +981,11 @@ pub fn construct_extrinsic( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), - ); + ) + .into(); let raw_payload = runtime::SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ((), runtime::VERSION.spec_version, 
genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| caller.sign(e)); @@ -992,7 +993,7 @@ pub fn construct_extrinsic( function, caller.public().into(), runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index 37417497e1f84f3a54fb3e55da631c9ce363e700..dcf9806dcb6233af6f3ed762ef97ee3b7df1657e 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -1,13 +1,12 @@ flowchart parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] - polkadot_network[polkadot.network] --> devhub[polkadot_sdk_docs] devhub --> polkadot_sdk devhub --> reference_docs devhub --> guides + devhub --> external_resources polkadot_sdk --> substrate polkadot_sdk --> frame - polkadot_sdk --> cumulus - polkadot_sdk --> polkadot[polkadot node] polkadot_sdk --> xcm + polkadot_sdk --> templates diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index adc1c1a8efbca343e6693ca0027654cba3ea532b..0c39367eeed35ec02e6e930d63b427a8a26b0190 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -29,6 +29,7 @@ pallet-example-offchain-worker = { workspace = true, default-features = true } # How we build docs in rust-docs simple-mermaid = "0.1.1" docify = { workspace = true } +serde_json = { workspace = true } # Polkadot SDK deps, typically all should only be in scope such that we can link to their doc item. polkadot-sdk = { features = ["runtime-full"], workspace = true, default-features = true } @@ -39,6 +40,8 @@ subkey = { workspace = true, default-features = true } frame-system = { workspace = true } frame-support = { workspace = true } frame-executive = { workspace = true } +frame-benchmarking = { workspace = true } +pallet-example-authorization-tx-extension = { workspace = true, default-features = true } pallet-example-single-block-migrations = { workspace = true, default-features = true } frame-metadata-hash-extension = { workspace = true, default-features = true } log = { workspace = true, default-features = true } @@ -69,6 +72,9 @@ cumulus-primitives-proof-size-hostfunction = { workspace = true, default-feature cumulus-client-service = { workspace = true, default-features = true } cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true } +# Omni Node +polkadot-omni-node-lib = { workspace = true, default-features = true } + # Pallets and FRAME internals pallet-aura = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } @@ -91,6 +97,7 @@ pallet-scheduler = { workspace = true, default-features = true } pallet-referenda = { workspace = true, default-features = true } pallet-broker = { workspace = true, default-features = true } pallet-babe = { workspace = true, default-features = true } +pallet-grandpa = { workspace = true, default-features = true } # Primitives sp-io = { workspace = true, default-features = true } @@ -105,6 +112,7 @@ sp-arithmetic = { workspace = true, default-features = true } sp-genesis-builder = { workspace = true, default-features = true } sp-offchain = { workspace = true, default-features = true } sp-version = { workspace = true, default-features = true } +sp-weights = { workspace = true, default-features = true } # XCM @@ -116,9 +124,18 @@ xcm-simulator = { workspace = true } pallet-xcm = { workspace = true } # runtime guides -chain-spec-guide-runtime = { workspace = true } + +chain-spec-guide-runtime = { workspace = true, default-features = true } # Templates -minimal-template-runtime = { 
workspace = true } -solochain-template-runtime = { workspace = true } -parachain-template-runtime = { workspace = true } +minimal-template-runtime = { workspace = true, default-features = true } +solochain-template-runtime = { workspace = true, default-features = true } +parachain-template-runtime = { workspace = true, default-features = true } + +# local packages +first-runtime = { workspace = true, default-features = true } +first-pallet = { workspace = true, default-features = true } + +[dev-dependencies] +assert_cmd = "2.0.14" +rand = "0.8" diff --git a/docs/sdk/assets/theme.css b/docs/sdk/assets/theme.css index 1f47a8ef5b0c4c399c35cd1fb0f6c9740d6b0230..f9aa4760275ef555d7c645b248998d00dc86ff46 100644 --- a/docs/sdk/assets/theme.css +++ b/docs/sdk/assets/theme.css @@ -6,6 +6,27 @@ --polkadot-purple: #552BBF; } +/* Light theme */ +html[data-theme="light"] { + --quote-background: #f9f9f9; + --quote-border: #ccc; + --quote-text: #333; +} + +/* Dark theme */ +html[data-theme="dark"] { + --quote-background: #333; + --quote-border: #555; + --quote-text: #f9f9f9; +} + +/* Ayu theme */ +html[data-theme="ayu"] { + --quote-background: #272822; + --quote-border: #383830; + --quote-text: #f8f8f2; +} + body.sdk-docs { nav.sidebar>div.sidebar-crate>a>img { width: 190px; @@ -20,3 +41,17 @@ body.sdk-docs { html[data-theme="light"] .sidebar-crate > .logo-container > img { content: url("https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/docs/images/Polkadot_Logo_Horizontal_Pink_Black.png"); } + +/* Custom styles for blockquotes */ +blockquote { + background-color: var(--quote-background); + border-left: 5px solid var(--quote-border); + color: var(--quote-text); + margin: 1em 0; + padding: 1em 1.5em; + /* font-style: italic; */ +} + +blockquote p { + margin: 0; +} diff --git a/docs/sdk/packages/guides/first-pallet/Cargo.toml b/docs/sdk/packages/guides/first-pallet/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..dad5b88634945ec02ec5da793ba93ae4e831543a --- /dev/null +++ b/docs/sdk/packages/guides/first-pallet/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "polkadot-sdk-docs-first-pallet" +description = "A simple pallet created for the polkadot-sdk-docs guides" +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +scale-info = { workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } +docify = { workspace = true } + +[features] +default = ["std"] +std = ["codec/std", "frame/std", "scale-info/std"] diff --git a/docs/sdk/packages/guides/first-pallet/src/lib.rs b/docs/sdk/packages/guides/first-pallet/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..168b7ca44aba2146e24182402d9d4c78aa0f96ac --- /dev/null +++ b/docs/sdk/packages/guides/first-pallet/src/lib.rs @@ -0,0 +1,480 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Pallets used in the `your_first_pallet` guide. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod shell_pallet { + use frame::prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +#[frame::pallet(dev_mode)] +pub mod pallet { + use frame::prelude::*; + + #[docify::export] + pub type Balance = u128; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export] + /// Single storage item, of type `Balance`. + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + /// A mapping from `T::AccountId` to `Balance` + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[docify::export(impl_pallet)] + #[pallet::call] + impl Pallet { + /// An unsafe mint that can be called by anyone. Not a great idea. + pub fn mint_unsafe( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + // ensure that this is a signed account, but we don't really check `_anyone`. + let _anyone = ensure_signed(origin)?; + + // update the balances map. Notice how all `` remains as ``. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + // update total issuance. + TotalIssuance::::mutate(|t| *t = Some(t.unwrap_or(0) + amount)); + + Ok(()) + } + + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + if sender_balance < amount { + return Err("InsufficientBalance".into()) + } + let remainder = sender_balance - amount; + + // update sender and dest balances. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, remainder); + + Ok(()) + } + } + + #[allow(unused)] + impl Pallet { + #[docify::export] + pub fn transfer_better( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + ensure!(sender_balance >= amount, "InsufficientBalance"); + let remainder = sender_balance - amount; + + // .. snip + Ok(()) + } + + #[docify::export] + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer_better_checked( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + let remainder = sender_balance.checked_sub(amount).ok_or("InsufficientBalance")?; + + // .. 
snip + Ok(()) + } + } + + #[cfg(any(test, doc))] + pub(crate) mod tests { + use crate::pallet::*; + + #[docify::export(testing_prelude)] + use frame::testing_prelude::*; + + pub(crate) const ALICE: u64 = 1; + pub(crate) const BOB: u64 = 2; + pub(crate) const CHARLIE: u64 = 3; + + #[docify::export] + // This runtime is only used for testing, so it should be somewhere like `#[cfg(test)] mod + // tests { .. }` + mod runtime { + use super::*; + // we need to reference our `mod pallet` as an identifier to pass to + // `construct_runtime`. + // YOU HAVE TO CHANGE THIS LINE BASED ON YOUR TEMPLATE + use crate::pallet as pallet_currency; + + construct_runtime!( + pub enum Runtime { + // ---^^^^^^ This is where `enum Runtime` is defined. + System: frame_system, + Currency: pallet_currency, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + // within pallet we just said `::AccountId`, now we + // finally specified it. + type AccountId = u64; + } + + // our simple pallet has nothing to be configured. + impl pallet_currency::Config for Runtime {} + } + + pub(crate) use runtime::*; + + #[allow(unused)] + #[docify::export] + fn new_test_state_basic() -> TestState { + let mut state = TestState::new_empty(); + let accounts = vec![(ALICE, 100), (BOB, 100)]; + state.execute_with(|| { + for (who, amount) in &accounts { + Balances::::insert(who, amount); + TotalIssuance::::mutate(|b| *b = Some(b.unwrap_or(0) + amount)); + } + }); + + state + } + + #[docify::export] + pub(crate) struct StateBuilder { + balances: Vec<(::AccountId, Balance)>, + } + + #[docify::export(default_state_builder)] + impl Default for StateBuilder { + fn default() -> Self { + Self { balances: vec![(ALICE, 100), (BOB, 100)] } + } + } + + #[docify::export(impl_state_builder_add)] + impl StateBuilder { + fn add_balance( + mut self, + who: ::AccountId, + amount: Balance, + ) -> Self { + self.balances.push((who, amount)); + self + } + } + + #[docify::export(impl_state_builder_build)] + impl StateBuilder { + pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) { + let mut ext = TestState::new_empty(); + ext.execute_with(|| { + for (who, amount) in &self.balances { + Balances::::insert(who, amount); + TotalIssuance::::mutate(|b| *b = Some(b.unwrap_or(0) + amount)); + } + }); + + ext.execute_with(test); + + // assertions that must always hold + ext.execute_with(|| { + assert_eq!( + Balances::::iter().map(|(_, x)| x).sum::(), + TotalIssuance::::get().unwrap_or_default() + ); + }) + } + } + + #[docify::export] + #[test] + fn first_test() { + TestState::new_empty().execute_with(|| { + // We expect Alice's account to have no funds. + assert_eq!(Balances::::get(&ALICE), None); + assert_eq!(TotalIssuance::::get(), None); + + // mint some funds into Alice's account. 
+ assert_ok!(Pallet::::mint_unsafe( + RuntimeOrigin::signed(ALICE), + ALICE, + 100 + )); + + // re-check the above + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(100)); + }) + } + + #[docify::export] + #[test] + fn state_builder_works() { + StateBuilder::default().build_and_execute(|| { + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(Balances::::get(&BOB), Some(100)); + assert_eq!(Balances::::get(&CHARLIE), None); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + + #[docify::export] + #[test] + fn state_builder_add_balance() { + StateBuilder::default().add_balance(CHARLIE, 42).build_and_execute(|| { + assert_eq!(Balances::::get(&CHARLIE), Some(42)); + assert_eq!(TotalIssuance::::get(), Some(242)); + }) + } + + #[test] + #[should_panic] + fn state_builder_duplicate_genesis_fails() { + StateBuilder::default() + .add_balance(CHARLIE, 42) + .add_balance(CHARLIE, 43) + .build_and_execute(|| { + assert_eq!(Balances::::get(&CHARLIE), None); + assert_eq!(TotalIssuance::::get(), Some(242)); + }) + } + + #[docify::export] + #[test] + fn mint_works() { + StateBuilder::default().build_and_execute(|| { + // given the initial state, when: + assert_ok!(Pallet::::mint_unsafe(RuntimeOrigin::signed(ALICE), BOB, 100)); + + // then: + assert_eq!(Balances::::get(&BOB), Some(200)); + assert_eq!(TotalIssuance::::get(), Some(300)); + + // given: + assert_ok!(Pallet::::mint_unsafe( + RuntimeOrigin::signed(ALICE), + CHARLIE, + 100 + )); + + // then: + assert_eq!(Balances::::get(&CHARLIE), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(400)); + }); + } + + #[docify::export] + #[test] + fn transfer_works() { + StateBuilder::default().build_and_execute(|| { + // given the initial state, when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); + + // then: + assert_eq!(Balances::::get(&ALICE), Some(50)); + assert_eq!(Balances::::get(&BOB), Some(150)); + assert_eq!(TotalIssuance::::get(), Some(200)); + + // when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(BOB), ALICE, 50)); + + // then: + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(Balances::::get(&BOB), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + + #[docify::export] + #[test] + fn transfer_from_non_existent_fails() { + StateBuilder::default().build_and_execute(|| { + // given the initial state, when: + assert_err!( + Pallet::::transfer(RuntimeOrigin::signed(CHARLIE), ALICE, 10), + "NonExistentAccount" + ); + + // then nothing has changed. + assert_eq!(Balances::::get(&ALICE), Some(100)); + assert_eq!(Balances::::get(&BOB), Some(100)); + assert_eq!(Balances::::get(&CHARLIE), None); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_v2 { + use super::pallet::Balance; + use frame::prelude::*; + + #[docify::export(config_v2)] + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type of the runtime. + type RuntimeEvent: From> + + IsType<::RuntimeEvent> + + TryInto>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + #[pallet::error] + pub enum Error { + /// Account does not exist. + NonExistentAccount, + /// Account does not have enough balance. 
+ InsufficientBalance, + } + + #[docify::export] + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transfer succeeded. + Transferred { from: T::AccountId, to: T::AccountId, amount: Balance }, + } + + #[pallet::call] + impl Pallet { + #[docify::export(transfer_v2)] + pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = + Balances::::get(&sender).ok_or(Error::::NonExistentAccount)?; + let remainder = + sender_balance.checked_sub(amount).ok_or(Error::::InsufficientBalance)?; + + Balances::::mutate(&dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, remainder); + + Self::deposit_event(Event::::Transferred { from: sender, to: dest, amount }); + + Ok(()) + } + } + + #[cfg(any(test, doc))] + pub mod tests { + use super::{super::pallet::tests::StateBuilder, *}; + use frame::testing_prelude::*; + const ALICE: u64 = 1; + const BOB: u64 = 2; + + #[docify::export] + pub mod runtime_v2 { + use super::*; + use crate::pallet_v2 as pallet_currency; + + construct_runtime!( + pub enum Runtime { + System: frame_system, + Currency: pallet_currency, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + type AccountId = u64; + } + + impl pallet_currency::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + } + } + + pub(crate) use runtime_v2::*; + + #[docify::export(transfer_works_v2)] + #[test] + fn transfer_works() { + StateBuilder::default().build_and_execute(|| { + // skip the genesis block, as events are not deposited there and we need them for + // the final assertion. + System::set_block_number(ALICE); + + // given the initial state, when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(ALICE), BOB, 50)); + + // then: + assert_eq!(Balances::::get(&ALICE), Some(50)); + assert_eq!(Balances::::get(&BOB), Some(150)); + assert_eq!(TotalIssuance::::get(), Some(200)); + + // now we can also check that an event has been deposited: + assert_eq!( + System::read_events_for_pallet::>(), + vec![Event::Transferred { from: ALICE, to: BOB, amount: 50 }] + ); + }); + } + } +} diff --git a/docs/sdk/packages/guides/first-runtime/Cargo.toml b/docs/sdk/packages/guides/first-runtime/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..303d5c5e7f5fc8a11c52d48d0105f4872e77bceb --- /dev/null +++ b/docs/sdk/packages/guides/first-runtime/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "polkadot-sdk-docs-first-runtime" +description = "A simple runtime created for the polkadot-sdk-docs guides" +version = "0.0.0" +license = "MIT-0" +authors.workspace = true +homepage.workspace = true +repository.workspace = true +edition.workspace = true +publish = false + +[lints] +workspace = true + +[dependencies] +codec = { workspace = true } +scale-info = { workspace = true } +serde_json = { workspace = true } + +# this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
+frame = { workspace = true, features = ["experimental", "runtime"] } + +# pallets that we want to use +pallet-balances = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } + +# other polkadot-sdk-deps +sp-keyring = { workspace = true } + +# local pallet templates +first-pallet = { workspace = true } + +docify = { workspace = true } + +[build-dependencies] +substrate-wasm-builder = { workspace = true, optional = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "serde_json/std", + + "frame/std", + + "pallet-balances/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + + "first-pallet/std", + "sp-keyring/std", + + "substrate-wasm-builder", +] diff --git a/docs/sdk/packages/guides/first-runtime/build.rs b/docs/sdk/packages/guides/first-runtime/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..b7676a70dfe843e2cd47fc600ef599bbe7bff591 --- /dev/null +++ b/docs/sdk/packages/guides/first-runtime/build.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +fn main() { + #[cfg(feature = "std")] + { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build(); + } +} diff --git a/docs/sdk/packages/guides/first-runtime/src/lib.rs b/docs/sdk/packages/guides/first-runtime/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..92d51962b6febb03a4da80b3003e70ca174bbaf8 --- /dev/null +++ b/docs/sdk/packages/guides/first-runtime/src/lib.rs @@ -0,0 +1,301 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Runtime used in `your_first_runtime`. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; +use alloc::{vec, vec::Vec}; +use first_pallet::pallet_v2 as our_first_pallet; +use frame::{ + prelude::*, + runtime::{apis, prelude::*}, +}; +use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, RuntimeDispatchInfo}; + +#[docify::export] +#[runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("first-runtime"), + impl_name: create_runtime_str!("first-runtime"), + authoring_version: 1, + spec_version: 0, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + system_version: 1, +}; + +#[docify::export(cr)] +construct_runtime!( + pub struct Runtime { + // Mandatory for all runtimes + System: frame_system, + + // A number of other pallets from FRAME. + Timestamp: pallet_timestamp, + Balances: pallet_balances, + Sudo: pallet_sudo, + TransactionPayment: pallet_transaction_payment, + + // Our local pallet + FirstPallet: our_first_pallet, + } +); + +#[docify::export_content] +mod runtime_types { + use super::*; + pub(super) type SignedExtra = ( + // `frame` already provides all the signed extensions from `frame-system`. We just add the + // one related to tx-payment here. + frame::runtime::types_common::SystemTransactionExtensionsOf, + pallet_transaction_payment::ChargeTransactionPayment, + ); + + pub(super) type Block = frame::runtime::types_common::BlockOf; + pub(super) type Header = HeaderFor; + + pub(super) type RuntimeExecutive = Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, + >; +} +use runtime_types::*; + +#[docify::export_content] +mod config_impls { + use super::*; + + parameter_types! { + pub const Version: RuntimeVersion = VERSION; + } + + #[derive_impl(frame_system::config_preludes::SolochainDefaultConfig)] + impl frame_system::Config for Runtime { + type Block = Block; + type Version = Version; + type AccountData = + pallet_balances::AccountData<::Balance>; + } + + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] + impl pallet_balances::Config for Runtime { + type AccountStore = System; + } + + #[derive_impl(pallet_sudo::config_preludes::TestDefaultConfig)] + impl pallet_sudo::Config for Runtime {} + + #[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig)] + impl pallet_timestamp::Config for Runtime {} + + #[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)] + impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = pallet_transaction_payment::FungibleAdapter; + // We specify a fixed length to fee here, which essentially means all transactions charge + // exactly 1 unit of fee. + type LengthToFee = FixedFee<1, ::Balance>; + type WeightToFee = NoFee<::Balance>; + } +} + +#[docify::export(our_config_impl)] +impl our_first_pallet::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +/// Provides getters for genesis configuration presets. +pub mod genesis_config_presets { + use super::*; + use crate::{ + interface::{Balance, MinimumBalance}, + BalancesConfig, RuntimeGenesisConfig, SudoConfig, + }; + use serde_json::Value; + + /// Returns a development genesis config preset. 
+ #[docify::export] + pub fn development_config_genesis() -> Value { + let endowment = >::get().max(1) * 1000; + let config = RuntimeGenesisConfig { + balances: BalancesConfig { + balances: AccountKeyring::iter() + .map(|a| (a.to_account_id(), endowment)) + .collect::>(), + }, + sudo: SudoConfig { key: Some(AccountKeyring::Alice.to_account_id()) }, + ..Default::default() + }; + + serde_json::to_value(config).expect("Could not build genesis config.") + } + + /// Get the set of the available genesis config presets. + #[docify::export] + pub fn get_preset(id: &PresetId) -> Option> { + let patch = match id.try_into() { + Ok(DEV_RUNTIME_PRESET) => development_config_genesis(), + _ => return None, + }; + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. qed.") + .into_bytes(), + ) + } + + /// List of supported presets. + #[docify::export] + pub fn preset_names() -> Vec { + vec![PresetId::from(DEV_RUNTIME_PRESET)] + } +} + +impl_runtime_apis! { + impl apis::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + RuntimeExecutive::execute_block(block) + } + + fn initialize_block(header: &Header) -> ExtrinsicInclusionMode { + RuntimeExecutive::initialize_block(header) + } + } + + impl apis::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> Vec { + Runtime::metadata_versions() + } + } + + impl apis::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ExtrinsicFor) -> ApplyExtrinsicResult { + RuntimeExecutive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> HeaderFor { + RuntimeExecutive::finalize_block() + } + + fn inherent_extrinsics(data: InherentData) -> Vec> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: InherentData, + ) -> CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl apis::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ExtrinsicFor, + block_hash: ::Hash, + ) -> TransactionValidity { + RuntimeExecutive::validate_transaction(source, tx, block_hash) + } + } + + impl apis::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &HeaderFor) { + RuntimeExecutive::offchain_worker(header) + } + } + + impl apis::SessionKeys for Runtime { + fn generate_session_keys(_seed: Option>) -> Vec { + Default::default() + } + + fn decode_session_keys( + _encoded: Vec, + ) -> Option, apis::KeyTypeId)>> { + Default::default() + } + } + + impl apis::AccountNonceApi for Runtime { + fn account_nonce(account: interface::AccountId) -> interface::Nonce { + System::account_nonce(account) + } + } + + impl apis::GenesisBuilder for Runtime { + fn build_state(config: Vec) -> GenesisBuilderResult { + build_state::(config) + } + + fn get_preset(id: &Option) -> Option> { + get_preset::(id, self::genesis_config_presets::get_preset) + } + + fn preset_names() -> Vec { + crate::genesis_config_presets::preset_names() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + interface::Balance, + > for Runtime { + fn query_info(uxt: ExtrinsicFor, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: ExtrinsicFor, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) 
-> interface::Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> interface::Balance { + TransactionPayment::length_to_fee(length) + } + } +} + +/// Just a handy re-definition of some types based on what is already provided to the pallet +/// configs. +pub mod interface { + use super::Runtime; + use frame::prelude::frame_system; + + pub type AccountId = ::AccountId; + pub type Nonce = ::Nonce; + pub type Hash = ::Hash; + pub type Balance = ::Balance; + pub type MinimumBalance = ::ExistentialDeposit; +} diff --git a/docs/sdk/src/external_resources.rs b/docs/sdk/src/external_resources.rs new file mode 100644 index 0000000000000000000000000000000000000000..939874d12f1371ed5c73432fe10189f34312b40c --- /dev/null +++ b/docs/sdk/src/external_resources.rs @@ -0,0 +1,14 @@ +//! # External Resources +//! +//! A non-exhaustive, un-opinionated list of external resources about Polkadot SDK. +//! +//! Unlike [`crate::guides`], or [`crate::polkadot_sdk::templates`] that contain material directly +//! maintained in the `polkadot-sdk` repository, the list of resources here are maintained by +//! third-parties, and are therefore subject to more variability. Any further resources may be added +//! by opening a pull request to the `polkadot-sdk` repository. +//! +//! - [Polkadot NFT Marketplace Tutorial by Polkadot Fellow Shawn Tabrizi](https://www.shawntabrizi.com/substrate-collectables-workshop/) +//! - [DOT Code School](https://dotcodeschool.com/) +//! - [Polkadot Developers](https://github.com/polkadot-developers/) +//! - [Polkadot Blockchain Academy](https://github.com/Polkadot-Blockchain-Academy) +//! - [Polkadot Wiki: Build](https://wiki.polkadot.network/docs/build-guide) diff --git a/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs index 812e674d163bb098ae34a35d9154ce49bd341245..2339088abed46c590e3aa5f1286862c89bf68ad2 100644 --- a/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs +++ b/docs/sdk/src/guides/enable_elastic_scaling_mvp.rs @@ -70,10 +70,11 @@ //! - Ensure enough coretime is assigned to the parachain. For maximum throughput the upper bound is //! 3 cores. //! -//!
<div class="warning">Phase 1 is not needed if using the polkadot-parachain binary -//! built from the latest polkadot-sdk release! Simply pass the -//! --experimental-use-slot-based parameter to the command line and jump to Phase -//! 2.</div>
+//!
<div class="warning">Phase 1 is NOT needed if using the polkadot-parachain or +//! polkadot-omni-node binary, or polkadot-omni-node-lib built from the +//! latest polkadot-sdk release! Simply pass the --experimental-use-slot-based +//! ([`polkadot_omni_node_lib::cli::Cli::experimental_use_slot_based`]) parameter to the command +//! line and jump to Phase 2.</div>
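For illustration, when taking the pre-built binary route described in the warning above, the flag is simply appended to the usual collator invocation; the binary name and chain-spec path below are placeholders that depend on your setup:

```text
polkadot-omni-node \
    --chain my-parachain-spec.json \
    --experimental-use-slot-based
```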
//! //! The following steps assume using the cumulus parachain template. //! diff --git a/docs/sdk/src/guides/enable_pov_reclaim.rs b/docs/sdk/src/guides/enable_pov_reclaim.rs index 13b27d18956b49fdb66b79fa44208321ad43c6f0..cb6960b3df4ef1b5a507eb083baba808c9e99a0f 100644 --- a/docs/sdk/src/guides/enable_pov_reclaim.rs +++ b/docs/sdk/src/guides/enable_pov_reclaim.rs @@ -58,9 +58,9 @@ //! > that this step in the guide was not //! > set up correctly. //! -//! ## 3. Add the SignedExtension to your runtime +//! ## 3. Add the TransactionExtension to your runtime //! -//! In your runtime, you will find a list of SignedExtensions. +//! In your runtime, you will find a list of TransactionExtensions. //! To enable the reclaiming, //! add [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim) //! to that list. For maximum efficiency, make sure that `StorageWeightReclaim` is last in the list. diff --git a/docs/sdk/src/guides/mod.rs b/docs/sdk/src/guides/mod.rs index a7fd146ccdf3a7be21e0a17382ee3abdaa4d961a..747128a728d0ae9f6a6d0980dbbe348a5ce135e8 100644 --- a/docs/sdk/src/guides/mod.rs +++ b/docs/sdk/src/guides/mod.rs @@ -1,14 +1,22 @@ //! # Polkadot SDK Docs Guides //! -//! This crate contains a collection of guides that are foundational to the developers of Polkadot -//! SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. +//! This crate contains a collection of guides that are foundational to the developers of +//! Polkadot SDK. They are common user-journeys that are traversed in the Polkadot ecosystem. //! -//! 1. [`crate::guides::your_first_pallet`] is your starting point with Polkadot SDK. It contains -//! the basics of -//! building a simple crypto currency with FRAME. -//! 2. [`crate::guides::your_first_runtime`] is the next step in your journey. It contains the -//! basics of building a runtime that contains this pallet, plus a few common pallets from FRAME. +//! The main user-journey covered by these guides is: //! +//! * [`your_first_pallet`], where you learn what a FRAME pallet is, and write your first +//! application logic. +//! * [`your_first_runtime`], where you learn how to compile your pallets into a WASM runtime. +//! * [`your_first_node`], where you learn how to run the said runtime in a node. +//! +//! > By this step, you have already launched a full Polkadot-SDK-based blockchain! +//! +//! Once done, feel free to step up into one of our templates: [`crate::polkadot_sdk::templates`]. +//! +//! [`your_first_pallet`]: crate::guides::your_first_pallet +//! [`your_first_runtime`]: crate::guides::your_first_runtime +//! [`your_first_node`]: crate::guides::your_first_node //! //! Other guides are related to other miscellaneous topics and are listed as modules below. @@ -19,19 +27,12 @@ pub mod your_first_pallet; /// compiling it to [WASM](crate::polkadot_sdk::substrate#wasm-build). pub mod your_first_runtime; -// /// Running the given runtime with a node. No specific consensus mechanism is used at this stage. -// TODO -// pub mod your_first_node; - -// /// How to enhance a given runtime and node to be cumulus-enabled, run it as a parachain -// /// and connect it to a relay-chain. -// TODO -// pub mod cumulus_enabled_parachain; +/// Running the given runtime with a node. No specific consensus mechanism is used at this stage. +pub mod your_first_node; -// /// How to make a given runtime XCM-enabled, capable of sending messages (`Transact`) between -// /// itself and the relay chain to which it is connected. 
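To make step 3 of the PoV-reclaim guide above concrete, the runtime's transaction-extension tuple ends up looking much like the one used by the cumulus test service earlier in this diff. The following is only a condensed sketch (the extension set is abridged and `Runtime` stands for your runtime type), not the exact list any particular runtime uses:

```rust
/// The extension to the basic transaction logic.
pub type TxExtension = (
    frame_system::CheckNonZeroSender<Runtime>,
    frame_system::CheckSpecVersion<Runtime>,
    frame_system::CheckGenesis<Runtime>,
    // ... the remaining frame-system checks (nonce, mortality, etc.) go here ...
    frame_system::CheckWeight<Runtime>,
    pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
    // Kept last in the list, as the guide recommends for maximum efficiency.
    cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
);
```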
-// TODO -// pub mod xcm_enabled_parachain; +/// How to enhance a given runtime and node to be cumulus-enabled, run it as a parachain +/// and connect it to a relay-chain. +// pub mod your_first_parachain; /// How to enable storage weight reclaiming in a parachain node and runtime. pub mod enable_pov_reclaim; diff --git a/docs/sdk/src/guides/your_first_node.rs b/docs/sdk/src/guides/your_first_node.rs index d12349c990632deb03bf24006ffc63b493347715..da37c11c206f3fc2417277b712738efab7700b4a 100644 --- a/docs/sdk/src/guides/your_first_node.rs +++ b/docs/sdk/src/guides/your_first_node.rs @@ -1 +1,314 @@ //! # Your first Node +//! +//! In this guide, you will learn how to run a runtime, such as the one created in +//! [`your_first_runtime`], in a node. Within the context of this guide, we will focus on running +//! the runtime with an [`omni-node`]. Please first read this page to learn about the OmniNode, and +//! other options when it comes to running a node. +//! +//! [`your_first_runtime`] is a runtime with no consensus related code, and therefore can only be +//! executed with a node that also expects no consensus ([`sc_consensus_manual_seal`]). +//! `polkadot-omni-node`'s [`--dev-block-time`] precisely does this. +//! +//! > All of the following steps are coded as unit tests of this module. Please see `Source` of the +//! > page for more information. +//! +//! ## Running The Omni Node +//! +//! ### Installs +//! +//! The `polkadot-omni-node` can either be downloaded from the latest [Release](https://github.com/paritytech/polkadot-sdk/releases/) of `polkadot-sdk`, +//! or installed using `cargo`: +//! +//! ```text +//! cargo install polkadot-omni-node +//! ``` +//! +//! Next, we need to install the [`chain-spec-builder`]. This is the tool that allows us to build +//! chain-specifications, through interacting with the genesis related APIs of the runtime, as +//! described in [`crate::guides::your_first_runtime#genesis-configuration`]. +//! +//! ```text +//! cargo install staging-chain-spec-builder +//! ``` +//! +//! > The name of the crate is prefixed with `staging` as the crate name `chain-spec-builder` on +//! > crates.io is already taken and is not controlled by `polkadot-sdk` developers. +//! +//! ### Building Runtime +//! +//! Next, we need to build the corresponding runtime that we wish to interact with. +//! +//! ```text +//! cargo build --release -p path-to-runtime +//! ``` +//! Equivalent code in tests: +#![doc = docify::embed!("./src/guides/your_first_node.rs", build_runtime)] +//! +//! This creates the wasm file under `./target/{release}/wbuild/release` directory. +//! +//! ### Building Chain Spec +//! +//! Next, we can generate the corresponding chain-spec file. For this example, we will use the +//! `development` (`sp_genesis_config::DEVELOPMENT`) preset. +//! +//! Note that we intend to run this chain-spec with `polkadot-omni-node`, which is tailored for +//! running parachains. This requires the chain-spec to always contain the `para_id` and a +//! `relay_chain` fields, which are provided below as CLI arguments. +//! +//! ```text +//! chain-spec-builder \ +//! -c \ +//! create \ +//! --para-id 42 \ +//! --relay-chain dontcare \ +//! --runtime polkadot_sdk_docs_first_runtime.wasm \ +//! named-preset development +//! ``` +//! +//! Equivalent code in tests: +#![doc = docify::embed!("./src/guides/your_first_node.rs", csb)] +//! +//! +//! ### Running `polkadot-omni-node` +//! +//! Finally, we can run the node with the generated chain-spec file. We can also specify the block +//! 
time using the `--dev-block-time` flag. +//! +//! ```text +//! polkadot-omni-node \ +//! --tmp \ +//! --dev-block-time 1000 \ +//! --chain .json +//! ``` +//! +//! > Note that we always prefer to use `--tmp` for testing, as it will save the chain state to a +//! > temporary folder, allowing the chain-to be easily restarted without `purge-chain`. See +//! > [`sc_cli::commands::PurgeChainCmd`] and [`sc_cli::commands::RunCmd::tmp`] for more info. +//! +//! This will start the node and import the blocks. Note while using `--dev-block-time`, the node +//! will use the testing-specific manual-seal consensus. This is an efficient way to test the +//! application logic of your runtime, without needing to yet care about consensus, block +//! production, relay-chain and so on. +//! +//! ### Next Steps +//! +//! * See the rest of the steps in [`crate::reference_docs::omni_node#user-journey`]. +//! +//! [`runtime`]: crate::reference_docs::glossary#runtime +//! [`node`]: crate::reference_docs::glossary#node +//! [`build_config`]: first_runtime::Runtime#method.build_config +//! [`omni-node`]: crate::reference_docs::omni_node +//! [`--dev-block-time`]: (polkadot_omni_node_lib::cli::Cli::dev_block_time) + +#[cfg(test)] +mod tests { + use assert_cmd::Command; + use rand::Rng; + use sc_chain_spec::{DEV_RUNTIME_PRESET, LOCAL_TESTNET_RUNTIME_PRESET}; + use sp_genesis_builder::PresetId; + use std::path::PathBuf; + + const PARA_RUNTIME: &'static str = "parachain-template-runtime"; + const FIRST_RUNTIME: &'static str = "polkadot-sdk-docs-first-runtime"; + const MINIMAL_RUNTIME: &'static str = "minimal-template-runtime"; + + const CHAIN_SPEC_BUILDER: &'static str = "chain-spec-builder"; + const OMNI_NODE: &'static str = "polkadot-omni-node"; + + fn cargo() -> Command { + Command::new(std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string())) + } + + fn get_target_directory() -> Option { + let output = cargo().arg("metadata").arg("--format-version=1").output().ok()?; + + if !output.status.success() { + return None; + } + + let metadata: serde_json::Value = serde_json::from_slice(&output.stdout).ok()?; + let target_directory = metadata["target_directory"].as_str()?; + + Some(PathBuf::from(target_directory)) + } + + fn find_release_binary(name: &str) -> Option { + let target_dir = get_target_directory()?; + let release_path = target_dir.join("release").join(name); + + if release_path.exists() { + Some(release_path) + } else { + None + } + } + + fn find_wasm(runtime_name: &str) -> Option { + let target_dir = get_target_directory()?; + let wasm_path = target_dir + .join("release") + .join("wbuild") + .join(runtime_name) + .join(format!("{}.wasm", runtime_name.replace('-', "_"))); + + if wasm_path.exists() { + Some(wasm_path) + } else { + None + } + } + + fn maybe_build_runtimes() { + if find_wasm(&PARA_RUNTIME).is_none() { + println!("Building parachain-template-runtime..."); + Command::new("cargo") + .arg("build") + .arg("--release") + .arg("-p") + .arg(PARA_RUNTIME) + .assert() + .success(); + } + if find_wasm(&FIRST_RUNTIME).is_none() { + println!("Building polkadot-sdk-docs-first-runtime..."); + #[docify::export_content] + fn build_runtime() { + Command::new("cargo") + .arg("build") + .arg("--release") + .arg("-p") + .arg(FIRST_RUNTIME) + .assert() + .success(); + } + build_runtime() + } + + assert!(find_wasm(PARA_RUNTIME).is_some()); + assert!(find_wasm(FIRST_RUNTIME).is_some()); + } + + fn maybe_build_chain_spec_builder() { + if find_release_binary(CHAIN_SPEC_BUILDER).is_none() { + println!("Building 
chain-spec-builder..."); + Command::new("cargo") + .arg("build") + .arg("--release") + .arg("-p") + .arg("staging-chain-spec-builder") + .assert() + .success(); + } + assert!(find_release_binary(CHAIN_SPEC_BUILDER).is_some()); + } + + fn maybe_build_omni_node() { + if find_release_binary(OMNI_NODE).is_none() { + println!("Building polkadot-omni-node..."); + Command::new("cargo") + .arg("build") + .arg("--release") + .arg("-p") + .arg("polkadot-omni-node") + .assert() + .success(); + } + } + + fn test_runtime_preset(runtime: &'static str, block_time: u64, maybe_preset: Option) { + sp_tracing::try_init_simple(); + maybe_build_runtimes(); + maybe_build_chain_spec_builder(); + maybe_build_omni_node(); + + let chain_spec_builder = + find_release_binary(&CHAIN_SPEC_BUILDER).expect("we built it above; qed"); + let omni_node = find_release_binary(OMNI_NODE).expect("we built it above; qed"); + let runtime_path = find_wasm(runtime).expect("we built it above; qed"); + + let random_seed: u32 = rand::thread_rng().gen(); + let chain_spec_file = std::env::current_dir() + .unwrap() + .join(format!("{}_{}_{}.json", runtime, block_time, random_seed)); + + Command::new(chain_spec_builder) + .args(["-c", chain_spec_file.to_str().unwrap()]) + .arg("create") + .args(["--para-id", "1000", "--relay-chain", "dontcare"]) + .args(["-r", runtime_path.to_str().unwrap()]) + .args(match maybe_preset { + Some(preset) => vec!["named-preset".to_string(), preset.to_string()], + None => vec!["default".to_string()], + }) + .assert() + .success(); + + let output = Command::new(omni_node) + .arg("--tmp") + .args(["--chain", chain_spec_file.to_str().unwrap()]) + .args(["--dev-block-time", block_time.to_string().as_str()]) + .timeout(std::time::Duration::from_secs(10)) + .output() + .unwrap(); + + std::fs::remove_file(chain_spec_file).unwrap(); + + // uncomment for debugging. + // println!("output: {:?}", output); + + let expected_blocks = (10_000 / block_time).saturating_div(2); + assert!(expected_blocks > 0, "test configuration is bad, should give it more time"); + assert!(String::from_utf8(output.stderr) + .unwrap() + .contains(format!("Imported #{}", expected_blocks).to_string().as_str())); + } + + #[test] + fn works_with_different_block_times() { + test_runtime_preset(PARA_RUNTIME, 100, Some(DEV_RUNTIME_PRESET.into())); + test_runtime_preset(PARA_RUNTIME, 3000, Some(DEV_RUNTIME_PRESET.into())); + + // we need this snippet just for docs + #[docify::export_content(csb)] + fn build_para_chain_spec_works() { + let chain_spec_builder = find_release_binary(&CHAIN_SPEC_BUILDER).unwrap(); + let runtime_path = find_wasm(PARA_RUNTIME).unwrap(); + let output = "/tmp/demo-chain-spec.json"; + Command::new(chain_spec_builder) + .args(["-c", output]) + .arg("create") + .args(["--para-id", "1000", "--relay-chain", "dontcare"]) + .args(["-r", runtime_path.to_str().unwrap()]) + .args(["named-preset", "development"]) + .assert() + .success(); + std::fs::remove_file(output).unwrap(); + } + build_para_chain_spec_works(); + } + + #[test] + fn parachain_runtime_works() { + // TODO: None doesn't work. But maybe it should? it would be misleading as many users might + // use it. 
+ [Some(DEV_RUNTIME_PRESET.into()), Some(LOCAL_TESTNET_RUNTIME_PRESET.into())] + .into_iter() + .for_each(|preset| { + test_runtime_preset(PARA_RUNTIME, 1000, preset); + }); + } + + #[test] + fn minimal_runtime_works() { + [None, Some(DEV_RUNTIME_PRESET.into())].into_iter().for_each(|preset| { + test_runtime_preset(MINIMAL_RUNTIME, 1000, preset); + }); + } + + #[test] + fn guide_first_runtime_works() { + [Some(DEV_RUNTIME_PRESET.into())].into_iter().for_each(|preset| { + test_runtime_preset(FIRST_RUNTIME, 1000, preset); + }); + } +} diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index fcfaab00e5522e054326aff5d273fa86b9d9b3da..aef8981b4d4a3bdb726fa0fa5988c9b6eaed41a4 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -47,7 +47,7 @@ //! //! [`pallet::config`] and [`pallet::pallet`] are both mandatory parts of any //! pallet. Refer to the documentation of each to get an overview of what they do. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", shell_pallet)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", shell_pallet)] //! //! All of the code that follows in this guide should live inside of the `mod pallet`. //! @@ -61,17 +61,17 @@ //! > For the rest of this guide, we will opt for a balance type of `u128`. For the sake of //! > simplicity, we are hardcoding this type. In a real pallet is best practice to define it as a //! > generic bounded type in the `Config` trait, and then specify it in the implementation. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balance)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", Balance)] //! //! The definition of these two storage items, based on [`pallet::storage`] details, is as follows: -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", TotalIssuance)] -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balances)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", TotalIssuance)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", Balances)] //! //! ### Dispatchables //! //! Next, we will define the dispatchable functions. As per [`pallet::call`], these will be defined //! as normal `fn`s attached to `struct Pallet`. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_pallet)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", impl_pallet)] //! //! The logic of these functions is self-explanatory. Instead, we will focus on the FRAME-related //! details: @@ -108,14 +108,14 @@ //! How we handle error in the above snippets is fairly rudimentary. Let's look at how this can be //! improved. First, we can use [`frame::prelude::ensure`] to express the error slightly better. //! This macro will call `.into()` under the hood. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_better)] //! //! Moreover, you will learn in the [Defensive Programming //! section](crate::reference_docs::defensive_programming) that it is always recommended to use //! safe arithmetic operations in your runtime. By using [`frame::traits::CheckedSub`], we can not //! only take a step in that direction, but also improve the error handing and make it slightly more //! ergonomic. 
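Stripped of the pallet scaffolding, the checked pattern used by the embedded `transfer_better_checked` below is plain Rust; a tiny self-contained sketch (the `checked_debit` helper name is purely for illustration):

```rust
/// Illustration only: subtract `amount` without a separate balance check.
fn checked_debit(sender_balance: u128, amount: u128) -> Result<u128, &'static str> {
    // `checked_sub` returns `None` on underflow; `ok_or` maps that to an error,
    // replacing the manual `if sender_balance < amount { return Err(..) }` branch.
    sender_balance.checked_sub(amount).ok_or("InsufficientBalance")
}
```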
-#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better_checked)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_better_checked)] //! //! This is more or less all the logic that there is in this basic currency pallet! //! @@ -145,7 +145,7 @@ //! through [`frame::runtime::prelude::construct_runtime`]. All runtimes also have to include //! [`frame::prelude::frame_system`]. So we expect to see a runtime with two pallet, `frame_system` //! and the one we just wrote. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", runtime)] //! //! > [`frame::pallet_macros::derive_impl`] is a FRAME feature that enables developers to have //! > defaults for associated types. @@ -182,7 +182,7 @@ //! to learn is that all of your pallet testing code should be wrapped in //! [`frame::testing_prelude::TestState`]. This is a type that provides access to an in-memory state //! to be used in our tests. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", first_test)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", first_test)] //! //! In the first test, we simply assert that there is no total issuance, and no balance associated //! with Alice's account. Then, we mint some balance into Alice's, and re-check. @@ -206,16 +206,16 @@ //! //! Let's see how we can implement a better test setup using this pattern. First, we define a //! `struct StateBuilder`. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", StateBuilder)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", StateBuilder)] //! //! This struct is meant to contain the same list of accounts and balances that we want to have at //! the beginning of each block. We hardcoded this to `let accounts = vec![(ALICE, 100), (2, 100)];` //! so far. Then, if desired, we attach a default value for this struct. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", default_state_builder)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", default_state_builder)] //! //! Like any other builder pattern, we attach functions to the type to mutate its internal //! properties. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_state_builder_add)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", impl_state_builder_add)] //! //! Finally --the useful part-- we write our own custom `build_and_execute` function on //! this type. This function will do multiple things: @@ -227,23 +227,23 @@ //! after each test. For example, in this test, we do some additional checking about the //! correctness of the `TotalIssuance`. We leave it up to you as an exercise to learn why the //! assertion should always hold, and how it is checked. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_state_builder_build)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", impl_state_builder_build)] //! //! We can write tests that specifically check the initial state, and making sure our `StateBuilder` //! is working exactly as intended. 
-#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", state_builder_works)] -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", state_builder_add_balance)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", state_builder_works)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", state_builder_add_balance)] //! //! ### More Tests //! //! Now that we have a more ergonomic test setup, let's see how a well written test for transfer and //! mint would look like. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_works)] -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", mint_works)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_works)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", mint_works)] //! //! It is always a good idea to build a mental model where you write *at least* one test for each //! "success path" of a dispatchable, and one test for each "failure path", such as: -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_from_non_existent_fails)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_from_non_existent_fails)] //! //! We leave it up to you to write a test that triggers the `InsufficientBalance` error. //! @@ -272,8 +272,8 @@ //! With the explanation out of the way, let's see how these components can be added. Both follow a //! fairly familiar syntax: normal Rust enums, with extra [`pallet::event`] and [`pallet::error`] //! attributes attached. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)] -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", Event)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", Error)] //! //! One slightly custom part of this is the [`pallet::generate_deposit`] part. Without going into //! too much detail, in order for a pallet to emit events to the rest of the system, it needs to do @@ -288,17 +288,17 @@ //! 2. But, doing this conversion and storing is too much to expect each pallet to define. FRAME //! provides a default way of storing events, and this is what [`pallet::generate_deposit`] is //! doing. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", config_v2)] //! //! > These `Runtime*` types are better explained in //! > [`crate::reference_docs::frame_runtime_types`]. //! //! Then, we can rewrite the `transfer` dispatchable as such: -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_v2)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", transfer_v2)] //! //! Then, notice how now we would need to provide this `type RuntimeEvent` in our test runtime //! setup. -#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime_v2)] +#![doc = docify::embed!("./packages/guides/first-pallet/src/lib.rs", runtime_v2)] //! //! In this snippet, the actual `RuntimeEvent` type (right hand side of `type RuntimeEvent = //! 
RuntimeEvent`) is generated by diff --git a/docs/sdk/src/guides/your_first_pallet/with_event.rs b/docs/sdk/src/guides/your_first_pallet/with_event.rs deleted file mode 100644 index a5af29c9c319498f4aee62e591d922144db4d8b0..0000000000000000000000000000000000000000 --- a/docs/sdk/src/guides/your_first_pallet/with_event.rs +++ /dev/null @@ -1,101 +0,0 @@ -#[frame::pallet(dev_mode)] -pub mod pallet { - use frame::prelude::*; - - #[docify::export] - pub type Balance = u128; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - pub struct Pallet(_); - - #[docify::export] - /// Single storage item, of type `Balance`. - #[pallet::storage] - pub type TotalIssuance = StorageValue<_, Balance>; - - #[docify::export] - /// A mapping from `T::AccountId` to `Balance` - #[pallet::storage] - pub type Balances = StorageMap<_, _, T::AccountId, Balance>; - - #[docify::export(impl_pallet)] - #[pallet::call] - impl Pallet { - /// An unsafe mint that can be called by anyone. Not a great idea. - pub fn mint_unsafe( - origin: T::RuntimeOrigin, - dest: T::AccountId, - amount: Balance, - ) -> DispatchResult { - // ensure that this is a signed account, but we don't really check `_anyone`. - let _anyone = ensure_signed(origin)?; - - // update the balances map. Notice how all `` remains as ``. - Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); - // update total issuance. - TotalIssuance::::mutate(|t| *t = Some(t.unwrap_or(0) + amount)); - - Ok(()) - } - - /// Transfer `amount` from `origin` to `dest`. - pub fn transfer( - origin: T::RuntimeOrigin, - dest: T::AccountId, - amount: Balance, - ) -> DispatchResult { - let sender = ensure_signed(origin)?; - - // ensure sender has enough balance, and if so, calculate what is left after `amount`. - let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; - if sender_balance < amount { - return Err("NotEnoughBalance".into()) - } - let remainder = sender_balance - amount; - - // update sender and dest balances. - Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); - Balances::::insert(&sender, remainder); - - Ok(()) - } - } - - #[allow(unused)] - impl Pallet { - #[docify::export] - pub fn transfer_better( - origin: T::RuntimeOrigin, - dest: T::AccountId, - amount: Balance, - ) -> DispatchResult { - let sender = ensure_signed(origin)?; - - let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; - ensure!(sender_balance >= amount, "NotEnoughBalance"); - let remainder = sender_balance - amount; - - // .. snip - Ok(()) - } - - #[docify::export] - /// Transfer `amount` from `origin` to `dest`. - pub fn transfer_better_checked( - origin: T::RuntimeOrigin, - dest: T::AccountId, - amount: Balance, - ) -> DispatchResult { - let sender = ensure_signed(origin)?; - - let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; - let remainder = sender_balance.checked_sub(amount).ok_or("NotEnoughBalance")?; - - // .. snip - Ok(()) - } - } -} diff --git a/docs/sdk/src/guides/your_first_runtime.rs b/docs/sdk/src/guides/your_first_runtime.rs index c58abc1120c13f5d9d724f7dd78464380f8840bf..79f01e66979aa72b3206292f19bbc5fe2406a80a 100644 --- a/docs/sdk/src/guides/your_first_runtime.rs +++ b/docs/sdk/src/guides/your_first_runtime.rs @@ -1,3 +1,170 @@ //! # Your first Runtime //! -//! 🚧 +//! This guide will walk you through the steps to add your pallet to a runtime. +//! +//! The good news is, in [`crate::guides::your_first_pallet`], we have already created a _test_ +//! 
runtime that was used for testing, and a real runtime is not that much different! +//! +//! ## Setup +//! +//! A runtime shares a few similar setup requirements as with a pallet: +//! +//! * importing [`frame`], [`codec`], and [`scale_info`] crates. +//! * following the [`std` feature-gating](crate::polkadot_sdk::substrate#wasm-build) pattern. +//! +//! But, more specifically, it also contains: +//! +//! * a `build.rs` that uses [`substrate_wasm_builder`]. This entails declaring +//! `[build-dependencies]` in the Cargo manifest file: +//! +//! ```ignore +//! [build-dependencies] +//! substrate-wasm-builder = { ... } +//! ``` +//! +//! >Note that a runtime must always be one-runtime-per-crate. You cannot define multiple runtimes +//! per rust crate. +//! +//! You can find the full code of this guide in [`first_runtime`]. +//! +//! ## Your First Runtime +//! +//! The first new property of a real runtime that it must define its +//! [`frame::runtime::prelude::RuntimeVersion`]: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", VERSION)] +//! +//! The version contains a number of very important fields, such as `spec_version` and `spec_name` +//! that play an important role in identifying your runtime and its version, more importantly in +//! runtime upgrades. More about runtime upgrades in +//! [`crate::reference_docs::frame_runtime_upgrades_and_migrations`]. +//! +//! Then, a real runtime also contains the `impl` of all individual pallets' `trait Config` for +//! `struct Runtime`, and a [`frame::runtime::prelude::construct_runtime`] macro that amalgamates +//! them all. +//! +//! In the case of our example: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", our_config_impl)] +//! +//! In this example, we bring in a number of other pallets from [`frame`] into the runtime, each of +//! their `Config` need to be implemented for `struct Runtime`: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", config_impls)] +//! +//! Notice how we use [`frame::pallet_macros::derive_impl`] to provide "default" configuration items +//! for each pallet. Feel free to dive into the definition of each default prelude (eg. +//! [`frame::prelude::frame_system::pallet::config_preludes`]) to learn more which types are exactly +//! used. +//! +//! Recall that in test runtime in [`crate::guides::your_first_pallet`], we provided `type AccountId +//! = u64` to `frame_system`, while in this case we rely on whatever is provided by +//! [`SolochainDefaultConfig`], which is indeed a "real" 32 byte account id. +//! +//! Then, a familiar instance of `construct_runtime` amalgamates all of the pallets: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", cr)] +//! +//! Recall from [`crate::reference_docs::wasm_meta_protocol`] that every (real) runtime needs to +//! implement a set of runtime APIs that will then let the node to communicate with it. The final +//! steps of crafting a runtime are related to achieving exactly this. +//! +//! First, we define a number of types that eventually lead to the creation of an instance of +//! [`frame::runtime::prelude::Executive`]. The executive is a handy FRAME utility that, through +//! amalgamating all pallets and further types, implements some of the very very core pieces of the +//! runtime logic, such as how blocks are executed and other runtime-api implementations. +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", runtime_types)] +//! +//! 
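Condensed from [`first_runtime`], the embedded `runtime_types` module amounts to roughly the following sketch; `SystemTransactionExtensionsOf` already bundles every `frame-system` extension, so only the transaction-payment extension is added on top:

```rust
/// The transaction extensions: everything provided by `frame-system`, plus transaction payment.
pub(super) type SignedExtra = (
    frame::runtime::types_common::SystemTransactionExtensionsOf<Runtime>,
    pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);

/// A block type wired up for this runtime and its extensions.
pub(super) type Block = frame::runtime::types_common::BlockOf<Runtime, SignedExtra>;

/// `Executive` amalgamates the system, the block type and all pallets, and provides the
/// block-execution logic that the runtime-api implementations below forward to.
pub(super) type RuntimeExecutive = Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
>;
```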
Finally, we use [`frame::runtime::prelude::impl_runtime_apis`] to implement all of the runtime +//! APIs that the runtime wishes to expose. As you will see in the code, most of these runtime API +//! implementations merely forward calls to `RuntimeExecutive`, which handles the actual +//! logic. Given that the implementation block is somewhat large, we won't repeat it here. You can +//! look for `impl_runtime_apis!` in [`first_runtime`]. +//! +//! ```ignore +//! impl_runtime_apis! { +//! impl apis::Core<Block> for Runtime { +//! fn version() -> RuntimeVersion { +//! VERSION +//! } +//! +//! fn execute_block(block: Block) { +//! RuntimeExecutive::execute_block(block) +//! } +//! +//! fn initialize_block(header: &Header) -> ExtrinsicInclusionMode { +//! RuntimeExecutive::initialize_block(header) +//! } +//! } +//! +//! // many more trait impls... +//! } +//! ``` +//! +//! And that more or less covers the details of how you would write a real runtime! +//! +//! Once you compile a crate that contains a runtime as above, simply running `cargo build` will +//! generate the wasm blobs and place them under `./target/release/wbuild`, as explained +//! [here](crate::polkadot_sdk::substrate#wasm-build). +//! +//! ## Genesis Configuration +//! +//! Every runtime specifies a number of runtime APIs that help the outer world (most notably, a +//! `node`) know what the genesis state of this runtime is. These APIs are then used to generate +//! what is known as a **Chain Specification, or chain spec for short**. A chain spec is the +//! primary way to run a new chain. +//! +//! These APIs are defined in [`sp_genesis_builder`], and are re-exposed as a part of +//! [`frame::runtime::apis`]. Therefore, the implementation blocks can be found inside of +//! `impl_runtime_apis!` similar to: +//! +//! ```ignore +//! impl_runtime_apis! { +//! impl apis::GenesisBuilder<Block> for Runtime { +//! fn build_state(config: Vec<u8>) -> GenesisBuilderResult { +//! build_state::<RuntimeGenesisConfig>(config) +//! } +//! +//! fn get_preset(id: &Option<PresetId>) -> Option<Vec<u8>> { +//! get_preset::<RuntimeGenesisConfig>(id, self::genesis_config_presets::get_preset) +//! } +//! +//! fn preset_names() -> Vec<PresetId> { +//! crate::genesis_config_presets::preset_names() +//! } +//! } +//! +//! } +//! ``` +//! +//! The implementation of these functions can naturally vary from one runtime to another, but the +//! overall pattern is common. For the case of this runtime, we do the following: +//! +//! 1. Expose one non-default preset, namely [`sp_genesis_builder::DEV_RUNTIME_PRESET`]. This means +//! our runtime has two "presets" of genesis state in total: `DEV_RUNTIME_PRESET` and `None`. +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", preset_names)] +//! +//! For `build_state` and `get_preset`, we use the helper functions provided by FRAME: +//! +//! * [`frame::runtime::prelude::build_state`] and [`frame::runtime::prelude::get_preset`]. +//! +//! Indeed, our runtime needs to specify what its `DEV_RUNTIME_PRESET` genesis state should be like: +#![doc = docify::embed!("./packages/guides/first-runtime/src/lib.rs", development_config_genesis)] +//! +//! For more in-depth information about `GenesisConfig`, `ChainSpec`, the `GenesisBuilder` API and +//! `chain-spec-builder`, see [`crate::reference_docs::chain_spec_genesis`]. +//! +//! ## Next Step +//! +//! See [`crate::guides::your_first_node`]. +//! +//! ## Further Reading +//! +//! 1. To learn more about signed extensions, see [`crate::reference_docs::signed_extensions`]. +//! 2.
`AllPalletsWithSystem` is also generated by `construct_runtime`, as explained in +//! [`crate::reference_docs::frame_runtime_types`]. +//! 3. `Executive` supports more generics, most notably allowing the runtime to configure more +//! runtime migrations, as explained in +//! [`crate::reference_docs::frame_runtime_upgrades_and_migrations`]. +//! 4. Learn more about adding and implementing runtime apis in +//! [`crate::reference_docs::custom_runtime_api_rpc`]. +//! 5. To see a complete example of a runtime+pallet that is similar to this guide, please see +//! [`crate::polkadot_sdk::templates`]. +//! +//! [`SolochainDefaultConfig`]: struct@frame_system::pallet::config_preludes::SolochainDefaultConfig diff --git a/docs/sdk/src/lib.rs b/docs/sdk/src/lib.rs index 35f73c290bf2c676bb010caee3a362fb5e14ee2b..e2c5fc93479cd8bdea3e8e330a068f045e52488f 100644 --- a/docs/sdk/src/lib.rs +++ b/docs/sdk/src/lib.rs @@ -5,9 +5,6 @@ //! This crate is a *minimal*, but *always-accurate* source of information for those wishing to //! build on the Polkadot SDK. //! -//! > **Work in Progress**: This crate is under heavy development. Expect content to be moved and -//! > changed. Do not use links to this crate yet. See [`meta_contributing`] for more information. -//! //! ## Getting Started //! //! We suggest the following reading sequence: @@ -15,7 +12,8 @@ //! - Start by learning about the the [`polkadot_sdk`], its structure and context. //! - Then, head over to the [`guides`]. This modules contains in-depth guides about the most //! important user-journeys of the Polkadot SDK. -//! - Whilst reading the guides, you might find back-links to [`reference_docs`]. +//! - Whilst reading the guides, you might find back-links to [`reference_docs`]. +//! - [`external_resources`] for a list of 3rd party guides and tutorials. //! - Finally, is the parent website of this crate that contains the //! list of further tools related to the Polkadot SDK. //! @@ -35,9 +33,13 @@ /// how one can contribute to it. pub mod meta_contributing; +/// A list of external resources and learning material about Polkadot SDK. +pub mod external_resources; + /// In-depth guides about the most common components of the Polkadot SDK. They are slightly more /// high level and broad than [`reference_docs`]. pub mod guides; + /// An introduction to the Polkadot SDK. Read this module to learn about the structure of the SDK, /// the tools that are provided as a part of it, and to gain a high level understanding of each. pub mod polkadot_sdk; diff --git a/docs/sdk/src/meta_contributing.rs b/docs/sdk/src/meta_contributing.rs index e1297151b2312f9828bdddecb4fbbb76995132cb..d68d9bca18b11772016359b56b24af7b4fe9fe6f 100644 --- a/docs/sdk/src/meta_contributing.rs +++ b/docs/sdk/src/meta_contributing.rs @@ -69,7 +69,8 @@ //! > what topics are already covered in this crate, and how you can build on top of the information //! > that they already pose, rather than repeating yourself**. //! -//! For more details see the [latest documenting guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/DOCUMENTATION_GUIDELINES.md). +//! For more details see the [latest documenting +//! guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/DOCUMENTATION_GUIDELINES.md). //! //! #### Example: Explaining `#[pallet::call]` //! @@ -132,6 +133,13 @@ //! compromise, but in the long term, we should work towards finding a way to maintain different //! revisions of this crate. //! +//! ## Versioning +//! +//! 
So long as not deployed in `crates.io`, please notice that all of the information in this crate, +//! namely in [`crate::guides`] and such are compatible with the master branch of `polkadot-sdk`. A +//! few solutions have been proposed to improve this, please see +//! [here](https://github.com/paritytech/polkadot-sdk/issues/6191). +//! //! ## How to Develop Locally //! //! To view the docs specific [`crate`] locally for development, including the correct HTML headers diff --git a/docs/sdk/src/polkadot_sdk/mod.rs b/docs/sdk/src/polkadot_sdk/mod.rs index 32cad72fba7e2e3b3bc9b74994309076eae43114..bf7346b871a117a49b58adc1a4bb5b2023bd50a3 100644 --- a/docs/sdk/src/polkadot_sdk/mod.rs +++ b/docs/sdk/src/polkadot_sdk/mod.rs @@ -75,6 +75,26 @@ //! runtimes are located under the //! [`polkadot-fellows/runtimes`](https://github.com/polkadot-fellows/runtimes) repository. //! +//! ### Binaries +//! +//! The main binaries that are part of the Polkadot SDK are: + +//! * [`polkadot`]: The Polkadot relay chain node binary, as noted above. +//! * [`polkadot-omni-node`]: A white-labeled parachain collator node. See more in +//! [`crate::reference_docs::omni_node`]. +//! * [`polkadot-parachain-bin`]: The collator node used to run collators for all Polkadot system +//! parachains. +//! * [`frame-omni-bencher`]: a benchmarking tool for FRAME-based runtimes. Nodes typically contain +//! a +//! `benchmark` subcommand that does the same. +//! * [`chain_spec_builder`]: Utility to build chain-specs Nodes typically contain a `build-spec` +//! subcommand that does the same. +//! * [`subkey`]: Substrate's key management utility. +//! * [`substrate-node`](node_cli) is an extensive substrate node that contains the superset of all +//! runtime and node side features. The corresponding runtime, called [`kitchensink_runtime`] +//! contains all of the modules that are provided with `FRAME`. This node and runtime is only used +//! for testing and demonstration. +//! //! ### Summary //! //! The following diagram summarizes how some of the components of Polkadot SDK work together: @@ -106,15 +126,19 @@ //! A list of projects and tools in the blockchain ecosystem that one way or another use parts of //! the Polkadot SDK: //! -//! * [Polygon's spin-off, Avail](https://github.com/availproject/avail) +//! * [Avail](https://github.com/availproject/avail) //! * [Cardano Partner Chains](https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/) //! * [Starknet's Madara Sequencer](https://github.com/keep-starknet-strange/madara) +//! * [Polymesh](https://polymesh.network/) //! //! [`substrate`]: crate::polkadot_sdk::substrate //! [`frame`]: crate::polkadot_sdk::frame_runtime //! [`cumulus`]: crate::polkadot_sdk::cumulus //! [`polkadot`]: crate::polkadot_sdk::polkadot //! [`xcm`]: crate::polkadot_sdk::xcm +//! [`frame-omni-bencher`]: https://crates.io/crates/frame-omni-bencher +//! [`polkadot-parachain-bin`]: https://crates.io/crates/polkadot-parachain-bin +//! [`polkadot-omni-node`]: https://crates.io/crates/polkadot-omni-node /// Learn about Cumulus, the framework that transforms [`substrate`]-based chains into /// [`polkadot`]-enabled parachains. diff --git a/docs/sdk/src/polkadot_sdk/substrate.rs b/docs/sdk/src/polkadot_sdk/substrate.rs index 56b89f8c9c2a3f106ad293c8fc605bd7304739ea..ed654c2842c40767314f366b5e5da9326a83f4b4 100644 --- a/docs/sdk/src/polkadot_sdk/substrate.rs +++ b/docs/sdk/src/polkadot_sdk/substrate.rs @@ -90,22 +90,6 @@ //! //! 
In order to ensure that the WASM build is **deterministic**, the [Substrate Runtime Toolbox (srtool)](https://github.com/paritytech/srtool) can be used. //! -//! ### Binaries -//! -//! Multiple binaries are shipped with substrate, the most important of which are located in the -//! [`./bin`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin) folder. -//! -//! * [`node_cli`] is an extensive substrate node that contains the superset of all runtime and node -//! side features. The corresponding runtime, called [`kitchensink_runtime`] contains all of the -//! modules that are provided with `FRAME`. This node and runtime is only used for testing and -//! demonstration. -//! * [`chain_spec_builder`]: Utility to build more detailed chain-specs for the aforementioned -//! node. Other projects typically contain a `build-spec` subcommand that does the same. -//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node): -//! a template node that contains a minimal set of features and can act as a starting point of a -//! project. -//! * [`subkey`]: Substrate's key management utility. -//! //! ### Anatomy of a Binary Crate //! //! From the above, [`node_cli`]/[`kitchensink_runtime`] and `node-template` are essentially diff --git a/docs/sdk/src/reference_docs/chain_spec_genesis.rs b/docs/sdk/src/reference_docs/chain_spec_genesis.rs index a2e22d1ed1ebaedfd2eeebf75770d0df8885ed03..b7a0a648d0cfd84db0123b298ea8c581738f295b 100644 --- a/docs/sdk/src/reference_docs/chain_spec_genesis.rs +++ b/docs/sdk/src/reference_docs/chain_spec_genesis.rs @@ -100,17 +100,22 @@ //! others useful for testing. //! //! Internally, presets can be provided in a number of ways: -//! - JSON in string form: -#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_1)] -//! - JSON using runtime types to serialize values: +//! - using [`build_struct_json_patch`] macro (**recommended**): #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_2)] +//! - JSON using runtime types to serialize values: #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_3)] +//! - JSON in string form: +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_1)] +//! //! It is worth noting that a preset does not have to be the full `RuntimeGenesisConfig`, in that //! sense that it does not have to contain all the keys of the struct. The preset is actually a JSON //! patch that will be merged with the default value of `RuntimeGenesisConfig`. This approach should //! simplify maintenance of built-in presets. The following example illustrates a runtime genesis -//! config patch: +//! config patch with a single key built using [`build_struct_json_patch`] macro: #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_4)] +//! This results in the following JSON blob: +#![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs", preset_4_json)] +//! //! //! ## Note on the importance of testing presets //! @@ -122,8 +127,8 @@ //! //! ## Note on the importance of using the `deny_unknown_fields` attribute //! -//! It is worth noting that it is easy to make a hard-to-spot mistake, as in the following example -//! ([`FooStruct`] does not contain `fieldC`): +//! It is worth noting that when manually building preset JSON blobs it is easy to make a +//! 
hard-to-spot mistake, as in the following example ([`FooStruct`] does not contain `fieldC`): #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", preset_invalid)] //! Even though `preset_invalid` contains a key that does not exist, the deserialization of the JSON //! blob does not fail. The misspelling is silently ignored due to the lack of the //! @@ -131,6 +136,10 @@ //! `GenesisConfig`. #![doc = docify::embed!("./src/reference_docs/chain_spec_runtime/src/presets.rs", invalid_preset_works)] //! +//! To avoid this problem, the [`build_struct_json_patch`] macro should be used whenever possible (it +//! internally instantiates the struct before serializing it into a JSON blob, so all unknown fields +//! are caught at compilation time). +//! //! ## Runtime `GenesisConfig` raw format //! //! A raw format of genesis config contains just the state's keys and values as they are stored in @@ -152,10 +161,13 @@ //! presets and build the chain specification file. It is possible to use the tool with the //! [_demonstration runtime_][`chain_spec_guide_runtime`]. To build the required packages, just run //! the following command: +//! //! ```ignore //! cargo build -p staging-chain-spec-builder -p chain-spec-guide-runtime --release //! ``` +//! //! The `chain-spec-builder` util can also be installed with `cargo install`: +//! //! ```ignore //! cargo install staging-chain-spec-builder //! cargo build -p chain-spec-guide-runtime --release @@ -179,6 +191,7 @@ //! [`get_preset`]: frame_support::genesis_builder_helper::get_preset //! [`pallet::genesis_build`]: frame_support::pallet_macros::genesis_build //! [`pallet::genesis_config`]: frame_support::pallet_macros::genesis_config +//! [`build_struct_json_patch`]: frame_support::build_struct_json_patch //! [`BuildGenesisConfig`]: frame_support::traits::BuildGenesisConfig //! [`serde`]: https://serde.rs/field-attrs.html //!
[`get_storage_for_patch`]: sc_chain_spec::GenesisConfigBuilderRuntimeCaller::get_storage_for_patch diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml index 02849571203285d5a9bbc19d4d3aecad8495c214..07c0342f5fbeaea4076aa4fd040668b912a297ed 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/Cargo.toml @@ -12,6 +12,7 @@ publish = false [dependencies] docify = { workspace = true } codec = { workspace = true } +frame-support = { workspace = true } scale-info = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } @@ -49,6 +50,7 @@ std = [ "codec/std", "scale-info/std", + "frame-support/std", "frame/std", "pallet-balances/std", diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs index 2ff2d9539e2dbdd13997f925474ecbaf5a2836c7..571632ecd272a26195a2316c0a5d6befd38bd3d5 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/pallets.rs @@ -118,7 +118,7 @@ pub mod pallet_foo { pub some_enum: FooEnum, pub some_struct: FooStruct, #[serde(skip)] - _phantom: PhantomData, + pub _phantom: PhantomData, } #[pallet::genesis_build] diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs index 02c2d90f7c827f51dfe913fd0743109b9f96618a..c1316b2f87347811409c0f6581abf57c20c3def1 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/presets.rs @@ -17,8 +17,12 @@ //! Presets for the chain-spec demo runtime. -use crate::pallets::{FooEnum, SomeFooData1, SomeFooData2}; +use crate::{ + pallets::{FooEnum, SomeFooData1, SomeFooData2}, + runtime::{BarConfig, FooConfig, RuntimeGenesisConfig}, +}; use alloc::vec; +use frame_support::build_struct_json_patch; use serde_json::{json, to_string, Value}; use sp_application_crypto::Ss58Codec; use sp_keyring::AccountKeyring; @@ -27,7 +31,7 @@ use sp_keyring::AccountKeyring; pub const PRESET_1: &str = "preset_1"; /// A demo preset with real types. pub const PRESET_2: &str = "preset_2"; -/// Another demo preset with real types. +/// Another demo preset with real types and manually created json object. pub const PRESET_3: &str = "preset_3"; /// A single value patch preset. pub const PRESET_4: &str = "preset_4"; @@ -58,21 +62,21 @@ fn preset_1() -> Value { } #[docify::export] -/// Function provides a preset demonstrating how use the actual types to create a preset. +/// Function provides a preset demonstrating how to create a preset using +/// [`build_struct_json_patch`] macro. fn preset_2() -> Value { - json!({ - "bar": { - "initialAccount": AccountKeyring::Ferdie.public().to_ss58check(), - }, - "foo": { - "someEnum": FooEnum::Data2(SomeFooData2 { values: vec![12,16] }), - "someInteger": 200 + build_struct_json_patch!(RuntimeGenesisConfig { + foo: FooConfig { + some_integer: 200, + some_enum: FooEnum::Data2(SomeFooData2 { values: vec![0x0c, 0x10] }) }, + bar: BarConfig { initial_account: Some(AccountKeyring::Ferdie.public().into()) }, }) } #[docify::export] -/// Function provides a preset demonstrating how use the actual types to create a preset. +/// Function provides a preset demonstrating how use the actual types to manually create a JSON +/// representing the preset. 
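+/// Unlike `preset_2` above, which relies on the `build_struct_json_patch` macro, the JSON object
+/// here is assembled by hand with `serde_json::json!`, so field names must be spelled out manually.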
fn preset_3() -> Value { json!({ "bar": { @@ -92,22 +96,16 @@ fn preset_3() -> Value { #[docify::export] /// Function provides a minimal preset demonstrating how to patch single key in -/// `RuntimeGenesisConfig`. -fn preset_4() -> Value { - json!({ - "foo": { - "someEnum": { - "Data2": { - "values": "0x0c0f" - } - }, - }, +/// `RuntimeGenesisConfig` using [`build_struct_json_patch`] macro. +pub fn preset_4() -> Value { + build_struct_json_patch!(RuntimeGenesisConfig { + foo: FooConfig { some_enum: FooEnum::Data2(SomeFooData2 { values: vec![0x0c, 0x10] }) }, }) } #[docify::export] /// Function provides an invalid preset demonstrating how important is use of -/// [`deny_unknown_fields`] in data structures used in `GenesisConfig`. +/// `deny_unknown_fields` in data structures used in `GenesisConfig`. fn preset_invalid() -> Value { json!({ "foo": { diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs index cc273685fcb4a416a78ed906c31785e8e48d168e..c2fe5a6727e6b09b7ea82d8dd9903f3f615e43a9 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/tests/chain_spec_builder_tests.rs @@ -58,7 +58,7 @@ fn get_preset() { let output: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap(); - //note: copy of chain_spec_guide_runtime::preset_1 + //note: copy of chain_spec_guide_runtime::preset_2 let expected_output = json!({ "bar": { "initialAccount": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", @@ -186,3 +186,20 @@ fn generate_para_chain_spec() { }); assert_eq!(output, expected_output, "Output did not match expected"); } + +#[test] +#[docify::export] +fn preset_4_json() { + assert_eq!( + chain_spec_guide_runtime::presets::preset_4(), + json!({ + "foo": { + "someEnum": { + "Data2": { + "values": "0x0c10" + } + }, + }, + }) + ); +} diff --git a/docs/sdk/src/reference_docs/extrinsic_encoding.rs b/docs/sdk/src/reference_docs/extrinsic_encoding.rs index 31ce92c67e98a5911370472f2387bac3382a5230..1d4b0405b324bfe43a5c87d3d54aa40494623eeb 100644 --- a/docs/sdk/src/reference_docs/extrinsic_encoding.rs +++ b/docs/sdk/src/reference_docs/extrinsic_encoding.rs @@ -12,7 +12,7 @@ //! //! What follows is a description of how extrinsics based on this //! [`sp_runtime::generic::UncheckedExtrinsic`] type are encoded into bytes. Specifically, we are -//! looking at how extrinsics with a format version of 4 are encoded. This version is itself a part +//! looking at how extrinsics with a format version of 5 are encoded. This version is itself a part //! of the payload, and if it changes, it indicates that something about the encoding may have //! changed. //! @@ -24,7 +24,8 @@ //! ```text //! extrinsic_bytes = concat( //! compact_encoded_length, -//! version_and_maybe_signature, +//! version_and_extrinsic_type, +//! maybe_extension_data, //! call_data //! ) //! ``` @@ -56,18 +57,38 @@ //! version_and_signed, //! from_address, //! signature, -//! signed_extensions_extra, +//! transaction_extensions_extra, //! ) //! ``` //! //! Each of the details to be concatenated together is explained below: //! -//! ### version_and_signed +//! ## version_and_extrinsic_type //! -//! This is one byte, equal to `0x84` or `0b1000_0100` (i.e. an upper 1 bit to denote that it is -//! signed, and then the transaction version, 4, in the lower bits). +//! This byte has 2 components: +//! 
- the 2 most significant bits represent the extrinsic type: +//! - bare - `0b00` +//! - signed - `0b10` +//! - general - `0b01` +//! - the 6 least significant bits represent the extrinsic format version (currently 5) //! -//! ### from_address +//! ### Bare extrinsics +//! +//! If the extrinsic is _bare_, then `version_and_extrinsic_type` will be just the _transaction +//! protocol version_, which is 5 (or `0b0000_0101`). Bare extrinsics do not carry any other +//! extension data, so `maybe_extension_data` would not be included in the payload and the +//! `version_and_extrinsic_type` would always be followed by the encoded call bytes. +//! +//! ### Signed extrinsics +//! +//! If the extrinsic is _signed_ (all extrinsics submitted from users used to be signed up until +//! version 4), then `version_and_extrinsic_type` is obtained by having a MSB of `1` on the +//! _transaction protocol version_ byte (which translates to `0b1000_0101`). +//! +//! Additionally, _signed_ extrinsics also carry with them address and signature information encoded +//! as follows: +//! +//! #### from_address //! //! This is the [SCALE encoded][frame::deps::codec] address of the sender of the extrinsic. The //! address is the first generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`], and so @@ -78,7 +99,7 @@ //! signed extrinsic to be submitted to a Polkadot node, you'll always use the //! [`sp_runtime::MultiAddress::Id`] variant to wrap your `AccountId32`. //! -//! ### signature +//! #### signature //! //! This is the [SCALE encoded][frame::deps::codec] signature. The signature type is configured via //! the third generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`], which determines the @@ -90,32 +111,41 @@ //! The signature type used on the Polkadot relay chain is [`sp_runtime::MultiSignature`]; the //! variants there are the types of signature that can be provided. //! -//! ### signed_extensions_extra +//! ### General extrinsics +//! +//! If the extrinsic is _general_ (it doesn't carry a signature in the payload, only extension +//! data), then `version_and_extrinsic_type` is obtained by logical OR between the general +//! transaction type bits and the _transaction protocol version_ byte (which translates to +//! `0b0100_0101`). //! -//! This is the concatenation of the [SCALE encoded][frame::deps::codec] bytes representing each of -//! the [_signed extensions_][sp_runtime::traits::SignedExtension], and are configured by the -//! fourth generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about -//! signed extensions [here][crate::reference_docs::signed_extensions]. +//! ### transaction_extensions_extra //! -//! When it comes to constructing an extrinsic, each signed extension has two things that we are -//! interested in here: +//! This is the concatenation of the [SCALE encoded][frame::deps::codec] bytes representing first a +//! single byte describing the extension version (this is bumped whenever a change occurs in the +//! transaction extension pipeline) followed by the bytes of each of the [_transaction +//! extensions_][sp_runtime::traits::TransactionExtension], and are configured by the fourth generic +//! parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about transaction +//! extensions [here][crate::reference_docs::transaction_extensions]. //! -//! - The actual SCALE encoding of the signed extension type itself; this is what will form our -//! `signed_extensions_extra` bytes. -//! - An `AdditionalSigned` type. 
This is SCALE encoded into the `signed_extensions_additional` data -//! of the _signed payload_ (see below). +//! When it comes to constructing an extrinsic, each transaction extension has two things that we +//! are interested in here: +//! +//! - The actual SCALE encoding of the transaction extension type itself; this is what will form our +//! `transaction_extensions_extra` bytes. +//! - An `Implicit` type. This is SCALE encoded into the `transaction_extensions_implicit` data (see +//! below). //! //! Either (or both) of these can encode to zero bytes. //! -//! Each chain configures the set of signed extensions that it uses in its runtime configuration. -//! At the time of writing, Polkadot configures them +//! Each chain configures the set of transaction extensions that it uses in its runtime +//! configuration. At the time of writing, Polkadot configures them //! [here](https://github.com/polkadot-fellows/runtimes/blob/1dc04eb954eadf8aadb5d83990b89662dbb5a074/relay/polkadot/src/lib.rs#L1432C25-L1432C25). -//! Some of the common signed extensions are defined -//! [here][frame::deps::frame_system#signed-extensions]. +//! Some of the common transaction extensions are defined +//! [here][frame::deps::frame_system#transaction-extensions]. //! -//! Information about exactly which signed extensions are present on a chain and in what order is -//! also a part of the metadata for the chain. For V15 metadata, it can be -//! [found here][frame::deps::frame_support::__private::metadata::v15::ExtrinsicMetadata]. +//! Information about exactly which transaction extensions are present on a chain and in what order +//! is also a part of the metadata for the chain. For V15 metadata, it can be [found +//! here][frame::deps::frame_support::__private::metadata::v15::ExtrinsicMetadata]. //! //! ## call_data //! @@ -150,53 +180,63 @@ //! are typically provided as values to the inner enum. //! //! Information about the pallets that exist for a chain (including their indexes), the calls -//! available in each pallet (including their indexes), and the arguments required for each call -//! can be found in the metadata for the chain. For V15 metadata, this information -//! [is here][frame::deps::frame_support::__private::metadata::v15::PalletMetadata]. +//! available in each pallet (including their indexes), and the arguments required for each call can +//! be found in the metadata for the chain. For V15 metadata, this information [is +//! here][frame::deps::frame_support::__private::metadata::v15::PalletMetadata]. //! //! # The Signed Payload Format //! -//! All extrinsics submitted to a node from the outside world (also known as _transactions_) need to -//! be _signed_. The data that needs to be signed for some extrinsic is called the _signed payload_, -//! and its shape is described by the following pseudo-code: +//! All _signed_ extrinsics submitted to a node from the outside world (also known as +//! _transactions_) need to be _signed_. The data that needs to be signed for some extrinsic is +//! called the _signed payload_, and its shape is described by the following pseudo-code: //! //! ```text -//! signed_payload = concat( -//! call_data, -//! signed_extensions_extra, -//! signed_extensions_additional, +//! signed_payload = blake2_256( +//! concat( +//! call_data, +//! transaction_extensions_extra, +//! transaction_extensions_implicit, +//! ) //! ) -//! -//! if length(signed_payload) > 256 { -//! signed_payload = blake2_256(signed_payload) -//! } //! ``` //! -//! 
The bytes representing `call_data` and `signed_extensions_extra` can be obtained as described -//! above. `signed_extensions_additional` is constructed by SCALE encoding the -//! ["additional signed" data][sp_runtime::traits::SignedExtension::AdditionalSigned] for each -//! signed extension that the chain is using, in order. +//! The bytes representing `call_data` and `transaction_extensions_extra` can be obtained as +//! described above. `transaction_extensions_implicit` is constructed by SCALE encoding the +//! ["implicit" data][sp_runtime::traits::TransactionExtension::Implicit] for each transaction +//! extension that the chain is using, in order. +//! +//! Once we've concatenated those together, we hash the result using a Blake2 256-bit hasher. +//! +//! The [`sp_runtime::generic::SignedPayload`] type takes care of assembling the correct payload for +//! us, given `call_data` and a tuple of transaction extensions. //! -//! Once we've concatenated those together, we hash the result if it's greater than 256 bytes in -//! length using a Blake2 256bit hasher. +//! # The General Transaction Format //! -//! The [`sp_runtime::generic::SignedPayload`] type takes care of assembling the correct payload -//! for us, given `call_data` and a tuple of signed extensions. +//! A General transaction does not have a signature method hardcoded in the check logic of the +//! extrinsic, the way a traditionally signed transaction does. Instead, general transactions should have +//! one or more extensions in the transaction extension pipeline that authorize origins in some way, +//! one of which could be the traditional signature check that happens for all signed transactions +//! in the [Checkable](sp_runtime::traits::Checkable) implementation of +//! [UncheckedExtrinsic](sp_runtime::generic::UncheckedExtrinsic). Therefore, it is up to each +//! extension to define the format of the payload it will try to check, and to authorize the right +//! origin type. For an example, look into the [authorization example pallet +//! extensions](pallet_example_authorization_tx_extension::extensions). //! //! # Example Encoding //! -//! Using [`sp_runtime::generic::UncheckedExtrinsic`], we can construct and encode an extrinsic -//! as follows: +//! Using [`sp_runtime::generic::UncheckedExtrinsic`], we can construct and encode an extrinsic as +//! follows: #![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", encoding_example)] #[docify::export] pub mod call_data { use codec::{Decode, Encode}; + use sp_runtime::{traits::Dispatchable, DispatchResultWithInfo}; // The outer enum composes calls within // different pallets together. We have two // pallets, "PalletA" and "PalletB". - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum Call { #[codec(index = 0)] PalletA(PalletACall), @@ -207,23 +247,33 @@ pub mod call_data { // An inner enum represents the calls within // a specific pallet. "PalletA" has one call, // "Foo".
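+ // The `#[codec(index = ...)]` attributes fix the SCALE variant indices: the outer `Call`
+ // variant index acts as the pallet index and the inner variant index as the call index,
+ // which is exactly what ends up at the front of the encoded call data.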
- #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum PalletACall { #[codec(index = 0)] Foo(String), } - #[derive(Encode, Decode)] + #[derive(Encode, Decode, Clone)] pub enum PalletBCall { #[codec(index = 0)] Bar(String), } + + impl Dispatchable for Call { + type RuntimeOrigin = (); + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithInfo { + Ok(()) + } + } } #[docify::export] pub mod encoding_example { use super::call_data::{Call, PalletACall}; - use crate::reference_docs::signed_extensions::signed_extensions_example; + use crate::reference_docs::transaction_extensions::transaction_extensions_example; use codec::Encode; use sp_core::crypto::AccountId32; use sp_keyring::sr25519::Keyring; @@ -232,34 +282,40 @@ pub mod encoding_example { MultiAddress, MultiSignature, }; - // Define some signed extensions to use. We'll use a couple of examples - // from the signed extensions reference doc. - type SignedExtensions = - (signed_extensions_example::AddToPayload, signed_extensions_example::AddToSignaturePayload); + // Define some transaction extensions to use. We'll use a couple of examples + // from the transaction extensions reference doc. + type TransactionExtensions = ( + transaction_extensions_example::AddToPayload, + transaction_extensions_example::AddToSignaturePayload, + ); // We'll use `UncheckedExtrinsic` to encode our extrinsic for us. We set // the address and signature type to those used on Polkadot, use our custom - // `Call` type, and use our custom set of `SignedExtensions`. - type Extrinsic = - UncheckedExtrinsic, Call, MultiSignature, SignedExtensions>; + // `Call` type, and use our custom set of `TransactionExtensions`. + type Extrinsic = UncheckedExtrinsic< + MultiAddress, + Call, + MultiSignature, + TransactionExtensions, + >; pub fn encode_demo_extrinsic() -> Vec { // The "from" address will be our Alice dev account. let from_address = MultiAddress::::Id(Keyring::Alice.to_account_id()); - // We provide some values for our expected signed extensions. - let signed_extensions = ( - signed_extensions_example::AddToPayload(1), - signed_extensions_example::AddToSignaturePayload, + // We provide some values for our expected transaction extensions. + let transaction_extensions = ( + transaction_extensions_example::AddToPayload(1), + transaction_extensions_example::AddToSignaturePayload, ); // Construct our call data: let call_data = Call::PalletA(PalletACall::Foo("Hello".to_string())); // The signed payload. 
This takes care of encoding the call_data, - // signed_extensions_extra and signed_extensions_additional, and hashing + // transaction_extensions_extra and transaction_extensions_implicit, and hashing // the result if it's > 256 bytes: - let signed_payload = SignedPayload::new(&call_data, signed_extensions.clone()); + let signed_payload = SignedPayload::new(call_data.clone(), transaction_extensions.clone()); // Sign the signed payload with our Alice dev account's private key, // and wrap the signature into the expected type: @@ -269,7 +325,7 @@ pub mod encoding_example { }; // Now, we can build and encode our extrinsic: - let ext = Extrinsic::new_signed(call_data, from_address, signature, signed_extensions); + let ext = Extrinsic::new_signed(call_data, from_address, signature, transaction_extensions); let encoded_ext = ext.encode(); encoded_ext diff --git a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs index cf9e58791492330d827630e76ebbb51df37ecafd..68d7d31f67f3e855ec66a7d9d27b281bfe8a46c8 100644 --- a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs +++ b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs @@ -1,23 +1,212 @@ //! # FRAME Benchmarking and Weights. //! -//! Notes: +//! This reference doc explores the concept of weights within Polkadot-SDK runtimes, and more +//! specifically how FRAME-based runtimes handle it. //! -//! On Weight as a concept. +//! ## Metering //! -//! - Why we need it. Super important. People hate this. We need to argue why it is worth it. -//! - Axis of weight: PoV + Time. -//! - pre dispatch weight vs. metering and post dispatch correction. -//! - mention that we will do this for PoV -//! - you can manually refund using `DispatchResultWithPostInfo`. -//! - Technically you can have weights with any benchmarking framework. You just need one number to -//! be computed pre-dispatch. But FRAME gives you a framework for this. -//! - improve documentation of `#[weight = ..]` and `#[pallet::weight(..)]`. All syntax variation -//! should be covered. +//! The existence of "weight" as a concept in Polkadot-SDK is a direct consequence of the usage of +//! WASM as a virtual machine. Unlike a metered virtual machine like EVM, where every instruction +//! can have a (fairly) deterministic "cost" (also known as "gas price") associated with it, WASM is +//! a stack machine with more complex instruction set, and more unpredictable execution times. This +//! means that unlike EVM, it is not possible to implement a "metering" system in WASM. A metering +//! system is one in which instructions are executed one by one, and the cost/gas is stored in an +//! accumulator. The execution may then halt once a gas limit is reached. //! -//! On FRAME benchmarking machinery: +//! In Polkadot-SDK, the WASM runtime is not assumed to be metered. //! -//! - Component analysis, why everything must be linear. -//! - How to write benchmarks, how you must think of worst case. -//! - How to run benchmarks. +//! ## Trusted Code //! -//! - +//! Another important difference is that EVM is mostly used to express smart contracts, which are +//! foreign and untrusted codes from the perspective of the blockchain executing them. In such +//! cases, metering is crucial, in order to ensure a malicious code cannot consume more gas than +//! expected. +//! +//! This assumption does not hold about the runtime of Polkadot-SDK-based blockchains. The runtime +//! 
is trusted code, and it is assumed to be written by the same team/developers who are running the +//! blockchain itself. Therefore, this assumption of "untrusted foreign code" does not hold. +//! +//! This is why the runtime can opt for a more performant, more flexible virtual machine like WASM, +//! and get away without having metering. +//! +//! ## Benchmarking +//! +//! With the matter of untrusted code execution out of the way, the need for strict metering goes +//! away as well. Yet, it would still be very beneficial for block producers to be able to know an +//! upper bound on how many resources an operation is going to consume before actually executing that +//! operation. This is why FRAME has a toolkit for benchmarking pallets: so that this upper bound +//! can be empirically determined. +//! +//! > Note: Benchmarking is a static analysis: It is all about knowing the upper bound of how many +//! > resources an operation takes statically, without actually executing it. In the context of +//! > FRAME extrinsics, this static-ness is expressed by the keyword "pre-dispatch". +//! +//! To understand why this upper bound is needed, consider the following: A block producer knows +//! they have 20ms left to finish producing their block, and wishes to include more transactions in +//! the block. Yet, in a metered environment, they would not know which transaction is likely to fit +//! in the 20ms. In a benchmarked environment, they can examine the transactions for their upper bound, +//! and include the ones that are known to fit based on the worst case. +//! +//! The benchmarking code can be written as a part of a FRAME pallet, using the macros provided in +//! [`frame_benchmarking`]. See any of the existing pallets in `polkadot-sdk`, or the pallets in our +//! [`crate::polkadot_sdk::templates`] for examples. +//! +//! ## Weight +//! +//! Finally, [`sp_weights::Weight`] is the output of the benchmarking process. It is a +//! two-dimensional data structure that describes the resources consumed by a given block of +//! code (for example, a transaction). The two dimensions are: +//! +//! * reference time: The time consumed in pico-seconds, on reference hardware. +//! * proof size: The amount of storage proof necessary to re-execute the block of code. This is +//! mainly needed for parachain <> relay-chain verification. +//! +//! ## How To Write Benchmarks: Worst Case +//! +//! The most important detail about writing benchmarking code is that it must be written such that +//! it captures the worst-case execution of any block of code. +//! +//! Consider: +#![doc = docify::embed!("./src/reference_docs/frame_benchmarking_weight.rs", simple_transfer)] +//! +//! If this block of code is to be benchmarked, then the benchmarking code must be written such that +//! it captures the worst case, i.e. the more expensive of the two code paths. +//! +//! ## Gluing Pallet Benchmarking with Runtime +//! +//! FRAME pallets are mandated to provide their own benchmarking code. Runtimes contain the +//! boilerplate needed to run these benchmarks (see [Running Benchmarks +//! below](#running-benchmarks)). The outcome of running these benchmarks is meant to be fed back +//! into the pallet via a conventional `trait WeightInfo` on `Config`: +#![doc = docify::embed!("src/reference_docs/frame_benchmarking_weight.rs", WeightInfo)] +//! +//! Then, individual functions of this trait are the final values that we assign to the +//!
[`frame::pallet_macros::weight`] attribute: +#![doc = docify::embed!("./src/reference_docs/frame_benchmarking_weight.rs", simple_transfer_2)] +//! +//! ## Manual Refund +//! +//! Back to the assumption of writing benchmarks for the worst case: Sometimes, the pre-dispatch weight +//! can significantly differ from the actual weight consumed post-dispatch. This can be expressed with +//! the following FRAME syntax: +#![doc = docify::embed!("./src/reference_docs/frame_benchmarking_weight.rs", simple_transfer_3)] +//! +//! ## Running Benchmarks +//! +//! There are two ways to run the benchmarks of a runtime. +//! +//! 1. The old school way: Most Polkadot-SDK based nodes (such as the ones integrated in +//! [`templates`]) have a `benchmark` subcommand integrated into them. +//! 2. The more [`crate::reference_docs::omni_node`] compatible way of running the benchmarks would +//! be using the [`frame-omni-bencher`] CLI, which only relies on the runtime. +//! +//! Note that by convention, the runtime and pallets always have their benchmarking code +//! feature-gated behind `runtime-benchmarks`. So, the runtime should be compiled with `--features +//! runtime-benchmarks`. +//! +//! ## Automatic Refund of `proof_size` +//! +//! A new feature in FRAME allows the runtime to be configured for "automatic refund" of the proof +//! size weight. This is very useful for maximizing the throughput of parachains. Please see +//! [`crate::guides::enable_pov_reclaim`]. +//! +//! ## Summary +//! +//! Polkadot-SDK runtimes use a more performant VM, namely WASM, which does not have metering. In +//! return, they have to be benchmarked to provide an upper bound on the resources they consume. This +//! upper bound is represented as [`sp_weights::Weight`]. +//! +//! ## Future: PolkaVM +//! +//! With the transition of the Polkadot relay chain to [JAM], a set of new features are being +//! introduced, one of which is a new virtual machine named [PolkaVM] that is as flexible as +//! WASM, but also capable of metering. This might alter the future of benchmarking in FRAME and +//! Polkadot-SDK, rendering it unnecessary once PolkaVM is fully integrated into +//! Polkadot-SDK. For a basic explanation of JAM and PolkaVM, see [here](https://blog.kianenigma.com/posts/tech/demystifying-jam/#pvm). +//! +//! +//! [`frame-omni-bencher`]: https://crates.io/crates/frame-omni-bencher +//! [`templates`]: crate::polkadot_sdk::templates +//! [PolkaVM]: https://github.com/koute/polkavm +//!
[JAM]: https://graypaper.com + +#[frame::pallet(dev_mode)] +#[allow(unused_variables, unreachable_code, unused, clippy::diverging_sub_expression)] +pub mod pallet { + use frame::prelude::*; + + #[docify::export] + pub trait WeightInfo { + fn simple_transfer() -> Weight; + } + + #[pallet::config] + pub trait Config: frame_system::Config { + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet { + #[docify::export] + #[pallet::weight(10_000)] + pub fn simple_transfer( + origin: OriginFor, + destination: T::AccountId, + amount: u32, + ) -> DispatchResult { + let destination_exists = todo!(); + if destination_exists { + // simpler code path + } else { + // more complex code path + } + Ok(()) + } + + #[docify::export] + #[pallet::weight(T::WeightInfo::simple_transfer())] + pub fn simple_transfer_2( + origin: OriginFor, + destination: T::AccountId, + amount: u32, + ) -> DispatchResult { + let destination_exists = todo!(); + if destination_exists { + // simpler code path + } else { + // more complex code path + } + Ok(()) + } + + #[docify::export] + // This is the worst-case, pre-dispatch weight. + #[pallet::weight(T::WeightInfo::simple_transfer())] + pub fn simple_transfer_3( + origin: OriginFor, + destination: T::AccountId, + amount: u32, + ) -> DispatchResultWithPostInfo { + // ^^ Notice the new return type + let destination_exists = todo!(); + if destination_exists { + // simpler code path + // Note that need for .into(), to convert `()` to `PostDispatchInfo` + // See: https://paritytech.github.io/polkadot-sdk/master/frame_support/dispatch/struct.PostDispatchInfo.html#impl-From%3C()%3E-for-PostDispatchInfo + Ok(().into()) + } else { + // more complex code path + let actual_weight = + todo!("this can likely come from another benchmark that is NOT the worst case"); + let pays_fee = todo!("You can set this to `Pays::Yes` or `Pays::No` to change if this transaction should pay fees"); + Ok(frame::deps::frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(actual_weight), + pays_fee, + }) + } + } + } +} diff --git a/docs/sdk/src/reference_docs/frame_runtime_types.rs b/docs/sdk/src/reference_docs/frame_runtime_types.rs index e99106ade878137ed308aa05c3bb43236727a3a4..ec7196cea662631ba05536ad6c15f3428457e6e1 100644 --- a/docs/sdk/src/reference_docs/frame_runtime_types.rs +++ b/docs/sdk/src/reference_docs/frame_runtime_types.rs @@ -134,7 +134,7 @@ //! * [`crate::reference_docs::frame_origin`] explores further details about the usage of //! `RuntimeOrigin`. //! * [`RuntimeCall`] is a particularly interesting composite enum as it dictates the encoding of an -//! extrinsic. See [`crate::reference_docs::signed_extensions`] for more information. +//! extrinsic. See [`crate::reference_docs::transaction_extensions`] for more information. //! * See the documentation of [`construct_runtime`]. //! * See the corresponding lecture in the [pba-book](https://polkadot-blockchain-academy.github.io/pba-book/frame/outer-enum/page.html). //! diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 7f2edb08d46e98e2cdc56944b4a1e54a097b8719..e47eece784c4ce6b34ff9507873e0df2495dd912 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -40,9 +40,12 @@ pub mod runtime_vs_smart_contract; /// Learn about how extrinsics are encoded to be transmitted to a node and stored in blocks. pub mod extrinsic_encoding; -/// Learn about the signed extensions that form a part of extrinsics. 
+/// Deprecated in favor of transaction extensions. pub mod signed_extensions; +/// Learn about the transaction extensions that form a part of extrinsics. +pub mod transaction_extensions; + /// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. pub mod frame_origin; @@ -75,7 +78,6 @@ pub mod frame_system_accounts; pub mod development_environment_advice; /// Learn about benchmarking and weight. -// TODO: @shawntabrizi @ggwpez https://github.com/paritytech/polkadot-sdk-docs/issues/50 pub mod frame_benchmarking_weight; /// Learn about the token-related logic in FRAME and how to apply it to your use case. @@ -106,3 +108,6 @@ pub mod umbrella_crate; /// Learn about how to create custom RPC endpoints and runtime APIs. pub mod custom_runtime_api_rpc; + +/// The [`polkadot-omni-node`](https://crates.io/crates/polkadot-omni-node) and its related binaries. +pub mod omni_node; diff --git a/docs/sdk/src/reference_docs/omni_node.rs b/docs/sdk/src/reference_docs/omni_node.rs new file mode 100644 index 0000000000000000000000000000000000000000..44d63704a45856370021780c5376d8763af090dd --- /dev/null +++ b/docs/sdk/src/reference_docs/omni_node.rs @@ -0,0 +1,185 @@ +//! # (Omni) Node +//! +//! This reference doc elaborates on what a Polkadot-SDK/Substrate node software is, and what +//! various ways exist to run one. +//! +//! The node software, as denoted in [`crate::reference_docs::wasm_meta_protocol`], is everything in +//! a blockchain other than the WASM runtime. It contains common components such as the database, +//! networking, RPC server and consensus. Substrate-based nodes are native binaries that are +//! compiled down from the Rust source code. The `node` folder in any of the [`templates`] are +//! examples of this source. +//! +//! > Note: A typical node also contains a lot of other tools (exposed as subcommands) that are +//! > useful for operating a blockchain, such as the ones noted in +//! > [`polkadot_omni_node_lib::cli::Cli::subcommand`]. +//! +//! ## Node <> Runtime Interdependence +//! +//! While in principle the node can be mostly independent of the runtime, for various reasons, such +//! as the [native runtime](crate::reference_docs::wasm_meta_protocol#native-runtime), the node and +//! runtime were historically tightly linked together. Another reason is that the node and the +//! runtime need to be in agreement about which consensus algorithm they use, as described +//! [below](#consensus-engine). +//! +//! Specifically, the node relied on the existence of a linked runtime, and *could only reliably run +//! that runtime*. This is why if you look at any of the [`templates`], they are all composed of a +//! node, and a runtime. +//! +//! Moreover, the code and API of each of these nodes was historically very advanced, and tailored +//! towards those who wish to customize many of the node components at depth. +//! +//! > The notorious `service.rs` in any node template is a good example of this. +//! +//! A [trend](https://github.com/paritytech/polkadot-sdk/issues/62) has already been undergoing in +//! order to de-couple the node and the runtime for a long time. The north star of this effort is +//! twofold : +//! +//! 1. develop what can be described as an "omni-node": A node that can run most runtimes. +//! 2. provide a cleaner abstraction for creating a custom node. +//! +//! While a single omni-node running *all possible runtimes* is not feasible, the +//! 
[`polkadot-omni-node`] is an attempt at creating the former, and the [`polkadot_omni_node_lib`] +//! is the latter. +//! +//! > Note: The OmniNodes are mainly focused on the development needs of **Polkadot +//! > parachains ONLY**, not (Substrate) solo-chains. For the time being, solo-chains are not +//! > supported by the OmniNodes. This might change in the future. +//! +//! ## Types of Nodes +//! +//! With the emergence of the OmniNodes, let's look at the various node options available to a +//! builder. +//! +//! ### [`polkadot-omni-node`] +//! +//! [`polkadot-omni-node`] is a white-labeled binary, released as a part of the Polkadot SDK, that is +//! capable of meeting the needs of most Polkadot parachains. +//! +//! It can act as the collator of a parachain in production, with all the related auxiliary +//! functionalities that a normal collator node has: RPC server, archiving state, etc. Moreover, it +//! can also run the wasm blob of the parachain locally for testing and development. +//! +//! ### [`polkadot_omni_node_lib`] +//! +//! [`polkadot_omni_node_lib`] is the library version of the above, which can be used to create a +//! fresh parachain node with some limited configuration options, using a lean API. +//! +//! ### Old School Nodes +//! +//! The existing node architecture, as seen in the [`templates`], is still available for those who +//! want to have full control over the node software. +//! +//! ### Summary +//! +//! We can summarize the node software choices of any given user of Polkadot-SDK wishing to +//! deploy a parachain into 3 categories: +//! +//! 1. **Use the [`polkadot-omni-node`]**: This is the easiest way to get started, and is likely +//! to be the best choice for most users. +//! * can run almost any runtime with [`--dev-block-time`] +//! 2. **Use the [`polkadot_omni_node_lib`]**: This is the best choice for those who want to have +//! slightly more control over the node software, such as embedding a custom chain-spec. +//! 3. **Use the old school nodes**: This is the best choice for those who want to have full control +//! over the node software, such as changing the consensus engine, altering the transaction pool, +//! and so on. +//! +//! ## _OmniTools_: User Journey +//! +//! All in all, the user journey of a team/builder in the OmniNode world is as follows: +//! +//! * The [`templates`], most notably the [`parachain-template`], are the canonical starting point. +//! That being said, the node code of the templates (which may eventually be +//! removed/feature-gated) is no longer of relevance. The only focus is on the runtime, and +//! obtaining a `.wasm` file. References: +//! * [`crate::guides::your_first_pallet`] +//! * [`crate::guides::your_first_runtime`] +//! * If need be, the weights of the runtime need to be updated using `frame-omni-bencher`. +//! References: +//! * [`crate::reference_docs::frame_benchmarking_weight`] +//! * Next, [`chain-spec-builder`] is used to generate a `chain_spec.json`, either for development +//! or for production. References: +//! * [`crate::reference_docs::chain_spec_genesis`] +//! * For local development, the following options are available: +//! * `polkadot-omni-node` (notably, with [`--dev-block-time`]). References: +//! * [`crate::guides::your_first_node`] +//! * External tools such as `chopsticks`, `zombienet`. +//! * See the `README.md` file of the `polkadot-sdk-parachain-template`. +//! * For production, `polkadot-omni-node` can be used out of the box. +//!
* For further customization, [`polkadot_omni_node_lib`] can be used. +//! +//! ## Appendix +//! +//! This section describes how the interdependence between the node and the runtime is related to +//! the consensus engine. This information is useful for those who want to understand the +//! historical context of the node and the runtime. +//! +//! ### Consensus Engine +//! +//! In any given substrate-based chain, both the node and the runtime will have their own +//! opinion/information about what consensus engine is going to be used. +//! +//! In practice, the majority of the implementation of any consensus engine is on the node side, but +//! the runtime also typically needs to expose a custom runtime-api to enable the particular +//! consensus engine to work, and that particular runtime-api is implemented by a pallet +//! corresponding to that consensus engine. +//! +//! For example, taking a snippet from [`solochain_template_runtime`], the runtime has to provide +//! this additional runtime-api (compared to [`minimal_template_runtime`]), if the node software is +//! configured to use the Aura consensus engine: +//! +//! ```text +//! impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime { +//! fn slot_duration() -> sp_consensus_aura::SlotDuration { +//! ... +//! } +//! fn authorities() -> Vec<AuraId> { +//! ... +//! } +//! } +//! ``` +//! +//! For simplicity, we can break down "consensus" into two main parts: +//! +//! * Block Authoring: Deciding who gets to produce the next block. +//! * Finality: Deciding when a block is considered final. +//! +//! For block authoring, there are a number of options: +//! +//! * [`sc_consensus_manual_seal`]: Useful for testing, where any node can produce a block at any +//! time. This is often combined with a fixed interval at which a block is produced. +//! * [`sc_consensus_aura`]/[`pallet_aura`]: A simple round-robin block authoring mechanism. +//! * [`sc_consensus_babe`]/[`pallet_babe`]: A more advanced block authoring mechanism, capable of +//! anonymizing the next block author. +//! * [`sc_consensus_pow`]: Proof of Work block authoring. +//! +//! For finality, there is one main option shipped with polkadot-sdk: +//! +//! * [`sc_consensus_grandpa`]/[`pallet_grandpa`]: A finality gadget that uses a voting mechanism to +//! decide when a block is considered final. +//! +//! **The most important lesson here is that the node and the runtime must have matching consensus +//! components.** +//! +//! ### Consequences for OmniNode +//! +//! +//! The consequence of the above is that anyone using the OmniNode must also be aware of the +//! consensus system used in the runtime, and be aware of whether it matches that of the OmniNode or +//! not. For the time being, [`polkadot-omni-node`] only supports: +//! +//! * Parachain-based Aura consensus, with 6s async-backing block-time, and +//! [`polkadot_omni_node_lib::cli::Cli::experimental_use_slot_based`] for fixed factor scaling (a +//! step before full elastic scaling). +//! * Ability to run any runtime with the [`--dev-block-time`] flag. This uses +//! [`sc_consensus_manual_seal`] under the hood, and has no restrictions on the runtime's +//! consensus. +//! +//! [This](https://github.com/paritytech/polkadot-sdk/issues/5565) future improvement to OmniNode +//! aims to make such checks automatic. +//! +//! +//! [`templates`]: crate::polkadot_sdk::templates +//! [`parachain-template`]: https://github.com/paritytech/polkadot-sdk-parachain-template +//! [`--dev-block-time`]: polkadot_omni_node_lib::cli::Cli::dev_block_time +//!
[`polkadot-omni-node`]: https://crates.io/crates/polkadot-omni-node +//! [`chain-spec-builder`]: https://crates.io/crates/staging-chain-spec-builder diff --git a/docs/sdk/src/reference_docs/signed_extensions.rs b/docs/sdk/src/reference_docs/signed_extensions.rs index c644aeaa41650817cd02b0dcc7a97e80c2b19bd0..6e44fea88ded1fb402860c47e5d3232867580a69 100644 --- a/docs/sdk/src/reference_docs/signed_extensions.rs +++ b/docs/sdk/src/reference_docs/signed_extensions.rs @@ -1,131 +1,2 @@ -//! Signed extensions are, briefly, a means for different chains to extend the "basic" extrinsic -//! format with custom data that can be checked by the runtime. -//! -//! # FRAME provided signed extensions -//! -//! FRAME by default already provides the following signed extensions: -//! -//! - [`CheckGenesis`](frame_system::CheckGenesis): Ensures that a transaction was sent for the same -//! network. Determined based on genesis. -//! -//! - [`CheckMortality`](frame_system::CheckMortality): Extends a transaction with a configurable -//! mortality. -//! -//! - [`CheckNonZeroSender`](frame_system::CheckNonZeroSender): Ensures that the sender of a -//! transaction is not the *all zero account* (all bytes of the accountid are zero). -//! -//! - [`CheckNonce`](frame_system::CheckNonce): Extends a transaction with a nonce to prevent replay -//! of transactions and to provide ordering of transactions. -//! -//! - [`CheckSpecVersion`](frame_system::CheckSpecVersion): Ensures that a transaction was built for -//! the currently active runtime. -//! -//! - [`CheckTxVersion`](frame_system::CheckTxVersion): Ensures that the transaction signer used the -//! correct encoding of the call. -//! -//! - [`CheckWeight`](frame_system::CheckWeight): Ensures that the transaction fits into the block -//! before dispatching it. -//! -//! - [`ChargeTransactionPayment`](pallet_transaction_payment::ChargeTransactionPayment): Charges -//! transaction fees from the signer based on the weight of the call using the native token. -//! -//! - [`ChargeAssetTxPayment`](pallet_asset_tx_payment::ChargeAssetTxPayment): Charges transaction -//! fees from the signer based on the weight of the call using any supported asset (including the -//! native token). -//! -//! - [`ChargeAssetTxPayment`(using -//! conversion)](pallet_asset_conversion_tx_payment::ChargeAssetTxPayment): Charges transaction -//! fees from the signer based on the weight of the call using any supported asset (including the -//! native token). The asset is converted to the native token using a pool. -//! -//! - [`SkipCheckIfFeeless`](pallet_skip_feeless_payment::SkipCheckIfFeeless): Allows transactions -//! to be processed without paying any fee. This requires that the `call` that should be -//! dispatched is augmented with the [`feeless_if`](frame_support::pallet_macros::feeless_if) -//! attribute. -//! -//! - [`CheckMetadataHash`](frame_metadata_hash_extension::CheckMetadataHash): Extends transactions -//! to include the so-called metadata hash. This is required by chains to support the generic -//! Ledger application and other similar offline wallets. -//! -//! - [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim): A -//! signed extension for parachains that reclaims unused storage weight after executing a -//! transaction. -//! -//! For more information about these extensions, follow the link to the type documentation. -//! -//! # Building a custom signed extension -//! -//! 
Defining a couple of very simple signed extensions looks like the following: -#![doc = docify::embed!("./src/reference_docs/signed_extensions.rs", signed_extensions_example)] - -#[docify::export] -pub mod signed_extensions_example { - use codec::{Decode, Encode}; - use scale_info::TypeInfo; - use sp_runtime::traits::SignedExtension; - - // This doesn't actually check anything, but simply allows - // some arbitrary `u32` to be added to the extrinsic payload - #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] - pub struct AddToPayload(pub u32); - - impl SignedExtension for AddToPayload { - const IDENTIFIER: &'static str = "AddToPayload"; - type AccountId = (); - type Call = (); - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed( - &self, - ) -> Result< - Self::AdditionalSigned, - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(()) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } - } - - // This is the opposite; nothing will be added to the extrinsic payload, - // but the AdditionalSigned type (`1234u32`) will be added to the - // payload to be signed. - #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] - pub struct AddToSignaturePayload; - - impl SignedExtension for AddToSignaturePayload { - const IDENTIFIER: &'static str = "AddToSignaturePayload"; - type AccountId = (); - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - - fn additional_signed( - &self, - ) -> Result< - Self::AdditionalSigned, - sp_runtime::transaction_validity::TransactionValidityError, - > { - Ok(1234) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } - } -} +//! `SignedExtension`s are deprecated in favor of +//! [`TransactionExtension`s](crate::reference_docs::transaction_extensions). diff --git a/docs/sdk/src/reference_docs/transaction_extensions.rs b/docs/sdk/src/reference_docs/transaction_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..0f8198e8372d35975f69d811d545bf20b900407f --- /dev/null +++ b/docs/sdk/src/reference_docs/transaction_extensions.rs @@ -0,0 +1,103 @@ +//! Transaction extensions are, briefly, a means for different chains to extend the "basic" +//! extrinsic format with custom data that can be checked by the runtime. +//! +//! # FRAME provided transaction extensions +//! +//! FRAME by default already provides the following transaction extensions: +//! +//! - [`CheckGenesis`](frame_system::CheckGenesis): Ensures that a transaction was sent for the same +//! network. Determined based on genesis. +//! +//! - [`CheckMortality`](frame_system::CheckMortality): Extends a transaction with a configurable +//! mortality. +//! +//! - [`CheckNonZeroSender`](frame_system::CheckNonZeroSender): Ensures that the sender of a +//! transaction is not the *all zero account* (all bytes of the accountid are zero). +//! +//! - [`CheckNonce`](frame_system::CheckNonce): Extends a transaction with a nonce to prevent replay +//! of transactions and to provide ordering of transactions. +//! +//! - [`CheckSpecVersion`](frame_system::CheckSpecVersion): Ensures that a transaction was built for +//! the currently active runtime. +//! +//! - [`CheckTxVersion`](frame_system::CheckTxVersion): Ensures that the transaction signer used the +//! correct encoding of the call. +//! 
+//! - [`CheckWeight`](frame_system::CheckWeight): Ensures that the transaction fits into the block +//! before dispatching it. +//! +//! - [`ChargeTransactionPayment`](pallet_transaction_payment::ChargeTransactionPayment): Charges +//! transaction fees from the signer based on the weight of the call using the native token. +//! +//! - [`ChargeAssetTxPayment`](pallet_asset_tx_payment::ChargeAssetTxPayment): Charges transaction +//! fees from the signer based on the weight of the call using any supported asset (including the +//! native token). +//! +//! - [`ChargeAssetTxPayment`(using +//! conversion)](pallet_asset_conversion_tx_payment::ChargeAssetTxPayment): Charges transaction +//! fees from the signer based on the weight of the call using any supported asset (including the +//! native token). The asset is converted to the native token using a pool. +//! +//! - [`SkipCheckIfFeeless`](pallet_skip_feeless_payment::SkipCheckIfFeeless): Allows transactions +//! to be processed without paying any fee. This requires that the `call` that should be +//! dispatched is augmented with the [`feeless_if`](frame_support::pallet_macros::feeless_if) +//! attribute. +//! +//! - [`CheckMetadataHash`](frame_metadata_hash_extension::CheckMetadataHash): Extends transactions +//! to include the so-called metadata hash. This is required by chains to support the generic +//! Ledger application and other similar offline wallets. +//! +//! - [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim): A +//! transaction extension for parachains that reclaims unused storage weight after executing a +//! transaction. +//! +//! For more information about these extensions, follow the link to the type documentation. +//! +//! # Building a custom transaction extension +//! +//! Defining a couple of very simple transaction extensions looks like the following: +#![doc = docify::embed!("./src/reference_docs/transaction_extensions.rs", transaction_extensions_example)] + +#[docify::export] +pub mod transaction_extensions_example { + use codec::{Decode, Encode}; + use scale_info::TypeInfo; + use sp_runtime::{ + impl_tx_ext_default, + traits::{Dispatchable, TransactionExtension}, + transaction_validity::TransactionValidityError, + }; + + // This doesn't actually check anything, but simply allows + // some arbitrary `u32` to be added to the extrinsic payload + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToPayload(pub u32); + + impl TransactionExtension for AddToPayload { + const IDENTIFIER: &'static str = "AddToPayload"; + type Implicit = (); + type Pre = (); + type Val = (); + + impl_tx_ext_default!(Call; weight validate prepare); + } + + // This is the opposite; nothing will be added to the extrinsic payload, + // but the Implicit type (`1234u32`) will be added to the + // payload to be signed. 
+ #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToSignaturePayload; + + impl TransactionExtension for AddToSignaturePayload { + const IDENTIFIER: &'static str = "AddToSignaturePayload"; + type Implicit = u32; + + fn implicit(&self) -> Result { + Ok(1234) + } + type Pre = (); + type Val = (); + + impl_tx_ext_default!(Call; weight validate prepare); + } +} diff --git a/docs/sdk/src/reference_docs/umbrella_crate.rs b/docs/sdk/src/reference_docs/umbrella_crate.rs index 1074cde37693d3aed03474ab3b89743ec538c188..8d9bcdfc208995330f9c0c18e5dd2af3c02b659d 100644 --- a/docs/sdk/src/reference_docs/umbrella_crate.rs +++ b/docs/sdk/src/reference_docs/umbrella_crate.rs @@ -5,6 +5,7 @@ //! crate. This helps with selecting the right combination of crate versions, since otherwise 3rd //! party tools are needed to select a compatible set of versions. //! +//! //! ## Features //! //! The umbrella crate supports no-std builds and can therefore be used in the runtime and node. diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index d124c8fb7eb745e2dec9af0a2dd35889675669b6..7c904e6658e710e84cb77e06b000356f94de6b01 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -203,10 +203,12 @@ where runner.run_node_until_exit(move |config| async move { let hwbench = (!cli.run.no_hardware_benchmarks) - .then_some(config.database.path().map(|database_path| { - let _ = std::fs::create_dir_all(&database_path); - sc_sysinfo::gather_hwbench(Some(database_path), &SUBSTRATE_REFERENCE_HARDWARE) - })) + .then(|| { + config.database.path().map(|database_path| { + let _ = std::fs::create_dir_all(&database_path); + sc_sysinfo::gather_hwbench(Some(database_path), &SUBSTRATE_REFERENCE_HARDWARE) + }) + }) .flatten(); let database_source = config.database.clone(); diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 551f6a507c8e09769f2534483ba4184e5d49ea81..a48669c248254c406041dba7845264984ef7a8a9 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -49,9 +49,9 @@ use polkadot_primitives::{ CandidateDescriptorV2 as CandidateDescriptor, CandidateEvent, CandidateReceiptV2 as CandidateReceipt, }, - AuthorityDiscoveryId, CandidateCommitments, ExecutorParams, Hash, - PersistedValidationData, PvfExecKind as RuntimePvfExecKind, PvfPrepKind, SessionIndex, - ValidationCode, ValidationCodeHash, ValidatorId, + AuthorityDiscoveryId, CandidateCommitments, ExecutorParams, Hash, PersistedValidationData, + PvfExecKind as RuntimePvfExecKind, PvfPrepKind, SessionIndex, ValidationCode, + ValidationCodeHash, ValidatorId, }; use sp_application_crypto::{AppCrypto, ByteArray}; use sp_keystore::KeystorePtr; diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index e679f34792d0d9978ec0126a0161bb650075eb86..997a347631a0523864b5279af82bcec7e28186ee 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -27,7 +27,7 @@ use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; use polkadot_primitives::{ vstaging::CandidateDescriptorV2, CandidateDescriptor, CoreIndex, GroupIndex, HeadData, - Id as ParaId, SessionInfo, UpwardMessage, ValidatorId, OccupiedCoreAssumption + Id as ParaId, OccupiedCoreAssumption, SessionInfo, UpwardMessage, ValidatorId, }; use 
polkadot_primitives_test_helpers::{ dummy_collator, dummy_collator_signature, dummy_hash, make_valid_candidate_descriptor, diff --git a/polkadot/node/network/availability-distribution/src/requester/mod.rs b/polkadot/node/network/availability-distribution/src/requester/mod.rs index 79658591b6a793bf8a0b24c6472abfc7800b942c..613a514269ee6892e25962d5f95fe9671991f85b 100644 --- a/polkadot/node/network/availability-distribution/src/requester/mod.rs +++ b/polkadot/node/network/availability-distribution/src/requester/mod.rs @@ -38,9 +38,7 @@ use polkadot_node_subsystem_util::{ availability_chunks::availability_chunk_index, runtime::{get_occupied_cores, RuntimeInfo}, }; -use polkadot_primitives::{ - vstaging::OccupiedCore, CandidateHash, CoreIndex, Hash, SessionIndex, -}; +use polkadot_primitives::{vstaging::OccupiedCore, CandidateHash, CoreIndex, Hash, SessionIndex}; use super::{FatalError, Metrics, Result, LOG_TARGET}; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs index fbb3ff4328a51a7e0b726d03750e814840fe1751..35202fc96299b18ebc25cc9012de7b1e7ab357e4 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/validators_buffer.rs @@ -110,7 +110,7 @@ impl ValidatorGroupsBuffer { .validators .iter() .enumerate() - .filter_map(|(idx, authority_id)| bits[idx].then_some(authority_id.clone())) + .filter_map(|(idx, authority_id)| bits[idx].then(|| authority_id.clone())) .collect(); if let Some(last_group) = self.group_infos.iter().last() { diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs index 79c94cb8c59857880ec4e076ac2aa1c933c81555..e1b2af733b47afc2ae2233d766a10a77ddca3896 100644 --- a/polkadot/node/overseer/examples/minimal-example.rs +++ b/polkadot/node/overseer/examples/minimal-example.rs @@ -31,7 +31,9 @@ use polkadot_overseer::{ gen::{FromOrchestra, SpawnedSubsystem}, HeadSupportsParachains, SubsystemError, }; -use polkadot_primitives::{vstaging::CandidateReceiptV2 as CandidateReceipt, Hash, PvfExecKind}; +use polkadot_primitives::{ + vstaging::CandidateReceiptV2 as CandidateReceipt, Hash, PersistedValidationData, +}; use polkadot_primitives_test_helpers::{ dummy_candidate_descriptor, dummy_hash, dummy_validation_code, }; diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 6bd8b46a93c0fc1f5d625dc31e66bf252255d03d..e2e7aa92b11cfd112f268ac963ce6a902d417cfa 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -59,7 +59,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.16.0"; +pub const NODE_VERSION: &'static str = "1.16.1"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 8d50b54b2fdc88bae6d3f132f46b74f4ce4f0efd..3edb3f4dadbed8ac45ebd71814fb552a93227203 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -200,6 +200,7 @@ runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "polkadot-test-client/runtime-benchmarks", @@ -217,7 +218,10 @@ try-runtime = [ "sp-runtime/try-runtime", "westend-runtime?/try-runtime", ] -fast-runtime = ["rococo-runtime?/fast-runtime", "westend-runtime?/fast-runtime"] +fast-runtime = [ + "rococo-runtime?/fast-runtime", + "westend-runtime?/fast-runtime", +] malus = ["full-node"] runtime-metrics = [ diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs index 4dcff2078419c9161ec752dc269f3f6a919748a2..186bea3960e8a92550cb6259c0a626bbc5665d82 100644 --- a/polkadot/node/service/src/benchmarking.rs +++ b/polkadot/node/service/src/benchmarking.rs @@ -189,7 +189,7 @@ fn westend_sign_call( use sp_core::Pair; use westend_runtime as runtime; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -202,11 +202,12 @@ fn westend_sign_call( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), frame_metadata_hash_extension::CheckMetadataHash::::new(false), - ); + ) + .into(); let payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -225,7 +226,7 @@ fn westend_sign_call( call, sp_runtime::AccountId32::from(acc.public()).into(), polkadot_core_primitives::Signature::Sr25519(signature), - extra, + tx_ext, ) .into() } @@ -243,7 +244,7 @@ fn rococo_sign_call( use rococo_runtime as runtime; use sp_core::Pair; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -256,11 +257,12 @@ fn rococo_sign_call( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), frame_metadata_hash_extension::CheckMetadataHash::::new(false), - ); + ) + .into(); let payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -279,7 +281,7 @@ fn rococo_sign_call( call, sp_runtime::AccountId32::from(acc.public()).into(), polkadot_core_primitives::Signature::Sr25519(signature), - extra, + tx_ext, ) .into() } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index da3ab760ed223f6c826cfb50c8e39c095aaab30e..abba91a38a97c43bca584edb38efd00b6d4654d3 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -759,13 +759,12 @@ pub fn new_full< Some(backoff) }; - // Running approval voting in parallel is enabled by default on all networks except Polkadot and - // Kusama, unless explicitly enabled by the 
commandline option. + // Running approval voting in parallel is enabled by default on all networks except Polkadot + // unless explicitly enabled by the commandline option. // This is meant to be temporary until we have enough confidence in the new system to enable it // by default on all networks. - let enable_approval_voting_parallel = (!config.chain_spec.is_kusama() && - !config.chain_spec.is_polkadot()) || - enable_approval_voting_parallel; + let enable_approval_voting_parallel = + !config.chain_spec.is_polkadot() || enable_approval_voting_parallel; let disable_grandpa = config.disable_grandpa; let name = config.network.node_name.clone(); @@ -1045,7 +1044,7 @@ pub fn new_full< is_validator: role.is_authority(), enable_http_requests: false, custom_extensions: move |_| vec![], - }) + })? .run(client.clone(), task_manager.spawn_handle()) .boxed(), ); @@ -1437,7 +1436,7 @@ pub fn new_chain_ops( } else if config.chain_spec.is_kusama() { chain_ops!(config, None) } else if config.chain_spec.is_westend() { - return chain_ops!(config, None) + return chain_ops!(config, None); } else { chain_ops!(config, None) } @@ -1489,7 +1488,7 @@ pub fn revert_backend( let revertible = blocks.min(best_number - finalized); if revertible == 0 { - return Ok(()) + return Ok(()); } let number = best_number - revertible; diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 8eb6105f98e2571bab0257694ccac49ae47c2ca9..4ef9d88621fb4ee23044a56d7492aad9fa19d0a3 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -71,6 +71,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-staking/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index aa7295dddc5d2f6b904f86ed1c646ad871db6815..6e09bb9e4310549d904b1d9fb4b01141dd3f96dd 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -32,7 +32,7 @@ use polkadot_service::{ Error, FullClient, IsParachainNode, NewFull, OverseerGen, PrometheusConfig, }; use polkadot_test_runtime::{ - ParasCall, ParasSudoWrapperCall, Runtime, SignedExtra, SignedPayload, SudoCall, + ParasCall, ParasSudoWrapperCall, Runtime, SignedPayload, SudoCall, TxExtension, UncheckedExtrinsic, VERSION, }; @@ -414,7 +414,7 @@ pub fn construct_extrinsic( let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -423,10 +423,11 @@ pub fn construct_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); + ) + .into(); let raw_payload = SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ( (), VERSION.spec_version, @@ -443,7 +444,7 @@ pub fn construct_extrinsic( function.clone(), polkadot_test_runtime::Address::Id(caller.public().into()), polkadot_primitives::Signature::Sr25519(signature), - extra.clone(), + tx_ext.clone(), ) } diff --git a/polkadot/primitives/src/v8/mod.rs b/polkadot/primitives/src/v8/mod.rs index 
f908632a01cb34fb7cc2d29117723228b8f09af4..fdcb9fe8fb7e344f1574ff156cd2fe4462bf39ef 100644 --- a/polkadot/primitives/src/v8/mod.rs +++ b/polkadot/primitives/src/v8/mod.rs @@ -2093,7 +2093,9 @@ pub struct SchedulerParams { pub lookahead: u32, /// How many cores are managed by the coretime chain. pub num_cores: u32, - /// The max number of times a claim can time out in availability. + /// Deprecated and no longer used by the runtime. + /// Removal is tracked by . + #[deprecated] pub max_availability_timeouts: u32, /// The maximum queue size of the pay as you go module. pub on_demand_queue_max_size: u32, @@ -2104,13 +2106,14 @@ pub struct SchedulerParams { pub on_demand_fee_variability: Perbill, /// The minimum amount needed to claim a slot in the spot pricing queue. pub on_demand_base_fee: Balance, - /// The number of blocks a claim stays in the scheduler's claim queue before getting cleared. - /// This number should go reasonably higher than the number of blocks in the async backing - /// lookahead. + /// Deprecated and no longer used by the runtime. + /// Removal is tracked by . + #[deprecated] pub ttl: BlockNumber, } impl> Default for SchedulerParams { + #[allow(deprecated)] fn default() -> Self { Self { group_rotation_frequency: 1u32.into(), diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index ad082f179b2f008354bf073171912309ff7a0bb1..01b56b31cf20c3bcc931c988a418a379707ee849 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -131,6 +131,7 @@ runtime-benchmarks = [ "pallet-identity/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 96c98c45954d3fd29e6ab64fab1e24c829f2d39f..65942c127b1cc05260c67c44b2350cf8b8b4490c 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -665,12 +665,21 @@ mod tests { } ); - impl frame_system::offchain::SendTransactionTypes for Test + impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; + } + + impl frame_system::offchain::CreateInherent for Test + where + RuntimeCall: From, + { + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 32686d1a0bfa46720db1aa14088b7e14f68624bb..2b36c19efce7811b6824640ae3b677c5c8700098 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -33,7 +33,11 @@ use scale_info::TypeInfo; use serde::{self, Deserialize, Deserializer, Serialize, Serializer}; use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; use sp_runtime::{ - traits::{CheckedSub, DispatchInfoOf, SignedExtension, Zero}, + impl_tx_ext_default, + traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, CheckedSub, DispatchInfoOf, + Dispatchable, TransactionExtension, Zero, + }, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, 
}, @@ -51,6 +55,7 @@ pub trait WeightInfo { fn claim_attest() -> Weight; fn attest() -> Weight; fn move_claim() -> Weight; + fn prevalidate_attests() -> Weight; } pub struct TestWeightInfo; @@ -70,6 +75,9 @@ impl WeightInfo for TestWeightInfo { fn move_claim() -> Weight { Weight::zero() } + fn prevalidate_attests() -> Weight { + Weight::zero() + } } /// The kind of statement an account needs to make for a claim to be valid. @@ -400,7 +408,7 @@ pub mod pallet { /// Attest to a statement, needed to finalize the claims process. /// /// WARNING: Insecure unless your chain includes `PrevalidateAttests` as a - /// `SignedExtension`. + /// `TransactionExtension`. /// /// Unsigned Validation: /// A call to attest is deemed valid if the sender has a `Preclaim` registered @@ -611,58 +619,56 @@ impl PrevalidateAttests where ::RuntimeCall: IsSubType>, { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. pub fn new() -> Self { Self(core::marker::PhantomData) } } -impl SignedExtension for PrevalidateAttests +impl TransactionExtension for PrevalidateAttests where ::RuntimeCall: IsSubType>, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); const IDENTIFIER: &'static str = "PrevalidateAttests"; + type Implicit = (); + type Pre = (); + type Val = (); - fn additional_signed(&self) -> Result { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self, call: &T::RuntimeCall) -> Weight { + if let Some(Call::attest { .. }) = call.is_sub_type() { + T::WeightInfo::prevalidate_attests() + } else { + Weight::zero() + } } - // - // The weight of this logic is included in the `attest` dispatchable. - // fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { - if let Some(local_call) = call.is_sub_type() { - if let Call::attest { statement: attested_statement } = local_call { - let signer = Preclaims::::get(who) - .ok_or(InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()))?; - if let Some(s) = Signing::::get(signer) { - let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); - ensure!(&attested_statement[..] == s.to_text(), e); - } + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + if let Some(Call::attest { statement: attested_statement }) = call.is_sub_type() { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + let signer = Preclaims::::get(who) + .ok_or(InvalidTransaction::Custom(ValidityError::SignerHasNoClaim.into()))?; + if let Some(s) = Signing::::get(signer) { + let e = InvalidTransaction::Custom(ValidityError::InvalidStatement.into()); + ensure!(&attested_statement[..] 
== s.to_text(), e); } } - Ok(ValidTransaction::default()) + Ok((ValidTransaction::default(), (), origin)) } + + impl_tx_ext_default!(T::RuntimeCall; prepare); } #[cfg(any(test, feature = "runtime-benchmarks"))] @@ -713,8 +719,11 @@ mod tests { }; use pallet_balances; use sp_runtime::{ - traits::Identity, transaction_validity::TransactionLongevity, BuildStorage, - DispatchError::BadOrigin, TokenError, + traits::{DispatchTransaction, Identity}, + transaction_validity::TransactionLongevity, + BuildStorage, + DispatchError::BadOrigin, + TokenError, }; type Block = frame_system::mocking::MockBlock; @@ -1055,8 +1064,8 @@ mod tests { }); let di = c.get_dispatch_info(); assert_eq!(di.pays_fee, Pays::No); - let r = p.validate(&42, &c, &di, 20); - assert_eq!(r, TransactionValidity::Ok(ValidTransaction::default())); + let r = p.validate_only(Some(42).into(), &c, &di, 20); + assert_eq!(r.unwrap().0, ValidTransaction::default()); }); } @@ -1068,13 +1077,13 @@ mod tests { statement: StatementKind::Regular.to_text().to_vec(), }); let di = c.get_dispatch_info(); - let r = p.validate(&42, &c, &di, 20); + let r = p.validate_only(Some(42).into(), &c, &di, 20); assert!(r.is_err()); let c = RuntimeCall::Claims(ClaimsCall::attest { statement: StatementKind::Saft.to_text().to_vec(), }); let di = c.get_dispatch_info(); - let r = p.validate(&69, &c, &di, 20); + let r = p.validate_only(Some(69).into(), &c, &di, 20); assert!(r.is_err()); }); } @@ -1432,10 +1441,16 @@ mod benchmarking { use super::*; use crate::claims::Call; use frame_benchmarking::{account, benchmarks}; - use frame_support::traits::UnfilteredDispatchable; + use frame_support::{ + dispatch::{DispatchInfo, GetDispatchInfo}, + traits::UnfilteredDispatchable, + }; use frame_system::RawOrigin; use secp_utils::*; - use sp_runtime::{traits::ValidateUnsigned, DispatchResult}; + use sp_runtime::{ + traits::{DispatchTransaction, ValidateUnsigned}, + DispatchResult, + }; const SEED: u32 = 0; @@ -1471,6 +1486,12 @@ mod benchmarking { } benchmarks! { + where_clause { where ::RuntimeCall: IsSubType> + From>, + ::RuntimeCall: Dispatchable + GetDispatchInfo, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, + <::RuntimeCall as Dispatchable>::PostInfo: Default, + } + // Benchmark `claim` including `validate_unsigned` logic. claim { let c = MAX_CLAIMS; @@ -1574,24 +1595,9 @@ mod benchmarking { Preclaims::::insert(&account, eth_address); assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); - let call = super::Call::::attest { statement: StatementKind::Regular.to_text().to_vec() }; - // We have to copy the validate statement here because of trait issues... :( - let validate = |who: &T::AccountId, call: &super::Call| -> DispatchResult { - if let Call::attest{ statement: attested_statement } = call { - let signer = Preclaims::::get(who).ok_or("signer has no claim")?; - if let Some(s) = Signing::::get(signer) { - ensure!(&attested_statement[..] 
== s.to_text(), "invalid statement"); - } - } - Ok(()) - }; - let call_enc = call.encode(); - }: { - let call = as Decode>::decode(&mut &*call_enc) - .expect("call is encoded above, encoding must be correct"); - validate(&account, &call)?; - call.dispatch_bypass_filter(RawOrigin::Signed(account).into())?; - } + let stmt = StatementKind::Regular.to_text().to_vec(); + }: + _(RawOrigin::Signed(account), stmt) verify { assert_eq!(Claims::::get(eth_address), None); } @@ -1649,6 +1655,42 @@ mod benchmarking { } } + prevalidate_attests { + let c = MAX_CLAIMS; + + for i in 0 .. c / 2 { + create_claim::(c)?; + create_claim_attest::(u32::MAX - c)?; + } + + let ext = PrevalidateAttests::::new(); + let call = super::Call::attest { + statement: StatementKind::Regular.to_text().to_vec(), + }; + let call: ::RuntimeCall = call.into(); + let info = call.get_dispatch_info(); + let attest_c = u32::MAX - c; + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let eth_address = eth(&secret_key); + let account: T::AccountId = account("user", c, SEED); + let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); + let statement = StatementKind::Regular; + let signature = sig::(&secret_key, &account.encode(), statement.to_text()); + super::Pallet::::mint_claim(RawOrigin::Root.into(), eth_address, VALUE.into(), vesting, Some(statement))?; + Preclaims::::insert(&account, eth_address); + assert_eq!(Claims::::get(eth_address), Some(VALUE.into())); + }: { + assert!(ext.test_run( + RawOrigin::Signed(account).into(), + &call, + &info, + 0, + |_| { + Ok(Default::default()) + } + ).unwrap().is_ok()); + } + impl_benchmark_test_suite!( Pallet, crate::claims::tests::new_test_ext(), diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 7a689a517eaa2ab97cd827890a0ffeb60bb59bb6..bfeed04a919f0d9ee7d821687b5235c89a7ceac4 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -98,12 +98,21 @@ frame_support::construct_runtime!( } ); -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } use crate::{auctions::Error as AuctionsError, crowdloan::Error as CrowdloanError}; diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 07f02e92656120267272967d1d86ee3d9181da92..2ead621dedf0cb7b4b4b08da579035a474e22d2e 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -752,12 +752,21 @@ mod tests { } ); - impl frame_system::offchain::SendTransactionTypes for Test + impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; + } + + impl frame_system::offchain::CreateInherent for Test + where + RuntimeCall: From, + { + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } const NORMAL_RATIO: Perbill = Perbill::from_percent(75); diff --git 
a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs index 7ee76600b42c07bdbb011ddad1d238ec95b89bcf..33a36a1bb2ea6044a721b0ced6de57c12abc7260 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/mod.rs @@ -318,9 +318,12 @@ impl AssignmentProvider> for Pallet { Assignment::Bulk(para_id) } - fn session_core_count() -> u32 { - let config = configuration::ActiveConfig::::get(); - config.scheduler_params.num_cores + fn assignment_duplicated(assignment: &Assignment) { + match assignment { + Assignment::Pool { para_id, core_index } => + on_demand::Pallet::::assignment_duplicated(*para_id, *core_index), + Assignment::Bulk(_) => {}, + } } } diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index e7994b8ef820773b4f310b6285303263170a2638..25007f0eed6aac023836456b95fef41f6d205044 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -26,7 +26,6 @@ use crate::{ paras::{ParaGenesisArgs, ParaKind}, scheduler::common::Assignment, }; -use alloc::collections::btree_map::BTreeMap; use frame_support::{assert_noop, assert_ok, pallet_prelude::*, traits::Currency}; use pallet_broker::TaskId; use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; @@ -78,7 +77,7 @@ fn run_to_block( OnDemand::on_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); + Scheduler::advance_claim_queue(&Default::default()); } } diff --git a/polkadot/runtime/parachains/src/assigner_parachains.rs b/polkadot/runtime/parachains/src/assigner_parachains.rs index 3c735b999cf2313b01150cb64832d0a394ce542e..53edae5c32fc9e8e00d50971f0916818849668dc 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains.rs @@ -63,7 +63,5 @@ impl AssignmentProvider> for Pallet { Assignment::Bulk(para_id) } - fn session_core_count() -> u32 { - paras::Parachains::::decode_len().unwrap_or(0) as u32 - } + fn assignment_duplicated(_: &Assignment) {} } diff --git a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs index 817e43a7138ddbe121e84ea6c02497e07b323469..6e8e185bb48dfc0d70cef6b182b5224a1603d6b3 100644 --- a/polkadot/runtime/parachains/src/assigner_parachains/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_parachains/tests.rs @@ -23,7 +23,6 @@ use crate::{ }, paras::{ParaGenesisArgs, ParaKind}, }; -use alloc::collections::btree_map::BTreeMap; use frame_support::{assert_ok, pallet_prelude::*}; use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; @@ -71,7 +70,7 @@ fn run_to_block( Scheduler::initializer_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. 
- Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); + Scheduler::advance_claim_queue(&Default::default()); } } diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 1654590d109e97aecc3de062ebb57489274c5026..fa9497f8ccd5cca79781abd9137991866cbbdb39 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -18,7 +18,10 @@ use crate::{ configuration, inclusion, initializer, paras, paras::ParaKind, paras_inherent, - scheduler::{self, common::AssignmentProvider, CoreOccupied, ParasEntry}, + scheduler::{ + self, + common::{Assignment, AssignmentProvider}, + }, session_info, shared, }; use alloc::{ @@ -138,8 +141,6 @@ pub(crate) struct BenchBuilder { /// Make every candidate include a code upgrade by setting this to `Some` where the interior /// value is the byte length of the new code. code_upgrade: Option, - /// Specifies whether the claimqueue should be filled. - fill_claimqueue: bool, /// Cores which should not be available when being populated with pending candidates. unavailable_cores: Vec, /// Use v2 candidate descriptor. @@ -178,7 +179,6 @@ impl BenchBuilder { backed_in_inherent_paras: Default::default(), elastic_paras: Default::default(), code_upgrade: None, - fill_claimqueue: true, unavailable_cores: vec![], candidate_descriptor_v2: false, candidate_modifier: None, @@ -322,13 +322,6 @@ impl BenchBuilder { self.max_validators() / self.max_validators_per_core() } - /// Set whether the claim queue should be filled. - #[cfg(not(feature = "runtime-benchmarks"))] - pub(crate) fn set_fill_claimqueue(mut self, f: bool) -> Self { - self.fill_claimqueue = f; - self - } - /// Get the minimum number of validity votes in order for a backed candidate to be included. #[cfg(feature = "runtime-benchmarks")] pub(crate) fn fallback_min_backing_votes() -> u32 { @@ -340,10 +333,13 @@ impl BenchBuilder { HeadData(vec![0xFF; max_head_size as usize]) } - fn candidate_descriptor_mock(candidate_descriptor_v2: bool) -> CandidateDescriptorV2 { + fn candidate_descriptor_mock( + para_id: ParaId, + candidate_descriptor_v2: bool, + ) -> CandidateDescriptorV2 { if candidate_descriptor_v2 { CandidateDescriptorV2::new( - 0.into(), + para_id, Default::default(), CoreIndex(200), 2, @@ -356,7 +352,7 @@ impl BenchBuilder { } else { // Convert v1 to v2. CandidateDescriptor:: { - para_id: 0.into(), + para_id, relay_parent: Default::default(), collator: junk_collator(), persisted_validation_data_hash: Default::default(), @@ -373,6 +369,7 @@ impl BenchBuilder { /// Create a mock of `CandidatePendingAvailability`. 
fn candidate_availability_mock( + para_id: ParaId, group_idx: GroupIndex, core_idx: CoreIndex, candidate_hash: CandidateHash, @@ -381,15 +378,16 @@ impl BenchBuilder { candidate_descriptor_v2: bool, ) -> inclusion::CandidatePendingAvailability> { inclusion::CandidatePendingAvailability::>::new( - core_idx, // core - candidate_hash, // hash - Self::candidate_descriptor_mock(candidate_descriptor_v2), // candidate descriptor - commitments, // commitments - availability_votes, // availability votes - Default::default(), // backers - Zero::zero(), // relay parent - One::one(), /* relay chain block this - * was backed in */ + core_idx, // core + candidate_hash, // hash + Self::candidate_descriptor_mock(para_id, candidate_descriptor_v2), /* candidate descriptor */ + commitments, // commitments + availability_votes, /* availability + * votes */ + Default::default(), // backers + Zero::zero(), // relay parent + One::one(), /* relay chain block this + * was backed in */ group_idx, // backing group ) } @@ -416,6 +414,7 @@ impl BenchBuilder { hrmp_watermark: 0u32.into(), }; let candidate_availability = Self::candidate_availability_mock( + para_id, group_idx, core_idx, candidate_hash, @@ -886,14 +885,11 @@ impl BenchBuilder { extra_cores; assert!(used_cores <= max_cores); - let fill_claimqueue = self.fill_claimqueue; // NOTE: there is an n+2 session delay for these actions to take effect. // We are currently in Session 0, so these changes will take effect in Session 2. Self::setup_para_ids(used_cores - extra_cores); - configuration::ActiveConfig::::mutate(|c| { - c.scheduler_params.num_cores = used_cores as u32; - }); + configuration::Pallet::::set_coretime_cores_unchecked(used_cores as u32).unwrap(); let validator_ids = generate_validator_pairs::(self.max_validators()); let target_session = SessionIndex::from(self.target_session); @@ -902,7 +898,7 @@ impl BenchBuilder { let bitfields = builder.create_availability_bitfields( &builder.backed_and_concluding_paras, &builder.elastic_paras, - used_cores, + scheduler::Pallet::::num_availability_cores(), ); let mut backed_in_inherent = BTreeMap::new(); @@ -930,66 +926,57 @@ impl BenchBuilder { assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores - extra_cores); - // Mark all the used cores as occupied. We expect that there are - // `backed_and_concluding_paras` that are pending availability and that there are - // `used_cores - backed_and_concluding_paras ` which are about to be disputed. - let now = frame_system::Pallet::::block_number() + One::one(); - + // Sanity check that the occupied cores reported by the inclusion module are what we expect + // to be. 
let mut core_idx = 0u32; let elastic_paras = &builder.elastic_paras; - // Assign potentially multiple cores to same parachains, - let cores = all_cores + + let mut occupied_cores = inclusion::Pallet::::get_occupied_cores() + .map(|(core, candidate)| (core, candidate.candidate_descriptor().para_id())) + .collect::>(); + occupied_cores.sort_by(|(core_a, _), (core_b, _)| core_a.0.cmp(&core_b.0)); + + let mut expected_cores = all_cores .iter() .flat_map(|(para_id, _)| { (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) .map(|_para_local_core_idx| { - let ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; - // Load an assignment into provider so that one is present to pop - let assignment = - ::AssignmentProvider::get_mock_assignment( - CoreIndex(core_idx), - ParaId::from(*para_id), - ); + let old_core_idx = core_idx; core_idx += 1; - CoreOccupied::Paras(ParasEntry::new(assignment, now + ttl)) + (CoreIndex(old_core_idx), ParaId::from(*para_id)) }) - .collect::>>() + .collect::>() }) - .collect::>>(); + .collect::>(); - scheduler::AvailabilityCores::::set(cores); + expected_cores.sort_by(|(core_a, _), (core_b, _)| core_a.0.cmp(&core_b.0)); - core_idx = 0u32; + assert_eq!(expected_cores, occupied_cores); // We need entries in the claim queue for those: all_cores.append(&mut builder.backed_in_inherent_paras.clone()); - if fill_claimqueue { - let cores = all_cores - .keys() - .flat_map(|para_id| { - (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) - .map(|_para_local_core_idx| { - let ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; - // Load an assignment into provider so that one is present to pop - let assignment = - ::AssignmentProvider::get_mock_assignment( - CoreIndex(core_idx), - ParaId::from(*para_id), - ); - - core_idx += 1; - ( - CoreIndex(core_idx - 1), - [ParasEntry::new(assignment, now + ttl)].into(), - ) - }) - .collect::>)>>() - }) - .collect::>>>(); + let mut core_idx = 0u32; + let cores = all_cores + .keys() + .flat_map(|para_id| { + (0..elastic_paras.get(¶_id).cloned().unwrap_or(1)) + .map(|_para_local_core_idx| { + // Load an assignment into provider so that one is present to pop + let assignment = + ::AssignmentProvider::get_mock_assignment( + CoreIndex(core_idx), + ParaId::from(*para_id), + ); - scheduler::ClaimQueue::::set(cores); - } + core_idx += 1; + (CoreIndex(core_idx - 1), [assignment].into()) + }) + .collect::)>>() + }) + .collect::>>(); + + scheduler::ClaimQueue::::set(cores); Bench:: { data: ParachainsInherentData { diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 36888247580e75c51238afc127f6ab81c233bdd1..e5cf7c4d276e8ffc27883f246a18273c41fef4a8 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -337,8 +337,6 @@ pub enum InconsistentError { ZeroMinimumBackingVotes, /// `executor_params` are inconsistent. InconsistentExecutorParams { inner: ExecutorParamError }, - /// TTL should be bigger than lookahead - LookaheadExceedsTTL, /// Lookahead is zero, while it must be at least 1 for parachains to work. LookaheadZero, /// Passed in queue size for on-demand was too large. 
@@ -434,10 +432,6 @@ where return Err(InconsistentExecutorParams { inner }) } - if self.scheduler_params.ttl < self.scheduler_params.lookahead.into() { - return Err(LookaheadExceedsTTL) - } - if self.scheduler_params.lookahead == 0 { return Err(LookaheadZero) } @@ -686,18 +680,7 @@ pub mod pallet { Self::set_coretime_cores_unchecked(new) } - /// Set the max number of times a claim may timeout on a core before it is abandoned - #[pallet::call_index(7)] - #[pallet::weight(( - T::WeightInfo::set_config_with_u32(), - DispatchClass::Operational, - ))] - pub fn set_max_availability_timeouts(origin: OriginFor, new: u32) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.scheduler_params.max_availability_timeouts = new; - }) - } + // Call index 7 used to be `set_max_availability_timeouts`, which was removed. /// Set the parachain validator-group rotation frequency #[pallet::call_index(8)] @@ -1193,18 +1176,8 @@ pub mod pallet { config.scheduler_params.on_demand_target_queue_utilization = new; }) } - /// Set the on demand (parathreads) ttl in the claimqueue. - #[pallet::call_index(51)] - #[pallet::weight(( - T::WeightInfo::set_config_with_block_number(), - DispatchClass::Operational - ))] - pub fn set_on_demand_ttl(origin: OriginFor, new: BlockNumberFor) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.scheduler_params.ttl = new; - }) - } + + // Call index 51 used to be `set_on_demand_ttl`, which was removed. /// Set the minimum backing votes threshold. #[pallet::call_index(52)] diff --git a/polkadot/runtime/parachains/src/configuration/migration/v12.rs b/polkadot/runtime/parachains/src/configuration/migration/v12.rs index 111b1a1999661aab11ecbe9746a3930061c88e1b..d1e0cf10a0ff75b8011b3c853f1d591b4294f30c 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v12.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v12.rs @@ -143,6 +143,7 @@ fn migrate_to_v12() -> Weight { minimum_backing_votes : pre.minimum_backing_votes, node_features : pre.node_features, approval_voting_params : pre.approval_voting_params, + #[allow(deprecated)] scheduler_params: SchedulerParams { group_rotation_frequency : pre.group_rotation_frequency, paras_availability_period : pre.paras_availability_period, @@ -231,7 +232,10 @@ mod tests { assert_eq!(v12.scheduler_params.paras_availability_period, 4); assert_eq!(v12.scheduler_params.lookahead, 1); assert_eq!(v12.scheduler_params.num_cores, 1); - assert_eq!(v12.scheduler_params.max_availability_timeouts, 0); + #[allow(deprecated)] + { + assert_eq!(v12.scheduler_params.max_availability_timeouts, 0); + } assert_eq!(v12.scheduler_params.on_demand_queue_max_size, 10_000); assert_eq!( v12.scheduler_params.on_demand_target_queue_utilization, @@ -239,7 +243,10 @@ mod tests { ); assert_eq!(v12.scheduler_params.on_demand_fee_variability, Perbill::from_percent(3)); assert_eq!(v12.scheduler_params.on_demand_base_fee, 10_000_000); - assert_eq!(v12.scheduler_params.ttl, 5); + #[allow(deprecated)] + { + assert_eq!(v12.scheduler_params.ttl, 5); + } } #[test] @@ -282,6 +289,7 @@ mod tests { for (_, v12) in configs_to_check { #[rustfmt::skip] + #[allow(deprecated)] { assert_eq!(v11.max_code_size , v12.max_code_size); assert_eq!(v11.max_head_data_size , v12.max_head_data_size); diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index 
0d20399e471ba7b66f93e0f46dfe7f1a3677ef84..a8689a04fe0416ee561c7b16924cab2d7c19c051 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -316,13 +316,14 @@ fn setting_pending_config_members() { approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 }, minimum_backing_votes: 5, node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], + #[allow(deprecated)] scheduler_params: SchedulerParams { group_rotation_frequency: 20, paras_availability_period: 10, max_validators_per_core: None, lookahead: 3, num_cores: 2, - max_availability_timeouts: 5, + max_availability_timeouts: 0, on_demand_queue_max_size: 10_000u32, on_demand_base_fee: 10_000_000u128, on_demand_fee_variability: Perbill::from_percent(3), @@ -355,11 +356,6 @@ fn setting_pending_config_members() { new_config.scheduler_params.num_cores, ) .unwrap(); - Configuration::set_max_availability_timeouts( - RuntimeOrigin::root(), - new_config.scheduler_params.max_availability_timeouts, - ) - .unwrap(); Configuration::set_group_rotation_frequency( RuntimeOrigin::root(), new_config.scheduler_params.group_rotation_frequency, diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index d4be135aad65767fdccf8aea785019940e81c2bb..52189be3d247bc57d32260cfdc1133ca28337b88 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -19,8 +19,6 @@ pub use v_coretime::{GetLegacyLease, MigrateToCoretime}; mod v_coretime { - #[cfg(feature = "try-runtime")] - use crate::scheduler::common::AssignmentProvider; use crate::{ assigner_coretime, configuration, coretime::{mk_coretime_call, Config, PartsOf57600, WeightInfo}, @@ -142,7 +140,8 @@ mod v_coretime { let dmp_queue_size = crate::dmp::Pallet::::dmq_contents(T::BrokerId::get().into()).len() as u32; - let new_core_count = assigner_coretime::Pallet::::session_core_count(); + let config = configuration::ActiveConfig::::get(); + let new_core_count = config.scheduler_params.num_cores; ensure!(new_core_count == prev_core_count, "Total number of cores need to not change."); ensure!( dmp_queue_size > prev_dmp_queue_size, diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index f86573dadf562fee7b4cb63190399cbe390a4865..d5a3f31e5943f26f784f9b4e58026f8213bdf12f 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -1309,3 +1309,11 @@ fn check_signature( res } + +#[cfg(all(not(feature = "runtime-benchmarks"), test))] +// Test helper for clearing the on-chain dispute data. 
+pub(crate) fn clear_dispute_storage() { + let _ = Disputes::::clear(u32::MAX, None); + let _ = BackersOnDisputes::::clear(u32::MAX, None); + let _ = Included::::clear(u32::MAX, None); +} diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index 4b76fb47e1f8d5fff51e13274931514930ef5de7..2e09ea667f74c7ad116e015d1a7d63afaa908ad6 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -653,7 +653,7 @@ impl Default for SlashingReportHandler { impl HandleReports for SlashingReportHandler where - T: Config + frame_system::offchain::SendTransactionTypes>, + T: Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, T::KeyOwnerIdentification, @@ -685,7 +685,7 @@ where dispute_proof: DisputeProof, key_owner_proof: ::KeyOwnerProof, ) -> Result<(), sp_runtime::TryRuntimeError> { - use frame_system::offchain::SubmitTransaction; + use frame_system::offchain::{CreateInherent, SubmitTransaction}; let session_index = dispute_proof.time_slot.session_index; let validator_index = dispute_proof.validator_index.0; @@ -696,7 +696,8 @@ where key_owner_proof, }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + let xt = >>::create_inherent(call.into()); + match SubmitTransaction::>::submit_transaction(xt) { Ok(()) => { log::info!( target: LOG_TARGET, diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 36f874b8db1e9bb015657cc7b2628e64ee75443b..ea3a5d3cdda9f4f5881fa9c2470cb20e3f256332 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -50,7 +50,7 @@ use polkadot_primitives::{ CandidateReceiptV2 as CandidateReceipt, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, }, - well_known_keys, CandidateCommitments, CandidateHash, CoreIndex, GroupIndex, Hash, HeadData, + well_known_keys, CandidateCommitments, CandidateHash, CoreIndex, GroupIndex, HeadData, Id as ParaId, SignedAvailabilityBitfields, SigningContext, UpwardMessage, ValidatorId, ValidatorIndex, ValidityAttestation, }; @@ -161,16 +161,6 @@ impl CandidatePendingAvailability { self.relay_parent_number.clone() } - /// Get the candidate backing group. - pub(crate) fn backing_group(&self) -> GroupIndex { - self.backing_group - } - - /// Get the candidate's backers. - pub(crate) fn backers(&self) -> &BitVec { - &self.backers - } - #[cfg(any(feature = "runtime-benchmarks", test))] pub(crate) fn new( core: CoreIndex, @@ -207,24 +197,6 @@ pub trait RewardValidators { fn reward_bitfields(validators: impl IntoIterator); } -/// Helper return type for `process_candidates`. -#[derive(Encode, Decode, PartialEq, TypeInfo)] -#[cfg_attr(test, derive(Debug))] -pub(crate) struct ProcessedCandidates { - pub(crate) core_indices: Vec<(CoreIndex, ParaId)>, - pub(crate) candidate_receipt_with_backing_validator_indices: - Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>, -} - -impl Default for ProcessedCandidates { - fn default() -> Self { - Self { - core_indices: Vec::new(), - candidate_receipt_with_backing_validator_indices: Vec::new(), - } - } -} - /// Reads the footprint of queues for a specific origin type. 
pub trait QueueFootprinter { type Origin; @@ -514,6 +486,14 @@ impl Pallet { T::MessageQueue::sweep_queue(AggregateMessageOrigin::Ump(UmpQueueId::Para(para))); } + pub(crate) fn get_occupied_cores( + ) -> impl Iterator>)> + { + PendingAvailability::::iter_values().flat_map(|pending_candidates| { + pending_candidates.into_iter().map(|c| (c.core, c.clone())) + }) + } + /// Extract the freed cores based on cores that became available. /// /// Bitfields are expected to have been sanitized already. E.g. via `sanitize_bitfields`! @@ -640,12 +620,15 @@ impl Pallet { candidates: &BTreeMap, CoreIndex)>>, group_validators: GV, core_index_enabled: bool, - ) -> Result, DispatchError> + ) -> Result< + Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>, + DispatchError, + > where GV: Fn(GroupIndex) -> Option>, { if candidates.is_empty() { - return Ok(ProcessedCandidates::default()) + return Ok(Default::default()) } let now = frame_system::Pallet::::block_number(); @@ -654,7 +637,6 @@ impl Pallet { // Collect candidate receipts with backers. let mut candidate_receipt_with_backing_validator_indices = Vec::with_capacity(candidates.len()); - let mut core_indices = Vec::with_capacity(candidates.len()); for (para_id, para_candidates) in candidates { let mut latest_head_data = match Self::para_latest_head_data(para_id) { @@ -708,7 +690,6 @@ impl Pallet { latest_head_data = candidate.candidate().commitments.head_data.clone(); candidate_receipt_with_backing_validator_indices .push((candidate.receipt(), backer_idx_and_attestation)); - core_indices.push((*core, *para_id)); // Update storage now PendingAvailability::::mutate(¶_id, |pending_availability| { @@ -743,10 +724,7 @@ impl Pallet { } } - Ok(ProcessedCandidates:: { - core_indices, - candidate_receipt_with_backing_validator_indices, - }) + Ok(candidate_receipt_with_backing_validator_indices) } // Get the latest backed output head data of this para (including pending availability). @@ -1173,7 +1151,9 @@ impl Pallet { /// Returns the first `CommittedCandidateReceipt` pending availability for the para provided, if /// any. - pub(crate) fn candidate_pending_availability( + /// A para_id could have more than one candidates pending availability, if it's using elastic + /// scaling. These candidates form a chain. This function returns the first in the chain. + pub(crate) fn first_candidate_pending_availability( para: ParaId, ) -> Option> { PendingAvailability::::get(¶).and_then(|p| { @@ -1201,24 +1181,6 @@ impl Pallet { }) .unwrap_or_default() } - - /// Returns the metadata around the first candidate pending availability for the - /// para provided, if any. - pub(crate) fn pending_availability( - para: ParaId, - ) -> Option>> { - PendingAvailability::::get(¶).and_then(|p| p.get(0).cloned()) - } - - /// Returns the metadata around the candidate pending availability occupying the supplied core, - /// if any. 
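Aside (illustrative sketch, not part of the patch): with elastic scaling a para can have several candidates pending availability at the same time, kept as an ordered chain per para. The model below uses plain std collections and made-up types (PendingCandidate, its fields and the hash values are all assumptions) to show the shape of what get_occupied_cores and first_candidate_pending_availability read out of such a per-para queue; it is not the pallet's real storage API.

    use std::collections::{BTreeMap, VecDeque};

    type ParaId = u32;
    type CoreIndex = u32;

    // Simplified stand-in for a candidate pending availability.
    #[derive(Clone, Debug)]
    struct PendingCandidate {
        core: CoreIndex,
        hash: u64,
    }

    // Per-para FIFO chain of candidates pending availability.
    type PendingAvailability = BTreeMap<ParaId, VecDeque<PendingCandidate>>;

    // Mirrors get_occupied_cores: every pending candidate occupies exactly one core.
    fn occupied_cores(
        pending: &PendingAvailability,
    ) -> impl Iterator<Item = (CoreIndex, &PendingCandidate)> + '_ {
        pending.values().flat_map(|chain| chain.iter().map(|c| (c.core, c)))
    }

    // Mirrors first_candidate_pending_availability: the head of the chain is the
    // candidate that would be enacted first.
    fn first_pending(pending: &PendingAvailability, para: ParaId) -> Option<&PendingCandidate> {
        pending.get(&para).and_then(|chain| chain.front())
    }

    fn main() {
        let mut pending = PendingAvailability::new();
        pending.insert(
            7,
            VecDeque::from([
                PendingCandidate { core: 0, hash: 0xa1 },
                // A second core used by the same para via elastic scaling.
                PendingCandidate { core: 1, hash: 0xa2 },
            ]),
        );
        assert_eq!(occupied_cores(&pending).count(), 2);
        assert_eq!(first_pending(&pending, 7).unwrap().hash, 0xa1);
    }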
- pub(crate) fn pending_availability_with_core( - para: ParaId, - core: CoreIndex, - ) -> Option>> { - PendingAvailability::::get(¶) - .and_then(|p| p.iter().find(|c| c.core == core).cloned()) - } } const fn availability_threshold(n_validators: usize) -> usize { diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 57525446059fb89cae44a8b0869e34bc5ddebb84..8513d2dad91d1e527d76d850174d51f3d4df302a 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -1267,7 +1267,7 @@ fn candidate_checks() { &group_validators, false ), - Ok(ProcessedCandidates::default()) + Ok(Default::default()) ); // Check candidate ordering @@ -1563,20 +1563,16 @@ fn candidate_checks() { None, ); - let ProcessedCandidates { - core_indices: occupied_cores, - candidate_receipt_with_backing_validator_indices, - } = ParaInclusion::process_candidates( - &allowed_relay_parents, - &vec![(thread_a_assignment.0, vec![(backed.clone(), thread_a_assignment.1)])] - .into_iter() - .collect(), - &group_validators, - false, - ) - .expect("candidate is accepted with bad collator signature"); - - assert_eq!(occupied_cores, vec![(CoreIndex::from(2), thread_a)]); + let candidate_receipt_with_backing_validator_indices = + ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(thread_a_assignment.0, vec![(backed.clone(), thread_a_assignment.1)])] + .into_iter() + .collect(), + &group_validators, + false, + ) + .expect("candidate is accepted with bad collator signature"); let mut expected = std::collections::HashMap::< CandidateHash, @@ -1924,10 +1920,7 @@ fn backing_works() { } }; - let ProcessedCandidates { - core_indices: occupied_cores, - candidate_receipt_with_backing_validator_indices, - } = ParaInclusion::process_candidates( + let candidate_receipt_with_backing_validator_indices = ParaInclusion::process_candidates( &allowed_relay_parents, &backed_candidates, &group_validators, @@ -1935,15 +1928,6 @@ fn backing_works() { ) .expect("candidates scheduled, in order, and backed"); - assert_eq!( - occupied_cores, - vec![ - (CoreIndex::from(0), chain_a), - (CoreIndex::from(1), chain_b), - (CoreIndex::from(2), thread_a) - ] - ); - // Transform the votes into the setup we expect let expected = { let mut intermediate = std::collections::HashMap::< @@ -2224,10 +2208,7 @@ fn backing_works_with_elastic_scaling_mvp() { } }; - let ProcessedCandidates { - core_indices: occupied_cores, - candidate_receipt_with_backing_validator_indices, - } = ParaInclusion::process_candidates( + let candidate_receipt_with_backing_validator_indices = ParaInclusion::process_candidates( &allowed_relay_parents, &backed_candidates, &group_validators, @@ -2235,16 +2216,6 @@ fn backing_works_with_elastic_scaling_mvp() { ) .expect("candidates scheduled, in order, and backed"); - // Both b candidates will be backed. - assert_eq!( - occupied_cores, - vec![ - (CoreIndex::from(0), chain_a), - (CoreIndex::from(1), chain_b), - (CoreIndex::from(2), chain_b), - ] - ); - // Transform the votes into the setup we expect let mut expected = std::collections::HashMap::< CandidateHash, @@ -2420,18 +2391,15 @@ fn can_include_candidate_with_ok_code_upgrade() { None, ); - let ProcessedCandidates { core_indices: occupied_cores, .. 
} = - ParaInclusion::process_candidates( - &allowed_relay_parents, - &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] - .into_iter() - .collect::>(), - group_validators, - false, - ) - .expect("candidates scheduled, in order, and backed"); - - assert_eq!(occupied_cores, vec![(CoreIndex::from(0), chain_a)]); + let _ = ParaInclusion::process_candidates( + &allowed_relay_parents, + &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] + .into_iter() + .collect::>(), + group_validators, + false, + ) + .expect("candidates scheduled, in order, and backed"); let backers = { let num_backers = effective_minimum_backing_votes( @@ -2846,7 +2814,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { None, ); - let ProcessedCandidates { core_indices: occupied_cores, .. } = + let _ = ParaInclusion::process_candidates( &allowed_relay_parents, &vec![(chain_a_assignment.0, vec![(backed_a, chain_a_assignment.1)])] @@ -2857,8 +2825,6 @@ fn para_upgrade_delay_scheduled_from_inclusion() { ) .expect("candidates scheduled, in order, and backed"); - assert_eq!(occupied_cores, vec![(CoreIndex::from(0), chain_a)]); - // Run a couple of blocks before the inclusion. run_to_block(7, |_| None); diff --git a/polkadot/runtime/parachains/src/initializer.rs b/polkadot/runtime/parachains/src/initializer.rs index 340f727097b58245d9fa9a2553f9e178544bcea1..6ee245fb5230c109ecf64c2615ea1df052eb79f9 100644 --- a/polkadot/runtime/parachains/src/initializer.rs +++ b/polkadot/runtime/parachains/src/initializer.rs @@ -87,10 +87,10 @@ impl> Default for SessionChangeNotification, - queued: Vec, - session_index: SessionIndex, +pub(crate) struct BufferedSessionChange { + pub validators: Vec, + pub queued: Vec, + pub session_index: SessionIndex, } pub trait WeightInfo { @@ -149,7 +149,7 @@ pub mod pallet { #[pallet::storage] pub(super) type HasInitialized = StorageValue<_, ()>; - /// Buffered session changes along with the block number at which they should be applied. + /// Buffered session changes. /// /// Typically this will be empty or one element long. Apart from that this item never hits /// the storage. @@ -157,7 +157,7 @@ pub mod pallet { /// However this is a `Vec` regardless to handle various edge cases that may occur at runtime /// upgrade boundaries or if governance intervenes. #[pallet::storage] - pub(super) type BufferedSessionChanges = + pub(crate) type BufferedSessionChanges = StorageValue<_, Vec, ValueQuery>; #[pallet::hooks] @@ -254,9 +254,6 @@ impl Pallet { buf }; - // inform about upcoming new session - scheduler::Pallet::::pre_new_session(); - let configuration::SessionChangeOutcome { prev_config, new_config } = configuration::Pallet::::initializer_on_new_session(&session_index); let new_config = new_config.unwrap_or_else(|| prev_config.clone()); @@ -328,6 +325,11 @@ impl Pallet { { Self::on_new_session(changed, session_index, validators, queued) } + + /// Return whether at the end of this block a new session will be initialized. 
+ pub(crate) fn upcoming_session_change() -> bool { + !BufferedSessionChanges::::get().is_empty() + } } impl sp_runtime::BoundToRuntimeAppPublic for Pallet { diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 75c9e3a5c9b947832a4ca8c15b8813e21d641b2b..c23918708b21f009d9f508c119106818f38ce4a8 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -90,12 +90,21 @@ frame_support::construct_runtime!( } ); -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } parameter_types! { @@ -511,9 +520,6 @@ pub mod mock_assigner { #[pallet::storage] pub(super) type MockAssignmentQueue = StorageValue<_, VecDeque, ValueQuery>; - - #[pallet::storage] - pub(super) type MockCoreCount = StorageValue<_, u32, OptionQuery>; } impl Pallet { @@ -522,12 +528,6 @@ pub mod mock_assigner { pub fn add_test_assignment(assignment: Assignment) { MockAssignmentQueue::::mutate(|queue| queue.push_back(assignment)); } - - // Allows for customized core count in scheduler tests, rather than a core count - // derived from on-demand config + parachain count. - pub fn set_core_count(count: u32) { - MockCoreCount::::set(Some(count)); - } } impl AssignmentProvider for Pallet { @@ -545,20 +545,18 @@ pub mod mock_assigner { } // We don't care about core affinity in the test assigner - fn report_processed(_assignment: Assignment) {} + fn report_processed(_: Assignment) {} - // The results of this are tested in on_demand tests. No need to represent it - // in the mock assigner. - fn push_back_assignment(_assignment: Assignment) {} + fn push_back_assignment(assignment: Assignment) { + Self::add_test_assignment(assignment); + } #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(_: CoreIndex, para_id: ParaId) -> Assignment { Assignment::Bulk(para_id) } - fn session_core_count() -> u32 { - MockCoreCount::::get().unwrap_or(5) - } + fn assignment_duplicated(_: &Assignment) {} } } diff --git a/polkadot/runtime/parachains/src/on_demand/mod.rs b/polkadot/runtime/parachains/src/on_demand/mod.rs index dc046c194fd0b114ba0407878dea05d497be1f1b..66400eb00fd9d7a64ed2ffd660ac0e0f7c3e9027 100644 --- a/polkadot/runtime/parachains/src/on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/on_demand/mod.rs @@ -317,6 +317,11 @@ where Some(assignment) } + /// Report that an assignment was duplicated by the scheduler. + pub fn assignment_duplicated(para_id: ParaId, core_index: CoreIndex) { + Pallet::::increase_affinity(para_id, core_index); + } + /// Report that the `para_id` & `core_index` combination was processed. /// /// This should be called once it is clear that the assignment won't get pushed back anymore. 
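Aside (illustrative sketch, not part of the patch): the changes above swap the old SendTransactionTypes-based unsigned submission for a two-step flow: first build the extrinsic with CreateInherent::create_inherent, then hand it to the pool via SubmitTransaction::submit_transaction. The toy program below only mimics the shape of that flow with local trait definitions; the trait and type names are simplified assumptions, not the real frame_system offchain API.

    // Toy stand-ins for the offchain submission traits (assumed, simplified shapes).
    trait CreateTransactionBase<LocalCall> {
        type Extrinsic;
        type RuntimeCall: From<LocalCall>;
    }

    trait CreateInherent<LocalCall>: CreateTransactionBase<LocalCall> {
        // Wrap a bare (unsigned) call into an extrinsic.
        fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic;
    }

    // A minimal "runtime" with a single call variant and a bare extrinsic wrapper.
    struct Runtime;

    #[derive(Debug)]
    enum RuntimeCall {
        IncludePvfCheckStatement { stmt: u8 },
    }

    #[derive(Debug)]
    struct UncheckedExtrinsic(RuntimeCall);

    // The pallet-local call that gets lifted into the runtime-wide call enum.
    struct LocalCall {
        stmt: u8,
    }

    impl From<LocalCall> for RuntimeCall {
        fn from(c: LocalCall) -> Self {
            RuntimeCall::IncludePvfCheckStatement { stmt: c.stmt }
        }
    }

    impl CreateTransactionBase<LocalCall> for Runtime {
        type Extrinsic = UncheckedExtrinsic;
        type RuntimeCall = RuntimeCall;
    }

    impl CreateInherent<LocalCall> for Runtime {
        fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic {
            UncheckedExtrinsic(call)
        }
    }

    fn main() {
        // Old flow (roughly): submit_unsigned_transaction(call.into()).
        // New flow: build the extrinsic explicitly, then submit it to the pool.
        let xt = <Runtime as CreateInherent<LocalCall>>::create_inherent(
            LocalCall { stmt: 7 }.into(),
        );
        println!("would submit {:?}", xt);
    }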
diff --git a/polkadot/runtime/parachains/src/on_demand/tests.rs b/polkadot/runtime/parachains/src/on_demand/tests.rs index 9742954118103b5b46895dde5fc7456a59bf2877..7da16942c7ad6989a2188fe749c947141f073ee0 100644 --- a/polkadot/runtime/parachains/src/on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/on_demand/tests.rs @@ -30,7 +30,6 @@ use crate::{ }, paras::{ParaGenesisArgs, ParaKind}, }; -use alloc::collections::btree_map::BTreeMap; use core::cmp::{Ord, Ordering}; use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; @@ -86,7 +85,7 @@ fn run_to_block( OnDemand::on_initialize(b + 1); // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); + Scheduler::advance_claim_queue(&Default::default()); } } diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 5048656e63633d78f42a0ee70f8fab9faf5ad5ee..e0f244dbd8631a88d75bd8d448b62c00066ecf23 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -615,7 +615,7 @@ pub mod pallet { frame_system::Config + configuration::Config + shared::Config - + frame_system::offchain::SendTransactionTypes> + + frame_system::offchain::CreateInherent> { type RuntimeEvent: From + IsType<::RuntimeEvent>; @@ -2177,9 +2177,8 @@ impl Pallet { ) { use frame_system::offchain::SubmitTransaction; - if let Err(e) = SubmitTransaction::>::submit_unsigned_transaction( - Call::include_pvf_check_statement { stmt, signature }.into(), - ) { + let xt = T::create_inherent(Call::include_pvf_check_statement { stmt, signature }.into()); + if let Err(e) = SubmitTransaction::>::submit_transaction(xt) { log::error!(target: LOG_TARGET, "Error submitting pvf check statement: {:?}", e,); } } diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 266860061bed8830311237721deda12b2b0e9356..485e7211c1d2f27581e8e9524ed0ca108d6199d1 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -43,7 +43,8 @@ benchmarks! { // Variant over `v`, the number of dispute statements in a dispute statement set. This gives the // weight of a single dispute statement set. enter_variable_disputes { - let v in 10..BenchBuilder::::fallback_max_validators(); + // The number of statements needs to be at least a third of the validator set size. 
+ let v in 400..BenchBuilder::::fallback_max_validators(); let scenario = BenchBuilder::::new() .set_dispute_sessions(&[2]) diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 2aca0f2c728a8bf8b962d116de1b572af0fe42d9..4c1394fd1347395371a581619d56a70df1bc9c73 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -27,8 +27,7 @@ use crate::{ inclusion::{self, CandidateCheckContext}, initializer, metrics::METRICS, - paras, - scheduler::{self, FreedReason}, + paras, scheduler, shared::{self, AllowedRelayParentsTracker}, ParaId, }; @@ -38,6 +37,7 @@ use alloc::{ vec::Vec, }; use bitvec::prelude::BitVec; +use core::result::Result; use frame_support::{ defensive, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, @@ -105,15 +105,6 @@ impl DisputedBitfield { } } -/// The context in which the inherent data is checked or processed. -#[derive(PartialEq)] -pub enum ProcessInherentDataContext { - /// Enables filtering/limits weight of inherent up to maximum block weight. - /// Invariant: InherentWeight <= BlockWeight. - ProvideInherent, - /// Checks the InherentWeight invariant. - Enter, -} pub use pallet::*; #[frame_support::pallet] @@ -140,11 +131,9 @@ pub mod pallet { /// The hash of the submitted parent header doesn't correspond to the saved block hash of /// the parent. InvalidParentHeader, - /// The data given to the inherent will result in an overweight block. - InherentOverweight, - /// A candidate was filtered during inherent execution. This should have only been done + /// Inherent data was filtered during execution. This should have only been done /// during creation. - CandidatesFilteredDuringExecution, + InherentDataFilteredDuringExecution, /// Too many candidates supplied. UnscheduledCandidate, } @@ -253,9 +242,12 @@ pub mod pallet { ensure!(!Included::::exists(), Error::::TooManyInclusionInherents); Included::::set(Some(())); + let initial_data = data.clone(); - Self::process_inherent_data(data, ProcessInherentDataContext::Enter) - .map(|(_processed, post_info)| post_info) + Self::process_inherent_data(data).and_then(|(processed, post_info)| { + ensure!(initial_data == processed, Error::::InherentDataFilteredDuringExecution); + Ok(post_info) + }) } } } @@ -273,10 +265,7 @@ impl Pallet { return None }, }; - match Self::process_inherent_data( - parachains_inherent_data, - ProcessInherentDataContext::ProvideInherent, - ) { + match Self::process_inherent_data(parachains_inherent_data) { Ok((processed, _)) => Some(processed), Err(err) => { log::warn!(target: LOG_TARGET, "Processing inherent data failed: {:?}", err); @@ -290,21 +279,12 @@ impl Pallet { /// The given inherent data is processed and state is altered accordingly. If any data could /// not be applied (inconsistencies, weight limit, ...) it is removed. /// - /// When called from `create_inherent` the `context` must be set to - /// `ProcessInherentDataContext::ProvideInherent` so it guarantees the invariant that inherent - /// is not overweight. - /// It is **mandatory** that calls from `enter` set `context` to - /// `ProcessInherentDataContext::Enter` to ensure the weight invariant is checked. - /// /// Returns: Result containing processed inherent data and weight, the processed inherent would /// consume. 
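Aside (illustrative sketch, not part of the patch): with ProcessInherentDataContext gone, enter now simply re-runs the same filtering used by block authorship and requires the result to equal the input; any difference means the author shipped data that should already have been filtered, and the call fails with InherentDataFilteredDuringExecution. The self-contained model below captures just that invariant with plain values instead of real inherent data; all names are assumptions.

    #[derive(Clone, PartialEq, Debug)]
    struct InherentData {
        backed: Vec<u32>,
        disputes: Vec<u32>,
    }

    #[derive(Debug, PartialEq)]
    enum Error {
        InherentDataFilteredDuringExecution,
    }

    // Stand-in for process_inherent_data: drop anything over a weight budget.
    fn process(data: InherentData, budget: usize) -> InherentData {
        let mut out = data;
        out.backed.truncate(budget);
        out
    }

    // Stand-in for enter: processing must be a no-op on already-filtered data.
    fn enter(data: InherentData, budget: usize) -> Result<(), Error> {
        let initial = data.clone();
        let processed = process(data, budget);
        if processed == initial {
            Ok(())
        } else {
            Err(Error::InherentDataFilteredDuringExecution)
        }
    }

    fn main() {
        let ok = InherentData { backed: vec![1, 2], disputes: vec![] };
        assert_eq!(enter(ok, 2), Ok(()));

        // The block author should already have filtered this; enter rejects it.
        let overweight = InherentData { backed: vec![1, 2, 3], disputes: vec![] };
        assert_eq!(enter(overweight, 2), Err(Error::InherentDataFilteredDuringExecution));
    }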
fn process_inherent_data( data: ParachainsInherentData>, - context: ProcessInherentDataContext, - ) -> core::result::Result< - (ParachainsInherentData>, PostDispatchInfo), - DispatchErrorWithPostInfo, - > { + ) -> Result<(ParachainsInherentData>, PostDispatchInfo), DispatchErrorWithPostInfo> + { #[cfg(feature = "runtime-metrics")] sp_io::init_tracing(); @@ -333,6 +313,27 @@ impl Pallet { let now = frame_system::Pallet::::block_number(); let config = configuration::ActiveConfig::::get(); + // Before anything else, update the allowed relay-parents. + { + let parent_number = now - One::one(); + let parent_storage_root = *parent_header.state_root(); + + shared::AllowedRelayParents::::mutate(|tracker| { + tracker.update( + parent_hash, + parent_storage_root, + scheduler::ClaimQueue::::get() + .into_iter() + .map(|(core_index, paras)| { + (core_index, paras.into_iter().map(|e| e.para_id()).collect()) + }) + .collect(), + parent_number, + config.async_backing_params.allowed_ancestry_len, + ); + }); + } + let candidates_weight = backed_candidates_weight::(&backed_candidates); let bitfields_weight = signed_bitfields_weight::(&bitfields); let disputes_weight = multi_dispute_statement_sets_weight::(&disputes); @@ -345,7 +346,7 @@ impl Pallet { log::debug!(target: LOG_TARGET, "Time weight before filter: {}, candidates + bitfields: {}, disputes: {}", weight_before_filtering.ref_time(), candidates_weight.ref_time() + bitfields_weight.ref_time(), disputes_weight.ref_time()); let current_session = shared::CurrentSessionIndex::::get(); - let expected_bits = scheduler::AvailabilityCores::::get().len(); + let expected_bits = scheduler::Pallet::::num_availability_cores(); let validator_public = shared::ActiveValidatorKeys::::get(); // We are assuming (incorrectly) to have all the weight (for the mandatory class or even @@ -390,7 +391,7 @@ impl Pallet { T::DisputesHandler::filter_dispute_data(set, post_conclusion_acceptance_period) }; - // Limit the disputes first, since the following statements depend on the votes include + // Limit the disputes first, since the following statements depend on the votes included // here. let (checked_disputes_sets, checked_disputes_sets_consumed_weight) = limit_and_sanitize_disputes::( @@ -399,7 +400,7 @@ impl Pallet { max_block_weight, ); - let mut all_weight_after = if context == ProcessInherentDataContext::ProvideInherent { + let mut all_weight_after = { // Assure the maximum block weight is adhered, by limiting bitfields and backed // candidates. Dispute statement sets were already limited before. let non_disputes_weight = apply_weight_limit::( @@ -427,23 +428,6 @@ impl Pallet { log::warn!(target: LOG_TARGET, "Post weight limiting weight is still too large, time: {}, size: {}", all_weight_after.ref_time(), all_weight_after.proof_size()); } all_weight_after - } else { - // This check is performed in the context of block execution. Ensures inherent weight - // invariants guaranteed by `create_inherent_data` for block authorship. 
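Aside (illustrative sketch, not part of the patch): the hunk above moves the AllowedRelayParents update to the very start of inherent processing, recording the parent block together with a snapshot of the claim queue before any bitfields, disputes or candidates are handled. The toy tracker below only models the bounded-buffer aspect of that update; the struct, its fields and the capacity rule are simplified assumptions, not the real shared-pallet tracker.

    use std::collections::VecDeque;

    type Hash = u64;
    type BlockNumber = u32;

    // Keeps the last `allowed_ancestry_len + 1` relay parents candidates may build on.
    struct AllowedRelayParents {
        buffer: VecDeque<(Hash, BlockNumber)>,
    }

    impl AllowedRelayParents {
        fn new() -> Self {
            Self { buffer: VecDeque::new() }
        }

        // Mirrors tracker.update(...): register the parent block, prune old entries.
        fn update(&mut self, parent_hash: Hash, parent_number: BlockNumber, allowed_ancestry_len: u32) {
            self.buffer.push_back((parent_hash, parent_number));
            while self.buffer.len() > allowed_ancestry_len as usize + 1 {
                self.buffer.pop_front();
            }
        }

        fn contains(&self, hash: Hash) -> bool {
            self.buffer.iter().any(|(h, _)| *h == hash)
        }
    }

    fn main() {
        let mut tracker = AllowedRelayParents::new();
        for n in 1..=5u32 {
            tracker.update(n as u64, n, 2); // keep at most 3 entries
        }
        assert!(!tracker.contains(1));
        assert!(tracker.contains(5));
        assert_eq!(tracker.buffer.len(), 3);
    }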
- if weight_before_filtering.any_gt(max_block_weight) { - log::error!( - "Overweight para inherent data reached the runtime {:?}: {} > {}", - parent_hash, - weight_before_filtering, - max_block_weight - ); - } - - ensure!( - weight_before_filtering.all_lte(max_block_weight), - Error::::InherentOverweight - ); - weight_before_filtering }; // Note that `process_checked_multi_dispute_data` will iterate and import each @@ -567,98 +551,9 @@ impl Pallet { log::debug!(target: LOG_TARGET, "Evicted timed out cores: {:?}", freed_timeout); } - // We'll schedule paras again, given freed cores, and reasons for freeing. - let freed = freed_concluded - .into_iter() - .map(|(c, _hash)| (c, FreedReason::Concluded)) - .chain(freed_disputed.into_iter().map(|core| (core, FreedReason::Concluded))) - .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) - .collect::>(); - scheduler::Pallet::::free_cores_and_fill_claim_queue(freed, now); - - METRICS.on_candidates_processed_total(backed_candidates.len() as u64); - - // After freeing cores and filling claims, but before processing backed candidates - // we update the allowed relay-parents. - { - let parent_number = now - One::one(); - let parent_storage_root = *parent_header.state_root(); - - shared::AllowedRelayParents::::mutate(|tracker| { - tracker.update( - parent_hash, - parent_storage_root, - scheduler::ClaimQueue::::get() - .into_iter() - .map(|(core_index, paras)| { - (core_index, paras.into_iter().map(|e| e.para_id()).collect()) - }) - .collect(), - parent_number, - config.async_backing_params.allowed_ancestry_len, - ); - }); - } - let allowed_relay_parents = shared::AllowedRelayParents::::get(); - - let core_index_enabled = configuration::ActiveConfig::::get() - .node_features - .get(FeatureIndex::ElasticScalingMVP as usize) - .map(|b| *b) - .unwrap_or(false); - - let allow_v2_receipts = configuration::ActiveConfig::::get() - .node_features - .get(FeatureIndex::CandidateReceiptV2 as usize) - .map(|b| *b) - .unwrap_or(false); - - let mut eligible: BTreeMap> = BTreeMap::new(); - let mut total_eligible_cores = 0; - - for (core_idx, para_id) in scheduler::Pallet::::eligible_paras() { - total_eligible_cores += 1; - log::trace!(target: LOG_TARGET, "Found eligible para {:?} on core {:?}", para_id, core_idx); - eligible.entry(para_id).or_default().insert(core_idx); - } - - let initial_candidate_count = backed_candidates.len(); - let backed_candidates_with_core = sanitize_backed_candidates::( - backed_candidates, - &allowed_relay_parents, - concluded_invalid_hashes, - eligible, - core_index_enabled, - allow_v2_receipts, - ); - let count = count_backed_candidates(&backed_candidates_with_core); - - ensure!(count <= total_eligible_cores, Error::::UnscheduledCandidate); - - METRICS.on_candidates_sanitized(count as u64); - - // In `Enter` context (invoked during execution) no more candidates should be filtered, - // because they have already been filtered during `ProvideInherent` context. Abort in such - // cases. - if context == ProcessInherentDataContext::Enter { - ensure!( - initial_candidate_count == count, - Error::::CandidatesFilteredDuringExecution - ); - } - - // Process backed candidates according to scheduled cores. 
- let inclusion::ProcessedCandidates::< as HeaderT>::Hash> { - core_indices: occupied, - candidate_receipt_with_backing_validator_indices, - } = inclusion::Pallet::::process_candidates( - &allowed_relay_parents, - &backed_candidates_with_core, - scheduler::Pallet::::group_validators, - core_index_enabled, - )?; - // Note which of the scheduled cores were actually occupied by a backed candidate. - scheduler::Pallet::::occupied(occupied.into_iter().map(|e| (e.0, e.1)).collect()); + // Back candidates. + let (candidate_receipt_with_backing_validator_indices, backed_candidates_with_core) = + Self::back_candidates(concluded_invalid_hashes, backed_candidates)?; set_scrapable_on_chain_backings::( current_session, @@ -672,6 +567,7 @@ impl Pallet { let bitfields = bitfields.into_iter().map(|v| v.into_unchecked()).collect(); + let count = backed_candidates_with_core.len(); let processed = ParachainsInherentData { bitfields, backed_candidates: backed_candidates_with_core.into_iter().fold( @@ -686,6 +582,104 @@ impl Pallet { }; Ok((processed, Some(all_weight_after).into())) } + + fn back_candidates( + concluded_invalid_hashes: BTreeSet, + backed_candidates: Vec>, + ) -> Result< + ( + Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>, + BTreeMap, CoreIndex)>>, + ), + DispatchErrorWithPostInfo, + > { + let allowed_relay_parents = shared::AllowedRelayParents::::get(); + let upcoming_new_session = initializer::Pallet::::upcoming_session_change(); + + METRICS.on_candidates_processed_total(backed_candidates.len() as u64); + + if !upcoming_new_session { + let occupied_cores = + inclusion::Pallet::::get_occupied_cores().map(|(core, _)| core).collect(); + + let mut eligible: BTreeMap> = BTreeMap::new(); + let mut total_eligible_cores = 0; + + for (core_idx, para_id) in Self::eligible_paras(&occupied_cores) { + total_eligible_cores += 1; + log::trace!(target: LOG_TARGET, "Found eligible para {:?} on core {:?}", para_id, core_idx); + eligible.entry(para_id).or_default().insert(core_idx); + } + + let node_features = configuration::ActiveConfig::::get().node_features; + let core_index_enabled = node_features + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + let allow_v2_receipts = node_features + .get(FeatureIndex::CandidateReceiptV2 as usize) + .map(|b| *b) + .unwrap_or(false); + + let backed_candidates_with_core = sanitize_backed_candidates::( + backed_candidates, + &allowed_relay_parents, + concluded_invalid_hashes, + eligible, + core_index_enabled, + allow_v2_receipts, + ); + let count = count_backed_candidates(&backed_candidates_with_core); + + ensure!(count <= total_eligible_cores, Error::::UnscheduledCandidate); + + METRICS.on_candidates_sanitized(count as u64); + + // Process backed candidates according to scheduled cores. + let candidate_receipt_with_backing_validator_indices = + inclusion::Pallet::::process_candidates( + &allowed_relay_parents, + &backed_candidates_with_core, + scheduler::Pallet::::group_validators, + core_index_enabled, + )?; + + // We need to advance the claim queue on all cores, except for the ones that did not + // get freed in this block. The ones that did not get freed also cannot be newly + // occupied. + scheduler::Pallet::::advance_claim_queue(&occupied_cores); + + Ok((candidate_receipt_with_backing_validator_indices, backed_candidates_with_core)) + } else { + log::debug!( + target: LOG_TARGET, + "Upcoming session change, not backing any new candidates." 
+ ); + // If we'll initialize a new session at the end of the block, we don't want to + // advance the claim queue. + + Ok((vec![], BTreeMap::new())) + } + } + + /// Paras that may get backed on cores. + /// + /// 1. The para must be scheduled on core. + /// 2. Core needs to be free, otherwise backing is not possible. + /// + /// We get a set of the occupied cores as input. + pub(crate) fn eligible_paras<'a>( + occupied_cores: &'a BTreeSet, + ) -> impl Iterator + 'a { + scheduler::ClaimQueue::::get().into_iter().filter_map(|(core_idx, queue)| { + if occupied_cores.contains(&core_idx) { + return None + } + let next_scheduled = queue.front()?; + Some((core_idx, next_scheduled.para_id())) + }) + } } /// Derive a bitfield from dispute @@ -1144,10 +1138,7 @@ fn sanitize_backed_candidates( } fn count_backed_candidates(backed_candidates: &BTreeMap>) -> usize { - backed_candidates.iter().fold(0, |mut count, (_id, candidates)| { - count += candidates.len(); - count - }) + backed_candidates.values().map(|c| c.len()).sum() } /// Derive entropy from babe provided per block randomness. diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs index 88af8332dccdef02a3a394398d01e36b8b0a32bc..eef26b83368f5d9fa6f362b99c22cdbd0f448186 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs @@ -49,11 +49,10 @@ mod enter { use crate::{ builder::{junk_collator, junk_collator_signature, Bench, BenchBuilder, CandidateModifier}, + disputes::clear_dispute_storage, + initializer::BufferedSessionChange, mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, RuntimeOrigin, Test}, - scheduler::{ - common::{Assignment, AssignmentProvider}, - ParasEntry, - }, + scheduler::common::{Assignment, AssignmentProvider}, session_info, }; use alloc::collections::btree_map::BTreeMap; @@ -75,7 +74,6 @@ mod enter { backed_and_concluding: BTreeMap, num_validators_per_core: u32, code_upgrade: Option, - fill_claimqueue: bool, elastic_paras: BTreeMap, unavailable_cores: Vec, v2_descriptor: bool, @@ -89,7 +87,6 @@ mod enter { backed_and_concluding, num_validators_per_core, code_upgrade, - fill_claimqueue, elastic_paras, unavailable_cores, v2_descriptor, @@ -110,14 +107,11 @@ mod enter { .set_dispute_statements(dispute_statements) .set_backed_and_concluding_paras(backed_and_concluding.clone()) .set_dispute_sessions(&dispute_sessions[..]) - .set_fill_claimqueue(fill_claimqueue) .set_unavailable_cores(unavailable_cores) .set_candidate_descriptor_v2(v2_descriptor) .set_candidate_modifier(candidate_modifier); // Setup some assignments as needed: - mock_assigner::Pallet::::set_core_count(builder.max_cores()); - (0..(builder.max_cores() as usize - extra_cores)).for_each(|para_id| { (0..elastic_paras.get(&(para_id as u32)).cloned().unwrap_or(1)).for_each( |_para_local_core_idx| { @@ -166,7 +160,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor, @@ -190,6 +183,7 @@ mod enter { inherent_data .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); // Nothing is filtered out (including the backed candidates.) 
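Aside (illustrative sketch, not part of the patch): eligible_paras pairs each free core with the para at the head of its claim queue, skipping occupied cores because they cannot take a new candidate this block; afterwards advance_claim_queue pops the head claim of every core that was freed, leaving still-occupied cores untouched. The model below mimics both steps with std collections; the type aliases and function names are simplified assumptions, not the scheduler's real API.

    use std::collections::{BTreeMap, BTreeSet, VecDeque};

    type CoreIndex = u32;
    type ParaId = u32;
    type ClaimQueue = BTreeMap<CoreIndex, VecDeque<ParaId>>;

    // Mirrors eligible_paras: skip occupied cores, take the front claim of the rest.
    fn eligible_paras<'a>(
        claim_queue: &'a ClaimQueue,
        occupied: &'a BTreeSet<CoreIndex>,
    ) -> impl Iterator<Item = (CoreIndex, ParaId)> + 'a {
        claim_queue.iter().filter_map(|(core, queue)| {
            if occupied.contains(core) {
                return None;
            }
            queue.front().map(|para| (*core, *para))
        })
    }

    // Mirrors advance_claim_queue(&occupied): pop the head claim of every freed core;
    // cores that stayed occupied keep their queue as-is.
    fn advance_claim_queue(claim_queue: &mut ClaimQueue, occupied: &BTreeSet<CoreIndex>) {
        for (core, queue) in claim_queue.iter_mut() {
            if !occupied.contains(core) {
                queue.pop_front();
            }
        }
    }

    fn main() {
        let mut cq: ClaimQueue = BTreeMap::from([
            (0, VecDeque::from([10, 10])), // free core, para 10 can be backed
            (1, VecDeque::from([11, 12])), // still occupied, nothing new here
        ]);
        let occupied = BTreeSet::from([1]);

        let eligible: Vec<_> = eligible_paras(&cq, &occupied).collect();
        assert_eq!(eligible, vec![(0, 10)]);

        advance_claim_queue(&mut cq, &occupied);
        assert_eq!(cq[&0], VecDeque::from([10]));
        assert_eq!(cq[&1], VecDeque::from([11, 12]));
    }

When a session change is buffered for the end of the block, back_candidates short-circuits instead: nothing is backed and the claim queue is not advanced, which is what the session_change test added further down in tests.rs asserts.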
assert_eq!( @@ -274,7 +268,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: [(2, 3)].into_iter().collect(), unavailable_cores: vec![], v2_descriptor, @@ -295,6 +288,7 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); assert!(pallet::OnChainVotes::::get().is_none()); // Nothing is filtered out (including the backed candidates.) @@ -377,7 +371,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: [(2, 4)].into_iter().collect(), unavailable_cores: unavailable_cores.clone(), v2_descriptor: false, @@ -529,6 +522,101 @@ mod enter { }); } + #[test] + // Test that no new candidates are backed if there's an upcoming session change scheduled at the + // end of the block. Claim queue will also not be advanced. + fn session_change() { + let config = MockGenesisConfig::default(); + assert!(config.configuration.config.scheduler_params.lookahead > 0); + + new_test_ext(config).execute_with(|| { + let dispute_statements = BTreeMap::new(); + + let mut backed_and_concluding = BTreeMap::new(); + backed_and_concluding.insert(0, 1); + backed_and_concluding.insert(1, 1); + + let scenario = make_inherent_data(TestConfig { + dispute_statements, + dispute_sessions: vec![], // No disputes + backed_and_concluding, + num_validators_per_core: 1, + code_upgrade: None, + elastic_paras: BTreeMap::new(), + unavailable_cores: vec![], + v2_descriptor: false, + candidate_modifier: None, + }); + + let prev_claim_queue = scheduler::ClaimQueue::::get(); + + assert_eq!(inclusion::PendingAvailability::::iter().count(), 2); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(0)).unwrap().len(), + 1 + ); + assert_eq!( + inclusion::PendingAvailability::::get(ParaId::from(1)).unwrap().len(), + 1 + ); + + // We expect the scenario to have cores 0 & 1 with pending availability. The backed + // candidates are also created for cores 0 & 1. The pending available candidates will + // become available but the new candidates will not be backed since there is an upcoming + // session change. + let mut expected_para_inherent_data = scenario.data.clone(); + expected_para_inherent_data.backed_candidates.clear(); + + // Check the para inherent data is as expected: + // * 1 bitfield per validator (2 validators) + assert_eq!(expected_para_inherent_data.bitfields.len(), 2); + // * 0 disputes. + assert_eq!(expected_para_inherent_data.disputes.len(), 0); + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) + .unwrap(); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); + + // Simulate a session change scheduled to happen at the end of the block. + initializer::BufferedSessionChanges::::put(vec![BufferedSessionChange { + validators: vec![], + queued: vec![], + session_index: 3, + }]); + + // Only backed candidates are filtered out. + assert_eq!( + Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(), + expected_para_inherent_data + ); + + assert_eq!( + // No candidates backed. + OnChainVotes::::get().unwrap().backing_validators_per_candidate.len(), + 0 + ); + + assert_eq!( + // The session of the on chain votes should equal the current session, which is 2 + OnChainVotes::::get().unwrap().session, + 2 + ); + + // No pending availability candidates. 
+ assert_eq!(inclusion::PendingAvailability::::iter().count(), 2); + assert!(inclusion::PendingAvailability::::get(ParaId::from(0)) + .unwrap() + .is_empty()); + assert!(inclusion::PendingAvailability::::get(ParaId::from(1)) + .unwrap() + .is_empty()); + + // The claim queue should not have been advanced. + assert_eq!(prev_claim_queue, scheduler::ClaimQueue::::get()); + }); + } + #[test] fn test_session_is_tracked_in_on_chain_scraping() { use crate::disputes::run_to_block; @@ -635,7 +723,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -657,8 +744,7 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claim_queue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); let multi_dispute_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); @@ -673,6 +759,8 @@ mod enter { &expected_para_inherent_data.disputes[..2], ); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), multi_dispute_inherent_data, @@ -710,7 +798,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 6, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -731,8 +818,7 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claim_queue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); @@ -744,6 +830,8 @@ mod enter { assert_eq!(limit_inherent_data.disputes[0].session, 1); assert_eq!(limit_inherent_data.disputes[1].session, 2); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -783,7 +871,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 4, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -805,8 +892,7 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - // The current schedule is empty prior to calling `create_inherent_enter`. - assert!(scheduler::Pallet::::claim_queue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); // Nothing is filtered out (including the backed candidates.) let limit_inherent_data = @@ -828,6 +914,8 @@ mod enter { // over weight assert_eq!(limit_inherent_data.backed_candidates.len(), 0); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -872,7 +960,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -894,10 +981,8 @@ mod enter { .put_data(PARACHAINS_INHERENT_IDENTIFIER, &expected_para_inherent_data) .unwrap(); - // The current schedule is empty prior to calling `create_inherent_enter`. 
- assert!(scheduler::Pallet::::claim_queue_is_empty()); + assert!(!scheduler::Pallet::::claim_queue_is_empty()); - // Nothing is filtered out (including the backed candidates.) let limit_inherent_data = Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); assert_ne!(limit_inherent_data, expected_para_inherent_data); @@ -918,9 +1003,11 @@ mod enter { // over weight assert_eq!(limit_inherent_data.backed_candidates.len(), 0); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), - limit_inherent_data, + limit_inherent_data )); assert_eq!( @@ -961,7 +1048,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -1022,7 +1108,6 @@ mod enter { backed_and_concluding, num_validators_per_core, code_upgrade: None, - fill_claimqueue: true, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -1045,6 +1130,21 @@ mod enter { Pallet::::create_inherent_inner(&inherent_data.clone()).unwrap(); assert!(limit_inherent_data == expected_para_inherent_data); + // Cores were scheduled. We should put the assignments back, before calling enter(). + let cores = (0..num_candidates) + .into_iter() + .map(|i| { + // Load an assignment into provider so that one is present to pop + let assignment = + ::AssignmentProvider::get_mock_assignment( + CoreIndex(i), + ParaId::from(i), + ); + (CoreIndex(i), [assignment].into()) + }) + .collect(); + scheduler::ClaimQueue::::set(cores); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -1110,7 +1210,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -1168,24 +1267,23 @@ mod enter { ); // One core was scheduled. We should put the assignment back, before calling enter(). - let now = frame_system::Pallet::::block_number() + 1; let used_cores = 5; let cores = (0..used_cores) .into_iter() .map(|i| { - let SchedulerParams { ttl, .. } = - configuration::ActiveConfig::::get().scheduler_params; // Load an assignment into provider so that one is present to pop let assignment = ::AssignmentProvider::get_mock_assignment( CoreIndex(i), ParaId::from(i), ); - (CoreIndex(i), [ParasEntry::new(assignment, now + ttl)].into()) + (CoreIndex(i), [assignment].into()) }) .collect(); scheduler::ClaimQueue::::set(cores); + clear_dispute_storage::(); + assert_ok!(Pallet::::enter( frame_system::RawOrigin::None.into(), limit_inherent_data, @@ -1219,7 +1317,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -1289,7 +1386,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -1357,7 +1453,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -1448,7 +1543,6 @@ mod enter { } // Ensure that overweight parachain inherents are always rejected by the runtime. - // Runtime should panic and return `InherentOverweight` error. 
#[rstest] #[case(true)] #[case(false)] @@ -1481,7 +1575,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor, @@ -1550,7 +1643,6 @@ mod enter { } // Ensure that overweight parachain inherents are always rejected by the runtime. - // Runtime should panic and return `InherentOverweight` error. #[test] fn inherent_create_weight_invariant() { new_test_ext(MockGenesisConfig::default()).execute_with(|| { @@ -1572,7 +1664,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: false, elastic_paras: BTreeMap::new(), unavailable_cores: vec![], v2_descriptor: false, @@ -1602,7 +1693,7 @@ mod enter { .unwrap_err() .error; - assert_eq!(dispatch_error, Error::::InherentOverweight.into()); + assert_eq!(dispatch_error, Error::::InherentDataFilteredDuringExecution.into()); }); } @@ -1632,7 +1723,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 5, code_upgrade: None, - fill_claimqueue: true, elastic_paras: [(2, 8)].into_iter().collect(), unavailable_cores: unavailable_cores.clone(), v2_descriptor: true, @@ -1672,7 +1762,7 @@ mod enter { // We expect `enter` to fail because the inherent data contains backed candidates with // v2 descriptors. - assert_eq!(dispatch_error, Error::::CandidatesFilteredDuringExecution.into()); + assert_eq!(dispatch_error, Error::::InherentDataFilteredDuringExecution.into()); }); } @@ -1700,9 +1790,8 @@ mod enter { dispute_statements: BTreeMap::new(), dispute_sessions: vec![], // No disputes backed_and_concluding, - num_validators_per_core: 5, + num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: [(2, 8)].into_iter().collect(), unavailable_cores: unavailable_cores.clone(), v2_descriptor: true, @@ -1721,8 +1810,8 @@ mod enter { let unfiltered_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 10 backed candidates) - assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 50); + // * 1 bitfield per validator (1 validators per core, 10 backed candidates) + assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 10); // * 10 v2 candidate descriptors. assert_eq!(unfiltered_para_inherent_data.backed_candidates.len(), 10); @@ -1740,7 +1829,7 @@ mod enter { // We expect `enter` to fail because the inherent data contains backed candidates with // v2 descriptors. - assert_eq!(dispatch_error, Error::::CandidatesFilteredDuringExecution.into()); + assert_eq!(dispatch_error, Error::::InherentDataFilteredDuringExecution.into()); }); } @@ -1768,9 +1857,8 @@ mod enter { dispute_statements: BTreeMap::new(), dispute_sessions: vec![], // No disputes backed_and_concluding, - num_validators_per_core: 5, + num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: [(2, 8)].into_iter().collect(), unavailable_cores: unavailable_cores.clone(), v2_descriptor: true, @@ -1789,8 +1877,8 @@ mod enter { let unfiltered_para_inherent_data = scenario.data.clone(); // Check the para inherent data is as expected: - // * 1 bitfield per validator (5 validators per core, 10 backed candidates) - assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 50); + // * 1 bitfield per validator (1 validator per core, 10 backed candidates) + assert_eq!(unfiltered_para_inherent_data.bitfields.len(), 10); // * 10 v2 candidate descriptors. 
assert_eq!(unfiltered_para_inherent_data.backed_candidates.len(), 10); @@ -1808,7 +1896,7 @@ mod enter { // We expect `enter` to fail because the inherent data contains backed candidates with // v2 descriptors. - assert_eq!(dispatch_error, Error::::CandidatesFilteredDuringExecution.into()); + assert_eq!(dispatch_error, Error::::InherentDataFilteredDuringExecution.into()); }); } #[test] @@ -1845,7 +1933,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: [(2, 3)].into_iter().collect(), unavailable_cores: unavailable_cores.clone(), v2_descriptor: true, @@ -1900,7 +1987,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: [(2, 3)].into_iter().collect(), unavailable_cores: unavailable_cores.clone(), v2_descriptor: true, @@ -1987,7 +2073,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: Default::default(), unavailable_cores: unavailable_cores.clone(), v2_descriptor: true, @@ -2042,7 +2127,6 @@ mod enter { backed_and_concluding, num_validators_per_core: 1, code_upgrade: None, - fill_claimqueue: true, elastic_paras: [(2, 3)].into_iter().collect(), unavailable_cores, v2_descriptor: true, @@ -2374,7 +2458,7 @@ mod sanitizers { mod candidates { use crate::{ mock::{set_disabled_validators, RuntimeOrigin}, - scheduler::{common::Assignment, ParasEntry}, + scheduler::common::Assignment, util::{make_persisted_validation_data, make_persisted_validation_data_with_parent}, }; use alloc::collections::vec_deque::VecDeque; @@ -2455,17 +2539,17 @@ mod sanitizers { scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(0), + }]), ), ( CoreIndex::from(1), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(1) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(1), + }]), ), ])); @@ -2547,7 +2631,7 @@ mod sanitizers { // State sanity checks assert_eq!( - scheduler::Pallet::::scheduled_paras().collect::>(), + Pallet::::eligible_paras(&Default::default()).collect::>(), vec![(CoreIndex(0), ParaId::from(1)), (CoreIndex(1), ParaId::from(2))] ); assert_eq!( @@ -2643,73 +2727,73 @@ mod sanitizers { scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(0), + }]), ), ( CoreIndex::from(1), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(1), + }]), ), ( CoreIndex::from(2), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(2), + }]), ), ( CoreIndex::from(3), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool 
{ + para_id: 2.into(), + core_index: CoreIndex(3), + }]), ), ( CoreIndex::from(4), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(4) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 3.into(), + core_index: CoreIndex(4), + }]), ), ( CoreIndex::from(5), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(5) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 4.into(), + core_index: CoreIndex(5), + }]), ), ( CoreIndex::from(6), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 5.into(), core_index: CoreIndex(6) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 5.into(), + core_index: CoreIndex(6), + }]), ), ( CoreIndex::from(7), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(7) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 7.into(), + core_index: CoreIndex(7), + }]), ), ( CoreIndex::from(8), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 7.into(), core_index: CoreIndex(8) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 7.into(), + core_index: CoreIndex(8), + }]), ), ( CoreIndex::from(9), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 8.into(), core_index: CoreIndex(9) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 8.into(), + core_index: CoreIndex(9), + }]), ), ])); @@ -3089,7 +3173,7 @@ mod sanitizers { // State sanity checks assert_eq!( - scheduler::Pallet::::scheduled_paras().collect::>(), + Pallet::::eligible_paras(&Default::default()).collect::>(), vec![ (CoreIndex(0), ParaId::from(1)), (CoreIndex(1), ParaId::from(1)), @@ -3104,7 +3188,7 @@ mod sanitizers { ] ); let mut scheduled: BTreeMap> = BTreeMap::new(); - for (core_idx, para_id) in scheduler::Pallet::::scheduled_paras() { + for (core_idx, para_id) in Pallet::::eligible_paras(&Default::default()) { scheduled.entry(para_id).or_default().insert(core_idx); } @@ -3188,66 +3272,66 @@ mod sanitizers { scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(0), + }]), ), ( CoreIndex::from(1), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(1), + }]), ), ( CoreIndex::from(2), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(2), + }]), ), ( CoreIndex::from(3), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(3), + }]), ), ( CoreIndex::from(4), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(4), + }]), ), ( CoreIndex::from(5), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 3.into(), 
core_index: CoreIndex(5) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 3.into(), + core_index: CoreIndex(5), + }]), ), ( CoreIndex::from(6), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(6) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 3.into(), + core_index: CoreIndex(6), + }]), ), ( CoreIndex::from(7), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(7) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 4.into(), + core_index: CoreIndex(7), + }]), ), ( CoreIndex::from(8), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(8) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 4.into(), + core_index: CoreIndex(8), + }]), ), ])); @@ -3577,7 +3661,7 @@ mod sanitizers { // State sanity checks assert_eq!( - scheduler::Pallet::::scheduled_paras().collect::>(), + Pallet::::eligible_paras(&Default::default()).collect::>(), vec![ (CoreIndex(0), ParaId::from(1)), (CoreIndex(1), ParaId::from(1)), @@ -3591,7 +3675,7 @@ mod sanitizers { ] ); let mut scheduled: BTreeMap> = BTreeMap::new(); - for (core_idx, para_id) in scheduler::Pallet::::scheduled_paras() { + for (core_idx, para_id) in Pallet::::eligible_paras(&Default::default()) { scheduled.entry(para_id).or_default().insert(core_idx); } @@ -3712,45 +3796,45 @@ mod sanitizers { scheduler::Pallet::::set_claim_queue(BTreeMap::from([ ( CoreIndex::from(0), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(0), + }]), ), ( CoreIndex::from(1), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(1), + }]), ), ( CoreIndex::from(2), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(2) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 1.into(), + core_index: CoreIndex(2), + }]), ), ( CoreIndex::from(3), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(3), + }]), ), ( CoreIndex::from(4), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(4) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(4), + }]), ), ( CoreIndex::from(5), - VecDeque::from([ParasEntry::new( - Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(5) }, - RELAY_PARENT_NUM, - )]), + VecDeque::from([Assignment::Pool { + para_id: 2.into(), + core_index: CoreIndex(5), + }]), ), ])); @@ -3998,7 +4082,7 @@ mod sanitizers { // State sanity checks assert_eq!( - scheduler::Pallet::::scheduled_paras().collect::>(), + Pallet::::eligible_paras(&Default::default()).collect::>(), vec![ (CoreIndex(0), ParaId::from(1)), (CoreIndex(1), ParaId::from(1)), @@ -4009,7 +4093,7 @@ mod sanitizers { ] ); let mut scheduled: BTreeMap> = BTreeMap::new(); - for (core_idx, para_id) in scheduler::Pallet::::scheduled_paras() { + for (core_idx, para_id) in Pallet::::eligible_paras(&Default::default()) { 
scheduled.entry(para_id).or_default().insert(core_idx); } diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs index a0996d5df0eec2eade802bf0980e1b4c49c3ce17..e9327bc7641a3602eaaf7096cbd16d2d6e24b780 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs @@ -18,8 +18,7 @@ //! functions. use crate::{ - configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, - scheduler::{self, CoreOccupied}, + configuration, disputes, dmp, hrmp, inclusion, initializer, paras, paras_inherent, scheduler, session_info, shared, }; use alloc::{ @@ -67,15 +66,6 @@ pub fn validator_groups( /// Implementation for the `availability_cores` function of the runtime API. pub fn availability_cores() -> Vec>> { - let cores = scheduler::AvailabilityCores::::get(); - let now = frame_system::Pallet::::block_number() + One::one(); - - // This explicit update is only strictly required for session boundaries: - // - // At the end of a session we clear the claim queues: Without this update call, nothing would be - // scheduled to the client. - scheduler::Pallet::::free_cores_and_fill_claim_queue(Vec::new(), now); - let time_out_for = scheduler::Pallet::::availability_timeout_predicate(); let group_responsible_for = @@ -95,76 +85,42 @@ pub fn availability_cores() -> Vec = scheduler::Pallet::::scheduled_paras().collect(); - - cores - .into_iter() - .enumerate() - .map(|(i, core)| match core { - CoreOccupied::Paras(entry) => { - // Due to https://github.com/paritytech/polkadot-sdk/issues/64, using the new storage types would cause - // this runtime API to panic. We explicitly handle the storage for version 0 to - // prevent that. When removing the inclusion v0 -> v1 migration, this bit of code - // can also be removed. - let pending_availability = if inclusion::Pallet::::on_chain_storage_version() == - StorageVersion::new(0) - { - inclusion::migration::v0::PendingAvailability::::get(entry.para_id()) - .expect("Occupied core always has pending availability; qed") - } else { - let candidate = inclusion::Pallet::::pending_availability_with_core( - entry.para_id(), - CoreIndex(i as u32), - ) - .expect("Occupied core always has pending availability; qed"); - - // Translate to the old candidate format, as we don't need the commitments now. - inclusion::migration::v0::CandidatePendingAvailability { - core: candidate.core_occupied(), - hash: candidate.candidate_hash(), - descriptor: candidate.candidate_descriptor().clone(), - availability_votes: candidate.availability_votes().clone(), - backers: candidate.backers().clone(), - relay_parent_number: candidate.relay_parent_number(), - backed_in_number: candidate.backed_in_number(), - backing_group: candidate.backing_group(), - } - }; - - let backed_in_number = pending_availability.backed_in_number; + let claim_queue = scheduler::Pallet::::get_claim_queue(); + let occupied_cores: BTreeMap> = + inclusion::Pallet::::get_occupied_cores().collect(); + let n_cores = scheduler::Pallet::::num_availability_cores(); + (0..n_cores) + .map(|core_idx| { + let core_idx = CoreIndex(core_idx as u32); + if let Some(pending_availability) = occupied_cores.get(&core_idx) { // Use the same block number for determining the responsible group as what the // backing subsystem would use when it calls validator_groups api. 
let backing_group_allocation_time = - pending_availability.relay_parent_number + One::one(); + pending_availability.relay_parent_number() + One::one(); CoreState::Occupied(OccupiedCore { - next_up_on_available: scheduler::Pallet::::next_up_on_available(CoreIndex( - i as u32, - )), - occupied_since: backed_in_number, - time_out_at: time_out_for(backed_in_number).live_until, - next_up_on_time_out: scheduler::Pallet::::next_up_on_time_out(CoreIndex( - i as u32, - )), - availability: pending_availability.availability_votes.clone(), + next_up_on_available: scheduler::Pallet::::next_up_on_available(core_idx), + occupied_since: pending_availability.backed_in_number(), + time_out_at: time_out_for(pending_availability.backed_in_number()).live_until, + next_up_on_time_out: scheduler::Pallet::::next_up_on_available(core_idx), + availability: pending_availability.availability_votes().clone(), group_responsible: group_responsible_for( backing_group_allocation_time, - pending_availability.core, + pending_availability.core_occupied(), ), - candidate_hash: pending_availability.hash, - candidate_descriptor: pending_availability.descriptor, + candidate_hash: pending_availability.candidate_hash(), + candidate_descriptor: pending_availability.candidate_descriptor().clone(), }) - }, - CoreOccupied::Free => { - if let Some(para_id) = scheduled.get(&CoreIndex(i as _)).cloned() { + } else { + if let Some(assignment) = claim_queue.get(&core_idx).and_then(|q| q.front()) { CoreState::Scheduled(polkadot_primitives::ScheduledCore { - para_id, + para_id: assignment.para_id(), collator: None, }) } else { CoreState::Free } - }, + } }) .collect() } @@ -195,13 +151,12 @@ where build() }, OccupiedCoreAssumption::TimedOut => build(), - OccupiedCoreAssumption::Free => { - if >::pending_availability(para_id).is_some() { + OccupiedCoreAssumption::Free => + if !>::candidates_pending_availability(para_id).is_empty() { None } else { build() - } - }, + }, } } @@ -240,10 +195,12 @@ pub fn assumed_validation_data( let persisted_validation_data = make_validation_data().or_else(|| { // Try again with force enacting the pending candidates. This check only makes sense if // there are any pending candidates. - inclusion::Pallet::::pending_availability(para_id).and_then(|_| { - inclusion::Pallet::::force_enact(para_id); - make_validation_data() - }) + (!inclusion::Pallet::::candidates_pending_availability(para_id).is_empty()) + .then_some(()) + .and_then(|_| { + inclusion::Pallet::::force_enact(para_id); + make_validation_data() + }) }); // If we were successful, also query current validation code hash. persisted_validation_data.zip(paras::CurrentCodeHash::::get(¶_id)) @@ -319,7 +276,7 @@ pub fn validation_code( pub fn candidate_pending_availability( para_id: ParaId, ) -> Option> { - inclusion::Pallet::::candidate_pending_availability(para_id) + inclusion::Pallet::::first_candidate_pending_availability(para_id) } /// Implementation for the `candidate_events` function of the runtime API. @@ -568,23 +525,12 @@ pub fn approval_voting_params() -> ApprovalVotingParams /// Returns the claimqueue from the scheduler pub fn claim_queue() -> BTreeMap> { - let now = >::block_number() + One::one(); - - // This is needed so that the claim queue always has the right size (equal to - // scheduling_lookahead). Otherwise, if a candidate is backed in the same block where the - // previous candidate is included, the claim queue will have already pop()-ed the next item - // from the queue and the length would be `scheduling_lookahead - 1`. 
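// Illustrative sketch (not part of the patch): the rewritten `availability_cores` above derives
// each core's `CoreState` from just two inputs, the inclusion pallet's occupied cores and the
// scheduler's claim queue, instead of the removed `AvailabilityCores` storage. A minimal,
// self-contained model of that decision, using plain `u32` ids in place of `CoreIndex`/`ParaId`
// (an assumption for illustration only):
use std::collections::{BTreeMap, VecDeque};

#[derive(Debug, PartialEq)]
enum SimpleCoreState {
    Occupied(u32),
    Scheduled(u32),
    Free,
}

fn core_state(
    core: u32,
    occupied: &BTreeMap<u32, u32>,              // core -> para with a candidate pending availability
    claim_queue: &BTreeMap<u32, VecDeque<u32>>, // core -> queued para ids
) -> SimpleCoreState {
    if let Some(para) = occupied.get(&core) {
        SimpleCoreState::Occupied(*para)
    } else if let Some(para) = claim_queue.get(&core).and_then(|q| q.front()) {
        SimpleCoreState::Scheduled(*para)
    } else {
        SimpleCoreState::Free
    }
}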
- >::free_cores_and_fill_claim_queue(Vec::new(), now); let config = configuration::ActiveConfig::::get(); // Extra sanity, config should already never be smaller than 1: let n_lookahead = config.scheduler_params.lookahead.max(1); - - scheduler::ClaimQueue::::get() + scheduler::Pallet::::get_claim_queue() .into_iter() .map(|(core_index, entries)| { - // on cores timing out internal claim queue size may be temporarily longer than it - // should be as the timed out assignment might got pushed back to an already full claim - // queue: ( core_index, entries.into_iter().map(|e| e.para_id()).take(n_lookahead as usize).collect(), diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index 445583d929aba9a82322f8fb94a70d38632a4f8f..329df3a8a9deb3c06a337d9cf3e4e43e55c100ae 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -36,14 +36,9 @@ //! number of groups as availability cores. Validator groups will be assigned to different //! availability cores over time. -use core::iter::Peekable; - use crate::{configuration, initializer::SessionChangeNotification, paras}; use alloc::{ - collections::{ - btree_map::{self, BTreeMap}, - vec_deque::VecDeque, - }, + collections::{btree_map::BTreeMap, btree_set::BTreeSet, vec_deque::VecDeque}, vec::Vec, }; use frame_support::{pallet_prelude::*, traits::Defensive}; @@ -71,7 +66,7 @@ pub mod migration; pub mod pallet { use super::*; - const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); #[pallet::pallet] #[pallet::without_storage_info] @@ -93,47 +88,6 @@ pub mod pallet { #[pallet::storage] pub type ValidatorGroups = StorageValue<_, Vec>, ValueQuery>; - /// One entry for each availability core. The i'th parachain belongs to the i'th core, with the - /// remaining cores all being on demand parachain multiplexers. - /// - /// Bounded by the maximum of either of these two values: - /// * The number of parachains and parathread multiplexers - /// * The number of validators divided by `configuration.max_validators_per_core`. - #[pallet::storage] - pub type AvailabilityCores = StorageValue<_, Vec>, ValueQuery>; - - /// Representation of a core in `AvailabilityCores`. - /// - /// This is not to be confused with `CoreState` which is an enriched variant of this and exposed - /// to the node side. It also provides information about scheduled/upcoming assignments for - /// example and is computed on the fly in the `availability_cores` runtime call. - #[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)] - pub enum CoreOccupied { - /// No candidate is waiting availability on this core right now (the core is not occupied). - Free, - /// A para is currently waiting for availability/inclusion on this core. - Paras(ParasEntry), - } - - /// Convenience type alias for `CoreOccupied`. - pub type CoreOccupiedType = CoreOccupied>; - - impl CoreOccupied { - /// Is core free? - pub fn is_free(&self) -> bool { - matches!(self, Self::Free) - } - } - - /// Reasons a core might be freed. - #[derive(Clone, Copy)] - pub enum FreedReason { - /// The core's work concluded and the parablock assigned to it is considered available. - Concluded, - /// The core's work timed out. - TimedOut, - } - /// The block number where the session start occurred. Used to track how many group rotations /// have occurred. 
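// Illustrative sketch (not part of the patch): the `claim_queue` runtime API above now simply maps
// the scheduler's per-core queues to para ids and truncates each queue to the configured
// lookahead. A caller receiving such a map (modelled here with plain `u32` ids, an assumption for
// illustration only) might look up where a given para is queued like this:
use std::collections::{BTreeMap, VecDeque};

/// Returns the cores on which `para` currently appears in the claim queue, in ascending order.
fn cores_for_para(claim_queue: &BTreeMap<u32, VecDeque<u32>>, para: u32) -> Vec<u32> {
    claim_queue
        .iter()
        .filter(|(_, queue)| queue.contains(&para))
        .map(|(core, _)| *core)
        .collect()
}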
/// @@ -145,40 +99,9 @@ pub mod pallet { pub type SessionStartBlock = StorageValue<_, BlockNumberFor, ValueQuery>; /// One entry for each availability core. The `VecDeque` represents the assignments to be - /// scheduled on that core. The value contained here will not be valid after the end of - /// a block. Runtime APIs should be used to determine scheduled cores for the upcoming block. + /// scheduled on that core. #[pallet::storage] - pub type ClaimQueue = - StorageValue<_, BTreeMap>>, ValueQuery>; - - /// Assignments as tracked in the claim queue. - #[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq, Clone)] - pub struct ParasEntry { - /// The underlying [`Assignment`]. - pub assignment: Assignment, - /// The number of times the entry has timed out in availability already. - pub availability_timeouts: u32, - /// The block height until this entry needs to be backed. - /// - /// If missed the entry will be removed from the claim queue without ever having occupied - /// the core. - pub ttl: N, - } - - /// Convenience type declaration for `ParasEntry`. - pub type ParasEntryType = ParasEntry>; - - impl ParasEntry { - /// Create a new `ParasEntry`. - pub fn new(assignment: Assignment, now: N) -> Self { - ParasEntry { assignment, availability_timeouts: 0, ttl: now } - } - - /// Return `Id` from the underlying `Assignment`. - pub fn para_id(&self) -> ParaId { - self.assignment.para_id() - } - } + pub type ClaimQueue = StorageValue<_, BTreeMap>, ValueQuery>; /// Availability timeout status of a core. pub(crate) struct AvailabilityTimeoutStatus { @@ -195,30 +118,6 @@ pub mod pallet { } } -type PositionInClaimQueue = u32; - -struct ClaimQueueIterator { - next_idx: u32, - queue: Peekable>>, -} - -impl Iterator for ClaimQueueIterator { - type Item = (CoreIndex, VecDeque); - - fn next(&mut self) -> Option { - let (idx, _) = self.queue.peek()?; - let val = if idx != &CoreIndex(self.next_idx) { - log::trace!(target: LOG_TARGET, "idx did not match claim queue idx: {:?} vs {:?}", idx, self.next_idx); - (CoreIndex(self.next_idx), VecDeque::new()) - } else { - let (idx, q) = self.queue.next()?; - (idx, q) - }; - self.next_idx += 1; - Some(val) - } -} - impl Pallet { /// Called by the initializer to initialize the scheduler pallet. pub(crate) fn initializer_initialize(_now: BlockNumberFor) -> Weight { @@ -228,31 +127,22 @@ impl Pallet { /// Called by the initializer to finalize the scheduler pallet. pub(crate) fn initializer_finalize() {} - /// Called before the initializer notifies of a new session. - pub(crate) fn pre_new_session() { - Self::push_claim_queue_items_to_assignment_provider(); - Self::push_occupied_cores_to_assignment_provider(); - } - /// Called by the initializer to note that a new session has started. pub(crate) fn initializer_on_new_session( notification: &SessionChangeNotification>, ) { - let SessionChangeNotification { validators, new_config, .. } = notification; + let SessionChangeNotification { validators, new_config, prev_config, .. } = notification; let config = new_config; + let assigner_cores = config.scheduler_params.num_cores; let n_cores = core::cmp::max( - T::AssignmentProvider::session_core_count(), + assigner_cores, match config.scheduler_params.max_validators_per_core { Some(x) if x != 0 => validators.len() as u32 / x, _ => 0, }, ); - AvailabilityCores::::mutate(|cores| { - cores.resize_with(n_cores as _, || CoreOccupied::Free); - }); - // shuffle validators into groups. 
if n_cores == 0 || validators.is_empty() { ValidatorGroups::::set(Vec::new()); @@ -295,151 +185,24 @@ impl Pallet { ValidatorGroups::::set(groups); } + // Resize and populate claim queue. + Self::maybe_resize_claim_queue(prev_config.scheduler_params.num_cores, assigner_cores); + Self::populate_claim_queue_after_session_change(); + let now = frame_system::Pallet::::block_number() + One::one(); SessionStartBlock::::set(now); } - /// Free unassigned cores. Provide a list of cores that should be considered newly-freed along - /// with the reason for them being freed. Returns a tuple of concluded and timedout paras. - fn free_cores( - just_freed_cores: impl IntoIterator, - ) -> (BTreeMap, BTreeMap>) { - let mut timedout_paras: BTreeMap> = BTreeMap::new(); - let mut concluded_paras = BTreeMap::new(); - - AvailabilityCores::::mutate(|cores| { - let c_len = cores.len(); - - just_freed_cores - .into_iter() - .filter(|(freed_index, _)| (freed_index.0 as usize) < c_len) - .for_each(|(freed_index, freed_reason)| { - match core::mem::replace(&mut cores[freed_index.0 as usize], CoreOccupied::Free) - { - CoreOccupied::Free => {}, - CoreOccupied::Paras(entry) => { - match freed_reason { - FreedReason::Concluded => { - concluded_paras.insert(freed_index, entry.assignment); - }, - FreedReason::TimedOut => { - timedout_paras.insert(freed_index, entry); - }, - }; - }, - }; - }) - }); - - (concluded_paras, timedout_paras) - } - - /// Get an iterator into the claim queues. - /// - /// This iterator will have an item for each and every core index up to the maximum core index - /// found in the claim queue. In other words there will be no holes/missing core indices, - /// between core 0 and the maximum, even if the claim queue was missing entries for particular - /// indices in between. (The iterator will return an empty `VecDeque` for those indices. - fn claim_queue_iterator() -> impl Iterator>)> { - let queues = ClaimQueue::::get(); - return ClaimQueueIterator::> { - next_idx: 0, - queue: queues.into_iter().peekable(), - } - } - - /// Note that the given cores have become occupied. Update the claim queue accordingly. - /// This will not push a new entry onto the claim queue, so the length after this call will be - /// the expected length - 1. The claim_queue runtime API will take care of adding another entry - /// here, to ensure the right lookahead. - pub(crate) fn occupied( - now_occupied: BTreeMap, - ) -> BTreeMap { - let mut availability_cores = AvailabilityCores::::get(); - - log::debug!(target: LOG_TARGET, "[occupied] now_occupied {:?}", now_occupied); - - let pos_mapping: BTreeMap = now_occupied - .iter() - .flat_map(|(core_idx, para_id)| { - match Self::remove_from_claim_queue(*core_idx, *para_id) { - Err(e) => { - log::debug!( - target: LOG_TARGET, - "[occupied] error on remove_from_claim queue {}", - e - ); - None - }, - Ok((pos_in_claim_queue, pe)) => { - availability_cores[core_idx.0 as usize] = CoreOccupied::Paras(pe); - - Some((*core_idx, pos_in_claim_queue)) - }, - } - }) - .collect(); - - // Drop expired claims after processing now_occupied. - Self::drop_expired_claims_from_claim_queue(); - - AvailabilityCores::::set(availability_cores); - - pos_mapping - } - - /// Iterates through every element in all claim queues and tries to add new assignments from the - /// `AssignmentProvider`. A claim is considered expired if it's `ttl` field is lower than the - /// current block height. 
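// Illustrative sketch (not part of the patch): on a session change the pallet now first shrinks
// the claim queue if the configured core count went down, handing the dropped assignments back to
// the assignment provider, and then refills every core up to the lookahead. A simplified,
// standalone model of the shrinking step; note that the real `maybe_resize_claim_queue` pushes the
// dropped assignments back in reverse order so that re-popping them preserves their ordering:
use std::collections::{BTreeMap, VecDeque};

/// Remove the queues of cores that no longer exist and return their assignments to the caller.
fn resize_claim_queue(
    claim_queue: &mut BTreeMap<u32, VecDeque<u32>>,
    old_core_count: u32,
    new_core_count: u32,
) -> Vec<u32> {
    let mut returned = Vec::new();
    for core in new_core_count..old_core_count {
        if let Some(dropped) = claim_queue.remove(&core) {
            returned.extend(dropped);
        }
    }
    returned
}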
- fn drop_expired_claims_from_claim_queue() { - let now = frame_system::Pallet::::block_number(); - let availability_cores = AvailabilityCores::::get(); - let ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; - - ClaimQueue::::mutate(|cq| { - for (idx, _) in (0u32..).zip(availability_cores) { - let core_idx = CoreIndex(idx); - if let Some(core_claim_queue) = cq.get_mut(&core_idx) { - let mut i = 0; - let mut num_dropped = 0; - while i < core_claim_queue.len() { - let maybe_dropped = if let Some(entry) = core_claim_queue.get(i) { - if entry.ttl < now { - core_claim_queue.remove(i) - } else { - None - } - } else { - None - }; - - if let Some(dropped) = maybe_dropped { - num_dropped += 1; - T::AssignmentProvider::report_processed(dropped.assignment); - } else { - i += 1; - } - } - - for _ in 0..num_dropped { - // For all claims dropped due to TTL, attempt to pop a new entry to - // the back of the claim queue. - if let Some(assignment) = - T::AssignmentProvider::pop_assignment_for_core(core_idx) - { - core_claim_queue.push_back(ParasEntry::new(assignment, now + ttl)); - } - } - } - } - }); - } - /// Get the validators in the given group, if the group index is valid for this session. pub(crate) fn group_validators(group_index: GroupIndex) -> Option> { ValidatorGroups::::get().get(group_index.0 as usize).map(|g| g.clone()) } + /// Get the number of cores. + pub(crate) fn num_availability_cores() -> usize { + ValidatorGroups::::decode_len().unwrap_or(0) + } + /// Get the group assigned to a specific core by index at the current block number. Result /// undefined if the core index is unknown or the block number is less than the session start /// index. @@ -531,183 +294,137 @@ impl Pallet { /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it became available. pub(crate) fn next_up_on_available(core: CoreIndex) -> Option { - ClaimQueue::::get() - .get(&core) - .and_then(|a| a.front().map(|pe| Self::paras_entry_to_scheduled_core(pe))) + // Since this is being called from a runtime API, we need to workaround for #64. + if Self::on_chain_storage_version() == StorageVersion::new(2) { + migration::v2::ClaimQueue::::get() + .get(&core) + .and_then(|a| a.front().map(|entry| entry.assignment.para_id())) + } else { + ClaimQueue::::get() + .get(&core) + .and_then(|a| a.front().map(|assignment| assignment.para_id())) + } + .map(|para_id| ScheduledCore { para_id, collator: None }) } - fn paras_entry_to_scheduled_core(pe: &ParasEntryType) -> ScheduledCore { - ScheduledCore { para_id: pe.para_id(), collator: None } + // Since this is being called from a runtime API, we need to workaround for #64. + pub(crate) fn get_claim_queue() -> BTreeMap> { + if Self::on_chain_storage_version() == StorageVersion::new(2) { + migration::v2::ClaimQueue::::get() + .into_iter() + .map(|(core_index, entries)| { + (core_index, entries.into_iter().map(|e| e.assignment).collect()) + }) + .collect() + } else { + ClaimQueue::::get() + } } - /// Return the next thing that will be scheduled on this core assuming it is currently - /// occupied and the candidate occupying it times out. 
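// Illustrative sketch (not part of the patch): the two helpers above guard their reads with
// `on_chain_storage_version()` because runtime APIs can run against a block whose state was
// written before the v2 -> v3 migration executed (the "#64" workaround mentioned in the comments).
// A standalone model of that version-gated read, with invented types standing in for the old and
// new `ClaimQueue` layouts:
use std::collections::{BTreeMap, VecDeque};

// Old layout: per-core queue of (assignment, extra bookkeeping such as ttl/timeout counters).
type OldQueue = BTreeMap<u32, VecDeque<(u32, u32)>>;
// New layout: per-core queue of bare assignments.
type NewQueue = BTreeMap<u32, VecDeque<u32>>;

/// Read the claim queue in the new shape, translating on the fly if the state is still on v2.
fn read_claim_queue(storage_version: u16, old: &OldQueue, new: &NewQueue) -> NewQueue {
    if storage_version == 2 {
        old.iter()
            .map(|(core, entries)| (*core, entries.iter().map(|(para, _)| *para).collect()))
            .collect()
    } else {
        new.clone()
    }
}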
- pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { - let max_availability_timeouts = configuration::ActiveConfig::::get() - .scheduler_params - .max_availability_timeouts; - Self::next_up_on_available(core).or_else(|| { - // Or, if none, the claim currently occupying the core, - // as it would be put back on the queue after timing out if number of retries is not at - // the maximum. - let cores = AvailabilityCores::::get(); - cores.get(core.0 as usize).and_then(|c| match c { - CoreOccupied::Free => None, - CoreOccupied::Paras(pe) => - if pe.availability_timeouts < max_availability_timeouts { - Some(Self::paras_entry_to_scheduled_core(pe)) - } else { - None - }, - }) - }) - } + /// For each core that isn't part of the `except_for` set, pop the first item of the claim queue + /// and fill the queue from the assignment provider. + pub(crate) fn advance_claim_queue(except_for: &BTreeSet) { + let config = configuration::ActiveConfig::::get(); + let num_assigner_cores = config.scheduler_params.num_cores; + // Extra sanity, config should already never be smaller than 1: + let n_lookahead = config.scheduler_params.lookahead.max(1); + + for core_idx in 0..num_assigner_cores { + let core_idx = CoreIndex::from(core_idx); + + if !except_for.contains(&core_idx) { + let core_idx = CoreIndex::from(core_idx); - /// Pushes occupied cores to the assignment provider. - fn push_occupied_cores_to_assignment_provider() { - AvailabilityCores::::mutate(|cores| { - for core in cores.iter_mut() { - match core::mem::replace(core, CoreOccupied::Free) { - CoreOccupied::Free => continue, - CoreOccupied::Paras(entry) => { - Self::maybe_push_assignment(entry); - }, + if let Some(dropped_para) = Self::pop_front_of_claim_queue(&core_idx) { + T::AssignmentProvider::report_processed(dropped_para); } - } - }); - } - // on new session - fn push_claim_queue_items_to_assignment_provider() { - for (_, claim_queue) in ClaimQueue::::take() { - // Push back in reverse order so that when we pop from the provider again, - // the entries in the claim queue are in the same order as they are right now. - for para_entry in claim_queue.into_iter().rev() { - Self::maybe_push_assignment(para_entry); + Self::fill_claim_queue(core_idx, n_lookahead); } } } - /// Push assignments back to the provider on session change unless the paras - /// timed out on availability before. - fn maybe_push_assignment(pe: ParasEntryType) { - if pe.availability_timeouts == 0 { - T::AssignmentProvider::push_back_assignment(pe.assignment); + // on new session + fn maybe_resize_claim_queue(old_core_count: u32, new_core_count: u32) { + if new_core_count < old_core_count { + ClaimQueue::::mutate(|cq| { + let to_remove: Vec<_> = cq + .range(CoreIndex(new_core_count)..CoreIndex(old_core_count)) + .map(|(k, _)| *k) + .collect(); + for key in to_remove { + if let Some(dropped_assignments) = cq.remove(&key) { + Self::push_back_to_assignment_provider(dropped_assignments.into_iter()); + } + } + }); } } - /// Frees cores and fills the free claim queue spots by popping from the `AssignmentProvider`. - pub fn free_cores_and_fill_claim_queue( - just_freed_cores: impl IntoIterator, - now: BlockNumberFor, - ) { - let (mut concluded_paras, mut timedout_paras) = Self::free_cores(just_freed_cores); - - // This can only happen on new sessions at which we move all assignments back to the - // provider. Hence, there's nothing we need to do here. 
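// Illustrative sketch (not part of the patch): `advance_claim_queue` above replaces the old
// `free_cores_and_fill_claim_queue`. Judging by the tests further down, the `except_for` set holds
// the cores whose current claim should be kept (for example cores that are still occupied), while
// every other core pops its front claim and is refilled from the assignment provider. A
// simplified, standalone model of the popping step only; reporting the popped assignment as
// processed and refilling up to the lookahead are omitted:
use std::collections::{BTreeMap, BTreeSet, VecDeque};

fn advance(claim_queue: &mut BTreeMap<u32, VecDeque<u32>>, except_for: &BTreeSet<u32>) {
    for (core, queue) in claim_queue.iter_mut() {
        if !except_for.contains(core) {
            queue.pop_front();
        }
    }
}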
- if ValidatorGroups::::decode_len().map_or(true, |l| l == 0) { - return - } - let n_session_cores = T::AssignmentProvider::session_core_count(); - let cq = ClaimQueue::::get(); + // Populate the claim queue. To be called on new session, after all the other modules were + // initialized. + fn populate_claim_queue_after_session_change() { let config = configuration::ActiveConfig::::get(); // Extra sanity, config should already never be smaller than 1: let n_lookahead = config.scheduler_params.lookahead.max(1); - let max_availability_timeouts = config.scheduler_params.max_availability_timeouts; - let ttl = config.scheduler_params.ttl; + let new_core_count = config.scheduler_params.num_cores; - for core_idx in 0..n_session_cores { + for core_idx in 0..new_core_count { let core_idx = CoreIndex::from(core_idx); + Self::fill_claim_queue(core_idx, n_lookahead); + } + } - let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32); - - // add previously timedout paras back into the queue - if let Some(mut entry) = timedout_paras.remove(&core_idx) { - if entry.availability_timeouts < max_availability_timeouts { - // Increment the timeout counter. - entry.availability_timeouts += 1; - if n_lookahead_used < n_lookahead { - entry.ttl = now + ttl; - } else { - // Over max capacity, we need to bump ttl (we exceeded the claim queue - // size, so otherwise the entry might get dropped before reaching the top): - entry.ttl = now + ttl + One::one(); - } - Self::add_to_claim_queue(core_idx, entry); - // The claim has been added back into the claim queue. - // Do not pop another assignment for the core. - continue - } else { - // Consider timed out assignments for on demand parachains as concluded for - // the assignment provider - let ret = concluded_paras.insert(core_idx, entry.assignment); - debug_assert!(ret.is_none()); + /// Push some assignments back to the provider. + fn push_back_to_assignment_provider( + assignments: impl core::iter::DoubleEndedIterator, + ) { + // Push back in reverse order so that when we pop from the provider again, + // the entries in the claim queue are in the same order as they are right + // now. + for assignment in assignments.rev() { + T::AssignmentProvider::push_back_assignment(assignment); + } + } + + fn fill_claim_queue(core_idx: CoreIndex, n_lookahead: u32) { + ClaimQueue::::mutate(|la| { + let cq = la.entry(core_idx).or_default(); + + let mut n_lookahead_used = cq.len() as u32; + + // If the claim queue used to be empty, we need to double the first assignment. + // Otherwise, the para will only be able to get the collation in right at the next block + // (synchronous backing). + // Only do this if the configured lookahead is greater than 1. Otherwise, it doesn't + // make sense. 
+ if n_lookahead_used == 0 && n_lookahead > 1 { + if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { + T::AssignmentProvider::assignment_duplicated(&assignment); + cq.push_back(assignment.clone()); + cq.push_back(assignment); + n_lookahead_used += 2; } } - if let Some(concluded_para) = concluded_paras.remove(&core_idx) { - T::AssignmentProvider::report_processed(concluded_para); - } for _ in n_lookahead_used..n_lookahead { if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) { - Self::add_to_claim_queue(core_idx, ParasEntry::new(assignment, now + ttl)); + cq.push_back(assignment); + } else { + break } } - } - debug_assert!(timedout_paras.is_empty()); - debug_assert!(concluded_paras.is_empty()); - } - - fn add_to_claim_queue(core_idx: CoreIndex, pe: ParasEntryType) { - ClaimQueue::::mutate(|la| { - la.entry(core_idx).or_default().push_back(pe); + // If we didn't end up pushing anything, remove the entry. We don't want to waste the + // space if we've no assignments. + if cq.is_empty() { + la.remove(&core_idx); + } }); } - /// Returns `ParasEntry` with `para_id` at `core_idx` if found. - fn remove_from_claim_queue( - core_idx: CoreIndex, - para_id: ParaId, - ) -> Result<(PositionInClaimQueue, ParasEntryType), &'static str> { - ClaimQueue::::mutate(|cq| { - let core_claims = cq.get_mut(&core_idx).ok_or("core_idx not found in lookahead")?; - - let pos = core_claims - .iter() - .position(|pe| pe.para_id() == para_id) - .ok_or("para id not found at core_idx lookahead")?; - - let pe = core_claims.remove(pos).ok_or("remove returned None")?; - - Ok((pos as u32, pe)) - }) - } - - /// Paras scheduled next in the claim queue. - pub(crate) fn scheduled_paras() -> impl Iterator { - let claim_queue = ClaimQueue::::get(); - claim_queue - .into_iter() - .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.assignment.para_id()))) - } - - /// Paras that may get backed on cores. - /// - /// 1. The para must be scheduled on core. - /// 2. Core needs to be free, otherwise backing is not possible. - pub(crate) fn eligible_paras() -> impl Iterator { - let availability_cores = AvailabilityCores::::get(); - - Self::claim_queue_iterator().zip(availability_cores.into_iter()).filter_map( - |((core_idx, queue), core)| { - if core != CoreOccupied::Free { - return None - } - let next_scheduled = queue.front()?; - Some((core_idx, next_scheduled.assignment.para_id())) - }, - ) + fn pop_front_of_claim_queue(core_idx: &CoreIndex) -> Option { + ClaimQueue::::mutate(|cq| cq.get_mut(core_idx)?.pop_front()) } #[cfg(any(feature = "try-runtime", test))] @@ -726,7 +443,7 @@ impl Pallet { } #[cfg(test)] - pub(crate) fn set_claim_queue(claim_queue: BTreeMap>>) { + pub(crate) fn set_claim_queue(claim_queue: BTreeMap>) { ClaimQueue::::set(claim_queue); } } diff --git a/polkadot/runtime/parachains/src/scheduler/common.rs b/polkadot/runtime/parachains/src/scheduler/common.rs index 114cd4b940bcdbeacdf0cbb7f62e74fc09dbad4d..bf8a2bee74e3c96e8b6771066c0eb8cbf4f02be4 100644 --- a/polkadot/runtime/parachains/src/scheduler/common.rs +++ b/polkadot/runtime/parachains/src/scheduler/common.rs @@ -77,11 +77,6 @@ pub trait AssignmentProvider { #[cfg(any(feature = "runtime-benchmarks", test))] fn get_mock_assignment(core_idx: CoreIndex, para_id: ParaId) -> Assignment; - /// How many cores are allocated to this provider. 
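// Illustrative sketch (not part of the patch): a standalone model of the refill step implemented
// by `fill_claim_queue` above. If a core's queue was empty, the first assignment popped from the
// provider is pushed twice (so the para can get a candidate backed without losing a block), and
// the queue is then topped up to the lookahead. The call that tells the assignment provider about
// the duplication is omitted here:
use std::collections::VecDeque;

fn fill(queue: &mut VecDeque<u32>, provider: &mut VecDeque<u32>, lookahead: usize) {
    if queue.is_empty() && lookahead > 1 {
        if let Some(first) = provider.pop_front() {
            queue.push_back(first);
            queue.push_back(first);
        }
    }
    while queue.len() < lookahead {
        match provider.pop_front() {
            Some(next) => queue.push_back(next),
            None => break,
        }
    }
}

// With lookahead = 3, an empty queue and a provider holding [A, B, C], the result is [A, A, B],
// matching the `advance_claim_queue_doubles_assignment_only_if_empty` test further down.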
-	///
-	/// As the name suggests the core count has to be session buffered:
-	///
-	/// - Core count has to be predetermined for the next session in the current session.
-	/// - Core count must not change during a session.
-	fn session_core_count() -> u32;
+	/// Report that an assignment was duplicated by the scheduler.
+	fn assignment_duplicated(assignment: &Assignment);
 }
diff --git a/polkadot/runtime/parachains/src/scheduler/migration.rs b/polkadot/runtime/parachains/src/scheduler/migration.rs
index 125f105ef70668c8ebacfdb9e2b267b9d00bfcbf..e741711cad6d899557ea87788b4eec996784c330 100644
--- a/polkadot/runtime/parachains/src/scheduler/migration.rs
+++ b/polkadot/runtime/parachains/src/scheduler/migration.rs
@@ -268,7 +268,7 @@ pub type MigrateV0ToV1<T> = VersionedMigration<
 	<T as frame_system::Config>::DbWeight,
 >;
 
-mod v2 {
+pub(crate) mod v2 {
 	use super::*;
 	use crate::scheduler;
 
@@ -406,3 +406,89 @@ pub type MigrateV1ToV2<T> = VersionedMigration<
 	Pallet<T>,
 	<T as frame_system::Config>::DbWeight,
 >;
+
+/// Migration for TTL and availability timeout retries removal.
+/// AvailabilityCores storage is removed and ClaimQueue now holds `Assignment`s instead of
+/// `ParasEntryType`.
+mod v3 {
+	use super::*;
+	use crate::scheduler;
+
+	#[storage_alias]
+	pub(crate) type ClaimQueue<T: Config> =
+		StorageValue<Pallet<T>, BTreeMap<CoreIndex, VecDeque<Assignment>>, ValueQuery>;
+	/// Migration to V3
+	pub struct UncheckedMigrateToV3<T>(core::marker::PhantomData<T>);
+
+	impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV3<T> {
+		fn on_runtime_upgrade() -> Weight {
+			let mut weight: Weight = Weight::zero();
+
+			// Migrate ClaimQueue to the new format.
+			let old = v2::ClaimQueue::<T>::take();
+			let new = old
+				.into_iter()
+				.map(|(k, v)| {
+					(
+						k,
+						v.into_iter()
+							.map(|paras_entry| paras_entry.assignment)
+							.collect::<VecDeque<_>>(),
+					)
+				})
+				.collect::<BTreeMap<CoreIndex, VecDeque<Assignment>>>();
+
+			v3::ClaimQueue::<T>::put(new);
+
+			// Clear the AvailabilityCores storage.
+			v2::AvailabilityCores::<T>::kill();
+
+			weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
+
+			log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v3");
+
+			weight
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::DispatchError> {
+			log::trace!(
+				target: crate::scheduler::LOG_TARGET,
+				"ClaimQueue before migration: {}",
+				v2::ClaimQueue::<T>::get().len()
+			);
+
+			let bytes = u32::to_be_bytes(v2::ClaimQueue::<T>::get().len() as u32);
+
+			Ok(bytes.to_vec())
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::DispatchError> {
+			log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()");
+
+			let old_len = u32::from_be_bytes(state.try_into().unwrap());
+			ensure!(
+				v3::ClaimQueue::<T>::get().len() as u32 == old_len,
+				"Old ClaimQueue completely moved to new ClaimQueue after migration"
+			);
+
+			ensure!(
+				!v2::AvailabilityCores::<T>::exists(),
+				"AvailabilityCores storage should have been completely killed"
+			);
+
+			Ok(())
+		}
+	}
+}
+
+/// Migrate `V2` to `V3` of the storage format.
+pub type MigrateV2ToV3 = VersionedMigration< + 2, + 3, + v3::UncheckedMigrateToV3, + Pallet, + ::DbWeight, +>; diff --git a/polkadot/runtime/parachains/src/scheduler/tests.rs b/polkadot/runtime/parachains/src/scheduler/tests.rs index 5f80114b596368135e91ca59ca9c7e6f0b59510f..5be7e084f3bca3588301fa3970d0873fe83ba687 100644 --- a/polkadot/runtime/parachains/src/scheduler/tests.rs +++ b/polkadot/runtime/parachains/src/scheduler/tests.rs @@ -16,7 +16,7 @@ use super::*; -use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use alloc::collections::btree_map::BTreeMap; use frame_support::assert_ok; use polkadot_primitives::{ BlockNumber, SchedulerParams, SessionIndex, ValidationCode, ValidatorId, @@ -27,14 +27,14 @@ use crate::{ configuration::HostConfiguration, initializer::SessionChangeNotification, mock::{ - new_test_ext, MockAssigner, MockGenesisConfig, Paras, ParasShared, RuntimeOrigin, - Scheduler, System, Test, + new_test_ext, Configuration, MockAssigner, MockGenesisConfig, Paras, ParasShared, + RuntimeOrigin, Scheduler, System, Test, }, paras::{ParaGenesisArgs, ParaKind}, scheduler::{self, common::Assignment, ClaimQueue}, }; -fn schedule_blank_para(id: ParaId) { +fn register_para(id: ParaId) { let validation_code: ValidationCode = vec![1, 2, 3].into(); assert_ok!(Paras::schedule_para_initialize( id, @@ -58,17 +58,18 @@ fn run_to_block( Scheduler::initializer_finalize(); Paras::initializer_finalize(b); - if let Some(notification) = new_session(b + 1) { - let mut notification_with_session_index = notification; + if let Some(mut notification) = new_session(b + 1) { // We will make every session change trigger an action queue. Normally this may require // 2 or more session changes. - if notification_with_session_index.session_index == SessionIndex::default() { - notification_with_session_index.session_index = ParasShared::scheduled_session(); + if notification.session_index == SessionIndex::default() { + notification.session_index = ParasShared::scheduled_session(); } - Scheduler::pre_new_session(); - Paras::initializer_on_new_session(¬ification_with_session_index); - Scheduler::initializer_on_new_session(¬ification_with_session_index); + Configuration::force_set_active_config(notification.new_config.clone()); + + Paras::initializer_on_new_session(¬ification); + + Scheduler::initializer_on_new_session(¬ification); } System::on_finalize(b); @@ -79,28 +80,8 @@ fn run_to_block( Paras::initializer_initialize(b + 1); Scheduler::initializer_initialize(b + 1); - // In the real runtime this is expected to be called by the `InclusionInherent` pallet. - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), b + 1); - } -} - -fn run_to_end_of_block( - to: BlockNumber, - new_session: impl Fn(BlockNumber) -> Option>, -) { - run_to_block(to, &new_session); - - Scheduler::initializer_finalize(); - Paras::initializer_finalize(to); - - if let Some(notification) = new_session(to + 1) { - Scheduler::pre_new_session(); - - Paras::initializer_on_new_session(¬ification); - Scheduler::initializer_on_new_session(¬ification); + Scheduler::advance_claim_queue(&Default::default()); } - - System::on_finalize(to); } fn default_config() -> HostConfiguration { @@ -110,6 +91,7 @@ fn default_config() -> HostConfiguration { // `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and // `thread_availability_period`. 
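// Illustrative sketch (not part of the patch): `MigrateV2ToV3` defined at the start of this hunk
// follows the usual `VersionedMigration` pattern, so a runtime that includes this pallet would
// typically list it in its migrations tuple. The exact module path and the surrounding tuple
// members depend on the runtime; `Runtime` and the placeholder comment below are assumptions for
// illustration, not part of this diff:
pub type Migrations = (
    // ... other pending migrations ...
    polkadot_runtime_parachains::scheduler::migration::MigrateV2ToV3<Runtime>,
);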
minimum_validation_upgrade_delay: 6, + #[allow(deprecated)] scheduler_params: SchedulerParams { group_rotation_frequency: 10, paras_availability_period: 3, @@ -129,172 +111,27 @@ fn genesis_config(config: &HostConfiguration) -> MockGenesisConfig } } -fn claimqueue_contains_para_ids(pids: Vec) -> bool { - let set: BTreeSet = ClaimQueue::::get() +/// Internal access to assignments at the top of the claim queue. +fn next_assignments() -> impl Iterator { + let claim_queue = ClaimQueue::::get(); + claim_queue .into_iter() - .flat_map(|(_, paras_entries)| paras_entries.into_iter().map(|pe| pe.assignment.para_id())) - .collect(); - - pids.into_iter().all(|pid| set.contains(&pid)) -} - -fn availability_cores_contains_para_ids(pids: Vec) -> bool { - let set: BTreeSet = AvailabilityCores::::get() - .into_iter() - .filter_map(|core| match core { - CoreOccupied::Free => None, - CoreOccupied::Paras(entry) => Some(entry.para_id()), - }) - .collect(); - - pids.into_iter().all(|pid| set.contains(&pid)) -} - -/// Internal access to entries at the top of the claim queue. -fn scheduled_entries() -> impl Iterator>)> { - let claimqueue = ClaimQueue::::get(); - claimqueue - .into_iter() - .filter_map(|(core_idx, v)| v.front().map(|e| (core_idx, e.clone()))) -} - -#[test] -fn claim_queue_iterator_handles_holes_correctly() { - let mut queue = BTreeMap::new(); - queue.insert(CoreIndex(1), ["abc"].into_iter().collect()); - queue.insert(CoreIndex(4), ["cde"].into_iter().collect()); - let queue = queue.into_iter().peekable(); - let mut i = ClaimQueueIterator { next_idx: 0, queue }; - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(0)); - assert!(e.is_empty()); - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(1)); - assert!(e.len() == 1); - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(2)); - assert!(e.is_empty()); - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(3)); - assert!(e.is_empty()); - - let (idx, e) = i.next().unwrap(); - assert_eq!(idx, CoreIndex(4)); - assert!(e.len() == 1); - - assert!(i.next().is_none()); + .filter_map(|(core_idx, v)| v.front().map(|a| (core_idx, a.clone()))) } #[test] -fn claimqueue_ttl_drop_fn_works() { +fn session_change_shuffles_validators() { let mut config = default_config(); - config.scheduler_params.lookahead = 3; + // Need five cores for this test + config.scheduler_params.num_cores = 5; let genesis_config = genesis_config(&config); - let para_id = ParaId::from(100); - let core_idx = CoreIndex::from(0); - let mut now = 10; - new_test_ext(genesis_config).execute_with(|| { - assert!(config.scheduler_params.ttl == 5); - // Register and run to a blockheight where the para is in a valid state. - schedule_blank_para(para_id); - run_to_block(now, |n| if n == now { Some(Default::default()) } else { None }); - - // Add a claim on core 0 with a ttl in the past. - let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now - 5 as u32); - Scheduler::add_to_claim_queue(core_idx, paras_entry.clone()); - - // Claim is in queue prior to call. - assert!(claimqueue_contains_para_ids::(vec![para_id])); - - // Claim is dropped post call. - Scheduler::drop_expired_claims_from_claim_queue(); - assert!(!claimqueue_contains_para_ids::(vec![para_id])); - - // Add a claim on core 0 with a ttl in the future (15). - let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now + 5); - Scheduler::add_to_claim_queue(core_idx, paras_entry.clone()); - - // Claim is in queue post call. 
- Scheduler::drop_expired_claims_from_claim_queue(); - assert!(claimqueue_contains_para_ids::(vec![para_id])); - - now = now + 6; - run_to_block(now, |_| None); - - // Claim is dropped - Scheduler::drop_expired_claims_from_claim_queue(); - assert!(!claimqueue_contains_para_ids::(vec![para_id])); + assert!(ValidatorGroups::::get().is_empty()); - // Add a claim on core 0 with a ttl == now (16) - let paras_entry = ParasEntry::new(Assignment::Bulk(para_id), now); - Scheduler::add_to_claim_queue(core_idx, paras_entry.clone()); - - // Claim is in queue post call. - Scheduler::drop_expired_claims_from_claim_queue(); - assert!(claimqueue_contains_para_ids::(vec![para_id])); - - now = now + 1; - run_to_block(now, |_| None); - - // Drop expired claim. - Scheduler::drop_expired_claims_from_claim_queue(); - assert!(!claimqueue_contains_para_ids::(vec![para_id])); - - // Add a claim on core 0 with a ttl == now (17) - let paras_entry_non_expired = ParasEntry::new(Assignment::Bulk(para_id), now); - let paras_entry_expired = ParasEntry::new(Assignment::Bulk(para_id), now - 2); - // ttls = [17, 15, 17] - Scheduler::add_to_claim_queue(core_idx, paras_entry_non_expired.clone()); - Scheduler::add_to_claim_queue(core_idx, paras_entry_expired.clone()); - Scheduler::add_to_claim_queue(core_idx, paras_entry_non_expired.clone()); - let cq = scheduler::ClaimQueue::::get(); - assert_eq!(cq.get(&core_idx).unwrap().len(), 3); - - // Add a claim to the test assignment provider. - let assignment = Assignment::Bulk(para_id); - - MockAssigner::add_test_assignment(assignment.clone()); - - // Drop expired claim. - Scheduler::drop_expired_claims_from_claim_queue(); - - let cq = scheduler::ClaimQueue::::get(); - let cqc = cq.get(&core_idx).unwrap(); - // Same number of claims, because a new claim is popped from `MockAssigner` instead of the - // expired one - assert_eq!(cqc.len(), 3); - - // The first 2 claims in the queue should have a ttl of 17, - // being the ones set up prior in this test as claims 1 and 3. - // The third claim is popped from the assignment provider and - // has a new ttl set by the scheduler of now + - // assignment_provider_ttl. 
ttls = [17, 17, 22] - assert!(cqc.iter().enumerate().all(|(index, entry)| { - match index { - 0 | 1 => entry.clone().ttl == 17, - 2 => entry.clone().ttl == 22, - _ => false, - } - })) - }); -} - -#[test] -fn session_change_shuffles_validators() { - let genesis_config = genesis_config(&default_config()); - - new_test_ext(genesis_config).execute_with(|| { - // Need five cores for this test - MockAssigner::set_core_count(5); run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), @@ -328,6 +165,8 @@ fn session_change_shuffles_validators() { fn session_change_takes_only_max_per_core() { let config = { let mut config = default_config(); + // Simulate 2 cores between all usage types + config.scheduler_params.num_cores = 2; config.scheduler_params.max_validators_per_core = Some(1); config }; @@ -335,9 +174,6 @@ fn session_change_takes_only_max_per_core() { let genesis_config = genesis_config(&config); new_test_ext(genesis_config).execute_with(|| { - // Simulate 2 cores between all usage types - MockAssigner::set_core_count(2); - run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { new_config: config.clone(), @@ -367,8 +203,12 @@ fn session_change_takes_only_max_per_core() { } #[test] -fn fill_claimqueue_fills() { - let config = default_config(); +// Test that `advance_claim_queue` doubles the first assignment only for a core that didn't use to +// have any assignments. +fn advance_claim_queue_doubles_assignment_only_if_empty() { + let mut config = default_config(); + config.scheduler_params.lookahead = 3; + config.scheduler_params.num_cores = 2; let genesis_config = genesis_config(&config); let para_a = ParaId::from(3_u32); @@ -380,18 +220,15 @@ fn fill_claimqueue_fills() { let assignment_c = Assignment::Bulk(para_c); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(2); - let coretime_ttl = config.scheduler_params.ttl; - // Add 3 paras - schedule_blank_para(para_a); - schedule_blank_para(para_b); - schedule_blank_para(para_c); + register_para(para_a); + register_para(para_b); + register_para(para_c); // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), @@ -406,224 +243,108 @@ fn fill_claimqueue_fills() { MockAssigner::add_test_assignment(assignment_b.clone()); MockAssigner::add_test_assignment(assignment_c.clone()); + // This will call advance_claim_queue run_to_block(2, |_| None); { - assert_eq!(Scheduler::claim_queue_len(), 3); - let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); + assert_eq!(Scheduler::claim_queue_len(), 5); + let mut claim_queue = scheduler::ClaimQueue::::get(); - // Was added a block later, note the TTL. - assert_eq!( - scheduled.get(&CoreIndex(0)).unwrap(), - &ParasEntry { - assignment: assignment_a.clone(), - availability_timeouts: 0, - ttl: 2 + coretime_ttl - }, - ); - // Sits on the same core as `para_a` + // Because the claim queue used to be empty, the first assignment is doubled for every + // core so that the first para gets a fair shot at backing something. 
assert_eq!( - scheduler::ClaimQueue::::get().get(&CoreIndex(0)).unwrap()[1], - ParasEntry { - assignment: assignment_b.clone(), - availability_timeouts: 0, - ttl: 2 + coretime_ttl - } + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a.clone(), assignment_a, assignment_b] + .into_iter() + .collect::>() ); assert_eq!( - scheduled.get(&CoreIndex(1)).unwrap(), - &ParasEntry { - assignment: assignment_c.clone(), - availability_timeouts: 0, - ttl: 2 + coretime_ttl - }, + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_c.clone(), assignment_c].into_iter().collect::>() ); } }); } #[test] -fn schedule_schedules_including_just_freed() { +// Test that `advance_claim_queue` doesn't populate for cores which have no assignments. +fn advance_claim_queue_no_entry_if_empty() { let mut config = default_config(); - // NOTE: This test expects on demand cores to each get slotted on to a different core - // and not fill up the claimqueue of each core first. - config.scheduler_params.lookahead = 1; + config.scheduler_params.lookahead = 3; + config.scheduler_params.num_cores = 2; let genesis_config = genesis_config(&config); let para_a = ParaId::from(3_u32); - let para_b = ParaId::from(4_u32); - let para_c = ParaId::from(5_u32); - let para_d = ParaId::from(6_u32); - let para_e = ParaId::from(7_u32); - let assignment_a = Assignment::Bulk(para_a); - let assignment_b = Assignment::Bulk(para_b); - let assignment_c = Assignment::Bulk(para_c); - let assignment_d = Assignment::Bulk(para_d); - let assignment_e = Assignment::Bulk(para_e); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(3); - - // add 5 paras - schedule_blank_para(para_a); - schedule_blank_para(para_b); - schedule_blank_para(para_c); - schedule_blank_para(para_d); - schedule_blank_para(para_e); + // Add 1 para + register_para(para_a); - // start a new session to activate, 3 validators for 3 cores. + // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), ], ..Default::default() }), _ => None, }); - // add a couple of para claims now that paras are live - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_c.clone()); - - let mut now = 2; - run_to_block(now, |_| None); - - assert_eq!(Scheduler::scheduled_paras().collect::>().len(), 2); - - // cores 0, 1 should be occupied. mark them as such. 
- let mut occupied_map: BTreeMap = BTreeMap::new(); - occupied_map.insert(CoreIndex(0), para_a); - occupied_map.insert(CoreIndex(1), para_c); - Scheduler::occupied(occupied_map); - - { - let cores = AvailabilityCores::::get(); - - // cores 0, 1 are `CoreOccupied::Paras(ParasEntry...)` - assert!(cores[0] != CoreOccupied::Free); - assert!(cores[1] != CoreOccupied::Free); - - // core 2 is free - assert!(cores[2] == CoreOccupied::Free); - - assert!(Scheduler::scheduled_paras().collect::>().is_empty()); - - // All `core_queue`s should be empty - scheduler::ClaimQueue::::get() - .iter() - .for_each(|(_core_idx, core_queue)| assert_eq!(core_queue.len(), 0)) - } - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_c.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); - MockAssigner::add_test_assignment(assignment_d.clone()); - MockAssigner::add_test_assignment(assignment_e.clone()); - now = 3; - run_to_block(now, |_| None); - { - let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); - - assert_eq!(scheduled.len(), 3); - assert_eq!( - scheduled.get(&CoreIndex(2)).unwrap(), - &ParasEntry { - assignment: Assignment::Bulk(para_b), - availability_timeouts: 0, - ttl: 8 - }, - ); - } - - // now note that cores 0 and 1 were freed. - let just_updated: BTreeMap = vec![ - (CoreIndex(0), FreedReason::Concluded), - (CoreIndex(1), FreedReason::TimedOut), // should go back on queue. - ] - .into_iter() - .collect(); - Scheduler::free_cores_and_fill_claim_queue(just_updated, now); + // This will call advance_claim_queue + run_to_block(3, |_| None); { - let scheduled: BTreeMap<_, _> = scheduled_entries().collect(); + let mut claim_queue = scheduler::ClaimQueue::::get(); - // 1 thing scheduled before, + 2 cores freed. - assert_eq!(scheduled.len(), 3); - assert_eq!( - scheduled.get(&CoreIndex(0)).unwrap(), - &ParasEntry { - // Next entry in queue is `a` again: - assignment: Assignment::Bulk(para_a), - availability_timeouts: 0, - ttl: 8 - }, - ); - // Although C was descheduled, the core `2` was occupied so C goes back to the queue. assert_eq!( - scheduler::ClaimQueue::::get()[&CoreIndex(1)][1], - ParasEntry { - assignment: Assignment::Bulk(para_c), - // End of the queue should be the pushed back entry: - availability_timeouts: 1, - // ttl 1 higher: - ttl: 9 - }, - ); - assert_eq!( - scheduled.get(&CoreIndex(1)).unwrap(), - &ParasEntry { - assignment: Assignment::Bulk(para_c), - availability_timeouts: 0, - ttl: 8 - }, - ); - assert_eq!( - scheduled.get(&CoreIndex(2)).unwrap(), - &ParasEntry { - assignment: Assignment::Bulk(para_b), - availability_timeouts: 0, - ttl: 8 - }, + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a].into_iter().collect::>() ); - assert!(claimqueue_contains_para_ids::(vec![para_c])); - assert!(!availability_cores_contains_para_ids::(vec![para_a, para_c])); + // Even though core 1 exists, there's no assignment for it so it's not present in the + // claim queue. + assert!(claim_queue.remove(&CoreIndex(1)).is_none()); } }); } #[test] -fn schedule_clears_availability_cores() { +// Test that `advance_claim_queue` only advances for cores that are not part of the `except_for` +// set. +fn advance_claim_queue_except_for() { let mut config = default_config(); + // NOTE: This test expects on demand cores to each get slotted on to a different core + // and not fill up the claimqueue of each core first. 
config.scheduler_params.lookahead = 1; + config.scheduler_params.num_cores = 3; + let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); let para_b = ParaId::from(2_u32); let para_c = ParaId::from(3_u32); + let para_d = ParaId::from(4_u32); + let para_e = ParaId::from(5_u32); let assignment_a = Assignment::Bulk(para_a); let assignment_b = Assignment::Bulk(para_b); let assignment_c = Assignment::Bulk(para_c); + let assignment_d = Assignment::Bulk(para_d); + let assignment_e = Assignment::Bulk(para_e); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(3); - - // register 3 paras - schedule_blank_para(para_a); - schedule_blank_para(para_b); - schedule_blank_para(para_c); - - // Adding assignments then running block to populate claim queue - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); - MockAssigner::add_test_assignment(assignment_c.clone()); + // add 5 paras + register_para(para_a); + register_para(para_b); + register_para(para_c); + register_para(para_d); + register_para(para_e); // start a new session to activate, 3 validators for 3 cores. run_to_block(1, |number| match number { @@ -639,91 +360,69 @@ fn schedule_clears_availability_cores() { _ => None, }); - run_to_block(2, |_| None); - - assert_eq!(scheduler::ClaimQueue::::get().len(), 3); - - // cores 0, 1, and 2 should be occupied. mark them as such. - Scheduler::occupied( - vec![(CoreIndex(0), para_a), (CoreIndex(1), para_b), (CoreIndex(2), para_c)] - .into_iter() - .collect(), - ); + // add a couple of para claims now that paras are live + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_c.clone()); - { - let cores = AvailabilityCores::::get(); + run_to_block(2, |_| None); - assert_eq!(cores[0].is_free(), false); - assert_eq!(cores[1].is_free(), false); - assert_eq!(cores[2].is_free(), false); + Scheduler::advance_claim_queue(&Default::default()); - // All `core_queue`s should be empty - scheduler::ClaimQueue::::get() - .iter() - .for_each(|(_core_idx, core_queue)| assert!(core_queue.len() == 0)) - } + // Queues of all cores should be empty + assert_eq!(Scheduler::claim_queue_len(), 0); - // Add more assignments MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); MockAssigner::add_test_assignment(assignment_c.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + MockAssigner::add_test_assignment(assignment_d.clone()); + MockAssigner::add_test_assignment(assignment_e.clone()); run_to_block(3, |_| None); - // now note that cores 0 and 2 were freed. - Scheduler::free_cores_and_fill_claim_queue( - vec![(CoreIndex(0), FreedReason::Concluded), (CoreIndex(2), FreedReason::Concluded)] - .into_iter() - .collect::>(), - 3, - ); + { + let scheduled: BTreeMap<_, _> = next_assignments().collect(); + + assert_eq!(scheduled.len(), 3); + assert_eq!(scheduled.get(&CoreIndex(0)).unwrap(), &Assignment::Bulk(para_a)); + assert_eq!(scheduled.get(&CoreIndex(1)).unwrap(), &Assignment::Bulk(para_c)); + assert_eq!(scheduled.get(&CoreIndex(2)).unwrap(), &Assignment::Bulk(para_b)); + } + + // now note that cores 0 and 1 were freed. 
+ Scheduler::advance_claim_queue(&std::iter::once(CoreIndex(2)).collect()); { - let claimqueue = ClaimQueue::::get(); - let claimqueue_0 = claimqueue.get(&CoreIndex(0)).unwrap().clone(); - let claimqueue_2 = claimqueue.get(&CoreIndex(2)).unwrap().clone(); - let entry_ttl = 8; - assert_eq!(claimqueue_0.len(), 1); - assert_eq!(claimqueue_2.len(), 1); - let queue_0_expectation: VecDeque> = - vec![ParasEntry::new(assignment_a, entry_ttl as u32)].into_iter().collect(); - let queue_2_expectation: VecDeque> = - vec![ParasEntry::new(assignment_c, entry_ttl as u32)].into_iter().collect(); - assert_eq!(claimqueue_0, queue_0_expectation); - assert_eq!(claimqueue_2, queue_2_expectation); - - // The freed cores should be `Free` in `AvailabilityCores`. - let cores = AvailabilityCores::::get(); - assert!(cores[0].is_free()); - assert!(cores[2].is_free()); + let scheduled: BTreeMap<_, _> = next_assignments().collect(); + + // 1 thing scheduled before, + 2 cores freed. + assert_eq!(scheduled.len(), 3); + assert_eq!(scheduled.get(&CoreIndex(0)).unwrap(), &Assignment::Bulk(para_d)); + assert_eq!(scheduled.get(&CoreIndex(1)).unwrap(), &Assignment::Bulk(para_e)); + assert_eq!(scheduled.get(&CoreIndex(2)).unwrap(), &Assignment::Bulk(para_b)); } }); } #[test] fn schedule_rotates_groups() { + let on_demand_cores = 2; let config = { let mut config = default_config(); config.scheduler_params.lookahead = 1; + config.scheduler_params.num_cores = on_demand_cores; config }; let rotation_frequency = config.scheduler_params.group_rotation_frequency; - let on_demand_cores = 2; let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); let para_b = ParaId::from(2_u32); - let assignment_a = Assignment::Bulk(para_a); - let assignment_b = Assignment::Bulk(para_b); - new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(on_demand_cores); - - schedule_blank_para(para_a); - schedule_blank_para(para_b); + register_para(para_a); + register_para(para_b); // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { @@ -741,15 +440,10 @@ fn schedule_rotates_groups() { let session_start_block = scheduler::SessionStartBlock::::get(); assert_eq!(session_start_block, 1); - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); - let mut now = 2; run_to_block(now, |_| None); let assert_groups_rotated = |rotations: u32, now: &BlockNumberFor| { - let scheduled: BTreeMap<_, _> = Scheduler::scheduled_paras().collect(); - assert_eq!(scheduled.len(), 2); assert_eq!( Scheduler::group_assigned_to_core(CoreIndex(0), *now).unwrap(), GroupIndex((0u32 + rotations) % on_demand_cores) @@ -764,7 +458,7 @@ fn schedule_rotates_groups() { // one block before first rotation. 
now = rotation_frequency; - run_to_block(rotation_frequency, |_| None); + run_to_block(now, |_| None); assert_groups_rotated(0, &now); @@ -785,134 +479,6 @@ fn schedule_rotates_groups() { }); } -#[test] -fn on_demand_claims_are_pruned_after_timing_out() { - let max_timeouts = 20; - let mut config = default_config(); - config.scheduler_params.lookahead = 1; - // Need more timeouts for this test - config.scheduler_params.max_availability_timeouts = max_timeouts; - config.scheduler_params.ttl = BlockNumber::from(5u32); - let genesis_config = genesis_config(&config); - - let para_a = ParaId::from(1_u32); - - let assignment_a = Assignment::Bulk(para_a); - - new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(2); - schedule_blank_para(para_a); - - // #1 - let mut now = 1; - run_to_block(now, |number| match number { - 1 => Some(SessionChangeNotification { - new_config: default_config(), - validators: vec![ - ValidatorId::from(Sr25519Keyring::Alice.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ], - ..Default::default() - }), - _ => None, - }); - - MockAssigner::add_test_assignment(assignment_a.clone()); - - // #2 - now += 1; - run_to_block(now, |_| None); - assert_eq!(scheduler::ClaimQueue::::get().len(), 1); - // ParaId a is in the claimqueue. - assert!(claimqueue_contains_para_ids::(vec![para_a])); - - Scheduler::occupied(vec![(CoreIndex(0), para_a)].into_iter().collect()); - // ParaId a is no longer in the claimqueue. - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - // It is in availability cores. - assert!(availability_cores_contains_para_ids::(vec![para_a])); - - // #3 - now += 1; - // Run to block #n over the max_retries value. - // In this case, both validator groups with time out on availability and - // the assignment will be dropped. - for n in now..=(now + max_timeouts + 1) { - // #n - run_to_block(n, |_| None); - // Time out on core 0. - let just_updated: BTreeMap = vec![ - (CoreIndex(0), FreedReason::TimedOut), // should go back on queue. - ] - .into_iter() - .collect(); - Scheduler::free_cores_and_fill_claim_queue(just_updated, now); - - // ParaId a exists in the claim queue until max_retries is reached. - if n < max_timeouts + now { - assert!(claimqueue_contains_para_ids::(vec![para_a])); - } else { - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - } - - let core_assignments = Scheduler::scheduled_paras().collect(); - Scheduler::occupied(core_assignments); - } - - // ParaId a does not exist in the claimqueue/availability_cores after - // threshold has been reached. - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - assert!(!availability_cores_contains_para_ids::(vec![para_a])); - - // #25 - now += max_timeouts + 2; - - // Add assignment back to the mix. - MockAssigner::add_test_assignment(assignment_a.clone()); - run_to_block(now, |_| None); - - assert!(claimqueue_contains_para_ids::(vec![para_a])); - - // #26 - now += 1; - // Run to block #n but this time have group 1 conclude the availability. - for n in now..=(now + max_timeouts + 1) { - // #n - run_to_block(n, |_| None); - // Time out core 0 if group 0 is assigned to it, if group 1 is assigned, conclude. - let mut just_updated: BTreeMap = BTreeMap::new(); - if let Some(group) = Scheduler::group_assigned_to_core(CoreIndex(0), n) { - match group { - GroupIndex(0) => { - just_updated.insert(CoreIndex(0), FreedReason::TimedOut); // should go back on queue. 
- }, - GroupIndex(1) => { - just_updated.insert(CoreIndex(0), FreedReason::Concluded); - }, - _ => panic!("Should only have 2 groups here"), - } - } - - Scheduler::free_cores_and_fill_claim_queue(just_updated, now); - - // ParaId a exists in the claim queue until groups are rotated. - if n < 31 { - assert!(claimqueue_contains_para_ids::(vec![para_a])); - } else { - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - } - - let core_assignments = Scheduler::scheduled_paras().collect(); - Scheduler::occupied(core_assignments); - } - - // ParaId a does not exist in the claimqueue/availability_cores after - // being concluded - assert!(!claimqueue_contains_para_ids::(vec![para_a])); - assert!(!availability_cores_contains_para_ids::(vec![para_a])); - }); -} - #[test] fn availability_predicate_works() { let genesis_config = genesis_config(&default_config()); @@ -948,20 +514,21 @@ fn availability_predicate_works() { #[test] fn next_up_on_available_uses_next_scheduled_or_none() { - let genesis_config = genesis_config(&default_config()); + let mut config = default_config(); + config.scheduler_params.num_cores = 1; + let genesis_config = genesis_config(&config); let para_a = ParaId::from(1_u32); let para_b = ParaId::from(2_u32); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(1); - schedule_blank_para(para_a); - schedule_blank_para(para_b); + register_para(para_a); + register_para(para_b); // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Eve.public()), @@ -971,69 +538,57 @@ fn next_up_on_available_uses_next_scheduled_or_none() { _ => None, }); - let entry_a = ParasEntry { - assignment: Assignment::Bulk(para_a), - availability_timeouts: 0 as u32, - ttl: 5 as u32, - }; - let entry_b = ParasEntry { - assignment: Assignment::Bulk(para_b), - availability_timeouts: 0 as u32, - ttl: 5 as u32, - }; - - Scheduler::add_to_claim_queue(CoreIndex(0), entry_a.clone()); + MockAssigner::add_test_assignment(Assignment::Bulk(para_a)); run_to_block(2, |_| None); { - assert_eq!(Scheduler::claim_queue_len(), 1); - assert_eq!(scheduler::AvailabilityCores::::get().len(), 1); - - let mut map = BTreeMap::new(); - map.insert(CoreIndex(0), para_a); - Scheduler::occupied(map); + // Two assignments for A on core 0, because the claim queue used to be empty. 
+ assert_eq!(Scheduler::claim_queue_len(), 2); - let cores = scheduler::AvailabilityCores::::get(); - match &cores[0] { - CoreOccupied::Paras(entry) => assert_eq!(entry, &entry_a), - _ => panic!("There should only be one test assigner core"), - } - - assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); + assert!(Scheduler::next_up_on_available(CoreIndex(1)).is_none()); - Scheduler::add_to_claim_queue(CoreIndex(0), entry_b); + assert_eq!( + Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), + ScheduledCore { para_id: para_a, collator: None } + ); + Scheduler::advance_claim_queue(&Default::default()); assert_eq!( Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: para_b, collator: None } + ScheduledCore { para_id: para_a, collator: None } ); + + Scheduler::advance_claim_queue(&Default::default()); + assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); } }); } #[test] -fn next_up_on_time_out_reuses_claim_if_nothing_queued() { - let genesis_config = genesis_config(&default_config()); +fn session_change_increasing_number_of_cores() { + let mut config = default_config(); + config.scheduler_params.num_cores = 2; + let genesis_config = genesis_config(&config); - let para_a = ParaId::from(1_u32); - let para_b = ParaId::from(2_u32); + let para_a = ParaId::from(3_u32); + let para_b = ParaId::from(4_u32); let assignment_a = Assignment::Bulk(para_a); let assignment_b = Assignment::Bulk(para_b); new_test_ext(genesis_config).execute_with(|| { - MockAssigner::set_core_count(1); - schedule_blank_para(para_a); - schedule_blank_para(para_b); + // Add 2 paras + register_para(para_a); + register_para(para_b); // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), ], ..Default::default() }), @@ -1041,193 +596,236 @@ fn next_up_on_time_out_reuses_claim_if_nothing_queued() { }); MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + // This will call advance_claim_queue run_to_block(2, |_| None); { - assert_eq!(scheduler::ClaimQueue::::get().len(), 1); - assert_eq!(scheduler::AvailabilityCores::::get().len(), 1); - - let mut map = BTreeMap::new(); - map.insert(CoreIndex(0), para_a); - Scheduler::occupied(map); - - let cores = scheduler::AvailabilityCores::::get(); - match cores.get(0).unwrap() { - CoreOccupied::Paras(entry) => { - assert_eq!(entry.assignment, assignment_a.clone()); - }, - _ => panic!("There should only be a single test assigner core"), - } - - // There's nothing more to pop for core 0 from the assignment provider. - assert!(MockAssigner::pop_assignment_for_core(CoreIndex(0)).is_none()); + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 4); assert_eq!( - Scheduler::next_up_on_time_out(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: para_a, collator: None } + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a.clone(), assignment_a.clone()] + .into_iter() + .collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_b.clone(), assignment_b.clone()] + .into_iter() + .collect::>() ); + } + + // Increase number of cores to 4. 
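// Illustrative sketch, not part of the patch: the peek-then-advance pattern exercised by
// `next_up_on_available_uses_next_scheduled_or_none` above. Judging by its assertions,
// `next_up_on_available` only peeks at the front of a core's claim queue, while
// `advance_claim_queue(&Default::default())` pops one entry per core. With no further
// assignments queued in the mock assigner, the loop below drains core 0's claims.
while let Some(ScheduledCore { para_id, .. }) = Scheduler::next_up_on_available(CoreIndex(0)) {
    // `para_id` is what would be backed next on core 0; record it, then pop and continue.
    let _ = para_id;
    Scheduler::advance_claim_queue(&Default::default());
}
assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none());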
+ let old_config = config; + let mut new_config = old_config.clone(); + new_config.scheduler_params.num_cores = 4; - MockAssigner::add_test_assignment(assignment_b.clone()); + // add another assignment for para b. + MockAssigner::add_test_assignment(assignment_b.clone()); + + run_to_block(3, |number| match number { + 3 => Some(SessionChangeNotification { + new_config: new_config.clone(), + prev_config: old_config.clone(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), + ValidatorId::from(Sr25519Keyring::Charlie.public()), + ValidatorId::from(Sr25519Keyring::Dave.public()), + ], + ..Default::default() + }), + _ => None, + }); - // Pop assignment_b into the claimqueue - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 2); + { + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 3); - //// Now that there is an earlier next-up, we use that. assert_eq!( - Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), - ScheduledCore { para_id: para_b, collator: None } + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a].into_iter().collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_b.clone()].into_iter().collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(2)).unwrap(), + [assignment_b.clone()].into_iter().collect::>() ); } }); } #[test] -fn session_change_requires_reschedule_dropping_removed_paras() { +fn session_change_decreasing_number_of_cores() { let mut config = default_config(); - config.scheduler_params.lookahead = 1; + config.scheduler_params.num_cores = 3; let genesis_config = genesis_config(&config); - let para_a = ParaId::from(1_u32); - let para_b = ParaId::from(2_u32); + let para_a = ParaId::from(3_u32); + let para_b = ParaId::from(4_u32); let assignment_a = Assignment::Bulk(para_a); let assignment_b = Assignment::Bulk(para_b); new_test_ext(genesis_config).execute_with(|| { - // Setting explicit core count - MockAssigner::set_core_count(5); - let coretime_ttl = configuration::ActiveConfig::::get().scheduler_params.ttl; - - schedule_blank_para(para_a); - schedule_blank_para(para_b); - - // Add assignments - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); + // Add 2 paras + register_para(para_a); + register_para(para_b); + // start a new session to activate, 2 validators for 2 cores. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ValidatorId::from(Sr25519Keyring::Ferdie.public()), - ValidatorId::from(Sr25519Keyring::One.public()), ], - random_seed: [99; 32], ..Default::default() }), _ => None, }); - assert_eq!(scheduler::ClaimQueue::::get().len(), 2); + scheduler::Pallet::::set_claim_queue(BTreeMap::from([ + (CoreIndex::from(0), VecDeque::from([assignment_a.clone()])), + // Leave a hole for core 1. + (CoreIndex::from(2), VecDeque::from([assignment_b.clone(), assignment_b.clone()])), + ])); - let groups = ValidatorGroups::::get(); - assert_eq!(groups.len(), 5); + // Decrease number of cores to 1. 
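// Illustrative sketch, not part of the patch: seeding the claim queue directly in tests, as the
// test above does via `set_claim_queue`. The generic parameter is written out here for
// readability; in these tests it is most likely `Test`, the mock runtime used throughout this
// file. The map/queue literals mirror the call site above.
use std::collections::{BTreeMap, VecDeque};

scheduler::Pallet::<Test>::set_claim_queue(BTreeMap::from([
    (CoreIndex::from(0), VecDeque::from([Assignment::Bulk(para_a)])),
    // Core 1 is intentionally left without any claims ("a hole").
    (CoreIndex::from(2), VecDeque::from([Assignment::Bulk(para_b), Assignment::Bulk(para_b)])),
]));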
+ let old_config = config; + let mut new_config = old_config.clone(); + new_config.scheduler_params.num_cores = 1; - assert_ok!(Paras::schedule_para_cleanup(para_b)); + // Session change. + // Assignment A had its shot already so will be dropped for good. + // The two assignments of B will be pushed back to the assignment provider. + run_to_block(3, |number| match number { + 3 => Some(SessionChangeNotification { + new_config: new_config.clone(), + prev_config: old_config.clone(), + validators: vec![ValidatorId::from(Sr25519Keyring::Alice.public())], + ..Default::default() + }), + _ => None, + }); - // Add assignment - MockAssigner::add_test_assignment(assignment_a.clone()); + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 1); - run_to_end_of_block(2, |number| match number { - 2 => Some(SessionChangeNotification { - new_config: default_config(), + // There's only one assignment for B because run_to_block also calls advance_claim_queue at + // the end. + assert_eq!( + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_b.clone()].into_iter().collect::>() + ); + + // No more assignments now. + Scheduler::advance_claim_queue(&Default::default()); + assert_eq!(Scheduler::claim_queue_len(), 0); + }); +} + +#[test] +fn session_change_increasing_lookahead() { + let mut config = default_config(); + config.scheduler_params.num_cores = 2; + config.scheduler_params.lookahead = 2; + let genesis_config = genesis_config(&config); + + let para_a = ParaId::from(3_u32); + let para_b = ParaId::from(4_u32); + + let assignment_a = Assignment::Bulk(para_a); + let assignment_b = Assignment::Bulk(para_b); + + new_test_ext(genesis_config).execute_with(|| { + // Add 2 paras + register_para(para_a); + register_para(para_b); + + // start a new session to activate, 2 validators for 2 cores. + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ValidatorId::from(Sr25519Keyring::Ferdie.public()), - ValidatorId::from(Sr25519Keyring::One.public()), ], - random_seed: [99; 32], ..Default::default() }), _ => None, }); - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 3); + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_a.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + MockAssigner::add_test_assignment(assignment_b.clone()); + + // Lookahead is currently 2. 
- assert_eq!( - scheduler::ClaimQueue::::get(), - vec![( - CoreIndex(0), - vec![ParasEntry::new( - Assignment::Bulk(para_a), - // At end of block 2 - coretime_ttl + 2 - )] - .into_iter() - .collect() - )] - .into_iter() - .collect() - ); + run_to_block(2, |_| None); - // Add para back - schedule_blank_para(para_b); + { + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 4); - // Add assignments - MockAssigner::add_test_assignment(assignment_a.clone()); - MockAssigner::add_test_assignment(assignment_b.clone()); + assert_eq!( + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a.clone(), assignment_a.clone()] + .into_iter() + .collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_a.clone(), assignment_a.clone()] + .into_iter() + .collect::>() + ); + } + + // Increase lookahead to 4. + let old_config = config; + let mut new_config = old_config.clone(); + new_config.scheduler_params.lookahead = 4; run_to_block(3, |number| match number { 3 => Some(SessionChangeNotification { - new_config: default_config(), + new_config: new_config.clone(), + prev_config: old_config.clone(), validators: vec![ ValidatorId::from(Sr25519Keyring::Alice.public()), ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ValidatorId::from(Sr25519Keyring::Ferdie.public()), - ValidatorId::from(Sr25519Keyring::One.public()), ], - random_seed: [99; 32], ..Default::default() }), _ => None, }); - assert_eq!(scheduler::ClaimQueue::::get().len(), 2); - - let groups = ValidatorGroups::::get(); - assert_eq!(groups.len(), 5); - - Scheduler::free_cores_and_fill_claim_queue(BTreeMap::new(), 4); + { + let mut claim_queue = scheduler::ClaimQueue::::get(); + assert_eq!(Scheduler::claim_queue_len(), 6); - assert_eq!( - scheduler::ClaimQueue::::get(), - vec![ - ( - CoreIndex(0), - vec![ParasEntry::new( - Assignment::Bulk(para_a), - // At block 3 - coretime_ttl + 3 - )] + assert_eq!( + claim_queue.remove(&CoreIndex(0)).unwrap(), + [assignment_a.clone(), assignment_a.clone(), assignment_b.clone()] .into_iter() - .collect() - ), - ( - CoreIndex(1), - vec![ParasEntry::new( - Assignment::Bulk(para_b), - // At block 3 - coretime_ttl + 3 - )] + .collect::>() + ); + assert_eq!( + claim_queue.remove(&CoreIndex(1)).unwrap(), + [assignment_a.clone(), assignment_b.clone(), assignment_b.clone()] .into_iter() - .collect() - ), - ] - .into_iter() - .collect() - ); + .collect::>() + ); + } }); } diff --git a/polkadot/runtime/parachains/src/session_info.rs b/polkadot/runtime/parachains/src/session_info.rs index ea05c1aacaa946aa50bcb68e69930acbd60d79eb..0ec01755095bc140f23870b0a5b5fc0ad4d35cd9 100644 --- a/polkadot/runtime/parachains/src/session_info.rs +++ b/polkadot/runtime/parachains/src/session_info.rs @@ -135,8 +135,8 @@ impl Pallet { let assignment_keys = AssignmentKeysUnsafe::::get(); let active_set = shared::ActiveValidatorIndices::::get(); - let validator_groups = scheduler::ValidatorGroups::::get().into(); - let n_cores = scheduler::AvailabilityCores::::get().len() as u32; + let validator_groups = scheduler::ValidatorGroups::::get(); + let n_cores = validator_groups.len() as u32; let zeroth_delay_tranche_width = config.zeroth_delay_tranche_width; let relay_vrf_modulo_samples = config.relay_vrf_modulo_samples; let n_delay_tranches = config.n_delay_tranches; @@ -177,7 +177,7 @@ impl Pallet { validators, 
// these are from the notification and are thus already correct. discovery_keys: take_active_subset_and_inactive(&active_set, &discovery_keys), assignment_keys: take_active_subset(&active_set, &assignment_keys), - validator_groups, + validator_groups: validator_groups.into(), n_cores, zeroth_delay_tranche_width, relay_vrf_modulo_samples, diff --git a/polkadot/runtime/parachains/src/shared.rs b/polkadot/runtime/parachains/src/shared.rs index f582bf0d90b5e400ac6ff558653bc69cd68930df..473c1aba7a066d198f7bcdad8ad920cf7cf955e8 100644 --- a/polkadot/runtime/parachains/src/shared.rs +++ b/polkadot/runtime/parachains/src/shared.rs @@ -80,6 +80,7 @@ impl /// Add a new relay-parent to the allowed relay parents, along with info about the header. /// Provide a maximum ancestry length for the buffer, which will cause old relay-parents to be /// pruned. + /// If the relay parent hash is already present, do nothing. pub(crate) fn update( &mut self, relay_parent: Hash, @@ -88,6 +89,11 @@ impl number: BlockNumber, max_ancestry_len: u32, ) { + if self.buffer.iter().any(|info| info.relay_parent == relay_parent) { + // Already present. + return + } + let claim_queue = transpose_claim_queue(claim_queue); // + 1 for the most recent block, which is always allowed. diff --git a/polkadot/runtime/parachains/src/shared/tests.rs b/polkadot/runtime/parachains/src/shared/tests.rs index 6da84e254f051f393612adb2f7f6a5e9a60cfc7c..f7ea5148ce33417740f09b9e39b64f94b0a2c29b 100644 --- a/polkadot/runtime/parachains/src/shared/tests.rs +++ b/polkadot/runtime/parachains/src/shared/tests.rs @@ -43,7 +43,13 @@ fn tracker_earliest_block_number() { let max_ancestry_len = 4; let now = 4; for i in 1..now { - tracker.update(Hash::zero(), Hash::zero(), Default::default(), i, max_ancestry_len); + tracker.update( + Hash::from([i as u8; 32]), + Hash::zero(), + Default::default(), + i, + max_ancestry_len, + ); assert_eq!(tracker.hypothetical_earliest_block_number(i + 1, max_ancestry_len), 0); } @@ -53,7 +59,7 @@ fn tracker_earliest_block_number() { } #[test] -fn tracker_claim_queue_remap() { +fn tracker_claim_queue_transpose() { let mut tracker = AllowedRelayParentsTracker::::default(); let mut claim_queue = BTreeMap::new(); @@ -120,6 +126,14 @@ fn tracker_acquire_info() { Some((s, b)) if s.state_root == state_root && b == 0 ); + // Try to push a duplicate. Should be ignored. 
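// Illustrative sketch, not part of the patch: the duplicate-relay-parent guard added to
// `AllowedRelayParentsTracker::update` above, reduced to a self-contained example. The type
// and field names below are placeholders, not the pallet's real ones.
struct Entry {
    relay_parent: [u8; 32],
}

struct Tracker {
    buffer: Vec<Entry>,
}

impl Tracker {
    fn update(&mut self, relay_parent: [u8; 32]) {
        // If this relay parent is already tracked, keep the existing entry instead of
        // pushing a duplicate.
        if self.buffer.iter().any(|entry| entry.relay_parent == relay_parent) {
            return
        }
        self.buffer.push(Entry { relay_parent });
    }
}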
+ tracker.update(relay_parent, Hash::repeat_byte(13), Default::default(), 0, max_ancestry_len); + assert_eq!(tracker.buffer.len(), 1); + assert_matches!( + tracker.acquire_info(relay_parent, None), + Some((s, b)) if s.state_root == state_root && b == 0 + ); + let (relay_parent, state_root) = blocks[1]; tracker.update(relay_parent, state_root, Default::default(), 1u32, max_ancestry_len); let (relay_parent, state_root) = blocks[2]; diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 7becf6376c349fe3878675dd8800afd859981c99..6bcb0da3d999cc75d40dfc5c1a1a3e7b6b8770f3 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -256,6 +256,7 @@ runtime-benchmarks = [ "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-tips/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", diff --git a/polkadot/runtime/rococo/constants/src/weights/block_weights.rs b/polkadot/runtime/rococo/constants/src/weights/block_weights.rs index e2aa4a6cab7febc90418c3222819109e256a18c8..f7dc2f19316d5e6b13deb76044fb0ebf43b25d3b 100644 --- a/polkadot/runtime/rococo/constants/src/weights/block_weights.rs +++ b/polkadot/runtime/rococo/constants/src/weights/block_weights.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26 (Y/M/D) -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `runtime/rococo/constants/src/weights/` +//! WEIGHT-PATH: `./polkadot/runtime/rococo/constants/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: @@ -28,12 +28,11 @@ // benchmark // overhead // --chain=rococo-dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=runtime/rococo/constants/src/weights/ +// --weight-path=./polkadot/runtime/rococo/constants/src/weights/ // --warmup=10 // --repeat=100 -// --header=./file_header.txt +// --header=./polkadot/file_header.txt use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; @@ -43,17 +42,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
/// /// Stats nanoseconds: - /// Min, Max: 408_659, 450_716 - /// Average: 417_412 - /// Median: 411_177 - /// Std-Dev: 12242.31 + /// Min, Max: 440_142, 476_907 + /// Average: 450_240 + /// Median: 448_633 + /// Std-Dev: 7301.18 /// /// Percentiles nanoseconds: - /// 99th: 445_142 - /// 95th: 442_275 - /// 75th: 414_217 + /// 99th: 470_733 + /// 95th: 465_082 + /// 75th: 452_536 pub const BlockExecutionWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(417_412), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(450_240), 0); } #[cfg(test)] diff --git a/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs b/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs index adce840ebbc120a4e7905ad6312b9d27b1e8d3fb..000cee8a237c3ef306e3356f9733422765b67306 100644 --- a/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs +++ b/polkadot/runtime/rococo/constants/src/weights/extrinsic_weights.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26 (Y/M/D) -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29 (Y/M/D) +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` -//! WEIGHT-PATH: `runtime/rococo/constants/src/weights/` +//! WEIGHT-PATH: `./polkadot/runtime/rococo/constants/src/weights/` //! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: @@ -28,12 +28,11 @@ // benchmark // overhead // --chain=rococo-dev -// --execution=wasm // --wasm-execution=compiled -// --weight-path=runtime/rococo/constants/src/weights/ +// --weight-path=./polkadot/runtime/rococo/constants/src/weights/ // --warmup=10 // --repeat=100 -// --header=./file_header.txt +// --header=./polkadot/file_header.txt use sp_core::parameter_types; use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; @@ -43,17 +42,17 @@ parameter_types! { /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
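// Illustrative note, not part of the patch: how a constant like `BlockExecutionWeight` above is
// derived from the benchmark stats. This assumes `WEIGHT_REF_TIME_PER_NANOS` equals 1_000,
// i.e. ref-time is counted in picoseconds while the overhead benchmark reports nanoseconds.
use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight};

// Average block execution time of 450_240 ns, converted to ref-time:
let block_execution: Weight =
    Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(450_240), 0);
assert_eq!(block_execution.ref_time(), 450_240_000); // picoseconds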
/// /// Stats nanoseconds: - /// Min, Max: 97_574, 100_119 - /// Average: 98_236 - /// Median: 98_179 - /// Std-Dev: 394.9 + /// Min, Max: 92_961, 94_143 + /// Average: 93_369 + /// Median: 93_331 + /// Std-Dev: 217.39 /// /// Percentiles nanoseconds: - /// 99th: 99_893 - /// 95th: 98_850 - /// 75th: 98_318 + /// 99th: 93_848 + /// 95th: 93_691 + /// 75th: 93_514 pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(98_236), 0); + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(93_369), 0); } #[cfg(test)] diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 1b5f7b5157d83479e9004bfcb4c3a5a31bdd8464..44dd820f8c3ccaf828d2d4034b2c1931f9fd43e8 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -102,9 +102,8 @@ use sp_core::{ConstU128, ConstU8, Get, OpaqueMetadata, H256}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ - AccountIdConversion, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, - Extrinsic as ExtrinsicT, IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, - Verify, + AccountIdConversion, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, IdentityLookup, + Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, @@ -113,7 +112,10 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; +use xcm::{ + latest::prelude::*, VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, +}; use xcm_builder::PayOverXcm; pub use frame_system::Call as SystemCall; @@ -219,6 +221,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -418,6 +421,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -606,18 +610,33 @@ impl pallet_grandpa::Config for Runtime { pallet_grandpa::EquivocationReportSystem; } -/// Submits a transaction with the node's public and signature type. Adheres to the signed extension -/// format of the chain. +impl frame_system::offchain::SigningTypes for Runtime { + type Public = ::Signer; + type Signature = Signature; +} + +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type RuntimeCall = RuntimeCall; +} + +/// Submits a transaction with the node's public and signature type. Adheres to the signed +/// extension format of the chain. 
impl frame_system::offchain::CreateSignedTransaction for Runtime where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: ::Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { use sp_runtime::traits::StaticLookup; // take the biggest period possible. let period = @@ -629,7 +648,7 @@ where // so the actual block number is `n`. .saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -642,31 +661,39 @@ where frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), frame_metadata_hash_extension::CheckMetadataHash::new(true), - ); - - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, extra))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) } } -impl frame_system::offchain::SigningTypes for Runtime { - type Public = ::Signer; - type Signature = Signature; +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, tx_ext: Self::Extension) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_transaction(call, tx_ext) + } } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateInherent for Runtime where - RuntimeCall: From, + RuntimeCall: From, { - type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_bare(call) + } } parameter_types! { @@ -1538,8 +1565,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1553,7 +1580,10 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; /// All migrations that will run on the next runtime upgrade. /// @@ -1683,7 +1713,8 @@ pub mod migrations { // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, parachains_inclusion::migration::MigrateToV1, - parachains_shared::migration::MigrateToV1, + parachains_shared::migration::MigrateToV1, + parachains_scheduler::migration::MigrateV2ToV3, ); } @@ -1697,7 +1728,7 @@ pub type Executive = frame_executive::Executive< Migrations, >; /// The payload being signed in transactions. 
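// Illustrative sketch, not part of the patch: the three extrinsic construction paths provided
// by the `CreateSignedTransaction`, `CreateTransaction` and `CreateInherent` impls above, side
// by side. `call`, `address`, `signature` and `tx_ext` stand for values built exactly as in the
// `create_signed_transaction` body; the `clone()` calls assume those types are `Clone`, which
// holds for this runtime's call and extension types.
let signed = UncheckedExtrinsic::new_signed(call.clone(), address, signature, tx_ext.clone());
let general = UncheckedExtrinsic::new_transaction(call.clone(), tx_ext);
let inherent = UncheckedExtrinsic::new_bare(call);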
-pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; parameter_types! { // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) @@ -1771,7 +1802,9 @@ mod benches { [pallet_scheduler, Scheduler] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] @@ -2358,6 +2391,7 @@ sp_api::impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use frame_benchmarking::baseline::Pallet as Baseline; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; @@ -2378,6 +2412,7 @@ sp_api::impl_runtime_apis! { use frame_support::traits::WhitelistedStorageKeys; use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use frame_benchmarking::baseline::Pallet as Baseline; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use sp_storage::TrackedStorageKey; @@ -2590,6 +2625,15 @@ sp_api::impl_runtime_apis! { genesis_config_presets::preset_names() } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_teleporter(asset, location) + } + } } #[cfg(all(test, feature = "try-runtime"))] diff --git a/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs b/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs index dfba0cfc4aa90938c70376796117eea9ff00b676..0f68a5c6fb373b5d7b871aba540aa7210d710a53 100644 --- a/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs +++ b/polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `frame_benchmarking::baseline` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=frame_benchmarking::baseline // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/frame_benchmarking_baseline.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/frame_benchmarking_baseline.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,8 +55,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 157_000 picoseconds. - Weight::from_parts(175_233, 0) + // Minimum execution time: 172_000 picoseconds. + Weight::from_parts(199_481, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -61,8 +64,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 149_000 picoseconds. - Weight::from_parts(183_285, 0) + // Minimum execution time: 171_000 picoseconds. + Weight::from_parts(197_821, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -70,8 +73,8 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 158_000 picoseconds. - Weight::from_parts(184_720, 0) + // Minimum execution time: 172_000 picoseconds. + Weight::from_parts(200_942, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 1000000]`. @@ -79,16 +82,16 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 152_000 picoseconds. - Weight::from_parts(177_496, 0) + // Minimum execution time: 170_000 picoseconds. + Weight::from_parts(196_906, 0) .saturating_add(Weight::from_parts(0, 0)) } fn hashing() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 19_907_376_000 picoseconds. - Weight::from_parts(19_988_727_000, 0) + // Minimum execution time: 23_346_876_000 picoseconds. + Weight::from_parts(23_363_744_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `i` is `[0, 100]`. @@ -96,10 +99,10 @@ impl frame_benchmarking::baseline::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 198_000 picoseconds. - Weight::from_parts(228_000, 0) + // Minimum execution time: 201_000 picoseconds. + Weight::from_parts(219_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 20_467 - .saturating_add(Weight::from_parts(47_443_635, 0).saturating_mul(i.into())) + // Standard Error: 14_372 + .saturating_add(Weight::from_parts(45_375_800, 0).saturating_mul(i.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/frame_system.rs b/polkadot/runtime/rococo/src/weights/frame_system.rs index 2e49483dcc62728f3554bb1364efd740f8b03fd2..1742a761ca77baa50c79f51cb4ac854cba0fa274 100644 --- a/polkadot/runtime/rococo/src/weights/frame_system.rs +++ b/polkadot/runtime/rococo/src/weights/frame_system.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `frame_system` //! 
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=frame_system // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,91 +55,91 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_283_000 picoseconds. - Weight::from_parts(2_305_000, 0) + // Minimum execution time: 1_541_000 picoseconds. + Weight::from_parts(2_581_470, 0) .saturating_add(Weight::from_parts(0, 0)) // Standard Error: 0 - .saturating_add(Weight::from_parts(366, 0).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_435_000 picoseconds. - Weight::from_parts(7_581_000, 0) + // Minimum execution time: 5_060_000 picoseconds. + Weight::from_parts(5_167_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_408, 0).saturating_mul(b.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_696, 0).saturating_mul(b.into())) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a686561707061676573` (r:0 w:1) - /// Proof Skipped: unknown `0x3a686561707061676573` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) fn set_heap_pages() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 4_010_000 picoseconds. - Weight::from_parts(4_112_000, 0) + // Minimum execution time: 2_649_000 picoseconds. 
+ Weight::from_parts(2_909_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a636f6465` (r:0 w:1) - /// Proof Skipped: unknown `0x3a636f6465` (r:0 w:1) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) fn set_code() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 80_405_511_000 picoseconds. - Weight::from_parts(83_066_478_000, 0) + // Minimum execution time: 88_417_540_000 picoseconds. + Weight::from_parts(91_809_291_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_210_000 picoseconds. - Weight::from_parts(2_247_000, 0) + // Minimum execution time: 1_538_000 picoseconds. + Weight::from_parts(1_589_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_058 - .saturating_add(Weight::from_parts(673_943, 0).saturating_mul(i.into())) + // Standard Error: 1_740 + .saturating_add(Weight::from_parts(730_941, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_125_000 picoseconds. - Weight::from_parts(2_154_000, 0) + // Minimum execution time: 1_567_000 picoseconds. + Weight::from_parts(1_750_000, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 816 - .saturating_add(Weight::from_parts(491_194, 0).saturating_mul(i.into())) + // Standard Error: 835 + .saturating_add(Weight::from_parts(543_218, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `129 + p * (69 ±0)` - // Estimated: `125 + p * (70 ±0)` - // Minimum execution time: 4_002_000 picoseconds. 
- Weight::from_parts(4_145_000, 0) - .saturating_add(Weight::from_parts(0, 125)) - // Standard Error: 1_108 - .saturating_add(Weight::from_parts(1_014_971, 0).saturating_mul(p.into())) + // Measured: `80 + p * (69 ±0)` + // Estimated: `83 + p * (70 ±0)` + // Minimum execution time: 3_412_000 picoseconds. + Weight::from_parts(3_448_000, 0) + .saturating_add(Weight::from_parts(0, 83)) + // Standard Error: 1_395 + .saturating_add(Weight::from_parts(1_142_347, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) @@ -147,8 +150,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 33_027_000 picoseconds. - Weight::from_parts(33_027_000, 0) + // Minimum execution time: 9_178_000 picoseconds. + Weight::from_parts(9_780_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -162,8 +165,8 @@ impl frame_system::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `1518` - // Minimum execution time: 118_101_992_000 picoseconds. - Weight::from_parts(118_101_992_000, 0) + // Minimum execution time: 94_523_563_000 picoseconds. + Weight::from_parts(96_983_131_000, 0) .saturating_add(Weight::from_parts(0, 1518)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..99dac1ba75f06f8d8e5e9b835a48f3b5f2974d90 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs @@ -0,0 +1,134 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=frame_system_extensions +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_262_000 picoseconds. + Weight::from_parts(3_497_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_416_000 picoseconds. + Weight::from_parts(5_690_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 5_416_000 picoseconds. + Weight::from_parts(5_690_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 471_000 picoseconds. + Weight::from_parts(552_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 4_847_000 picoseconds. + Weight::from_parts(5_091_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 388_000 picoseconds. + Weight::from_parts(421_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 378_000 picoseconds. 
+ Weight::from_parts(440_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 3_402_000 picoseconds. + Weight::from_parts(3_627_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 020f8e22594a602f4bf12336eba92470ec754c4e..99477baeb2811770c414ea3ebddb29b7b49d70fa 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -16,6 +16,7 @@ //! A list of the different weight modules for our runtime. pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_rate; pub mod pallet_balances_balances; pub mod pallet_balances_nis_counterpart_balances; @@ -39,6 +40,7 @@ pub mod pallet_scheduler; pub mod pallet_session; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; diff --git a/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs index da2d1958cefcfeb41bcdba7ed2dd9c6b4272e873..56b1e2cbc5717fa932d9a5419773ce5fbd246d7f 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_asset_rate.rs @@ -16,25 +16,28 @@ //! Autogenerated weights for `pallet_asset_rate` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-03, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/debug/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev // --steps=50 -// --repeat=2 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_asset_rate // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/ -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,39 +50,39 @@ use core::marker::PhantomData; /// Weight functions for `pallet_asset_rate`. 
pub struct WeightInfo(PhantomData); impl pallet_asset_rate::WeightInfo for WeightInfo { - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `4702` - // Minimum execution time: 143_000_000 picoseconds. - Weight::from_parts(155_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `142` + // Estimated: `4703` + // Minimum execution time: 10_277_000 picoseconds. + Weight::from_parts(10_487_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn update() -> Weight { // Proof Size summary in bytes: - // Measured: `110` - // Estimated: `4702` - // Minimum execution time: 156_000_000 picoseconds. - Weight::from_parts(172_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 10_917_000 picoseconds. + Weight::from_parts(11_249_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:1) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) fn remove() -> Weight { // Proof Size summary in bytes: - // Measured: `110` - // Estimated: `4702` - // Minimum execution time: 150_000_000 picoseconds. - Weight::from_parts(160_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 11_332_000 picoseconds. + Weight::from_parts(11_866_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs b/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs index d37bb9369c688b596b22c25da25463719c428e32..c3c3315edff278e0332f63081dc709844c07195b 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_balances_balances.rs @@ -23,17 +23,19 @@ //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_balances // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ diff --git a/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs b/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs index 706653aeb7692188103102bbe8aba72bbd6acb30..697e51faf537462f6bf19a7461028b7ee331e340 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_balances_nis_counterpart_balances.rs @@ -23,17 +23,19 @@ //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_balances // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_balances -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ diff --git a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs index 38d3645316f25faba74fff9d37c66db2ab4f08d9..8f8be5f2386f7983d42dbad355a266ff68cf332a 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_bounties // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,118 +50,181 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bounties`. 
pub struct WeightInfo(PhantomData); impl pallet_bounties::WeightInfo for WeightInfo { - /// Storage: Bounties BountyCount (r:1 w:1) - /// Proof: Bounties BountyCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(16400), added: 18875, mode: MaxEncodedLen) - /// Storage: Bounties Bounties (r:0 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyCount` (r:1 w:1) + /// Proof: `Bounties::BountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:0 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 16384]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3593` - // Minimum execution time: 28_907_000 picoseconds. - Weight::from_parts(31_356_074, 0) + // Minimum execution time: 21_772_000 picoseconds. + Weight::from_parts(22_861_341, 0) .saturating_add(Weight::from_parts(0, 3593)) - // Standard Error: 18 - .saturating_add(Weight::from_parts(606, 0).saturating_mul(d.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(721, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `302` + // Estimated: `3642` + // Minimum execution time: 11_218_000 picoseconds. + Weight::from_parts(11_796_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `322` + // Estimated: `3642` + // Minimum execution time: 10_959_000 picoseconds. 
+ Weight::from_parts(11_658_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `498` + // Estimated: `3642` + // Minimum execution time: 37_419_000 picoseconds. + Weight::from_parts(38_362_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `494` + // Estimated: `3642` + // Minimum execution time: 27_328_000 picoseconds. + Weight::from_parts(27_661_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `400` + // Estimated: `3642` + // Minimum execution time: 16_067_000 picoseconds. + Weight::from_parts(16_865_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. 
- Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `764` + // Estimated: `8799` + // Minimum execution time: 101_153_000 picoseconds. + Weight::from_parts(102_480_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Bounties Bounties (r:1 w:1) - /// Proof: Bounties Bounties (max_values: None, max_size: Some(177), added: 2652, mode: MaxEncodedLen) - /// Storage: ChildBounties ParentChildBounties (r:1 w:0) - /// Proof: ChildBounties ParentChildBounties (max_values: None, max_size: Some(16), added: 2491, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyDescriptions (r:0 w:1) - /// Proof: Bounties BountyDescriptions (max_values: None, max_size: Some(16400), added: 18875, mode: MaxEncodedLen) + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `482` + // Measured: `444` // Estimated: `3642` - // Minimum execution time: 46_020_000 picoseconds. - Weight::from_parts(46_711_000, 0) + // Minimum execution time: 38_838_000 picoseconds. + Weight::from_parts(39_549_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:0) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:0 w:1) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `680` + // Estimated: `6196` + // Minimum execution time: 68_592_000 picoseconds. 
+ Weight::from_parts(70_727_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `358` + // Estimated: `3642` + // Minimum execution time: 11_272_000 picoseconds. + Weight::from_parts(11_592_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:100 w:100) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:200 w:200) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `b` is `[0, 100]`. - fn spend_funds(_b: u32, ) -> Weight { + fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1887` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(2_405_233, 0) + // Measured: `0 + b * (297 ±0)` + // Estimated: `1887 + b * (5206 ±0)` + // Minimum execution time: 2_844_000 picoseconds. + Weight::from_parts(2_900_000, 0) .saturating_add(Weight::from_parts(0, 1887)) + // Standard Error: 9_467 + .saturating_add(Weight::from_parts(32_326_595, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(b.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs b/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs index e8c798d45e727618b892e4ebd7e11d7ba81f66ca..47ae3a5c90d1e8524b914bb3b4b5bbaeb71f3c25 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_child_bounties.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_child_bounties` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_child_bounties // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,69 +50,153 @@ use core::marker::PhantomData; /// Weight functions for `pallet_child_bounties`. pub struct WeightInfo(PhantomData); impl pallet_child_bounties::WeightInfo for WeightInfo { + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyCount` (r:1 w:1) + /// Proof: `ChildBounties::ChildBountyCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:0 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) /// The range of component `d` is `[0, 16384]`. - fn add_child_bounty(_d: u32, ) -> Weight { + fn add_child_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `540` + // Estimated: `6196` + // Minimum execution time: 57_964_000 picoseconds. + Weight::from_parts(59_559_565, 0) + .saturating_add(Weight::from_parts(0, 6196)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(697, 0).saturating_mul(d.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `594` + // Estimated: `3642` + // Minimum execution time: 17_527_000 picoseconds. 
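// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the patch): extrinsics with a
// length component, such as `add_child_bounty(d)` in the hunk above, add a
// per-byte slope on top of the base cost. The sketch mirrors the regenerated
// numbers (697 ps of ref-time per description byte, 5 reads, 6 writes), again
// with `RocksDbWeight` standing in for `T::DbWeight`. At the maximum
// `d = 16384` the linear term contributes roughly 11.4 million picoseconds.
// ---------------------------------------------------------------------------
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

fn add_child_bounty_weight_sketch(d: u32) -> Weight {
    Weight::from_parts(59_559_565, 0)
        .saturating_add(Weight::from_parts(0, 6196))
        // 697 ps of ref-time for each byte of the child-bounty description.
        .saturating_add(Weight::from_parts(697, 0).saturating_mul(d.into()))
        .saturating_add(RocksDbWeight::get().reads(5))
        .saturating_add(RocksDbWeight::get().writes(6))
}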
+ Weight::from_parts(18_257_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `740` + // Estimated: `3642` + // Minimum execution time: 29_354_000 picoseconds. + Weight::from_parts(30_629_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `740` + // Estimated: `3642` + // Minimum execution time: 40_643_000 picoseconds. + Weight::from_parts(42_072_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) fn award_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `637` + // Estimated: `3642` + // Minimum execution time: 18_616_000 picoseconds. 
+ Weight::from_parts(19_316_000, 0) + .saturating_add(Weight::from_parts(0, 3642)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn claim_child_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `576` + // Estimated: `8799` + // Minimum execution time: 96_376_000 picoseconds. + Weight::from_parts(98_476_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_child_bounty_added() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `840` + // Estimated: `6196` + // Minimum execution time: 64_640_000 picoseconds. 
+ Weight::from_parts(66_174_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) } + /// Storage: `Bounties::Bounties` (r:1 w:0) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ChildBounties` (`max_values`: None, `max_size`: Some(145), added: 2620, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildrenCuratorFees` (r:1 w:1) + /// Proof: `ChildBounties::ChildrenCuratorFees` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ParentChildBounties` (r:1 w:1) + /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `ChildBounties::ChildBountyDescriptions` (r:0 w:1) + /// Proof: `ChildBounties::ChildBountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) fn close_child_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 0_000 picoseconds. - Weight::from_parts(0, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `1027` + // Estimated: `8799` + // Minimum execution time: 78_159_000 picoseconds. + Weight::from_parts(79_820_000, 0) + .saturating_add(Weight::from_parts(0, 8799)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(7)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs index ba505737f1b070d21931c7e467b4853de1f119d2..5d92c158df44ee17d9e473cc222e4bae76597b67 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_conviction_voting.rs @@ -16,17 +16,17 @@ //! Autogenerated weights for `pallet_conviction_voting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot // benchmark // pallet -// --chain=kusama-dev +// --chain=rococo-dev // --steps=50 // --repeat=20 // --no-storage-info @@ -36,8 +36,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/kusama/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,144 +50,152 @@ use core::marker::PhantomData; /// Weight functions for `pallet_conviction_voting`. pub struct WeightInfo(PhantomData); impl pallet_conviction_voting::WeightInfo for WeightInfo { - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn vote_new() -> Weight { // Proof Size summary in bytes: - // Measured: `13445` + // Measured: `13407` // Estimated: `42428` - // Minimum execution time: 151_077_000 picoseconds. - Weight::from_parts(165_283_000, 0) + // Minimum execution time: 128_378_000 picoseconds. 
+ Weight::from_parts(131_028_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote_existing() -> Weight { // Proof Size summary in bytes: - // Measured: `14166` + // Measured: `14128` // Estimated: `83866` - // Minimum execution time: 232_420_000 picoseconds. - Weight::from_parts(244_439_000, 0) + // Minimum execution time: 155_379_000 picoseconds. 
+ Weight::from_parts(161_597_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:1) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn remove_vote() -> Weight { // Proof Size summary in bytes: // Measured: `13918` // Estimated: `83866` - // Minimum execution time: 205_017_000 picoseconds. - Weight::from_parts(216_594_000, 0) + // Minimum execution time: 130_885_000 picoseconds. + Weight::from_parts(138_080_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:1 w:0) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn remove_other_vote() -> Weight { // Proof Size summary in bytes: - // Measured: `13004` + // Measured: `13005` // Estimated: `30706` - // Minimum execution time: 84_226_000 picoseconds. - Weight::from_parts(91_255_000, 0) + // Minimum execution time: 71_743_000 picoseconds. 
+ Weight::from_parts(75_170_000, 0) .saturating_add(Weight::from_parts(0, 30706)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:512 w:512) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 512]`. fn delegate(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `29640 + r * (365 ±0)` + // Measured: `29602 + r * (365 ±0)` // Estimated: `83866 + r * (3411 ±0)` - // Minimum execution time: 78_708_000 picoseconds. - Weight::from_parts(2_053_488_615, 0) + // Minimum execution time: 58_504_000 picoseconds. 
+ Weight::from_parts(814_301_018, 0) .saturating_add(Weight::from_parts(0, 83866)) - // Standard Error: 179_271 - .saturating_add(Weight::from_parts(47_806_482, 0).saturating_mul(r.into())) + // Standard Error: 59_961 + .saturating_add(Weight::from_parts(20_002_833, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(45)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:2 w:2) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: Referenda ReferendumInfoFor (r:512 w:512) - /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:2 w:2) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `r` is `[0, 512]`. fn undelegate(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `29555 + r * (365 ±0)` // Estimated: `83866 + r * (3411 ±0)` - // Minimum execution time: 45_232_000 picoseconds. - Weight::from_parts(2_045_021_014, 0) + // Minimum execution time: 34_970_000 picoseconds. 
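// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the patch): `delegate(r)` in
// the hunk above scales in three dimensions with the number of ongoing
// referenda `r` (0..=512): ref-time, proof size (3411 bytes per
// `ReferendumInfoFor` entry), and one extra DB read plus write per referendum.
// The fixed write count of 45 is taken as-is from the regenerated entry, which
// now also tracks `Scheduler::Retries` (r:0 w:50). `RocksDbWeight` stands in
// for `T::DbWeight`.
// ---------------------------------------------------------------------------
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

fn conviction_voting_delegate_weight_sketch(r: u32) -> Weight {
    Weight::from_parts(814_301_018, 0)
        .saturating_add(Weight::from_parts(0, 83_866))
        .saturating_add(Weight::from_parts(20_002_833, 0).saturating_mul(r.into()))
        .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into()))
        .saturating_add(RocksDbWeight::get().reads(7))
        .saturating_add(RocksDbWeight::get().reads(r.into()))
        .saturating_add(RocksDbWeight::get().writes(45))
        .saturating_add(RocksDbWeight::get().writes(r.into()))
}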
+ Weight::from_parts(771_155_804, 0) .saturating_add(Weight::from_parts(0, 83866)) - // Standard Error: 185_130 - .saturating_add(Weight::from_parts(47_896_011, 0).saturating_mul(r.into())) + // Standard Error: 57_795 + .saturating_add(Weight::from_parts(19_781_645, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(43)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) } - /// Storage: ConvictionVoting VotingFor (r:1 w:1) - /// Proof: ConvictionVoting VotingFor (max_values: None, max_size: Some(27241), added: 29716, mode: MaxEncodedLen) - /// Storage: ConvictionVoting ClassLocksFor (r:1 w:1) - /// Proof: ConvictionVoting ClassLocksFor (max_values: None, max_size: Some(311), added: 2786, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn unlock() -> Weight { // Proof Size summary in bytes: - // Measured: `12218` + // Measured: `12180` // Estimated: `30706` - // Minimum execution time: 116_446_000 picoseconds. - Weight::from_parts(124_043_000, 0) + // Minimum execution time: 89_648_000 picoseconds. + Weight::from_parts(97_144_000, 0) .saturating_add(Weight::from_parts(0, 30706)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_identity.rs b/polkadot/runtime/rococo/src/weights/pallet_identity.rs index b334e21ea03127a749ff1bf2455f69627f832922..6df16351f2c28f96f807bb197f9059b51e19f703 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_identity.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_identity.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_identity` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_identity // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,290 +50,291 @@ use core::marker::PhantomData; /// Weight functions for `pallet_identity`. pub struct WeightInfo(PhantomData); impl pallet_identity::WeightInfo for WeightInfo { - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `32 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 12_290_000 picoseconds. - Weight::from_parts(12_664_362, 0) + // Minimum execution time: 7_673_000 picoseconds. + Weight::from_parts(8_351_866, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_347 - .saturating_add(Weight::from_parts(88_179, 0).saturating_mul(r.into())) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(79_198, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn set_identity(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `442 + r * (5 ±0)` - // Estimated: `11003` - // Minimum execution time: 31_373_000 picoseconds. - Weight::from_parts(30_435_545, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_307 - .saturating_add(Weight::from_parts(92_753, 0).saturating_mul(r.into())) + // Measured: `6978 + r * (5 ±0)` + // Estimated: `11037` + // Minimum execution time: 111_646_000 picoseconds. 
+ Weight::from_parts(113_254_991, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 6_611 + .saturating_add(Weight::from_parts(162_119, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:100 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn set_subs_new(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `101` - // Estimated: `11003 + s * (2589 ±0)` - // Minimum execution time: 9_251_000 picoseconds. - Weight::from_parts(22_039_210, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 40_779 - .saturating_add(Weight::from_parts(2_898_525, 0).saturating_mul(s.into())) + // Estimated: `11037 + s * (2589 ±0)` + // Minimum execution time: 8_010_000 picoseconds. + Weight::from_parts(19_868_412, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 5_018 + .saturating_add(Weight::from_parts(3_115_007, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `194 + p * (32 ±0)` - // Estimated: `11003` - // Minimum execution time: 9_329_000 picoseconds. 
- Weight::from_parts(24_055_061, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 3_428 - .saturating_add(Weight::from_parts(1_130_604, 0).saturating_mul(p.into())) + // Estimated: `11037` + // Minimum execution time: 8_111_000 picoseconds. + Weight::from_parts(19_482_392, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 3_156 + .saturating_add(Weight::from_parts(1_305_890, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. - fn clear_identity(_r: u32, s: u32, ) -> Weight { + fn clear_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 53_365_000 picoseconds. - Weight::from_parts(35_391_422, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_353 - .saturating_add(Weight::from_parts(1_074_019, 0).saturating_mul(s.into())) + // Measured: `7070 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 54_107_000 picoseconds. + Weight::from_parts(56_347_715, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 10_944 + .saturating_add(Weight::from_parts(191_321, 0).saturating_mul(r.into())) + // Standard Error: 2_135 + .saturating_add(Weight::from_parts(1_295_872, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. 
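// ---------------------------------------------------------------------------
// Editor's aside (illustrative only, not part of the patch): `clear_identity`
// in the hunk above now carries two linear components — one per registrar
// judgement `r` and one per sub-account `s` — plus an extra DB write for every
// sub-identity that is removed. The sketch mirrors those regenerated numbers,
// with `RocksDbWeight` standing in for `T::DbWeight`.
// ---------------------------------------------------------------------------
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};

fn identity_clear_identity_weight_sketch(r: u32, s: u32) -> Weight {
    Weight::from_parts(56_347_715, 0)
        .saturating_add(Weight::from_parts(0, 11_037))
        .saturating_add(Weight::from_parts(191_321, 0).saturating_mul(r.into()))
        .saturating_add(Weight::from_parts(1_295_872, 0).saturating_mul(s.into()))
        .saturating_add(RocksDbWeight::get().reads(2))
        .saturating_add(RocksDbWeight::get().writes(2))
        .saturating_add(RocksDbWeight::get().writes(s.into()))
}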
fn request_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_509_000 picoseconds. - Weight::from_parts(31_745_585, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_214 - .saturating_add(Weight::from_parts(83_822, 0).saturating_mul(r.into())) - + // Measured: `6968 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 75_780_000 picoseconds. + Weight::from_parts(76_869_773, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 5_456 + .saturating_add(Weight::from_parts(135_316, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. fn cancel_request(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `398 + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 29_609_000 picoseconds. - Weight::from_parts(28_572_602, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_528 - .saturating_add(Weight::from_parts(85_593, 0).saturating_mul(r.into())) + // Measured: `6999` + // Estimated: `11037` + // Minimum execution time: 75_769_000 picoseconds. + Weight::from_parts(76_805_143, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 3_598 + .saturating_add(Weight::from_parts(84_593, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_793_000 picoseconds. - Weight::from_parts(8_173_888, 0) + // Minimum execution time: 5_357_000 picoseconds. + Weight::from_parts(5_732_132, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_569 - .saturating_add(Weight::from_parts(72_367, 0).saturating_mul(r.into())) + // Standard Error: 927 + .saturating_add(Weight::from_parts(70_832, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_708_000 picoseconds. - Weight::from_parts(8_091_149, 0) + // Minimum execution time: 5_484_000 picoseconds. 
+ Weight::from_parts(5_892_704, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 869 - .saturating_add(Weight::from_parts(87_993, 0).saturating_mul(r.into())) + // Standard Error: 947 + .saturating_add(Weight::from_parts(71_231, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:1) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `89 + r * (57 ±0)` // Estimated: `2626` - // Minimum execution time: 7_601_000 picoseconds. - Weight::from_parts(8_038_414, 0) + // Minimum execution time: 5_310_000 picoseconds. + Weight::from_parts(5_766_651, 0) .saturating_add(Weight::from_parts(0, 2626)) - // Standard Error: 1_041 - .saturating_add(Weight::from_parts(82_588, 0).saturating_mul(r.into())) + // Standard Error: 916 + .saturating_add(Weight::from_parts(74_776, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity Registrars (r:1 w:0) - /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 19]`. fn provide_judgement(r: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `445 + r * (57 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 23_114_000 picoseconds. - Weight::from_parts(22_076_548, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 2_881 - .saturating_add(Weight::from_parts(109_812, 0).saturating_mul(r.into())) + // Measured: `7046 + r * (57 ±0)` + // Estimated: `11037` + // Minimum execution time: 98_200_000 picoseconds. 
+ Weight::from_parts(100_105_482, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 6_152 + .saturating_add(Weight::from_parts(58_906, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: Identity IdentityOf (r:1 w:1) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:0 w:100) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `r` is `[1, 20]`. /// The range of component `s` is `[0, 100]`. fn kill_identity(r: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)` - // Estimated: `11003` - // Minimum execution time: 70_007_000 picoseconds. - Weight::from_parts(50_186_495, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 6_533 - .saturating_add(Weight::from_parts(15_486, 0).saturating_mul(r.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(1_085_117, 0).saturating_mul(s.into())) + // Measured: `7277 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11037` + // Minimum execution time: 64_647_000 picoseconds. 
+ Weight::from_parts(68_877_027, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 9_965 + .saturating_add(Weight::from_parts(135_044, 0).saturating_mul(r.into())) + // Standard Error: 1_944 + .saturating_add(Weight::from_parts(1_388_151, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `475 + s * (36 ±0)` - // Estimated: `11003` - // Minimum execution time: 28_453_000 picoseconds. - Weight::from_parts(33_165_934, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 1_217 - .saturating_add(Weight::from_parts(65_401, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 23_550_000 picoseconds. + Weight::from_parts(29_439_842, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 1_453 + .saturating_add(Weight::from_parts(96_324, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `591 + s * (3 ±0)` - // Estimated: `11003` - // Minimum execution time: 12_846_000 picoseconds. - Weight::from_parts(14_710_284, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 496 - .saturating_add(Weight::from_parts(19_539, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 13_704_000 picoseconds. 
+ Weight::from_parts(15_241_441, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 498 + .saturating_add(Weight::from_parts(40_973, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Identity IdentityOf (r:1 w:0) - /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen) - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `638 + s * (35 ±0)` - // Estimated: `11003` - // Minimum execution time: 32_183_000 picoseconds. - Weight::from_parts(35_296_731, 0) - .saturating_add(Weight::from_parts(0, 11003)) - // Standard Error: 854 - .saturating_add(Weight::from_parts(52_028, 0).saturating_mul(s.into())) + // Estimated: `11037` + // Minimum execution time: 29_310_000 picoseconds. + Weight::from_parts(31_712_666, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 967 + .saturating_add(Weight::from_parts(81_250, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Identity SuperOf (r:1 w:1) - /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen) - /// Storage: Identity SubsOf (r:1 w:1) - /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `704 + s * (37 ±0)` // Estimated: `6723` - // Minimum execution time: 24_941_000 picoseconds. - Weight::from_parts(27_433_059, 0) + // Minimum execution time: 22_906_000 picoseconds. 
+ Weight::from_parts(24_638_729, 0) .saturating_add(Weight::from_parts(0, 6723)) - // Standard Error: 856 - .saturating_add(Weight::from_parts(57_463, 0).saturating_mul(s.into())) + // Standard Error: 645 + .saturating_add(Weight::from_parts(75_121, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -340,90 +344,93 @@ impl pallet_identity::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_873_000 picoseconds. - Weight::from_parts(13_873_000, 0) + // Minimum execution time: 6_056_000 picoseconds. + Weight::from_parts(6_349_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Identity::UsernameAuthorities` (r:0 w:1) + /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn remove_username_authority() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 10_653_000 picoseconds. - Weight::from_parts(10_653_000, 0) - .saturating_add(Weight::from_parts(0, 0)) + // Measured: `80` + // Estimated: `3517` + // Minimum execution time: 9_003_000 picoseconds. + Weight::from_parts(9_276_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Identity::UsernameAuthorities` (r:1 w:1) /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Identity::PendingUsernames` (r:1 w:0) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_username_for() -> Weight { // Proof Size summary in bytes: // Measured: `80` // Estimated: `11037` - // Minimum execution time: 75_928_000 picoseconds. - Weight::from_parts(75_928_000, 0) + // Minimum execution time: 64_724_000 picoseconds. 
+ Weight::from_parts(66_597_000, 0) .saturating_add(Weight::from_parts(0, 11037)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `Identity::AccountOfUsername` (r:0 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn accept_username() -> Weight { // Proof Size summary in bytes: - // Measured: `106` + // Measured: `115` // Estimated: `11037` - // Minimum execution time: 38_157_000 picoseconds. - Weight::from_parts(38_157_000, 0) + // Minimum execution time: 19_538_000 picoseconds. + Weight::from_parts(20_204_000, 0) .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Identity::PendingUsernames` (r:1 w:1) - /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) fn remove_expired_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `3542` - // Minimum execution time: 46_821_000 picoseconds. - Weight::from_parts(46_821_000, 0) - .saturating_add(Weight::from_parts(0, 3542)) + // Measured: `115` + // Estimated: `3550` + // Minimum execution time: 16_000_000 picoseconds. + Weight::from_parts(19_354_000, 0) + .saturating_add(Weight::from_parts(0, 3550)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:0) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:1) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn set_primary_username() -> Weight { // Proof Size summary in bytes: - // Measured: `247` + // Measured: `257` // Estimated: `11037` - // Minimum execution time: 22_515_000 picoseconds. - Weight::from_parts(22_515_000, 0) + // Minimum execution time: 15_298_000 picoseconds. 
+ Weight::from_parts(15_760_000, 0) .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Identity::AccountOfUsername` (r:1 w:1) - /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) /// Storage: `Identity::IdentityOf` (r:1 w:0) /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) fn remove_dangling_username() -> Weight { // Proof Size summary in bytes: - // Measured: `126` + // Measured: `98` // Estimated: `11037` - // Minimum execution time: 15_997_000 picoseconds. - Weight::from_parts(15_997_000, 0) + // Minimum execution time: 10_829_000 picoseconds. + Weight::from_parts(11_113_000, 0) .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_indices.rs b/polkadot/runtime/rococo/src/weights/pallet_indices.rs index 99ffd3210ed24f04e170a66c57b01ea9232b0475..434db97d4a79faa0d51dcad13c16d6840f357483 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_indices.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_indices.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_indices` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_indices // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,66 +50,66 @@ use core::marker::PhantomData; /// Weight functions for `pallet_indices`. pub struct WeightInfo(PhantomData); impl pallet_indices::WeightInfo for WeightInfo { - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn claim() -> Weight { // Proof Size summary in bytes: - // Measured: `142` + // Measured: `4` // Estimated: `3534` - // Minimum execution time: 25_107_000 picoseconds. - Weight::from_parts(25_655_000, 0) + // Minimum execution time: 18_092_000 picoseconds. 
+ Weight::from_parts(18_533_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `203` // Estimated: `3593` - // Minimum execution time: 36_208_000 picoseconds. - Weight::from_parts(36_521_000, 0) + // Minimum execution time: 31_616_000 picoseconds. + Weight::from_parts(32_556_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn free() -> Weight { // Proof Size summary in bytes: - // Measured: `238` + // Measured: `100` // Estimated: `3534` - // Minimum execution time: 25_915_000 picoseconds. - Weight::from_parts(26_220_000, 0) + // Minimum execution time: 19_593_000 picoseconds. + Weight::from_parts(20_100_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn force_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `341` + // Measured: `203` // Estimated: `3593` - // Minimum execution time: 28_232_000 picoseconds. - Weight::from_parts(28_845_000, 0) + // Minimum execution time: 21_429_000 picoseconds. + Weight::from_parts(22_146_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Indices Accounts (r:1 w:1) - /// Proof: Indices Accounts (max_values: None, max_size: Some(69), added: 2544, mode: MaxEncodedLen) + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) fn freeze() -> Weight { // Proof Size summary in bytes: - // Measured: `238` + // Measured: `100` // Estimated: `3534` - // Minimum execution time: 27_282_000 picoseconds. - Weight::from_parts(27_754_000, 0) + // Minimum execution time: 20_425_000 picoseconds. 
+ Weight::from_parts(21_023_000, 0) .saturating_add(Weight::from_parts(0, 3534)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs index e1e360d374a0646b4fed8a29f3509565309d89c0..6ebfcd060b642d03c6eebdf7918d032253041e28 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_message_queue // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,150 +50,149 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `248` + // Measured: `281` // Estimated: `6050` - // Minimum execution time: 12_106_000 picoseconds. - Weight::from_parts(12_387_000, 0) + // Minimum execution time: 12_830_000 picoseconds. 
+ Weight::from_parts(13_476_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `248` + // Measured: `281` // Estimated: `6050` - // Minimum execution time: 11_227_000 picoseconds. - Weight::from_parts(11_616_000, 0) + // Minimum execution time: 11_583_000 picoseconds. + Weight::from_parts(11_902_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3520` - // Minimum execution time: 5_052_000 picoseconds. - Weight::from_parts(5_216_000, 0) + // Minimum execution time: 3_801_000 picoseconds. + Weight::from_parts(3_943_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 6_522_000 picoseconds. - Weight::from_parts(6_794_000, 0) + // Minimum execution time: 5_517_000 picoseconds. + Weight::from_parts(5_861_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 6_918_000 picoseconds. - Weight::from_parts(7_083_000, 0) + // Minimum execution time: 5_870_000 picoseconds. 
+ Weight::from_parts(6_028_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 28_445_000 picoseconds. - Weight::from_parts(28_659_000, 0) + // Minimum execution time: 80_681_000 picoseconds. + Weight::from_parts(81_818_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `149` + // Measured: `220` // Estimated: `3520` - // Minimum execution time: 7_224_000 picoseconds. - Weight::from_parts(7_441_000, 0) + // Minimum execution time: 8_641_000 picoseconds. 
+ Weight::from_parts(8_995_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 45_211_000 picoseconds. - Weight::from_parts(45_505_000, 0) + // Minimum execution time: 38_473_000 picoseconds. 
+ Weight::from_parts(39_831_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 52_346_000 picoseconds. - Weight::from_parts(52_745_000, 0) + // Minimum execution time: 48_717_000 picoseconds. 
+ Weight::from_parts(49_724_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(32818), added: 35293, mode: MaxEncodedLen) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(32818), added: 35293, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `33232` + // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 72_567_000 picoseconds. - Weight::from_parts(73_300_000, 0) + // Minimum execution time: 72_718_000 picoseconds. + Weight::from_parts(74_081_000, 0) .saturating_add(Weight::from_parts(0, 36283)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs index a4f33fe198ca167e9b8112a13b1549ba727da354..f1b81759ece6c4c360a4bfb61afafb76836d11b2 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_multisig` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_multisig // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,110 +55,110 @@ impl pallet_multisig::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_475_000 picoseconds. - Weight::from_parts(11_904_745, 0) + // Minimum execution time: 12_023_000 picoseconds. + Weight::from_parts(12_643_116, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(492, 0).saturating_mul(z.into())) + // Standard Error: 3 + .saturating_add(Weight::from_parts(582, 0).saturating_mul(z.into())) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + s * (2 ±0)` + // Measured: `229 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 38_857_000 picoseconds. - Weight::from_parts(33_611_791, 0) + // Minimum execution time: 39_339_000 picoseconds. + Weight::from_parts(27_243_033, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 400 - .saturating_add(Weight::from_parts(59_263, 0).saturating_mul(s.into())) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_211, 0).saturating_mul(z.into())) + // Standard Error: 1_319 + .saturating_add(Weight::from_parts(142_212, 0).saturating_mul(s.into())) + // Standard Error: 12 + .saturating_add(Weight::from_parts(1_592, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[3, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `248` // Estimated: `6811` - // Minimum execution time: 25_715_000 picoseconds. - Weight::from_parts(20_607_294, 0) + // Minimum execution time: 27_647_000 picoseconds. 
+ Weight::from_parts(15_828_725, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 285 - .saturating_add(Weight::from_parts(58_225, 0).saturating_mul(s.into())) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_160, 0).saturating_mul(z.into())) + // Standard Error: 908 + .saturating_add(Weight::from_parts(130_880, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_532, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `317 + s * (33 ±0)` + // Measured: `354 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 43_751_000 picoseconds. - Weight::from_parts(37_398_513, 0) + // Minimum execution time: 46_971_000 picoseconds. + Weight::from_parts(32_150_393, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 426 - .saturating_add(Weight::from_parts(70_904, 0).saturating_mul(s.into())) - // Standard Error: 4 - .saturating_add(Weight::from_parts(1_235, 0).saturating_mul(z.into())) + // Standard Error: 1_129 + .saturating_add(Weight::from_parts(154_796, 0).saturating_mul(s.into())) + // Standard Error: 11 + .saturating_add(Weight::from_parts(1_603, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `193 + s * (2 ±0)` + // Measured: `229 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 31_278_000 picoseconds. - Weight::from_parts(32_075_573, 0) + // Minimum execution time: 24_947_000 picoseconds. 
+ Weight::from_parts(26_497_183, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(62_018, 0).saturating_mul(s.into())) + // Standard Error: 1_615 + .saturating_add(Weight::from_parts(147_071, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `211` + // Measured: `248` // Estimated: `6811` - // Minimum execution time: 18_178_000 picoseconds. - Weight::from_parts(18_649_867, 0) + // Minimum execution time: 13_897_000 picoseconds. + Weight::from_parts(14_828_339, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 293 - .saturating_add(Weight::from_parts(56_475, 0).saturating_mul(s.into())) + // Standard Error: 1_136 + .saturating_add(Weight::from_parts(133_925, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Multisig Multisigs (r:1 w:1) - /// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen) + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `383 + s * (1 ±0)` + // Measured: `420 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 32_265_000 picoseconds. - Weight::from_parts(32_984_014, 0) + // Minimum execution time: 28_984_000 picoseconds. + Weight::from_parts(29_853_232, 0) .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 452 - .saturating_add(Weight::from_parts(59_934, 0).saturating_mul(s.into())) + // Standard Error: 650 + .saturating_add(Weight::from_parts(113_440, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_nis.rs b/polkadot/runtime/rococo/src/weights/pallet_nis.rs index 35dad482129e6c63e0aca0aceb544c66fba78ee7..38b41f3a8e241185e1ccb40332bab8e96a846d2b 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_nis.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_nis.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_nis` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_nis // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,202 +50,186 @@ use core::marker::PhantomData; /// Weight functions for `pallet_nis`. pub struct WeightInfo(PhantomData); impl pallet_nis::WeightInfo for WeightInfo { - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6209 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 44_704_000 picoseconds. - Weight::from_parts(44_933_886, 0) + // Minimum execution time: 39_592_000 picoseconds. + Weight::from_parts(38_234_037, 0) .saturating_add(Weight::from_parts(0, 51487)) - // Standard Error: 712 - .saturating_add(Weight::from_parts(71_570, 0).saturating_mul(l.into())) + // Standard Error: 1_237 + .saturating_add(Weight::from_parts(88_816, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn place_bid_max() -> Weight { // Proof Size summary in bytes: // Measured: `54211` // Estimated: `51487` - // Minimum execution time: 126_544_000 picoseconds. - Weight::from_parts(128_271_000, 0) + // Minimum execution time: 134_847_000 picoseconds. 
+ Weight::from_parts(139_510_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `6209 + l * (48 ±0)` // Estimated: `51487` - // Minimum execution time: 47_640_000 picoseconds. - Weight::from_parts(42_214_261, 0) + // Minimum execution time: 43_330_000 picoseconds. + Weight::from_parts(35_097_881, 0) .saturating_add(Weight::from_parts(0, 51487)) - // Standard Error: 732 - .saturating_add(Weight::from_parts(87_277, 0).saturating_mul(l.into())) + // Standard Error: 1_119 + .saturating_add(Weight::from_parts(73_640, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Summary (r:1 w:0) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:0) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn fund_deficit() -> Weight { // Proof Size summary in bytes: // Measured: `225` // Estimated: `3593` - // Minimum execution time: 38_031_000 picoseconds. - Weight::from_parts(38_441_000, 0) + // Minimum execution time: 29_989_000 picoseconds. 
+ Weight::from_parts(30_865_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) fn communify() -> Weight { // Proof Size summary in bytes: - // Measured: `469` + // Measured: `387` // Estimated: `3593` - // Minimum execution time: 69_269_000 picoseconds. - Weight::from_parts(70_000_000, 0) + // Minimum execution time: 58_114_000 picoseconds. 
+ Weight::from_parts(59_540_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) fn privatize() -> Weight { // Proof Size summary in bytes: - // Measured: `659` + // Measured: `543` // Estimated: `3593` - // Minimum execution time: 85_763_000 picoseconds. - Weight::from_parts(86_707_000, 0) + // Minimum execution time: 75_780_000 picoseconds. 
+ Weight::from_parts(77_097_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Holds (r:1 w:1) - /// Proof: Balances Holds (max_values: None, max_size: Some(67), added: 2542, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) fn thaw_private() -> Weight { // Proof Size summary in bytes: // Measured: `387` // Estimated: `3593` - // Minimum execution time: 47_336_000 picoseconds. - Weight::from_parts(47_623_000, 0) + // Minimum execution time: 46_133_000 picoseconds. 
+ Weight::from_parts(47_250_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Nis Receipts (r:1 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances Account (r:1 w:1) - /// Proof: NisCounterpartBalances Account (max_values: None, max_size: Some(112), added: 2587, mode: MaxEncodedLen) - /// Storage: NisCounterpartBalances TotalIssuance (r:1 w:1) - /// Proof: NisCounterpartBalances TotalIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:1 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `NisCounterpartBalances::Account` (r:1 w:1) + /// Proof: `NisCounterpartBalances::Account` (`max_values`: None, `max_size`: Some(112), added: 2587, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn thaw_communal() -> Weight { // Proof Size summary in bytes: - // Measured: `604` + // Measured: `488` // Estimated: `3593` - // Minimum execution time: 90_972_000 picoseconds. - Weight::from_parts(92_074_000, 0) + // Minimum execution time: 77_916_000 picoseconds. 
+ Weight::from_parts(79_427_000, 0) .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Nis Summary (r:1 w:1) - /// Proof: Nis Summary (max_values: Some(1), max_size: Some(40), added: 535, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:0) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Nis QueueTotals (r:1 w:1) - /// Proof: Nis QueueTotals (max_values: Some(1), max_size: Some(6002), added: 6497, mode: MaxEncodedLen) + /// Storage: `Nis::Summary` (r:1 w:1) + /// Proof: `Nis::Summary` (`max_values`: Some(1), `max_size`: Some(40), added: 535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nis::QueueTotals` (r:1 w:1) + /// Proof: `Nis::QueueTotals` (`max_values`: Some(1), `max_size`: Some(6002), added: 6497, mode: `MaxEncodedLen`) fn process_queues() -> Weight { // Proof Size summary in bytes: // Measured: `6658` // Estimated: `7487` - // Minimum execution time: 21_469_000 picoseconds. - Weight::from_parts(21_983_000, 0) + // Minimum execution time: 22_992_000 picoseconds. + Weight::from_parts(24_112_000, 0) .saturating_add(Weight::from_parts(0, 7487)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Nis Queues (r:1 w:1) - /// Proof: Nis Queues (max_values: None, max_size: Some(48022), added: 50497, mode: MaxEncodedLen) + /// Storage: `Nis::Queues` (r:1 w:1) + /// Proof: `Nis::Queues` (`max_values`: None, `max_size`: Some(48022), added: 50497, mode: `MaxEncodedLen`) fn process_queue() -> Weight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `51487` - // Minimum execution time: 4_912_000 picoseconds. - Weight::from_parts(5_013_000, 0) + // Minimum execution time: 3_856_000 picoseconds. + Weight::from_parts(4_125_000, 0) .saturating_add(Weight::from_parts(0, 51487)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Nis Receipts (r:0 w:1) - /// Proof: Nis Receipts (max_values: None, max_size: Some(81), added: 2556, mode: MaxEncodedLen) + /// Storage: `Nis::Receipts` (r:0 w:1) + /// Proof: `Nis::Receipts` (`max_values`: None, `max_size`: Some(81), added: 2556, mode: `MaxEncodedLen`) fn process_bid() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_048_000 picoseconds. - Weight::from_parts(7_278_000, 0) + // Minimum execution time: 4_344_000 picoseconds. + Weight::from_parts(4_545_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs index e051ebd5bbab84cf98d34853666926a0f281e5c1..7a2b77b84d80cad3067f41a63da845af86e16816 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_preimage.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_preimage.rs @@ -16,11 +16,11 @@ //! 
Autogenerated weights for `pallet_preimage` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_preimage // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,184 +50,219 @@ use core::marker::PhantomData; /// Weight functions for `pallet_preimage`. pub struct WeightInfo<T>(PhantomData<T>); impl<T: frame_system::Config> pallet_preimage::WeightInfo for WeightInfo<T> { - fn ensure_updated(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `193 + n * (91 ±0)` - // Estimated: `3593 + n * (2566 ±0)` - // Minimum execution time: 2_000_000 picoseconds. - Weight::from_parts(2_000_000, 3593) - // Standard Error: 13_720 - .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) - } - - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `3556` - // Minimum execution time: 31_040_000 picoseconds.
- Weight::from_parts(31_236_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `114` + // Estimated: `3568` + // Minimum execution time: 40_363_000 picoseconds. + Weight::from_parts(41_052_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_298, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 18_025_000 picoseconds. - Weight::from_parts(18_264_000, 0) + // Minimum execution time: 14_570_000 picoseconds. + Weight::from_parts(14_890_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_364, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 17_122_000 picoseconds. - Weight::from_parts(17_332_000, 0) + // Minimum execution time: 13_933_000 picoseconds. 
+ Weight::from_parts(14_290_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_968, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_349, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_preimage() -> Weight { // Proof Size summary in bytes: - // Measured: `361` - // Estimated: `3556` - // Minimum execution time: 38_218_000 picoseconds. - Weight::from_parts(39_841_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `315` + // Estimated: `3568` + // Minimum execution time: 54_373_000 picoseconds. + Weight::from_parts(58_205_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unnote_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 23_217_000 picoseconds. - Weight::from_parts(24_246_000, 0) + // Minimum execution time: 24_267_000 picoseconds. 
+ Weight::from_parts(27_063_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `260` // Estimated: `3556` - // Minimum execution time: 21_032_000 picoseconds. - Weight::from_parts(21_844_000, 0) + // Minimum execution time: 25_569_000 picoseconds. + Weight::from_parts(27_895_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_no_deposit_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 13_954_000 picoseconds. - Weight::from_parts(14_501_000, 0) + // Minimum execution time: 14_182_000 picoseconds. + Weight::from_parts(16_098_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `114` // Estimated: `3556` - // Minimum execution time: 14_874_000 picoseconds. - Weight::from_parts(15_380_000, 0) + // Minimum execution time: 14_681_000 picoseconds. 
+ Weight::from_parts(15_549_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn request_requested_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_199_000 picoseconds. - Weight::from_parts(10_493_000, 0) + // Minimum execution time: 9_577_000 picoseconds. + Weight::from_parts(10_146_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) - /// Storage: Preimage PreimageFor (r:0 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) fn unrequest_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `3556` - // Minimum execution time: 21_772_000 picoseconds. - Weight::from_parts(22_554_000, 0) + // Minimum execution time: 21_003_000 picoseconds. + Weight::from_parts(23_549_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_unnoted_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_115_000 picoseconds. - Weight::from_parts(10_452_000, 0) + // Minimum execution time: 9_507_000 picoseconds. 
+ Weight::from_parts(10_013_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn unrequest_multi_referenced_preimage() -> Weight { // Proof Size summary in bytes: // Measured: `178` // Estimated: `3556` - // Minimum execution time: 10_031_000 picoseconds. - Weight::from_parts(10_310_000, 0) + // Minimum execution time: 9_293_000 picoseconds. + Weight::from_parts(10_055_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Preimage::StatusFor` (r:1023 w:1023) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1023 w:1023) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1023 w:1023) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 1024]`. + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (227 ±0)` + // Estimated: `990 + n * (2603 ±0)` + // Minimum execution time: 48_846_000 picoseconds. + Weight::from_parts(49_378_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 38_493 + .saturating_add(Weight::from_parts(47_418_285, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs index d9737a85c05ad2b55759c141539f34fc60d3c808..c92025930950e61c3d4d253b2ae33e153ef70ccb 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_proxy.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_proxy.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_proxy` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! 
HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_proxy // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,172 +50,176 @@ use core::marker::PhantomData; /// Weight functions for `pallet_proxy`. pub struct WeightInfo<T>(PhantomData<T>); impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> { - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 15_956_000 picoseconds. - Weight::from_parts(16_300_358, 0) + // Minimum execution time: 11_267_000 picoseconds. + Weight::from_parts(11_798_007, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 652 - .saturating_add(Weight::from_parts(30_807, 0).saturating_mul(p.into())) + // Standard Error: 858 + .saturating_add(Weight::from_parts(43_735, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `554 + a * (68 ±0) + p * (37 ±0)` + // Measured: `416 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 37_584_000 picoseconds. - Weight::from_parts(37_858_207, 0) + // Minimum execution time: 32_791_000 picoseconds.
+ Weight::from_parts(32_776_904, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_868 - .saturating_add(Weight::from_parts(148_967, 0).saturating_mul(a.into())) - // Standard Error: 1_930 - .saturating_add(Weight::from_parts(13_017, 0).saturating_mul(p.into())) + // Standard Error: 2_382 + .saturating_add(Weight::from_parts(143_857, 0).saturating_mul(a.into())) + // Standard Error: 2_461 + .saturating_add(Weight::from_parts(40_024, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, _p: u32, ) -> Weight { + fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + a * (68 ±0)` + // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_642_000 picoseconds. - Weight::from_parts(25_526_588, 0) + // Minimum execution time: 21_831_000 picoseconds. + Weight::from_parts(22_479_938, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_138 - .saturating_add(Weight::from_parts(131_157, 0).saturating_mul(a.into())) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(146_532, 0).saturating_mul(a.into())) + // Standard Error: 1_796 + .saturating_add(Weight::from_parts(7_499, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. - fn reject_announcement(a: u32, _p: u32, ) -> Weight { + fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `469 + a * (68 ±0)` + // Measured: `331 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 24_377_000 picoseconds. - Weight::from_parts(25_464_033, 0) + // Minimum execution time: 21_776_000 picoseconds. 
+ Weight::from_parts(22_762_843, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_116 - .saturating_add(Weight::from_parts(130_722, 0).saturating_mul(a.into())) + // Standard Error: 1_402 + .saturating_add(Weight::from_parts(137_512, 0).saturating_mul(a.into())) + // Standard Error: 1_449 + .saturating_add(Weight::from_parts(3_645, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Proxies (r:1 w:0) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) - /// Storage: Proxy Announcements (r:1 w:1) - /// Proof: Proxy Announcements (max_values: None, max_size: Some(2233), added: 4708, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `a` is `[0, 31]`. /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `486 + a * (68 ±0) + p * (37 ±0)` + // Measured: `348 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 34_202_000 picoseconds. - Weight::from_parts(34_610_079, 0) + // Minimum execution time: 29_108_000 picoseconds. + Weight::from_parts(29_508_910, 0) .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_234 - .saturating_add(Weight::from_parts(134_197, 0).saturating_mul(a.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(15_970, 0).saturating_mul(p.into())) + // Standard Error: 2_268 + .saturating_add(Weight::from_parts(144_770, 0).saturating_mul(a.into())) + // Standard Error: 2_343 + .saturating_add(Weight::from_parts(25_851, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_492_000 picoseconds. - Weight::from_parts(25_984_867, 0) + // Minimum execution time: 18_942_000 picoseconds. 
+ Weight::from_parts(19_518_812, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 893 - .saturating_add(Weight::from_parts(51_868, 0).saturating_mul(p.into())) + // Standard Error: 1_078 + .saturating_add(Weight::from_parts(46_147, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 25_492_000 picoseconds. - Weight::from_parts(26_283_445, 0) + // Minimum execution time: 18_993_000 picoseconds. + Weight::from_parts(19_871_741, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_442 - .saturating_add(Weight::from_parts(53_504, 0).saturating_mul(p.into())) + // Standard Error: 1_883 + .saturating_add(Weight::from_parts(46_033, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `227 + p * (37 ±0)` + // Measured: `89 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 22_083_000 picoseconds. - Weight::from_parts(22_688_835, 0) + // Minimum execution time: 17_849_000 picoseconds. + Weight::from_parts(18_776_170, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 994 - .saturating_add(Weight::from_parts(32_994, 0).saturating_mul(p.into())) + // Standard Error: 1_239 + .saturating_add(Weight::from_parts(27_960, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `239` + // Measured: `101` // Estimated: `4706` - // Minimum execution time: 27_042_000 picoseconds. - Weight::from_parts(27_624_587, 0) + // Minimum execution time: 20_049_000 picoseconds. 
+ Weight::from_parts(20_881_515, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 671 - .saturating_add(Weight::from_parts(5_888, 0).saturating_mul(p.into())) + // Standard Error: 952 + .saturating_add(Weight::from_parts(5_970, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Proxy Proxies (r:1 w:1) - /// Proof: Proxy Proxies (max_values: None, max_size: Some(1241), added: 3716, mode: MaxEncodedLen) + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `264 + p * (37 ±0)` + // Measured: `126 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 23_396_000 picoseconds. - Weight::from_parts(24_003_080, 0) + // Minimum execution time: 18_528_000 picoseconds. + Weight::from_parts(19_384_189, 0) .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 684 - .saturating_add(Weight::from_parts(29_878, 0).saturating_mul(p.into())) + // Standard Error: 1_106 + .saturating_add(Weight::from_parts(35_698, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs index ce9d5fcc0c7131b227e205a470532e8042cc93ae..fa2decb16716656c9329050c6a211b30ccf24769 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_ranked_collective.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_ranked_collective` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_ranked_collective // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_ranked_collective -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -60,8 +62,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `42` // Estimated: `3507` - // Minimum execution time: 13_480_000 picoseconds. - Weight::from_parts(13_786_000, 0) + // Minimum execution time: 13_428_000 picoseconds. 
+ Weight::from_parts(14_019_000, 0) .saturating_add(Weight::from_parts(0, 3507)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) @@ -79,11 +81,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `516 + r * (281 ±0)` // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 28_771_000 picoseconds. - Weight::from_parts(29_256_825, 0) + // Minimum execution time: 28_566_000 picoseconds. + Weight::from_parts(29_346_952, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 21_594 - .saturating_add(Weight::from_parts(14_649_527, 0).saturating_mul(r.into())) + // Standard Error: 21_068 + .saturating_add(Weight::from_parts(14_471_237, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -103,11 +105,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `214 + r * (17 ±0)` // Estimated: `3507` - // Minimum execution time: 16_117_000 picoseconds. - Weight::from_parts(16_978_453, 0) + // Minimum execution time: 16_161_000 picoseconds. + Weight::from_parts(16_981_334, 0) .saturating_add(Weight::from_parts(0, 3507)) - // Standard Error: 4_511 - .saturating_add(Weight::from_parts(324_261, 0).saturating_mul(r.into())) + // Standard Error: 4_596 + .saturating_add(Weight::from_parts(313_386, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -124,11 +126,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `532 + r * (72 ±0)` // Estimated: `3519` - // Minimum execution time: 28_995_000 picoseconds. - Weight::from_parts(31_343_215, 0) + // Minimum execution time: 28_406_000 picoseconds. + Weight::from_parts(31_178_557, 0) .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 16_438 - .saturating_add(Weight::from_parts(637_462, 0).saturating_mul(r.into())) + // Standard Error: 17_737 + .saturating_add(Weight::from_parts(627_757, 0).saturating_mul(r.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -140,15 +142,17 @@ impl pallet_ranked_collective::WeightInfo for WeightInf /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn vote() -> Weight { // Proof Size summary in bytes: // Measured: `603` // Estimated: `83866` - // Minimum execution time: 38_820_000 picoseconds. - Weight::from_parts(40_240_000, 0) + // Minimum execution time: 41_164_000 picoseconds. 
+ Weight::from_parts(42_163_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -161,11 +165,11 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `400 + n * (50 ±0)` // Estimated: `4365 + n * (2540 ±0)` - // Minimum execution time: 12_972_000 picoseconds. - Weight::from_parts(15_829_333, 0) + // Minimum execution time: 13_183_000 picoseconds. + Weight::from_parts(15_604_064, 0) .saturating_add(Weight::from_parts(0, 4365)) - // Standard Error: 1_754 - .saturating_add(Weight::from_parts(1_116_520, 0).saturating_mul(n.into())) + // Standard Error: 2_018 + .saturating_add(Weight::from_parts(1_101_088, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -183,8 +187,8 @@ impl pallet_ranked_collective::WeightInfo for WeightInf // Proof Size summary in bytes: // Measured: `337` // Estimated: `6048` - // Minimum execution time: 44_601_000 picoseconds. - Weight::from_parts(45_714_000, 0) + // Minimum execution time: 43_603_000 picoseconds. + Weight::from_parts(44_809_000, 0) .saturating_add(Weight::from_parts(0, 6048)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(10)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_recovery.rs b/polkadot/runtime/rococo/src/weights/pallet_recovery.rs new file mode 100644 index 0000000000000000000000000000000000000000..ed79aa2b1f175d65d33efad4901d1800fb5cbc28 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_recovery.rs @@ -0,0 +1,186 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_recovery` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_recovery +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_recovery`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_recovery::WeightInfo for WeightInfo<T> { + /// Storage: `Recovery::Proxy` (r:1 w:0) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn as_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `3545` + // Minimum execution time: 7_899_000 picoseconds. + Weight::from_parts(8_205_000, 0) + .saturating_add(Weight::from_parts(0, 3545)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Recovery::Proxy` (r:0 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn set_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_258_000 picoseconds. + Weight::from_parts(6_494_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn create_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3816` + // Minimum execution time: 19_369_000 picoseconds. + Weight::from_parts(20_185_132, 0) + .saturating_add(Weight::from_parts(0, 3816)) + // Standard Error: 4_275 + .saturating_add(Weight::from_parts(78_024, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + fn initiate_recovery() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `3854` + // Minimum execution time: 22_425_000 picoseconds. + Weight::from_parts(23_171_000, 0) + .saturating_add(Weight::from_parts(0, 3854)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`.
+ fn vouch_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `294 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 17_308_000 picoseconds. + Weight::from_parts(18_118_782, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 4_309 + .saturating_add(Weight::from_parts(126_278, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn claim_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `326 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 20_755_000 picoseconds. + Weight::from_parts(21_821_713, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 4_550 + .saturating_add(Weight::from_parts(101_916, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn close_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `447 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 29_957_000 picoseconds. + Weight::from_parts(31_010_309, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 5_913 + .saturating_add(Weight::from_parts(110_070, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn remove_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `204 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 24_430_000 picoseconds. + Weight::from_parts(24_462_856, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 13_646 + .saturating_add(Weight::from_parts(507_715, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn cancel_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `3545` + // Minimum execution time: 9_686_000 picoseconds. 
+ Weight::from_parts(10_071_000, 0) + .saturating_add(Weight::from_parts(0, 3545)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs index 96f172230e13f5aed92b47338b2387f4db79fa27..6dfcea2b8327a853927c45aeef76036752650718 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_fellowship_referenda.rs @@ -16,27 +16,28 @@ //! Autogenerated weights for `pallet_referenda` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_referenda -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -59,10 +60,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `327` + // Measured: `292` // Estimated: `42428` - // Minimum execution time: 29_909_000 picoseconds. - Weight::from_parts(30_645_000, 0) + // Minimum execution time: 24_053_000 picoseconds. + Weight::from_parts(25_121_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -71,15 +72,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `438` + // Measured: `403` // Estimated: `83866` - // Minimum execution time: 54_405_000 picoseconds. 
- Weight::from_parts(55_583_000, 0) + // Minimum execution time: 45_064_000 picoseconds. + Weight::from_parts(46_112_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -89,15 +92,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2076` + // Measured: `2041` // Estimated: `42428` - // Minimum execution time: 110_477_000 picoseconds. - Weight::from_parts(119_187_000, 0) + // Minimum execution time: 94_146_000 picoseconds. + Weight::from_parts(98_587_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -107,15 +112,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `2117` + // Measured: `2082` // Estimated: `42428` - // Minimum execution time: 111_467_000 picoseconds. - Weight::from_parts(117_758_000, 0) + // Minimum execution time: 93_002_000 picoseconds. 
+ Weight::from_parts(96_924_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -125,15 +132,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `774` + // Measured: `739` // Estimated: `83866` - // Minimum execution time: 191_135_000 picoseconds. - Weight::from_parts(210_535_000, 0) + // Minimum execution time: 160_918_000 picoseconds. + Weight::from_parts(175_603_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -143,24 +152,26 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `639` + // Measured: `604` // Estimated: `83866` - // Minimum execution time: 67_168_000 picoseconds. - Weight::from_parts(68_895_000, 0) + // Minimum execution time: 55_253_000 picoseconds. + Weight::from_parts(56_488_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `351` + // Measured: `317` // Estimated: `4365` - // Minimum execution time: 31_298_000 picoseconds. - Weight::from_parts(32_570_000, 0) + // Minimum execution time: 24_497_000 picoseconds. 
+ Weight::from_parts(25_280_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -169,10 +180,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `201` + // Measured: `167` // Estimated: `4365` - // Minimum execution time: 15_674_000 picoseconds. - Weight::from_parts(16_190_000, 0) + // Minimum execution time: 11_374_000 picoseconds. + Weight::from_parts(11_817_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -181,15 +192,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `348` // Estimated: `83866` - // Minimum execution time: 38_927_000 picoseconds. - Weight::from_parts(40_545_000, 0) + // Minimum execution time: 31_805_000 picoseconds. + Weight::from_parts(32_622_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) @@ -197,15 +210,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `484` + // Measured: `449` // Estimated: `83866` - // Minimum execution time: 80_209_000 picoseconds. - Weight::from_parts(82_084_000, 0) + // Minimum execution time: 62_364_000 picoseconds. 
+ Weight::from_parts(63_798_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) @@ -213,10 +228,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `174` + // Measured: `140` // Estimated: `4277` - // Minimum execution time: 9_520_000 picoseconds. - Weight::from_parts(10_088_000, 0) + // Minimum execution time: 8_811_000 picoseconds. + Weight::from_parts(9_224_000, 0) .saturating_add(Weight::from_parts(0, 4277)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -231,10 +246,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `2376` + // Measured: `2341` // Estimated: `42428` - // Minimum execution time: 93_893_000 picoseconds. - Weight::from_parts(101_065_000, 0) + // Minimum execution time: 83_292_000 picoseconds. + Weight::from_parts(89_114_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -249,10 +264,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `2362` + // Measured: `2327` // Estimated: `42428` - // Minimum execution time: 98_811_000 picoseconds. - Weight::from_parts(103_590_000, 0) + // Minimum execution time: 84_648_000 picoseconds. + Weight::from_parts(89_332_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -263,10 +278,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `1841` + // Measured: `1807` // Estimated: `4365` - // Minimum execution time: 43_230_000 picoseconds. - Weight::from_parts(46_120_000, 0) + // Minimum execution time: 40_529_000 picoseconds. + Weight::from_parts(45_217_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -277,10 +292,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `1808` + // Measured: `1774` // Estimated: `4365` - // Minimum execution time: 43_092_000 picoseconds. - Weight::from_parts(46_018_000, 0) + // Minimum execution time: 40_894_000 picoseconds. 
+ Weight::from_parts(45_726_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -293,10 +308,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1824` + // Measured: `1790` // Estimated: `4365` - // Minimum execution time: 49_697_000 picoseconds. - Weight::from_parts(53_795_000, 0) + // Minimum execution time: 48_187_000 picoseconds. + Weight::from_parts(52_655_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -309,10 +324,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `1865` + // Measured: `1831` // Estimated: `4365` - // Minimum execution time: 50_417_000 picoseconds. - Weight::from_parts(53_214_000, 0) + // Minimum execution time: 47_548_000 picoseconds. + Weight::from_parts(51_547_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -323,10 +338,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `300` // Estimated: `42428` - // Minimum execution time: 25_688_000 picoseconds. - Weight::from_parts(26_575_000, 0) + // Minimum execution time: 20_959_000 picoseconds. + Weight::from_parts(21_837_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -337,10 +352,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `383` + // Measured: `348` // Estimated: `42428` - // Minimum execution time: 26_230_000 picoseconds. - Weight::from_parts(27_235_000, 0) + // Minimum execution time: 21_628_000 picoseconds. + Weight::from_parts(22_192_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -349,10 +364,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `208` // Estimated: `4365` - // Minimum execution time: 17_585_000 picoseconds. - Weight::from_parts(18_225_000, 0) + // Minimum execution time: 12_309_000 picoseconds. 
+ Weight::from_parts(12_644_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,10 +382,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `584` + // Measured: `549` // Estimated: `42428` - // Minimum execution time: 38_243_000 picoseconds. - Weight::from_parts(39_959_000, 0) + // Minimum execution time: 31_871_000 picoseconds. + Weight::from_parts(33_123_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -385,10 +400,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `719` + // Measured: `684` // Estimated: `42428` - // Minimum execution time: 88_424_000 picoseconds. - Weight::from_parts(92_969_000, 0) + // Minimum execution time: 73_715_000 picoseconds. + Weight::from_parts(79_980_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) @@ -401,10 +416,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `770` + // Measured: `735` // Estimated: `42428` - // Minimum execution time: 138_207_000 picoseconds. - Weight::from_parts(151_726_000, 0) + // Minimum execution time: 128_564_000 picoseconds. + Weight::from_parts(138_536_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -417,10 +432,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `755` + // Measured: `720` // Estimated: `42428` - // Minimum execution time: 131_001_000 picoseconds. - Weight::from_parts(148_651_000, 0) + // Minimum execution time: 129_775_000 picoseconds. + Weight::from_parts(139_001_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -433,10 +448,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `770` + // Measured: `735` // Estimated: `42428` - // Minimum execution time: 109_612_000 picoseconds. - Weight::from_parts(143_626_000, 0) + // Minimum execution time: 128_233_000 picoseconds. 
+ Weight::from_parts(135_796_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -449,10 +464,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `776` + // Measured: `741` // Estimated: `42428` - // Minimum execution time: 71_754_000 picoseconds. - Weight::from_parts(77_329_000, 0) + // Minimum execution time: 66_995_000 picoseconds. + Weight::from_parts(72_678_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -467,10 +482,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `776` + // Measured: `741` // Estimated: `83866` - // Minimum execution time: 153_244_000 picoseconds. - Weight::from_parts(169_961_000, 0) + // Minimum execution time: 137_764_000 picoseconds. + Weight::from_parts(152_260_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -483,10 +498,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `772` + // Measured: `737` // Estimated: `42428` - // Minimum execution time: 137_997_000 picoseconds. - Weight::from_parts(157_862_000, 0) + // Minimum execution time: 119_992_000 picoseconds. + Weight::from_parts(134_805_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -495,16 +510,18 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn set_some_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `458` + // Measured: `424` // Estimated: `4365` - // Minimum execution time: 21_794_000 picoseconds. - Weight::from_parts(22_341_000, 0) + // Minimum execution time: 20_927_000 picoseconds. 
+ Weight::from_parts(21_802_000, 0) .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) @@ -513,10 +530,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn clear_metadata() -> Weight { // Proof Size summary in bytes: - // Measured: `319` + // Measured: `285` // Estimated: `4365` - // Minimum execution time: 18_458_000 picoseconds. - Weight::from_parts(19_097_000, 0) + // Minimum execution time: 14_253_000 picoseconds. + Weight::from_parts(15_031_000, 0) .saturating_add(Weight::from_parts(0, 4365)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs index b7cc5df28b91cc2cc36cade14784b41d7d34ff71..c35925198f9d0d3c615416aa8dbb3e8e371eecca 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_referenda_referenda.rs @@ -16,27 +16,28 @@ //! Autogenerated weights for `pallet_referenda` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_referenda -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,10 +58,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn submit() -> Weight { // Proof Size summary in bytes: - // Measured: `324` + // Measured: `185` // Estimated: `42428` - // Minimum execution time: 39_852_000 picoseconds. - Weight::from_parts(41_610_000, 0) + // Minimum execution time: 28_612_000 picoseconds. 
+ Weight::from_parts(30_060_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) @@ -69,15 +70,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 52_588_000 picoseconds. - Weight::from_parts(54_154_000, 0) + // Minimum execution time: 42_827_000 picoseconds. + Weight::from_parts(44_072_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -87,15 +90,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3334` + // Measured: `3225` // Estimated: `42428` - // Minimum execution time: 70_483_000 picoseconds. - Weight::from_parts(72_731_000, 0) + // Minimum execution time: 56_475_000 picoseconds. + Weight::from_parts(58_888_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -105,60 +110,62 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3354` + // Measured: `3245` // Estimated: `42428` - // Minimum execution time: 68_099_000 picoseconds. - Weight::from_parts(71_560_000, 0) + // Minimum execution time: 56_542_000 picoseconds. 
+ Weight::from_parts(58_616_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 64_357_000 picoseconds. - Weight::from_parts(66_081_000, 0) + // Minimum execution time: 51_218_000 picoseconds. + Weight::from_parts(53_148_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn place_decision_deposit_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `577` + // Measured: `438` // Estimated: `83866` - // Minimum execution time: 62_709_000 picoseconds. - Weight::from_parts(64_534_000, 0) + // Minimum execution time: 49_097_000 picoseconds. + Weight::from_parts(50_796_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn refund_decision_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `417` + // Measured: `279` // Estimated: `4401` - // Minimum execution time: 31_296_000 picoseconds. - Weight::from_parts(32_221_000, 0) + // Minimum execution time: 23_720_000 picoseconds. 
+ Weight::from_parts(24_327_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -167,10 +174,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn refund_submission_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `407` + // Measured: `269` // Estimated: `4401` - // Minimum execution time: 31_209_000 picoseconds. - Weight::from_parts(32_168_000, 0) + // Minimum execution time: 24_089_000 picoseconds. + Weight::from_parts(24_556_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -179,15 +186,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn cancel() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `83866` - // Minimum execution time: 38_887_000 picoseconds. - Weight::from_parts(40_193_000, 0) + // Minimum execution time: 29_022_000 picoseconds. + Weight::from_parts(29_590_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) @@ -195,15 +204,17 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Referenda::MetadataOf` (r:1 w:0) /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) fn kill() -> Weight { // Proof Size summary in bytes: - // Measured: `726` + // Measured: `587` // Estimated: `83866` - // Minimum execution time: 106_054_000 picoseconds. - Weight::from_parts(108_318_000, 0) + // Minimum execution time: 81_920_000 picoseconds. + Weight::from_parts(84_492_000, 0) .saturating_add(Weight::from_parts(0, 83866)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::TrackQueue` (r:1 w:0) /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) @@ -211,10 +222,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) fn one_fewer_deciding_queue_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `240` + // Measured: `102` // Estimated: `5477` - // Minimum execution time: 9_263_000 picoseconds. 
- Weight::from_parts(9_763_000, 0) + // Minimum execution time: 8_134_000 picoseconds. + Weight::from_parts(8_574_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -223,36 +234,32 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `3254` + // Measured: `3115` // Estimated: `42428` - // Minimum execution time: 50_080_000 picoseconds. - Weight::from_parts(51_858_000, 0) + // Minimum execution time: 39_932_000 picoseconds. + Weight::from_parts(42_086_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::TrackQueue` (r:1 w:1) /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn one_fewer_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `3254` + // Measured: `3115` // Estimated: `42428` - // Minimum execution time: 53_889_000 picoseconds. - Weight::from_parts(55_959_000, 0) + // Minimum execution time: 42_727_000 picoseconds. + Weight::from_parts(44_280_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) @@ -261,10 +268,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_insertion() -> Weight { // Proof Size summary in bytes: - // Measured: `3077` + // Measured: `2939` // Estimated: `5477` - // Minimum execution time: 23_266_000 picoseconds. - Weight::from_parts(24_624_000, 0) + // Minimum execution time: 20_918_000 picoseconds. 
+ Weight::from_parts(22_180_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -275,10 +282,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_requeued_slide() -> Weight { // Proof Size summary in bytes: - // Measured: `3077` + // Measured: `2939` // Estimated: `5477` - // Minimum execution time: 22_846_000 picoseconds. - Weight::from_parts(24_793_000, 0) + // Minimum execution time: 20_943_000 picoseconds. + Weight::from_parts(21_932_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -291,10 +298,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3081` + // Measured: `2943` // Estimated: `5477` - // Minimum execution time: 28_284_000 picoseconds. - Weight::from_parts(29_940_000, 0) + // Minimum execution time: 25_197_000 picoseconds. + Weight::from_parts(26_083_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -307,10 +314,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) fn nudge_referendum_not_queued() -> Weight { // Proof Size summary in bytes: - // Measured: `3101` + // Measured: `2963` // Estimated: `5477` - // Minimum execution time: 28_133_000 picoseconds. - Weight::from_parts(29_638_000, 0) + // Minimum execution time: 24_969_000 picoseconds. + Weight::from_parts(26_096_000, 0) .saturating_add(Weight::from_parts(0, 5477)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -321,10 +328,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_no_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `437` + // Measured: `298` // Estimated: `42428` - // Minimum execution time: 25_710_000 picoseconds. - Weight::from_parts(26_500_000, 0) + // Minimum execution time: 18_050_000 picoseconds. + Weight::from_parts(18_790_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -335,10 +342,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_preparing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 25_935_000 picoseconds. - Weight::from_parts(26_803_000, 0) + // Minimum execution time: 18_357_000 picoseconds. 
+ Weight::from_parts(18_957_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -347,10 +354,10 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) fn nudge_referendum_timed_out() -> Weight { // Proof Size summary in bytes: - // Measured: `344` + // Measured: `206` // Estimated: `4401` - // Minimum execution time: 17_390_000 picoseconds. - Weight::from_parts(18_042_000, 0) + // Minimum execution time: 11_479_000 picoseconds. + Weight::from_parts(11_968_000, 0) .saturating_add(Weight::from_parts(0, 4401)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -359,150 +366,136 @@ impl pallet_referenda::WeightInfo for WeightInfo { /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_failing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 35_141_000 picoseconds. - Weight::from_parts(36_318_000, 0) + // Minimum execution time: 24_471_000 picoseconds. + Weight::from_parts(25_440_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) /// Storage: `Referenda::DecidingCount` (r:1 w:1) /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_deciding_passing() -> Weight { // Proof Size summary in bytes: - // Measured: `485` + // Measured: `346` // Estimated: `42428` - // Minimum execution time: 37_815_000 picoseconds. - Weight::from_parts(39_243_000, 0) + // Minimum execution time: 26_580_000 picoseconds. 
+ Weight::from_parts(27_570_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_begin_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 30_779_000 picoseconds. - Weight::from_parts(31_845_000, 0) + // Minimum execution time: 24_331_000 picoseconds. + Weight::from_parts(25_291_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_end_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `521` + // Measured: `382` // Estimated: `42428` - // Minimum execution time: 31_908_000 picoseconds. - Weight::from_parts(33_253_000, 0) + // Minimum execution time: 24_768_000 picoseconds. + Weight::from_parts(25_746_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_not_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 28_951_000 picoseconds. - Weight::from_parts(30_004_000, 0) + // Minimum execution time: 23_171_000 picoseconds. 
+ Weight::from_parts(24_161_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_continue_confirming() -> Weight { // Proof Size summary in bytes: - // Measured: `542` + // Measured: `403` // Estimated: `42428` - // Minimum execution time: 27_750_000 picoseconds. - Weight::from_parts(28_588_000, 0) + // Minimum execution time: 22_263_000 picoseconds. + Weight::from_parts(23_062_000, 0) .saturating_add(Weight::from_parts(0, 42428)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:2 w:2) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn nudge_referendum_approved() -> Weight { // Proof Size summary in bytes: - // Measured: `542` + // Measured: `403` // Estimated: `83866` - // Minimum execution time: 43_950_000 picoseconds. - Weight::from_parts(46_164_000, 0) + // Minimum execution time: 33_710_000 picoseconds. + Weight::from_parts(34_871_000, 0) .saturating_add(Weight::from_parts(0, 83866)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) - /// Storage: `Balances::InactiveIssuance` (r:1 w:0) - /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) fn nudge_referendum_rejected() -> Weight { // Proof Size summary in bytes: - // Measured: `538` + // Measured: `399` // Estimated: `42428` - // Minimum execution time: 31_050_000 picoseconds. - Weight::from_parts(32_169_000, 0) + // Minimum execution time: 24_260_000 picoseconds. 
+ Weight::from_parts(25_104_000, 0)
 .saturating_add(Weight::from_parts(0, 42428))
- .saturating_add(T::DbWeight::get().reads(3))
+ .saturating_add(T::DbWeight::get().reads(2))
 .saturating_add(T::DbWeight::get().writes(2))
 }
 /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0)
 /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`)
 /// Storage: `Preimage::StatusFor` (r:1 w:0)
 /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
+ /// Storage: `Preimage::RequestStatusFor` (r:1 w:0)
+ /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
 /// Storage: `Referenda::MetadataOf` (r:0 w:1)
 /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
 fn set_some_metadata() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `560`
+ // Measured: `422`
 // Estimated: `4401`
- // Minimum execution time: 21_193_000 picoseconds.
- Weight::from_parts(22_116_000, 0)
+ // Minimum execution time: 19_821_000 picoseconds.
+ Weight::from_parts(20_641_000, 0)
 .saturating_add(Weight::from_parts(0, 4401))
- .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().reads(3))
 .saturating_add(T::DbWeight::get().writes(1))
 }
 /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0)
@@ -511,10 +504,10 @@ impl pallet_referenda::WeightInfo for WeightInfo {
 /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
 fn clear_metadata() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `421`
+ // Measured: `283`
 // Estimated: `4401`
- // Minimum execution time: 18_065_000 picoseconds.
- Weight::from_parts(18_781_000, 0)
+ // Minimum execution time: 13_411_000 picoseconds.
+ Weight::from_parts(14_070_000, 0)
 .saturating_add(Weight::from_parts(0, 4401))
 .saturating_add(T::DbWeight::get().reads(2))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs
index 0f36dbd384df87d5a90c2a11392923e7ae8633aa..5f6b41d2b54ea95b398877b0ebcaccfbfce313ac 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs
@@ -16,24 +16,26 @@
 //! Autogenerated weights for `pallet_scheduler`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 // Executed Command:
-// target/production/polkadot
+// ./target/production/polkadot
 // benchmark
 // pallet
+// --chain=rococo-dev
 // --steps=50
 // --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_scheduler
 // --extrinsic=*
+// --execution=wasm
 // --wasm-execution=compiled
-// --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
-// --pallet=pallet_scheduler
-// --chain=rococo-dev
 // --header=./polkadot/file_header.txt
 // --output=./polkadot/runtime/rococo/src/weights/
@@ -54,8 +56,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `68`
 // Estimated: `1489`
- // Minimum execution time: 2_869_000 picoseconds.
- Weight::from_parts(3_109_000, 0)
+ // Minimum execution time: 3_114_000 picoseconds.
+ Weight::from_parts(3_245_000, 0)
 .saturating_add(Weight::from_parts(0, 1489))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -67,11 +69,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `115 + s * (177 ±0)`
 // Estimated: `42428`
- // Minimum execution time: 3_326_000 picoseconds.
- Weight::from_parts(5_818_563, 0)
+ // Minimum execution time: 3_430_000 picoseconds.
+ Weight::from_parts(6_250_920, 0)
 .saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 1_261
- .saturating_add(Weight::from_parts(336_446, 0).saturating_mul(s.into()))
+ // Standard Error: 1_350
+ .saturating_add(Weight::from_parts(333_245, 0).saturating_mul(s.into()))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -79,8 +81,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 3_007_000 picoseconds.
- Weight::from_parts(3_197_000, 0)
+ // Minimum execution time: 3_166_000 picoseconds.
+ Weight::from_parts(3_295_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 }
 /// Storage: `Preimage::PreimageFor` (r:1 w:1)
@@ -94,11 +96,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `251 + s * (1 ±0)`
 // Estimated: `3716 + s * (1 ±0)`
- // Minimum execution time: 16_590_000 picoseconds.
- Weight::from_parts(16_869_000, 0)
+ // Minimum execution time: 17_072_000 picoseconds.
+ Weight::from_parts(17_393_000, 0)
 .saturating_add(Weight::from_parts(0, 3716))
- // Standard Error: 9
- .saturating_add(Weight::from_parts(1_308, 0).saturating_mul(s.into()))
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(1_204, 0).saturating_mul(s.into()))
 .saturating_add(T::DbWeight::get().reads(3))
 .saturating_add(T::DbWeight::get().writes(2))
 .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into()))
@@ -109,8 +111,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 4_320_000 picoseconds.
- Weight::from_parts(4_594_000, 0)
+ // Minimum execution time: 4_566_000 picoseconds.
+ Weight::from_parts(4_775_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 .saturating_add(T::DbWeight::get().writes(1))
 }
@@ -118,24 +120,24 @@ impl pallet_scheduler::WeightInfo for WeightInfo {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `0`
- // Minimum execution time: 2_956_000 picoseconds.
- Weight::from_parts(3_216_000, 0) + // Minimum execution time: 3_180_000 picoseconds. + Weight::from_parts(3_339_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_824_000 picoseconds. - Weight::from_parts(1_929_000, 0) + // Minimum execution time: 1_656_000 picoseconds. + Weight::from_parts(1_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_749_000 picoseconds. - Weight::from_parts(1_916_000, 0) + // Minimum execution time: 1_628_000 picoseconds. + Weight::from_parts(1_840_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) @@ -145,16 +147,18 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 9_086_000 picoseconds. - Weight::from_parts(11_733_696, 0) + // Minimum execution time: 9_523_000 picoseconds. + Weight::from_parts(12_482_434, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_362 - .saturating_add(Weight::from_parts(375_266, 0).saturating_mul(s.into())) + // Standard Error: 1_663 + .saturating_add(Weight::from_parts(370_122, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Lookup` (r:0 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. @@ -162,13 +166,13 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 12_716_000 picoseconds. - Weight::from_parts(12_529_180, 0) + // Minimum execution time: 14_649_000 picoseconds. + Weight::from_parts(14_705_132, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 867 - .saturating_add(Weight::from_parts(548_188, 0).saturating_mul(s.into())) + // Standard Error: 1_126 + .saturating_add(Weight::from_parts(547_438, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `Scheduler::Lookup` (r:1 w:1) /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) @@ -179,11 +183,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 12_053_000 picoseconds. - Weight::from_parts(15_358_056, 0) + // Minimum execution time: 12_335_000 picoseconds. 
+ Weight::from_parts(16_144_217, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 3_176 - .saturating_add(Weight::from_parts(421_589, 0).saturating_mul(s.into())) + // Standard Error: 3_533 + .saturating_add(Weight::from_parts(413_823, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -191,49 +195,48 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `318 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 14_803_000 picoseconds. - Weight::from_parts(15_805_714, 0) + // Minimum execution time: 16_906_000 picoseconds. + Weight::from_parts(17_846_662, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 2_597 - .saturating_add(Weight::from_parts(611_053, 0).saturating_mul(s.into())) + // Standard Error: 2_687 + .saturating_add(Weight::from_parts(613_356, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: `Scheduler::Retries` (r:1 w:2) - /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Agenda` (r:1 w:1) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn schedule_retry(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `196` + // Measured: `155` // Estimated: `42428` - // Minimum execution time: 13_156_000 picoseconds. - Weight::from_parts(13_801_287, 0) + // Minimum execution time: 8_988_000 picoseconds. + Weight::from_parts(9_527_838, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 568 - .saturating_add(Weight::from_parts(35_441, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) + // Standard Error: 523 + .saturating_add(Weight::from_parts(25_453, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Scheduler::Agenda` (r:1 w:0) /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. 
fn set_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `115 + s * (177 ±0)` + // Measured: `8965` // Estimated: `42428` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_081_460, 0) + // Minimum execution time: 23_337_000 picoseconds. + Weight::from_parts(24_255_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -244,13 +247,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn set_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `324 + s * (185 ±0)` + // Measured: `9643` // Estimated: `42428` - // Minimum execution time: 10_673_000 picoseconds. - Weight::from_parts(12_212_185, 0) + // Minimum execution time: 30_704_000 picoseconds. + Weight::from_parts(31_646_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -259,13 +261,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn cancel_retry() -> Weight { // Proof Size summary in bytes: - // Measured: `115 + s * (177 ±0)` + // Measured: `8977` // Estimated: `42428` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_081_460, 0) + // Minimum execution time: 22_279_000 picoseconds. + Weight::from_parts(23_106_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -276,13 +277,12 @@ impl pallet_scheduler::WeightInfo for WeightInfo { /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// Storage: `Scheduler::Retries` (r:0 w:1) /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 50]`. fn cancel_retry_named() -> Weight { // Proof Size summary in bytes: - // Measured: `324 + s * (185 ±0)` + // Measured: `9655` // Estimated: `42428` - // Minimum execution time: 10_673_000 picoseconds. - Weight::from_parts(12_212_185, 0) + // Minimum execution time: 29_649_000 picoseconds. + Weight::from_parts(30_472_000, 0) .saturating_add(Weight::from_parts(0, 42428)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs index 694174954fc78b1eeb4d93f78e6b26d5bab22b83..ecc31dc3fa9df6c17a5541fcd5a6d957f6ca2e2d 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs @@ -16,24 +16,26 @@ //! Autogenerated weights for `pallet_sudo` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_sudo // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_sudo -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -54,8 +56,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 8_432_000 picoseconds. - Weight::from_parts(8_757_000, 0) + // Minimum execution time: 8_336_000 picoseconds. + Weight::from_parts(8_569_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -66,8 +68,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 9_167_000 picoseconds. - Weight::from_parts(9_397_000, 0) + // Minimum execution time: 8_858_000 picoseconds. + Weight::from_parts(9_238_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -77,8 +79,8 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 9_133_000 picoseconds. - Weight::from_parts(9_573_000, 0) + // Minimum execution time: 8_921_000 picoseconds. + Weight::from_parts(9_324_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -88,10 +90,21 @@ impl pallet_sudo::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 7_374_000 picoseconds. - Weight::from_parts(7_702_000, 0) + // Minimum execution time: 7_398_000 picoseconds. + Weight::from_parts(7_869_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 3_146_000 picoseconds. 
+ Weight::from_parts(3_314_000, 0)
+ .saturating_add(Weight::from_parts(0, 1517))
+ .saturating_add(T::DbWeight::get().reads(1))
+ }
 }
diff --git a/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs b/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs
index 1bb2e227ab7d540ba8cfd9e4609e957bf025d57c..7d79621b9e65f959221fe382254db93db6237ff7 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_timestamp.rs
@@ -16,11 +16,11 @@
 //! Autogenerated weights for `pallet_timestamp`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 // Executed Command:
 // ./target/production/polkadot
@@ -29,12 +29,15 @@
 // --chain=rococo-dev
 // --steps=50
 // --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
 // --pallet=pallet_timestamp
 // --extrinsic=*
 // --execution=wasm
 // --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,26 +50,26 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_timestamp`.
 pub struct WeightInfo(PhantomData);
 impl pallet_timestamp::WeightInfo for WeightInfo {
- /// Storage: Timestamp Now (r:1 w:1)
- /// Proof: Timestamp Now (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen)
- /// Storage: Babe CurrentSlot (r:1 w:0)
- /// Proof: Babe CurrentSlot (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen)
+ /// Storage: `Timestamp::Now` (r:1 w:1)
+ /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
+ /// Storage: `Babe::CurrentSlot` (r:1 w:0)
+ /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
 fn set() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `311`
+ // Measured: `137`
 // Estimated: `1493`
- // Minimum execution time: 10_103_000 picoseconds.
- Weight::from_parts(10_597_000, 0)
+ // Minimum execution time: 5_596_000 picoseconds.
+ Weight::from_parts(5_823_000, 0)
 .saturating_add(Weight::from_parts(0, 1493))
 .saturating_add(T::DbWeight::get().reads(2))
 .saturating_add(T::DbWeight::get().writes(1))
 }
 fn on_finalize() -> Weight {
 // Proof Size summary in bytes:
- // Measured: `94`
+ // Measured: `57`
 // Estimated: `0`
- // Minimum execution time: 4_718_000 picoseconds.
- Weight::from_parts(4_839_000, 0)
+ // Minimum execution time: 2_777_000 picoseconds.
+ Weight::from_parts(2_900_000, 0)
 .saturating_add(Weight::from_parts(0, 0))
 }
 }
diff --git a/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs b/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs
new file mode 100644
index 0000000000000000000000000000000000000000..44dfab289fb2dd22fd6bc81cfaae81769d14313f
--- /dev/null
+++ b/polkadot/runtime/rococo/src/weights/pallet_transaction_payment.rs
@@ -0,0 +1,68 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! Autogenerated weights for `pallet_transaction_payment`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot
+// benchmark
+// pallet
+// --chain=rococo-dev
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_transaction_payment
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_transaction_payment`.
+pub struct WeightInfo(PhantomData);
+impl pallet_transaction_payment::WeightInfo for WeightInfo {
+ /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0)
+ /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+ /// Storage: `Authorship::Author` (r:1 w:0)
+ /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
+ /// Storage: `System::Digest` (r:1 w:0)
+ /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ fn charge_transaction_payment() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `252`
+ // Estimated: `1737`
+ // Minimum execution time: 33_070_000 picoseconds.
+ Weight::from_parts(33_730_000, 0)
+ .saturating_add(Weight::from_parts(0, 1737))
+ .saturating_add(T::DbWeight::get().reads(3))
+ }
+}
diff --git a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs
index 06246ada72f16fb04623f3fc6534265cc1156b79..42d7b26076454023228f06208649f0dae25d1114 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_treasury.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_treasury.rs
@@ -16,25 +16,28 @@
 //!
Autogenerated weights for `pallet_treasury` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-07, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/debug/polkadot +// ./target/production/polkadot // benchmark // pallet // --chain=rococo-dev // --steps=50 -// --repeat=2 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_treasury // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --output=./runtime/rococo/src/weights/ -// --header=./file_header.txt +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,18 +50,18 @@ use core::marker::PhantomData; /// Weight functions for `pallet_treasury`. pub struct WeightInfo(PhantomData); impl pallet_treasury::WeightInfo for WeightInfo { - /// Storage: Treasury ProposalCount (r:1 w:1) - /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:0 w:1) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) fn spend_local() -> Weight { // Proof Size summary in bytes: - // Measured: `42` + // Measured: `142` // Estimated: `1887` - // Minimum execution time: 177_000_000 picoseconds. - Weight::from_parts(191_000_000, 0) + // Minimum execution time: 9_928_000 picoseconds. + Weight::from_parts(10_560_000, 0) .saturating_add(Weight::from_parts(0, 1887)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) @@ -67,111 +70,103 @@ impl pallet_treasury::WeightInfo for WeightInfo { /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) fn remove_approval() -> Weight { // Proof Size summary in bytes: - // Measured: `127` + // Measured: `227` // Estimated: `1887` - // Minimum execution time: 80_000_000 picoseconds. - Weight::from_parts(82_000_000, 0) + // Minimum execution time: 5_386_000 picoseconds. 
+ Weight::from_parts(5_585_000, 0) .saturating_add(Weight::from_parts(0, 1887)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Treasury Deactivated (r:1 w:1) - /// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Treasury Approvals (r:1 w:1) - /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) - /// Storage: Treasury Proposals (r:99 w:99) - /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) - /// Storage: System Account (r:199 w:199) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Bounties BountyApprovals (r:1 w:1) - /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:99 w:99) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:199 w:199) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyApprovals` (r:1 w:1) + /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) /// The range of component `p` is `[0, 99]`. fn on_initialize_proposals(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `331 + p * (251 ±0)` + // Measured: `431 + p * (251 ±0)` // Estimated: `3593 + p * (5206 ±0)` - // Minimum execution time: 887_000_000 picoseconds. - Weight::from_parts(828_616_021, 0) + // Minimum execution time: 43_737_000 picoseconds. 
+ Weight::from_parts(39_883_021, 0) .saturating_add(Weight::from_parts(0, 3593)) - // Standard Error: 695_351 - .saturating_add(Weight::from_parts(566_114_524, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Standard Error: 12_917 + .saturating_add(Weight::from_parts(31_796_205, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) } - /// Storage: AssetRate ConversionRateToNative (r:1 w:0) - /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) - /// Storage: Treasury SpendCount (r:1 w:1) - /// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Treasury Spends (r:0 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + /// Storage: `Treasury::SpendCount` (r:1 w:1) + /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Spends` (r:0 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) fn spend() -> Weight { // Proof Size summary in bytes: - // Measured: `114` - // Estimated: `4702` - // Minimum execution time: 208_000_000 picoseconds. - Weight::from_parts(222_000_000, 0) - .saturating_add(Weight::from_parts(0, 4702)) + // Measured: `215` + // Estimated: `4703` + // Minimum execution time: 16_829_000 picoseconds. 
+ Weight::from_parts(17_251_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn payout() -> Weight { // Proof Size summary in bytes: - // Measured: `737` - // Estimated: `5313` - // Minimum execution time: 551_000_000 picoseconds. - Weight::from_parts(569_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `458` + // Estimated: `5318` + // Minimum execution time: 41_554_000 picoseconds. 
+ Weight::from_parts(42_451_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) - /// Storage: XcmPallet Queries (r:1 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn check_status() -> Weight { // Proof Size summary in bytes: - // Measured: `442` - // Estimated: `5313` - // Minimum execution time: 245_000_000 picoseconds. - Weight::from_parts(281_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) + // Measured: `306` + // Estimated: `5318` + // Minimum execution time: 22_546_000 picoseconds. + Weight::from_parts(23_151_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Treasury Spends (r:1 w:1) - /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) fn void_spend() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `5313` - // Minimum execution time: 147_000_000 picoseconds. - Weight::from_parts(160_000_000, 0) - .saturating_add(Weight::from_parts(0, 5313)) + // Measured: `278` + // Estimated: `5318` + // Minimum execution time: 12_169_000 picoseconds. + Weight::from_parts(12_484_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_utility.rs b/polkadot/runtime/rococo/src/weights/pallet_utility.rs index f50f60eaad7fec2c09e9d5620db0551f1c7b1364..6f2a374247f8f28d1deb281b8256393a74294fad 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_utility.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_utility.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_utility` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_utility // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,18 +55,18 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_738_000 picoseconds. - Weight::from_parts(2_704_821, 0) + // Minimum execution time: 4_041_000 picoseconds. + Weight::from_parts(5_685_496, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_999 - .saturating_add(Weight::from_parts(4_627_278, 0).saturating_mul(c.into())) + // Standard Error: 810 + .saturating_add(Weight::from_parts(3_177_197, 0).saturating_mul(c.into())) } fn as_derivative() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_294_000 picoseconds. - Weight::from_parts(5_467_000, 0) + // Minimum execution time: 3_667_000 picoseconds. + Weight::from_parts(3_871_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `c` is `[0, 1000]`. @@ -71,18 +74,18 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_828_000 picoseconds. - Weight::from_parts(4_650_678, 0) + // Minimum execution time: 4_116_000 picoseconds. + Weight::from_parts(6_453_932, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_789 - .saturating_add(Weight::from_parts(4_885_004, 0).saturating_mul(c.into())) + // Standard Error: 825 + .saturating_add(Weight::from_parts(3_366_112, 0).saturating_mul(c.into())) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_020_000 picoseconds. - Weight::from_parts(9_205_000, 0) + // Minimum execution time: 5_630_000 picoseconds. + Weight::from_parts(5_956_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// The range of component `c` is `[0, 1000]`. @@ -90,10 +93,10 @@ impl pallet_utility::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_852_000 picoseconds. - Weight::from_parts(20_703_134, 0) + // Minimum execution time: 4_165_000 picoseconds. + Weight::from_parts(5_442_561, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_924 - .saturating_add(Weight::from_parts(4_604_529, 0).saturating_mul(c.into())) + // Standard Error: 460 + .saturating_add(Weight::from_parts(3_173_577, 0).saturating_mul(c.into())) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs index 2596207d5837df6776177b71480c57af61ac02ce..c21ab0877742019b238adf33bec4a378d4cb82ea 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_vesting.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_vesting.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_vesting` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=pallet_vesting // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,143 +50,143 @@ use core::marker::PhantomData; /// Weight functions for `pallet_vesting`. pub struct WeightInfo(PhantomData); impl pallet_vesting::WeightInfo for WeightInfo { - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `277 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 32_820_000 picoseconds. - Weight::from_parts(31_640_992, 0) + // Minimum execution time: 29_288_000 picoseconds. 
+ Weight::from_parts(29_095_507, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 449 - .saturating_add(Weight::from_parts(45_254, 0).saturating_mul(l.into())) - // Standard Error: 800 - .saturating_add(Weight::from_parts(72_178, 0).saturating_mul(s.into())) + // Standard Error: 1_679 + .saturating_add(Weight::from_parts(33_164, 0).saturating_mul(l.into())) + // Standard Error: 2_988 + .saturating_add(Weight::from_parts(67_092, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `277 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_054_000 picoseconds. - Weight::from_parts(35_825_428, 0) + // Minimum execution time: 31_003_000 picoseconds. 
+ Weight::from_parts(30_528_438, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 749 - .saturating_add(Weight::from_parts(31_738, 0).saturating_mul(l.into())) - // Standard Error: 1_333 - .saturating_add(Weight::from_parts(40_580, 0).saturating_mul(s.into())) + // Standard Error: 1_586 + .saturating_add(Weight::from_parts(35_429, 0).saturating_mul(l.into())) + // Standard Error: 2_823 + .saturating_add(Weight::from_parts(76_505, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `380 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 35_440_000 picoseconds. - Weight::from_parts(34_652_647, 0) + // Minimum execution time: 31_269_000 picoseconds. 
+ Weight::from_parts(30_661_898, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 517 - .saturating_add(Weight::from_parts(41_942, 0).saturating_mul(l.into())) - // Standard Error: 920 - .saturating_add(Weight::from_parts(66_074, 0).saturating_mul(s.into())) + // Standard Error: 1_394 + .saturating_add(Weight::from_parts(39_300, 0).saturating_mul(l.into())) + // Standard Error: 2_480 + .saturating_add(Weight::from_parts(78_849, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `380 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 38_880_000 picoseconds. - Weight::from_parts(39_625_819, 0) + // Minimum execution time: 33_040_000 picoseconds. 
+ Weight::from_parts(32_469_674, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 1_032 - .saturating_add(Weight::from_parts(29_856, 0).saturating_mul(l.into())) - // Standard Error: 1_837 - .saturating_add(Weight::from_parts(6_210, 0).saturating_mul(s.into())) + // Standard Error: 1_418 + .saturating_add(Weight::from_parts(44_206, 0).saturating_mul(l.into())) + // Standard Error: 2_523 + .saturating_add(Weight::from_parts(74_224, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `451 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 68_294_000 picoseconds. - Weight::from_parts(68_313_394, 0) + // Minimum execution time: 62_032_000 picoseconds. 
+ Weight::from_parts(63_305_621, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 983 - .saturating_add(Weight::from_parts(48_156, 0).saturating_mul(l.into())) - // Standard Error: 1_750 - .saturating_add(Weight::from_parts(87_719, 0).saturating_mul(s.into())) + // Standard Error: 2_277 + .saturating_add(Weight::from_parts(42_767, 0).saturating_mul(l.into())) + // Standard Error: 4_051 + .saturating_add(Weight::from_parts(65_487, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `554 + l * (25 ±0) + s * (36 ±0)` // Estimated: `6196` - // Minimum execution time: 70_529_000 picoseconds. - Weight::from_parts(70_619_962, 0) + // Minimum execution time: 63_303_000 picoseconds. + Weight::from_parts(65_180_847, 0) .saturating_add(Weight::from_parts(0, 6196)) - // Standard Error: 1_259 - .saturating_add(Weight::from_parts(50_685, 0).saturating_mul(l.into())) - // Standard Error: 2_241 - .saturating_add(Weight::from_parts(91_444, 0).saturating_mul(s.into())) + // Standard Error: 2_220 + .saturating_add(Weight::from_parts(28_829, 0).saturating_mul(l.into())) + // Standard Error: 3_951 + .saturating_add(Weight::from_parts(84_970, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -192,59 +195,70 @@ impl pallet_vesting::WeightInfo for WeightInfo { /// Storage: `Balances::Locks` (r:1 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. 
- fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `555 + l * (25 ±0) + s * (36 ±0)` - // Estimated: `4764` - // Minimum execution time: 41_497_000 picoseconds. - Weight::from_parts(38_763_834, 4764) - // Standard Error: 2_030 - .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) - // Standard Error: 3_750 - .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `378 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 36_428_000 picoseconds. - Weight::from_parts(35_604_430, 0) + // Minimum execution time: 31_440_000 picoseconds. + Weight::from_parts(30_773_053, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 504 - .saturating_add(Weight::from_parts(43_191, 0).saturating_mul(l.into())) - // Standard Error: 931 - .saturating_add(Weight::from_parts(66_795, 0).saturating_mul(s.into())) + // Standard Error: 1_474 + .saturating_add(Weight::from_parts(43_019, 0).saturating_mul(l.into())) + // Standard Error: 2_723 + .saturating_add(Weight::from_parts(73_360, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// The range of component `l` is `[0, 49]`. /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `378 + l * (25 ±0) + s * (36 ±0)` // Estimated: `4764` - // Minimum execution time: 40_696_000 picoseconds. - Weight::from_parts(39_741_284, 0) + // Minimum execution time: 34_221_000 picoseconds. 
+ Weight::from_parts(33_201_125, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_751 + .saturating_add(Weight::from_parts(44_088, 0).saturating_mul(l.into())) + // Standard Error: 3_234 + .saturating_add(Weight::from_parts(86_228, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 35_553_000 picoseconds. + Weight::from_parts(34_974_083, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 478 - .saturating_add(Weight::from_parts(43_792, 0).saturating_mul(l.into())) - // Standard Error: 883 - .saturating_add(Weight::from_parts(66_540, 0).saturating_mul(s.into())) + // Standard Error: 1_560 + .saturating_add(Weight::from_parts(34_615, 0).saturating_mul(l.into())) + // Standard Error: 2_882 + .saturating_add(Weight::from_parts(83_419, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs index 7c307deec4c6f8480019b5559117cfc47ee84b40..ec67268d1449952be992bd321154dd676c2a8a4d 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_whitelist.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `pallet_whitelist` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-aahe6cbd-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_whitelist // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_whitelist -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,67 +52,75 @@ pub struct WeightInfo(PhantomData); impl pallet_whitelist::WeightInfo for WeightInfo { /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn whitelist_call() -> Weight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `3556` - // Minimum execution time: 20_035_000 picoseconds. - Weight::from_parts(20_452_000, 0) + // Minimum execution time: 16_686_000 picoseconds. + Weight::from_parts(17_042_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) fn remove_whitelisted_call() -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `3556` - // Minimum execution time: 20_247_000 picoseconds. - Weight::from_parts(20_808_000, 0) + // Minimum execution time: 18_250_000 picoseconds. 
+ Weight::from_parts(19_026_000, 0) .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) /// Storage: `Preimage::PreimageFor` (r:1 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 4194294]`. fn dispatch_whitelisted_call(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `428 + n * (1 ±0)` // Estimated: `3892 + n * (1 ±0)` - // Minimum execution time: 32_633_000 picoseconds. - Weight::from_parts(32_855_000, 0) + // Minimum execution time: 28_741_000 picoseconds. + Weight::from_parts(29_024_000, 0) .saturating_add(Weight::from_parts(0, 3892)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_223, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(3)) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_305, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `3556` - // Minimum execution time: 23_833_000 picoseconds. - Weight::from_parts(24_698_994, 0) + // Minimum execution time: 21_670_000 picoseconds. + Weight::from_parts(22_561_364, 0) .saturating_add(Weight::from_parts(0, 3556)) // Standard Error: 4 - .saturating_add(Weight::from_parts(1_454, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(Weight::from_parts(1_468, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 5544ca44658cc09fa51cd75679ba90132eb16444..d5cf33515e6be23e7cde0d892b97c8c1d07d8c2f 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -17,23 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! 
DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_xcm // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=rococo-dev // --header=./polkadot/file_header.txt // --output=./polkadot/runtime/rococo/src/weights/ @@ -60,8 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 25_043_000 picoseconds. - Weight::from_parts(25_682_000, 0) + // Minimum execution time: 25_521_000 picoseconds. + Weight::from_parts(25_922_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -80,8 +82,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 107_570_000 picoseconds. - Weight::from_parts(109_878_000, 0) + // Minimum execution time: 112_185_000 picoseconds. + Weight::from_parts(115_991_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -100,8 +102,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `232` // Estimated: `3697` - // Minimum execution time: 106_341_000 picoseconds. - Weight::from_parts(109_135_000, 0) + // Minimum execution time: 108_693_000 picoseconds. + Weight::from_parts(111_853_000, 0) .saturating_add(Weight::from_parts(0, 3697)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -120,8 +122,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 108_372_000 picoseconds. - Weight::from_parts(112_890_000, 0) + // Minimum execution time: 113_040_000 picoseconds. + Weight::from_parts(115_635_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -130,8 +132,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_957_000 picoseconds. - Weight::from_parts(7_417_000, 0) + // Minimum execution time: 6_979_000 picoseconds. + Weight::from_parts(7_342_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -140,8 +142,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_053_000 picoseconds. - Weight::from_parts(7_462_000, 0) + // Minimum execution time: 7_144_000 picoseconds. 
+ Weight::from_parts(7_297_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -149,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_037_000, 0) + // Minimum execution time: 1_886_000 picoseconds. + Weight::from_parts(1_995_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -171,8 +173,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 30_417_000 picoseconds. - Weight::from_parts(31_191_000, 0) + // Minimum execution time: 31_238_000 picoseconds. + Weight::from_parts(31_955_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -193,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `360` // Estimated: `3825` - // Minimum execution time: 36_666_000 picoseconds. - Weight::from_parts(37_779_000, 0) + // Minimum execution time: 37_237_000 picoseconds. + Weight::from_parts(38_569_000, 0) .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -205,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 1_884_000 picoseconds. + Weight::from_parts(2_028_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -216,8 +218,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_188_000 picoseconds. - Weight::from_parts(16_435_000, 0) + // Minimum execution time: 16_048_000 picoseconds. + Weight::from_parts(16_617_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -228,8 +230,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_431_000 picoseconds. - Weight::from_parts(16_935_000, 0) + // Minimum execution time: 16_073_000 picoseconds. + Weight::from_parts(16_672_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -240,8 +242,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 18_460_000 picoseconds. - Weight::from_parts(18_885_000, 0) + // Minimum execution time: 18_422_000 picoseconds. + Weight::from_parts(18_900_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -259,8 +261,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `6156` - // Minimum execution time: 29_623_000 picoseconds. - Weight::from_parts(30_661_000, 0) + // Minimum execution time: 30_373_000 picoseconds. 
+ Weight::from_parts(30_972_000, 0)
 .saturating_add(Weight::from_parts(0, 6156))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(3))
@@ -271,8 +273,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `69`
 // Estimated: `10959`
- // Minimum execution time: 12_043_000 picoseconds.
- Weight::from_parts(12_360_000, 0)
+ // Minimum execution time: 11_863_000 picoseconds.
+ Weight::from_parts(12_270_000, 0)
 .saturating_add(Weight::from_parts(0, 10959))
 .saturating_add(T::DbWeight::get().reads(4))
 }
@@ -282,8 +284,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `33`
 // Estimated: `13398`
- // Minimum execution time: 16_511_000 picoseconds.
- Weight::from_parts(17_011_000, 0)
+ // Minimum execution time: 16_733_000 picoseconds.
+ Weight::from_parts(17_094_000, 0)
 .saturating_add(Weight::from_parts(0, 13398))
 .saturating_add(T::DbWeight::get().reads(5))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -302,8 +304,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `216`
 // Estimated: `13581`
- // Minimum execution time: 39_041_000 picoseconds.
- Weight::from_parts(39_883_000, 0)
+ // Minimum execution time: 39_236_000 picoseconds.
+ Weight::from_parts(40_587_000, 0)
 .saturating_add(Weight::from_parts(0, 13581))
 .saturating_add(T::DbWeight::get().reads(9))
 .saturating_add(T::DbWeight::get().writes(4))
@@ -316,8 +318,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `0`
 // Estimated: `1485`
- // Minimum execution time: 2_030_000 picoseconds.
- Weight::from_parts(2_150_000, 0)
+ // Minimum execution time: 2_145_000 picoseconds.
+ Weight::from_parts(2_255_000, 0)
 .saturating_add(Weight::from_parts(0, 1485))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(2))
@@ -328,8 +330,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 // Proof Size summary in bytes:
 // Measured: `7576`
 // Estimated: `11041`
- // Minimum execution time: 22_615_000 picoseconds.
- Weight::from_parts(23_008_000, 0)
+ // Minimum execution time: 22_518_000 picoseconds.
+ Weight::from_parts(22_926_000, 0)
 .saturating_add(Weight::from_parts(0, 11041))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs
new file mode 100644
index 0000000000000000000000000000000000000000..dc5e5d8ca4b1ca7ba0cceb6fdd8ddedc0d7e3c28
--- /dev/null
+++ b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs
@@ -0,0 +1,191 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot
+// benchmark
+// pallet
+// --chain=rococo-dev
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_xcm_benchmarks::fungible
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_fungible.rs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_xcm_benchmarks::fungible`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_xcm_benchmarks::fungible::WeightInfo for WeightInfo<T> {
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ fn withdraw_asset() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `101`
+ // Estimated: `3593`
+ // Minimum execution time: 27_223_000 picoseconds.
+ Weight::from_parts(27_947_000, 0)
+ .saturating_add(Weight::from_parts(0, 3593))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `System::Account` (r:2 w:2)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ fn transfer_asset() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `101`
+ // Estimated: `6196`
+ // Minimum execution time: 36_502_000 picoseconds.
+ Weight::from_parts(37_023_000, 0)
+ .saturating_add(Weight::from_parts(0, 6196))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(2))
+ }
+ /// Storage: `System::Account` (r:2 w:2)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
+ /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
+ /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ fn transfer_reserve_asset() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `281`
+ // Estimated: `6196`
+ // Minimum execution time: 85_152_000 picoseconds.
+ Weight::from_parts(86_442_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_asset_deposited() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 56_571_000 picoseconds. + Weight::from_parts(58_163_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn receive_teleported_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 27_411_000 picoseconds. + Weight::from_parts(27_953_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn deposit_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 20_776_000 picoseconds. 
+ Weight::from_parts(21_145_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn deposit_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 51_738_000 picoseconds. + Weight::from_parts(53_251_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn initiate_teleport() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 39_333_000 picoseconds. + Weight::from_parts(40_515_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs new file mode 100644 index 0000000000000000000000000000000000000000..b62f36172baf545ac83e55137f11e6bd5e5ad74f --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs @@ -0,0 +1,347 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Autogenerated weights for `pallet_xcm_benchmarks::generic` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_xcm_benchmarks::generic +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/pallet_xcm_benchmarks_generic.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm_benchmarks::generic`. +pub struct WeightInfo(PhantomData); +impl pallet_xcm_benchmarks::generic::WeightInfo for WeightInfo { + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_holding() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 55_210_000 picoseconds. + Weight::from_parts(56_613_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn buy_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_246_000 picoseconds. + Weight::from_parts(1_339_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:0) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn query_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 5_377_000 picoseconds. + Weight::from_parts(5_549_000, 0) + .saturating_add(Weight::from_parts(0, 3465)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn transact() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_008_000 picoseconds. + Weight::from_parts(7_361_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn refund_surplus() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_700_000 picoseconds. 
+ Weight::from_parts(1_848_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_error_handler() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_198_000 picoseconds. + Weight::from_parts(1_265_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_appendix() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. + Weight::from_parts(1_267_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_193_000 picoseconds. + Weight::from_parts(1_258_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn descend_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_268_000 picoseconds. + Weight::from_parts(1_342_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_173_000 picoseconds. + Weight::from_parts(1_248_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 53_715_000 picoseconds. + Weight::from_parts(54_860_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 8_621_000 picoseconds. + Weight::from_parts(8_903_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn trap() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_211_000 picoseconds. 
+ Weight::from_parts(1_281_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn subscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 26_448_000 picoseconds. + Weight::from_parts(27_057_000, 0) + .saturating_add(Weight::from_parts(0, 3645)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:0 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn unsubscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_498_000 picoseconds. + Weight::from_parts(3_614_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn burn_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_575_000 picoseconds. + Weight::from_parts(1_698_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_334_000 picoseconds. + Weight::from_parts(1_435_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_244_000 picoseconds. + Weight::from_parts(1_337_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_244_000 picoseconds. + Weight::from_parts(1_331_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn expect_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_407_000 picoseconds. 
+ Weight::from_parts(1_522_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn query_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 62_963_000 picoseconds. + Weight::from_parts(64_556_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn expect_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_458_000 picoseconds. + Weight::from_parts(8_741_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn report_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 54_068_000 picoseconds. + Weight::from_parts(55_665_000, 0) + .saturating_add(Weight::from_parts(0, 3746)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + fn clear_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_290_000 picoseconds. + Weight::from_parts(1_348_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_189_000 picoseconds. + Weight::from_parts(1_268_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn clear_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. + Weight::from_parts(1_276_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn set_fees_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_197_000 picoseconds. 
+ Weight::from_parts(1_253_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn unpaid_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_250_000 picoseconds. + Weight::from_parts(1_354_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_assigned_slots.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_assigned_slots.rs index 2aaf282c59d5c82a48e9cb228412eb2e4b83f7a1..fd13c2ac9461b2898eb23db0c97a1b17209829a7 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_assigned_slots.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_assigned_slots.rs @@ -16,26 +16,28 @@ //! Autogenerated weights for `runtime_common::assigned_slots` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_common::assigned_slots // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=runtime_common::assigned_slots -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_assigned_slots.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -68,15 +70,15 @@ impl polkadot_runtime_common::assigned_slots::WeightInf /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn assign_perm_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `673` - // Estimated: `4138` - // Minimum execution time: 84_646_000 picoseconds. - Weight::from_parts(91_791_000, 0) - .saturating_add(Weight::from_parts(0, 4138)) + // Measured: `730` + // Estimated: `4195` + // Minimum execution time: 71_337_000 picoseconds. 
+ Weight::from_parts(80_807_000, 0) + .saturating_add(Weight::from_parts(0, 4195)) .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: `Registrar::Paras` (r:1 w:1) + /// Storage: `Registrar::Paras` (r:1 w:0) /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:1) /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -98,13 +100,13 @@ impl polkadot_runtime_common::assigned_slots::WeightInf /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn assign_temp_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `673` - // Estimated: `4138` - // Minimum execution time: 68_091_000 picoseconds. - Weight::from_parts(77_310_000, 0) - .saturating_add(Weight::from_parts(0, 4138)) + // Measured: `730` + // Estimated: `4195` + // Minimum execution time: 60_188_000 picoseconds. + Weight::from_parts(63_932_000, 0) + .saturating_add(Weight::from_parts(0, 4195)) .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)) + .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) @@ -118,11 +120,11 @@ impl polkadot_runtime_common::assigned_slots::WeightInf /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unassign_parachain_slot() -> Weight { // Proof Size summary in bytes: - // Measured: `823` - // Estimated: `4288` - // Minimum execution time: 38_081_000 picoseconds. - Weight::from_parts(40_987_000, 0) - .saturating_add(Weight::from_parts(0, 4288)) + // Measured: `856` + // Estimated: `4321` + // Minimum execution time: 35_764_000 picoseconds. + Weight::from_parts(38_355_000, 0) + .saturating_add(Weight::from_parts(0, 4321)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -132,8 +134,8 @@ impl polkadot_runtime_common::assigned_slots::WeightInf // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_182_000 picoseconds. - Weight::from_parts(7_437_000, 0) + // Minimum execution time: 4_634_000 picoseconds. + Weight::from_parts(4_852_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -143,8 +145,8 @@ impl polkadot_runtime_common::assigned_slots::WeightInf // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_153_000 picoseconds. - Weight::from_parts(7_456_000, 0) + // Minimum execution time: 4_563_000 picoseconds. + Weight::from_parts(4_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_auctions.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_auctions.rs index 897dc1c1752a5697f903b53a394237736fcc4c0c..acf2da8cab969c379e5e51b464740815a684099b 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_auctions.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_auctions.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::auctions` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::auctions // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_auctions.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_auctions.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,84 +58,82 @@ impl polkadot_runtime_common::auctions::WeightInfo for // Proof Size summary in bytes: // Measured: `4` // Estimated: `1493` - // Minimum execution time: 12_805_000 picoseconds. - Weight::from_parts(13_153_000, 0) + // Minimum execution time: 7_307_000 picoseconds. + Weight::from_parts(7_680_000, 0) .saturating_add(Weight::from_parts(0, 1493)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Paras ParaLifecycles (r:1 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions Winning (r:1 w:1) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:2 w:2) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::Winning` (r:1 w:1) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: 
`Auctions::ReservedAmounts` (r:2 w:2) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn bid() -> Weight { // Proof Size summary in bytes: - // Measured: `728` + // Measured: `761` // Estimated: `6060` - // Minimum execution time: 77_380_000 picoseconds. - Weight::from_parts(80_503_000, 0) + // Minimum execution time: 75_448_000 picoseconds. + Weight::from_parts(78_716_000, 0) .saturating_add(Weight::from_parts(0, 6060)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Auctions AuctionInfo (r:1 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Babe NextRandomness (r:1 w:0) - /// Proof: Babe NextRandomness (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) - /// Storage: Babe EpochStart (r:1 w:0) - /// Proof: Babe EpochStart (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Auctions Winning (r:3600 w:3600) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:37 w:36) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:36 w:36) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Slots Leases (r:2 w:2) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Auctions::AuctionInfo` (r:1 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::NextRandomness` (r:1 w:0) + /// Proof: `Babe::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Babe::EpochStart` (r:1 w:0) + /// Proof: `Babe::EpochStart` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// 
Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:2 w:2) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn on_initialize() -> Weight { // Proof Size summary in bytes: - // Measured: `6947789` + // Measured: `6947017` // Estimated: `15822990` - // Minimum execution time: 6_311_055_000 picoseconds. - Weight::from_parts(6_409_142_000, 0) + // Minimum execution time: 7_120_207_000 picoseconds. + Weight::from_parts(7_273_496_000, 0) .saturating_add(Weight::from_parts(0, 15822990)) - .saturating_add(T::DbWeight::get().reads(3683)) - .saturating_add(T::DbWeight::get().writes(3678)) + .saturating_add(T::DbWeight::get().reads(3682)) + .saturating_add(T::DbWeight::get().writes(3677)) } - /// Storage: Auctions ReservedAmounts (r:37 w:36) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:36 w:36) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Auctions Winning (r:3600 w:3600) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions AuctionInfo (r:0 w:1) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:0 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) fn cancel_auction() -> Weight { // Proof Size summary in bytes: // Measured: `177732` // Estimated: `15822990` - // Minimum execution time: 4_849_561_000 picoseconds. - Weight::from_parts(4_955_226_000, 0) + // Minimum execution time: 5_536_281_000 picoseconds. + Weight::from_parts(5_675_163_000, 0) .saturating_add(Weight::from_parts(0, 15822990)) .saturating_add(T::DbWeight::get().reads(3673)) .saturating_add(T::DbWeight::get().writes(3673)) diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_claims.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_claims.rs index 8fbc798dbd46156c93e606b1ca669b2db706c613..3871310678ef8a81bb0eb62b735a49f88768cd0f 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_claims.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_claims.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::claims` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::claims // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_claims.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_claims.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -67,100 +70,113 @@ impl polkadot_runtime_common::claims::WeightInfo for We // Proof Size summary in bytes: // Measured: `558` // Estimated: `4764` - // Minimum execution time: 144_931_000 picoseconds. - Weight::from_parts(156_550_000, 0) + // Minimum execution time: 181_028_000 picoseconds. + Weight::from_parts(194_590_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:0 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Claims (r:0 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:0 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:0 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Claims` (r:0 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:0 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) fn mint_claim() -> Weight { // Proof Size summary in bytes: // Measured: `216` // Estimated: `1701` - // Minimum execution time: 11_300_000 picoseconds. - Weight::from_parts(11_642_000, 0) + // Minimum execution time: 11_224_000 picoseconds. 
+ Weight::from_parts(13_342_000, 0) .saturating_add(Weight::from_parts(0, 1701)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn claim_attest() -> Weight { // Proof Size summary in bytes: // Measured: `558` // Estimated: `4764` - // Minimum execution time: 149_112_000 picoseconds. - Weight::from_parts(153_872_000, 0) + // Minimum execution time: 187_964_000 picoseconds. 
+ Weight::from_parts(202_553_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Claims Preclaims (r:1 w:1) - /// Proof Skipped: Claims Preclaims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:1) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Claims (r:1 w:1) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Total (r:1 w:1) - /// Proof Skipped: Claims Total (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:1) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Vesting Vesting (r:1 w:1) - /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:0) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances Locks (r:1 w:1) - /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) - /// Storage: Balances Freezes (r:1 w:0) - /// Proof: Balances Freezes (max_values: None, max_size: Some(65), added: 2540, mode: MaxEncodedLen) + /// Storage: `Claims::Preclaims` (r:1 w:1) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:1) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Claims` (r:1 w:1) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Total` (r:1 w:1) + /// Proof: `Claims::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:1) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) fn attest() -> Weight { // Proof Size summary in bytes: // Measured: `632` // Estimated: `4764` - // Minimum execution time: 69_619_000 picoseconds. - Weight::from_parts(79_242_000, 0) + // Minimum execution time: 78_210_000 picoseconds. 
+ Weight::from_parts(84_581_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Claims Claims (r:1 w:2) - /// Proof Skipped: Claims Claims (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Vesting (r:1 w:2) - /// Proof Skipped: Claims Vesting (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Signing (r:1 w:2) - /// Proof Skipped: Claims Signing (max_values: None, max_size: None, mode: Measured) - /// Storage: Claims Preclaims (r:1 w:1) - /// Proof Skipped: Claims Preclaims (max_values: None, max_size: None, mode: Measured) + /// Storage: `Claims::Claims` (r:1 w:2) + /// Proof: `Claims::Claims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Vesting` (r:1 w:2) + /// Proof: `Claims::Vesting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:2) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Preclaims` (r:1 w:1) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) fn move_claim() -> Weight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `3905` - // Minimum execution time: 22_066_000 picoseconds. - Weight::from_parts(22_483_000, 0) + // Minimum execution time: 33_940_000 picoseconds. + Weight::from_parts(48_438_000, 0) .saturating_add(Weight::from_parts(0, 3905)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(7)) } + /// Storage: `Claims::Preclaims` (r:1 w:0) + /// Proof: `Claims::Preclaims` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Claims::Signing` (r:1 w:0) + /// Proof: `Claims::Signing` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn prevalidate_attests() -> Weight { + // Proof Size summary in bytes: + // Measured: `296` + // Estimated: `3761` + // Minimum execution time: 9_025_000 picoseconds. + Weight::from_parts(10_563_000, 0) + .saturating_add(Weight::from_parts(0, 3761)) + .saturating_add(T::DbWeight::get().reads(2)) + } } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_crowdloan.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_crowdloan.rs index b75ff8d42e7e0949f89293fff1e7dd22f0cf8e1e..2a01de67acc5530397dd85cba784e07ddfeeda0d 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_crowdloan.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_crowdloan.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::crowdloan` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::crowdloan // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_crowdloan.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_crowdloan.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -61,158 +64,154 @@ impl polkadot_runtime_common::crowdloan::WeightInfo for // Proof Size summary in bytes: // Measured: `438` // Estimated: `3903` - // Minimum execution time: 50_399_000 picoseconds. - Weight::from_parts(51_641_000, 0) + // Minimum execution time: 46_095_000 picoseconds. + Weight::from_parts(48_111_000, 0) .saturating_add(Weight::from_parts(0, 3903)) .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: System Account (r:1 w:1) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: Crowdloan EndingsCount (r:1 w:0) - /// Proof Skipped: Crowdloan EndingsCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) - /// Proof Skipped: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:0) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) fn contribute() -> Weight { // Proof Size summary in bytes: - // Measured: `530` - // Estimated: 
`3995` - // Minimum execution time: 128_898_000 picoseconds. - Weight::from_parts(130_277_000, 0) - .saturating_add(Weight::from_parts(0, 3995)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `563` + // Estimated: `4028` + // Minimum execution time: 133_059_000 picoseconds. + Weight::from_parts(136_515_000, 0) + .saturating_add(Weight::from_parts(0, 4028)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// Storage: Balances InactiveIssuance (r:1 w:1) - /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) - /// Storage: unknown `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) - /// Proof Skipped: unknown `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) fn withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `689` + // Measured: `687` // Estimated: `6196` - // Minimum execution time: 69_543_000 picoseconds. - Weight::from_parts(71_522_000, 0) + // Minimum execution time: 71_733_000 picoseconds. + Weight::from_parts(74_034_000, 0) .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Skipped Metadata (r:0 w:0) - /// Proof Skipped: Skipped Metadata (max_values: None, max_size: None, mode: Measured) + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `k` is `[0, 1000]`. fn refund(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `127 + k * (189 ±0)` - // Estimated: `140 + k * (189 ±0)` - // Minimum execution time: 50_735_000 picoseconds. - Weight::from_parts(52_282_000, 0) - .saturating_add(Weight::from_parts(0, 140)) - // Standard Error: 21_607 - .saturating_add(Weight::from_parts(38_955_985, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `125 + k * (189 ±0)` + // Estimated: `138 + k * (189 ±0)` + // Minimum execution time: 46_016_000 picoseconds. 
+ Weight::from_parts(48_260_000, 0) + .saturating_add(Weight::from_parts(0, 138)) + // Standard Error: 21_140 + .saturating_add(Weight::from_parts(39_141_925, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(k.into()))) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 189).saturating_mul(k.into())) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:2 w:2) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn dissolve() -> Weight { // Proof Size summary in bytes: - // Measured: `515` + // Measured: `514` // Estimated: `6196` - // Minimum execution time: 43_100_000 picoseconds. - Weight::from_parts(44_272_000, 0) + // Minimum execution time: 44_724_000 picoseconds. + Weight::from_parts(47_931_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Crowdloan Funds (r:1 w:1) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) fn edit() -> Weight { // Proof Size summary in bytes: - // Measured: `235` - // Estimated: `3700` - // Minimum execution time: 18_702_000 picoseconds. - Weight::from_parts(19_408_000, 0) - .saturating_add(Weight::from_parts(0, 3700)) + // Measured: `234` + // Estimated: `3699` + // Minimum execution time: 19_512_000 picoseconds. + Weight::from_parts(21_129_000, 0) + .saturating_add(Weight::from_parts(0, 3699)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Crowdloan Funds (r:1 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) - /// Proof Skipped: unknown `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) fn add_memo() -> Weight { // Proof Size summary in bytes: // Measured: `412` // Estimated: `3877` - // Minimum execution time: 25_568_000 picoseconds. - Weight::from_parts(26_203_000, 0) + // Minimum execution time: 33_529_000 picoseconds. 
+ Weight::from_parts(37_082_000, 0) .saturating_add(Weight::from_parts(0, 3877)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Crowdloan Funds (r:1 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn poke() -> Weight { // Proof Size summary in bytes: - // Measured: `239` - // Estimated: `3704` - // Minimum execution time: 17_832_000 picoseconds. - Weight::from_parts(18_769_000, 0) - .saturating_add(Weight::from_parts(0, 3704)) + // Measured: `238` + // Estimated: `3703` + // Minimum execution time: 23_153_000 picoseconds. + Weight::from_parts(24_181_000, 0) + .saturating_add(Weight::from_parts(0, 3703)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Auctions AuctionInfo (r:1 w:0) - /// Proof: Auctions AuctionInfo (max_values: Some(1), max_size: Some(8), added: 503, mode: MaxEncodedLen) - /// Storage: Crowdloan EndingsCount (r:1 w:1) - /// Proof Skipped: Crowdloan EndingsCount (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan NewRaise (r:1 w:1) - /// Proof Skipped: Crowdloan NewRaise (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Crowdloan Funds (r:100 w:0) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions AuctionCounter (r:1 w:0) - /// Proof: Auctions AuctionCounter (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: Paras ParaLifecycles (r:100 w:0) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:100 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Auctions Winning (r:1 w:1) - /// Proof: Auctions Winning (max_values: None, max_size: Some(1920), added: 4395, mode: MaxEncodedLen) - /// Storage: Auctions ReservedAmounts (r:100 w:100) - /// Proof: Auctions ReservedAmounts (max_values: None, max_size: Some(60), added: 2535, mode: MaxEncodedLen) - /// Storage: System Account (r:100 w:100) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:1) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::Funds` (r:100 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Paras::ParaLifecycles` (r:100 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, 
`max_size`: None, mode: `Measured`)
+ /// Storage: `Slots::Leases` (r:100 w:0)
+ /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Auctions::Winning` (r:1 w:1)
+ /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`)
+ /// Storage: `Auctions::ReservedAmounts` (r:100 w:100)
+ /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`)
+ /// Storage: `System::Account` (r:100 w:100)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
/// The range of component `n` is `[2, 100]`.
fn on_initialize(n: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `197 + n * (356 ±0)`
+ // Measured: `229 + n * (356 ±0)`
// Estimated: `5385 + n * (2832 ±0)`
- // Minimum execution time: 128_319_000 picoseconds.
- Weight::from_parts(130_877_000, 0)
+ // Minimum execution time: 120_164_000 picoseconds.
+ Weight::from_parts(3_390_119, 0)
.saturating_add(Weight::from_parts(0, 5385))
- // Standard Error: 61_381
- .saturating_add(Weight::from_parts(60_209_202, 0).saturating_mul(n.into()))
+ // Standard Error: 41_727
+ .saturating_add(Weight::from_parts(54_453_016, 0).saturating_mul(n.into()))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(n.into())))
.saturating_add(T::DbWeight::get().writes(3))
diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_identity_migrator.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_identity_migrator.rs
index 4ea6f67968017e312562842c5f2400754ac83b95..3df3c6c8dd92b71ea1e90dd88eeb4ac0e62a87b2 100644
--- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_identity_migrator.rs
+++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_identity_migrator.rs
@@ -1,36 +1,43 @@
// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Polkadot.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Autogenerated weights for `runtime_common::identity_migrator`
//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
// Executed Command:
-// ./target/release/polkadot
+// ./target/production/polkadot
// benchmark
// pallet
// --chain=rococo-dev
-// --steps=2
-// --repeat=1
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
// --pallet=runtime_common::identity_migrator
// --extrinsic=*
-// --output=./migrator-release.rs
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -44,7 +51,7 @@ use core::marker::PhantomData;
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> polkadot_runtime_common::identity_migrator::WeightInfo for WeightInfo<T> {
/// Storage: `Identity::IdentityOf` (r:1 w:1)
- /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`)
+ /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
/// Storage: `Identity::SubsOf` (r:1 w:1)
/// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`)
/// Storage: `System::Account` (r:2 w:2)
@@ -63,34 +70,34 @@ impl polkadot_runtime_common::identity_migrator::Weight
/// The range of component `s` is `[0, 100]`.
fn reap_identity(r: u32, s: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `7292 + r * (8 ±0) + s * (32 ±0)`
- // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)`
- // Minimum execution time: 163_756_000 picoseconds.
- Weight::from_parts(158_982_500, 0)
- .saturating_add(Weight::from_parts(0, 11003))
- // Standard Error: 1_143_629
- .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into()))
- // Standard Error: 228_725
- .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into()))
+ // Measured: `7457 + r * (5 ±0) + s * (32 ±0)`
+ // Estimated: `11037 + r * (7 ±0) + s * (32 ±0)`
+ // Minimum execution time: 157_343_000 picoseconds.
+ Weight::from_parts(159_289_236, 0) + .saturating_add(Weight::from_parts(0, 11037)) + // Standard Error: 16_439 + .saturating_add(Weight::from_parts(224_293, 0).saturating_mul(r.into())) + // Standard Error: 3_367 + .saturating_add(Weight::from_parts(1_383_637, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(6)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) - .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(s.into())) } /// Storage: `Identity::IdentityOf` (r:1 w:1) - /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Identity::SubsOf` (r:1 w:1) /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) fn poke_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `7229` - // Estimated: `11003` - // Minimum execution time: 137_570_000 picoseconds. - Weight::from_parts(137_570_000, 0) - .saturating_add(Weight::from_parts(0, 11003)) + // Measured: `7242` + // Estimated: `11037` + // Minimum execution time: 114_384_000 picoseconds. + Weight::from_parts(115_741_000, 0) + .saturating_add(Weight::from_parts(0, 11037)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_paras_registrar.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_paras_registrar.rs index 0ce09d1be2a46afebc421526bad324acef4bdeb6..ad261a7f7747b2fc1ae786a97dd53182114cc620 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_paras_registrar.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_paras_registrar.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::paras_registrar` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::paras_registrar // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_paras_registrar.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_paras_registrar.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,167 +58,161 @@ impl polkadot_runtime_common::paras_registrar::WeightIn /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) fn reserve() -> Weight { // Proof Size summary in bytes: - // Measured: `97` - // Estimated: `3562` - // Minimum execution time: 29_948_000 picoseconds. - Weight::from_parts(30_433_000, 0) - .saturating_add(Weight::from_parts(0, 3562)) + // Measured: `96` + // Estimated: `3561` + // Minimum execution time: 24_109_000 picoseconds. + Weight::from_parts(24_922_000, 0) + .saturating_add(Weight::from_parts(0, 3561)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:0 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpcomingParasGenesis (r:0 w:1) - /// Proof Skipped: Paras UpcomingParasGenesis (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 
w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) fn register() -> Weight { // Proof Size summary in bytes: - // Measured: `616` - // Estimated: `4081` - // Minimum execution time: 6_332_113_000 picoseconds. - Weight::from_parts(6_407_158_000, 0) - .saturating_add(Weight::from_parts(0, 4081)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `352` + // Estimated: `3817` + // Minimum execution time: 7_207_580_000 picoseconds. + Weight::from_parts(7_298_567_000, 0) + .saturating_add(Weight::from_parts(0, 3817)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:0 w:1) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpcomingParasGenesis (r:0 w:1) - /// Proof Skipped: Paras UpcomingParasGenesis (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, 
`max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_register() -> Weight { // Proof Size summary in bytes: - // Measured: `533` - // Estimated: `3998` - // Minimum execution time: 6_245_403_000 picoseconds. - Weight::from_parts(6_289_575_000, 0) - .saturating_add(Weight::from_parts(0, 3998)) - .saturating_add(T::DbWeight::get().reads(8)) + // Measured: `269` + // Estimated: `3734` + // Minimum execution time: 7_196_460_000 picoseconds. + Weight::from_parts(7_385_729_000, 0) + .saturating_add(Weight::from_parts(0, 3734)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:0) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: Registrar PendingSwap (r:0 w:1) - /// Proof Skipped: Registrar PendingSwap (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `Registrar::PendingSwap` (r:0 w:1) + /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn deregister() -> Weight { // Proof Size summary in bytes: - // Measured: `476` - // Estimated: `3941` - // Minimum execution time: 49_822_000 picoseconds. - Weight::from_parts(50_604_000, 0) - .saturating_add(Weight::from_parts(0, 3941)) + // Measured: `499` + // Estimated: `3964` + // Minimum execution time: 54_761_000 picoseconds. 
+ Weight::from_parts(57_931_000, 0) + .saturating_add(Weight::from_parts(0, 3964)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: Registrar Paras (r:1 w:0) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:2 w:2) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar PendingSwap (r:1 w:1) - /// Proof Skipped: Registrar PendingSwap (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Crowdloan Funds (r:2 w:2) - /// Proof Skipped: Crowdloan Funds (max_values: None, max_size: None, mode: Measured) - /// Storage: Slots Leases (r:2 w:2) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:2 w:2) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::PendingSwap` (r:1 w:1) + /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::Funds` (r:2 w:2) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:2 w:2) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) fn swap() -> Weight { // Proof Size summary in bytes: - // Measured: `780` - // Estimated: `6720` - // Minimum execution time: 55_166_000 picoseconds. - Weight::from_parts(56_913_000, 0) - .saturating_add(Weight::from_parts(0, 6720)) + // Measured: `837` + // Estimated: `6777` + // Minimum execution time: 59_564_000 picoseconds. 
+ Weight::from_parts(62_910_000, 0) + .saturating_add(Weight::from_parts(0, 6777)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(8)) } - /// Storage: Paras FutureCodeHash (r:1 w:1) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:1 w:1) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeCooldowns (r:1 w:1) - /// Proof Skipped: Paras UpgradeCooldowns (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// The range of component `b` is `[1, 3145728]`. + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[9, 3145728]`. fn schedule_code_upgrade(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `464` - // Estimated: `3929` - // Minimum execution time: 43_650_000 picoseconds. 
- Weight::from_parts(43_918_000, 0) - .saturating_add(Weight::from_parts(0, 3929)) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_041, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(10)) + // Measured: `201` + // Estimated: `3666` + // Minimum execution time: 33_106_000 picoseconds. + Weight::from_parts(33_526_000, 0) + .saturating_add(Weight::from_parts(0, 3666)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `b` is `[1, 1048576]`. fn set_current_head(b: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_666_000 picoseconds. - Weight::from_parts(8_893_000, 0) + // Minimum execution time: 5_992_000 picoseconds. + Weight::from_parts(12_059_689, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(855, 0).saturating_mul(b.into())) + // Standard Error: 0 + .saturating_add(Weight::from_parts(959, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_slots.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_slots.rs index 8c601aa8486f86ad702c94dfa6ef69ba46407dbf..b99ee1f9a0d30cd98c414cdf40b1a5e89142b9e5 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_slots.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_common_slots.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_common::slots` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_common::slots // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_common_slots.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_slots.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -53,80 +56,76 @@ impl polkadot_runtime_common::slots::WeightInfo for Wei /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) fn force_lease() -> Weight { // Proof Size summary in bytes: - // Measured: `287` - // Estimated: `3752` - // Minimum execution time: 29_932_000 picoseconds. 
- Weight::from_parts(30_334_000, 0) - .saturating_add(Weight::from_parts(0, 3752)) + // Measured: `320` + // Estimated: `3785` + // Minimum execution time: 26_570_000 picoseconds. + Weight::from_parts(27_619_000, 0) + .saturating_add(Weight::from_parts(0, 3785)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Paras Parachains (r:1 w:0) - /// Proof Skipped: Paras Parachains (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Slots Leases (r:101 w:100) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:200 w:200) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:100 w:100) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Parachains` (r:1 w:0) + /// Proof: `Paras::Parachains` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:101 w:100) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:200 w:200) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[0, 100]`. /// The range of component `t` is `[0, 100]`. fn manage_lease_period_start(c: u32, t: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `26 + c * (47 ±0) + t * (308 ±0)` - // Estimated: `2800 + c * (2526 ±0) + t * (2789 ±0)` - // Minimum execution time: 634_547_000 picoseconds. - Weight::from_parts(643_045_000, 0) - .saturating_add(Weight::from_parts(0, 2800)) - // Standard Error: 81_521 - .saturating_add(Weight::from_parts(2_705_219, 0).saturating_mul(c.into())) - // Standard Error: 81_521 - .saturating_add(Weight::from_parts(11_464_132, 0).saturating_mul(t.into())) + // Measured: `594 + c * (20 ±0) + t * (234 ±0)` + // Estimated: `4065 + c * (2496 ±0) + t * (2709 ±0)` + // Minimum execution time: 729_793_000 picoseconds. 
+ Weight::from_parts(740_820_000, 0) + .saturating_add(Weight::from_parts(0, 4065)) + // Standard Error: 88_206 + .saturating_add(Weight::from_parts(2_793_142, 0).saturating_mul(c.into())) + // Standard Error: 88_206 + .saturating_add(Weight::from_parts(8_933_065, 0).saturating_mul(t.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(t.into()))) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(t.into()))) - .saturating_add(Weight::from_parts(0, 2526).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2789).saturating_mul(t.into())) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 2496).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2709).saturating_mul(t.into())) } - /// Storage: Slots Leases (r:1 w:1) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: System Account (r:8 w:8) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:8 w:8) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn clear_all_leases() -> Weight { // Proof Size summary in bytes: - // Measured: `2759` + // Measured: `2792` // Estimated: `21814` - // Minimum execution time: 129_756_000 picoseconds. - Weight::from_parts(131_810_000, 0) + // Minimum execution time: 123_888_000 picoseconds. 
+ Weight::from_parts(131_245_000, 0) .saturating_add(Weight::from_parts(0, 21814)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(9)) } - /// Storage: Slots Leases (r:1 w:0) - /// Proof Skipped: Slots Leases (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras ParaLifecycles (r:1 w:1) - /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) - /// Storage: Registrar Paras (r:1 w:1) - /// Proof Skipped: Registrar Paras (max_values: None, max_size: None, mode: Measured) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn trigger_onboard() -> Weight { // Proof Size summary in bytes: - // Measured: `707` - // Estimated: `4172` - // Minimum execution time: 29_527_000 picoseconds. - Weight::from_parts(30_055_000, 0) - .saturating_add(Weight::from_parts(0, 4172)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `612` + // Estimated: `4077` + // Minimum execution time: 27_341_000 picoseconds. + Weight::from_parts(28_697_000, 0) + .saturating_add(Weight::from_parts(0, 4077)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs index 5592a85c90fab339f57ca8d52f2e8e5330aa1813..3ca49aaa1651b23ca37ed90fdb4d754300e20cd7 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_configuration.rs @@ -17,25 +17,27 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_parachains::configuration // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=runtime_parachains::configuration -// --chain=rococo-dev // --header=./polkadot/file_header.txt -// --output=./polkadot/runtime/rococo/src/weights/ +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,8 +60,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_789_000 picoseconds. - Weight::from_parts(8_269_000, 0) + // Minimum execution time: 7_689_000 picoseconds. + Weight::from_parts(8_089_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -74,8 +76,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_851_000 picoseconds. - Weight::from_parts(8_152_000, 0) + // Minimum execution time: 7_735_000 picoseconds. + Weight::from_parts(8_150_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -90,8 +92,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_960_000 picoseconds. - Weight::from_parts(8_276_000, 0) + // Minimum execution time: 7_902_000 picoseconds. + Weight::from_parts(8_196_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -116,8 +118,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_912_000 picoseconds. - Weight::from_parts(8_164_000, 0) + // Minimum execution time: 7_634_000 picoseconds. + Weight::from_parts(7_983_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -132,8 +134,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 9_782_000 picoseconds. - Weight::from_parts(10_373_000, 0) + // Minimum execution time: 9_580_000 picoseconds. + Weight::from_parts(9_989_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -148,8 +150,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_870_000 picoseconds. - Weight::from_parts(8_274_000, 0) + // Minimum execution time: 7_787_000 picoseconds. 
+ Weight::from_parts(8_008_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -164,8 +166,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 9_960_000 picoseconds. - Weight::from_parts(10_514_000, 0) + // Minimum execution time: 9_557_000 picoseconds. + Weight::from_parts(9_994_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +182,8 @@ impl polkadot_runtime_parachains::configuration::Weight // Proof Size summary in bytes: // Measured: `151` // Estimated: `1636` - // Minimum execution time: 7_913_000 picoseconds. - Weight::from_parts(8_338_000, 0) + // Minimum execution time: 7_775_000 picoseconds. + Weight::from_parts(7_989_000, 0) .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_disputes.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_disputes.rs index a20515502b1910ccb8eaf6477d3c51e00db9064e..6f86d6a125996ab5fa6473af24f93a238b9e699e 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_disputes.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_disputes.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::disputes` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::disputes // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_disputes.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_disputes.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -53,8 +56,8 @@ impl polkadot_runtime_parachains::disputes::WeightInfo // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_937_000 picoseconds. - Weight::from_parts(3_082_000, 0) + // Minimum execution time: 1_855_000 picoseconds. 
+ Weight::from_parts(2_015_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_initializer.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_initializer.rs index 6065c32b17411fe60ba4797fc9d34f224867abdd..b915c4ec0f362f3089157a7147cb1e78107993ae 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_initializer.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_initializer.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::initializer` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::initializer // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_initializer.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_initializer.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,11 +57,11 @@ impl polkadot_runtime_parachains::initializer::WeightIn // Proof Size summary in bytes: // Measured: `0 + d * (11 ±0)` // Estimated: `1480 + d * (11 ±0)` - // Minimum execution time: 3_771_000 picoseconds. - Weight::from_parts(6_491_437, 0) + // Minimum execution time: 2_634_000 picoseconds. + Weight::from_parts(2_728_000, 0) .saturating_add(Weight::from_parts(0, 1480)) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_356, 0).saturating_mul(d.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(2_499, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(Weight::from_parts(0, 11).saturating_mul(d.into())) diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs index 0c36eeaf7d4506c0d4d6045b4720faf8ae4ae437..1dd62d129f9a0c5e06a67e5b949ab21decc0d260 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs @@ -23,12 +23,18 @@ //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_parachains::assigner_on_demand // --extrinsic=* +// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras.rs index 2dcabb7c36bbdf994e2460b320566efca709facc..c463552b6ad4f51514f7f68d10ea47400a3e1081 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::paras` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // ./target/production/polkadot @@ -29,12 +29,15 @@ // --chain=rococo-dev // --steps=50 // --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares // --pallet=runtime_parachains::paras // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_paras.rs +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_parachains_paras.rs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,230 +67,231 @@ impl polkadot_runtime_parachains::paras::WeightInfo for // Proof Size summary in bytes: // Measured: `8309` // Estimated: `11774` - // Minimum execution time: 31_941_000 picoseconds. - Weight::from_parts(32_139_000, 0) + // Minimum execution time: 27_488_000 picoseconds. + Weight::from_parts(27_810_000, 0) .saturating_add(Weight::from_parts(0, 11774)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(2_011, 0).saturating_mul(c.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(2_189, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1048576]`. fn force_set_current_head(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_275_000 picoseconds. - Weight::from_parts(8_321_000, 0) + // Minimum execution time: 5_793_000 picoseconds. 
+ Weight::from_parts(7_987_606, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(858, 0).saturating_mul(s.into())) + // Standard Error: 1 + .saturating_add(Weight::from_parts(971, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: Paras Heads (r:0 w:1) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_set_most_recent_context() -> Weight { - Weight::from_parts(10_155_000, 0) - // Standard Error: 0 - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_733_000 picoseconds. + Weight::from_parts(2_954_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras FutureCodeHash (r:1 w:1) - /// Proof Skipped: Paras FutureCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CurrentCodeHash (r:1 w:0) - /// Proof Skipped: Paras CurrentCodeHash (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeCooldowns (r:1 w:1) - /// Proof Skipped: Paras UpgradeCooldowns (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:1 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras CodeByHashRefs (r:1 w:1) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeRestrictionSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeRestrictionSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn force_schedule_code_upgrade(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `8715` - // Estimated: `12180` - // Minimum execution time: 49_923_000 picoseconds. - Weight::from_parts(50_688_000, 0) - .saturating_add(Weight::from_parts(0, 12180)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_976, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `8452` + // Estimated: `11917` + // Minimum execution time: 6_072_000 picoseconds. + Weight::from_parts(6_128_000, 0) + .saturating_add(Weight::from_parts(0, 11917)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) } - /// Storage: Paras FutureCodeUpgrades (r:1 w:0) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras Heads (r:0 w:1) - /// Proof Skipped: Paras Heads (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras UpgradeGoAheadSignal (r:0 w:1) - /// Proof Skipped: Paras UpgradeGoAheadSignal (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 1048576]`. fn force_note_new_head(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `3560` - // Minimum execution time: 14_408_000 picoseconds. - Weight::from_parts(14_647_000, 0) - .saturating_add(Weight::from_parts(0, 3560)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(858, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `163` + // Estimated: `3628` + // Minimum execution time: 15_166_000 picoseconds. 
+ Weight::from_parts(21_398_053, 0) + .saturating_add(Weight::from_parts(0, 3628)) + // Standard Error: 1 + .saturating_add(Weight::from_parts(976, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_queue_action() -> Weight { // Proof Size summary in bytes: - // Measured: `4288` - // Estimated: `7753` - // Minimum execution time: 20_009_000 picoseconds. - Weight::from_parts(20_518_000, 0) - .saturating_add(Weight::from_parts(0, 7753)) + // Measured: `4312` + // Estimated: `7777` + // Minimum execution time: 16_345_000 picoseconds. + Weight::from_parts(16_712_000, 0) + .saturating_add(Weight::from_parts(0, 7777)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `c` is `[1, 3145728]`. fn add_trusted_validation_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `946` - // Estimated: `4411` - // Minimum execution time: 80_626_000 picoseconds. - Weight::from_parts(52_721_755, 0) - .saturating_add(Weight::from_parts(0, 4411)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(1_443, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `683` + // Estimated: `4148` + // Minimum execution time: 78_076_000 picoseconds. 
+ Weight::from_parts(123_193_814, 0) + .saturating_add(Weight::from_parts(0, 4148)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_770, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: Paras CodeByHashRefs (r:1 w:0) - /// Proof Skipped: Paras CodeByHashRefs (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras CodeByHash (r:0 w:1) - /// Proof Skipped: Paras CodeByHash (max_values: None, max_size: None, mode: Measured) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:0) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:0 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) fn poke_unused_validation_code() -> Weight { // Proof Size summary in bytes: // Measured: `28` // Estimated: `3493` - // Minimum execution time: 6_692_000 picoseconds. - Weight::from_parts(7_009_000, 0) + // Minimum execution time: 5_184_000 picoseconds. + Weight::from_parts(5_430_000, 0) .saturating_add(Weight::from_parts(0, 3493)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement() -> Weight { // Proof Size summary in bytes: - // Measured: `26682` - // Estimated: `30147` - // Minimum execution time: 87_994_000 picoseconds. - Weight::from_parts(89_933_000, 0) - .saturating_add(Weight::from_parts(0, 30147)) + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 102_995_000 picoseconds. 
+ Weight::from_parts(108_977_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras UpcomingUpgrades (r:1 w:1) - /// Proof Skipped: Paras UpcomingUpgrades (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: System Digest (r:1 w:1) - /// Proof Skipped: System Digest (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras FutureCodeUpgrades (r:0 w:100) - /// Proof Skipped: Paras FutureCodeUpgrades (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingUpgrades` (r:1 w:1) + /// Proof: `Paras::UpcomingUpgrades` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:0 w:100) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_upgrade_accept() -> Weight { // Proof Size summary in bytes: - // Measured: `27523` - // Estimated: `30988` - // Minimum execution time: 783_222_000 picoseconds. - Weight::from_parts(794_959_000, 0) - .saturating_add(Weight::from_parts(0, 30988)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `27360` + // Estimated: `30825` + // Minimum execution time: 709_433_000 picoseconds. 
+ Weight::from_parts(725_074_000, 0) + .saturating_add(Weight::from_parts(0, 30825)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(104)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_upgrade_reject() -> Weight { // Proof Size summary in bytes: - // Measured: `27214` - // Estimated: `30679` - // Minimum execution time: 87_424_000 picoseconds. - Weight::from_parts(88_737_000, 0) - .saturating_add(Weight::from_parts(0, 30679)) + // Measured: `27338` + // Estimated: `30803` + // Minimum execution time: 98_973_000 picoseconds. + Weight::from_parts(104_715_000, 0) + .saturating_add(Weight::from_parts(0, 30803)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteList (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteList (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras ActionsQueue (r:1 w:1) - /// Proof Skipped: Paras ActionsQueue (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_onboarding_accept() -> Weight { // Proof Size summary in bytes: - // Measured: `26991` - // Estimated: `30456` - // Minimum execution 
time: 612_485_000 picoseconds. - Weight::from_parts(621_670_000, 0) - .saturating_add(Weight::from_parts(0, 30456)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `26728` + // Estimated: `30193` + // Minimum execution time: 550_958_000 picoseconds. + Weight::from_parts(564_497_000, 0) + .saturating_add(Weight::from_parts(0, 30193)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: ParasShared ActiveValidatorKeys (r:1 w:0) - /// Proof Skipped: ParasShared ActiveValidatorKeys (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Paras PvfActiveVoteMap (r:1 w:1) - /// Proof Skipped: Paras PvfActiveVoteMap (max_values: None, max_size: None, mode: Measured) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) fn include_pvf_check_statement_finalize_onboarding_reject() -> Weight { // Proof Size summary in bytes: - // Measured: `26682` - // Estimated: `30147` - // Minimum execution time: 86_673_000 picoseconds. - Weight::from_parts(87_424_000, 0) - .saturating_add(Weight::from_parts(0, 30147)) + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 97_088_000 picoseconds. + Weight::from_parts(103_617_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs index b7b3d12d4d92c301c4b4a8754890676d3ca291d6..71a0bb6fc7b2dae1ce238f3a94a77e54058abca1 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_paras_inherent.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `polkadot_runtime_parachains::paras_inherent` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,10 +54,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -70,23 +72,21 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn enter_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `8967` - // Estimated: `12432` - // Minimum execution time: 144_751_000 picoseconds. - Weight::from_parts(153_966_000, 0) - .saturating_add(Weight::from_parts(0, 12432)) + // Measured: `42760` + // Estimated: `46225` + // Minimum execution time: 228_252_000 picoseconds. 
+ Weight::from_parts(234_368_000, 0) + .saturating_add(Weight::from_parts(0, 46225)) .saturating_add(T::DbWeight::get().reads(15)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -94,10 +94,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -128,16 +130,14 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::Heads` (r:0 w:1) @@ -146,19 +146,18 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::MostRecentContext` (r:0 w:1) /// Proof: `Paras::MostRecentContext` 
(`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `v` is `[10, 200]`. + /// The range of component `v` is `[400, 1024]`. fn enter_variable_disputes(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `67786` - // Estimated: `73726 + v * (23 ±0)` - // Minimum execution time: 972_311_000 picoseconds. - Weight::from_parts(645_559_304, 0) - .saturating_add(Weight::from_parts(0, 73726)) - // Standard Error: 53_320 - .saturating_add(Weight::from_parts(41_795_493, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(25)) - .saturating_add(T::DbWeight::get().writes(15)) - .saturating_add(Weight::from_parts(0, 23).saturating_mul(v.into())) + // Measured: `203155` + // Estimated: `209095` + // Minimum execution time: 17_510_015_000 picoseconds. + Weight::from_parts(948_178_084, 0) + .saturating_add(Weight::from_parts(0, 209095)) + // Standard Error: 16_345 + .saturating_add(Weight::from_parts(41_627_958, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().writes(16)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -166,10 +165,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -182,25 +183,21 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: 
`ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { // Proof Size summary in bytes: - // Measured: `42374` - // Estimated: `48314` - // Minimum execution time: 361_262_000 picoseconds. - Weight::from_parts(370_617_000, 0) - .saturating_add(Weight::from_parts(0, 48314)) - .saturating_add(T::DbWeight::get().reads(17)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `76066` + // Estimated: `82006` + // Minimum execution time: 501_266_000 picoseconds. + Weight::from_parts(517_989_000, 0) + .saturating_add(Weight::from_parts(0, 82006)) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -208,10 +205,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -236,12 +235,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:0) @@ -252,6 +247,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: 
`ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParasDisputes::Included` (r:0 w:1) /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) @@ -262,18 +259,18 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::MostRecentContext` (r:0 w:1) /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `v` is `[101, 200]`. + /// The range of component `v` is `[2, 3]`. fn enter_backed_candidates_variable(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `42830` - // Estimated: `48770` - // Minimum execution time: 1_322_051_000 picoseconds. - Weight::from_parts(1_379_846_608, 0) - .saturating_add(Weight::from_parts(0, 48770)) - // Standard Error: 19_959 - .saturating_add(Weight::from_parts(24_630, 0).saturating_mul(v.into())) + // Measured: `76842` + // Estimated: `82782` + // Minimum execution time: 1_861_799_000 picoseconds. + Weight::from_parts(1_891_155_030, 0) + .saturating_add(Weight::from_parts(0, 82782)) + // Standard Error: 2_415_944 + .saturating_add(Weight::from_parts(7_924_189, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(26)) - .saturating_add(T::DbWeight::get().writes(15)) + .saturating_add(T::DbWeight::get().writes(14)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -281,10 +278,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -309,12 +308,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, 
mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::FutureCodeHash` (r:1 w:0) @@ -329,6 +324,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParasDisputes::Included` (r:0 w:1) /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) @@ -341,12 +338,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `42843` - // Estimated: `48783` - // Minimum execution time: 37_550_515_000 picoseconds. - Weight::from_parts(37_886_489_000, 0) - .saturating_add(Weight::from_parts(0, 48783)) + // Measured: `76855` + // Estimated: `82795` + // Minimum execution time: 37_682_370_000 picoseconds. + Weight::from_parts(41_118_445_000, 0) + .saturating_add(Weight::from_parts(0, 82795)) .saturating_add(T::DbWeight::get().reads(28)) - .saturating_add(T::DbWeight::get().writes(15)) + .saturating_add(T::DbWeight::get().writes(14)) } } diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs b/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs new file mode 100644 index 0000000000000000000000000000000000000000..d068f07e7594a80a9b728e0776649e52f88a7bf5 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs @@ -0,0 +1,86 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_common::coretime` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=runtime_common::coretime +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/runtime_common_coretime.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::coretime`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> runtime_common::coretime::WeightInfo for WeightInfo<T> { + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn request_core_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_543_000 picoseconds. + Weight::from_parts(7_745_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreSchedules` (r:0 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 100]`. + fn assign_core(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 9_367_000 picoseconds. 
+ Weight::from_parts(9_932_305, 0) + .saturating_add(Weight::from_parts(0, 3645)) + // Standard Error: 231 + .saturating_add(Weight::from_parts(12_947, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index ac379b69e3f2ca99f22a7ed336b09b6193342fec..90a0285cd17bc82f6de361c1a5cf208219919fb1 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -144,6 +144,7 @@ runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 6ba8e6ad31825c733c287d9ec8f08d5dc71ba162..9e7ee488af72d8b9993a307997066702a6e7591a 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -31,11 +31,11 @@ use codec::Encode; use pallet_transaction_payment::FungibleAdapter; use polkadot_runtime_parachains::{ - assigner_parachains as parachains_assigner_parachains, - configuration as parachains_configuration, - configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, disputes as parachains_disputes, - disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, - inclusion as parachains_inclusion, initializer as parachains_initializer, + assigner_coretime as parachains_assigner_coretime, configuration as parachains_configuration, + configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, coretime, + disputes as parachains_disputes, disputes::slashing as parachains_slashing, + dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, + initializer as parachains_initializer, on_demand as parachains_on_demand, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, runtime_api_impl::v11 as runtime_impl, scheduler as parachains_scheduler, session_info as parachains_session_info, @@ -49,8 +49,10 @@ use frame_election_provider_support::{ use frame_support::{ construct_runtime, derive_impl, genesis_builder_helper::{build_state, get_preset}, + pallet_prelude::Get, parameter_types, traits::{KeyOwnerProofSystem, WithdrawReasons}, + PalletId, }; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_session::historical as session_historical; @@ -82,8 +84,8 @@ use sp_runtime::{ curve::PiecewiseLinear, generic, impl_opaque_keys, traits::{ - BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, OpaqueKeys, - SaturatedConversion, StaticLookup, Verify, + BlakeTwo256, Block as BlockT, ConvertInto, OpaqueKeys, SaturatedConversion, StaticLookup, + Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, @@ -92,6 +94,7 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; +use xcm::v4::{Assets, InteriorLocation, Location, SendError, SendResult, SendXcm, XcmHash}; pub use pallet_balances::Call as BalancesCall; #[cfg(feature = "std")] @@ -167,14 +170,34 @@ impl frame_system::Config for Runtime { type MaxConsumers = 
frame_support::traits::ConstU32<16>; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = UncheckedExtrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } +} + +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: Self::RuntimeCall, extension: Self::Extension) -> Self::Extrinsic { + UncheckedExtrinsic::new_transaction(call, extension) + } +} + parameter_types! { pub storage EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS as u64; pub storage ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; @@ -251,6 +274,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = frame_support::weights::ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = (); } parameter_types! { @@ -395,18 +419,20 @@ impl frame_system::offchain::CreateSignedTransaction for R where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: ::Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let current_block = System::block_number().saturated_into::().saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -418,16 +444,18 @@ where frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = Indices::unlookup(account); - Some((call, (address, signature, extra))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) } } @@ -534,7 +562,7 @@ impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; type ForceOrigin = frame_system::EnsureRoot; type WeightInfo = (); - type CoretimeOnNewSession = (); + type CoretimeOnNewSession = Coretime; } impl parachains_session_info::Config for Runtime { @@ -552,15 +580,26 @@ impl parachains_paras::Config for Runtime { type QueueFootprinter = ParaInclusion; type NextSessionRotation = Babe; type OnNewHead = (); - type AssignCoretime = (); + type AssignCoretime = CoretimeAssignmentProvider; } parameter_types! { pub const BrokerId: u32 = 10u32; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); +} + +pub struct BrokerPot; +impl Get for BrokerPot { + fn get() -> InteriorLocation { + unimplemented!() + } } parameter_types! 
{ pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); + // Keep 2 timeslices worth of revenue information. + pub const MaxHistoricalRevenue: BlockNumber = 2 * 5; + pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd"); } impl parachains_dmp::Config for Runtime {} @@ -582,10 +621,48 @@ impl parachains_hrmp::Config for Runtime { type WeightInfo = parachains_hrmp::TestWeightInfo; } -impl parachains_assigner_parachains::Config for Runtime {} +impl parachains_on_demand::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type TrafficDefaultValue = OnDemandTrafficDefaultValue; + type WeightInfo = parachains_on_demand::TestWeightInfo; + type MaxHistoricalRevenue = MaxHistoricalRevenue; + type PalletId = OnDemandPalletId; +} + +impl parachains_assigner_coretime::Config for Runtime {} impl parachains_scheduler::Config for Runtime { - type AssignmentProvider = ParaAssignmentProvider; + type AssignmentProvider = CoretimeAssignmentProvider; +} + +pub struct DummyXcmSender; +impl SendXcm for DummyXcmSender { + type Ticket = (); + fn validate( + _: &mut Option, + _: &mut Option>, + ) -> SendResult { + Ok(((), Assets::new())) + } + + /// Actually carry out the delivery operation for a previously validated message sending. + fn deliver(_ticket: Self::Ticket) -> Result { + Ok([0u8; 32]) + } +} + +impl coretime::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type Currency = pallet_balances::Pallet; + type BrokerId = BrokerId; + type WeightInfo = crate::coretime::TestWeightInfo; + type SendXcm = DummyXcmSender; + type MaxXcmTransactWeight = MaxXcmTransactWeight; + type BrokerPotLocation = BrokerPot; + type AssetTransactor = (); + type AccountToLocation = (); } impl paras_sudo_wrapper::Config for Runtime {} @@ -728,7 +805,9 @@ construct_runtime! { Xcm: pallet_xcm, ParasDisputes: parachains_disputes, ParasSlashing: parachains_slashing, - ParaAssignmentProvider: parachains_assigner_parachains, + OnDemandAssignmentProvider: parachains_on_demand, + CoretimeAssignmentProvider: parachains_assigner_coretime, + Coretime: coretime, Sudo: pallet_sudo, @@ -746,8 +825,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -759,7 +838,10 @@ pub type SignedExtra = ( ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -770,7 +852,7 @@ pub type Executive = frame_executive::Executive< AllPalletsWithSystem, >; /// The payload being signed in transactions. 
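The test runtime above swaps `SignedExtra` for `TxExtension` and, in the new `CreateInherent`/`CreateTransaction`/`CreateSignedTransaction` impls, builds extrinsics through `new_bare`, `new_transaction` and `new_signed`. The toy model below shows what each of those three construction paths carries; it is an illustration with made-up type aliases, not the real `sp_runtime::generic::UncheckedExtrinsic`.

```rust
/// Toy stand-ins for the runtime's address, signature and extension types.
type Address = u64;
type Signature = [u8; 64];
type Call = String;
type Extension = (u32, u64); // e.g. (nonce, tip) in place of the real tuple

/// Simplified model of the three extrinsic kinds built by the new
/// `CreateInherent` / `CreateTransaction` / `CreateSignedTransaction` impls.
#[allow(dead_code)]
enum ToyExtrinsic {
    /// `new_bare`: unsigned, no extension; used for inherents.
    Bare(Call),
    /// `new_transaction`: unsigned but carrying a transaction extension
    /// (a "general" transaction).
    General(Call, Extension),
    /// `new_signed`: the classic signed transaction.
    Signed(Call, Address, Signature, Extension),
}

impl ToyExtrinsic {
    fn new_bare(call: Call) -> Self { Self::Bare(call) }
    fn new_transaction(call: Call, ext: Extension) -> Self { Self::General(call, ext) }
    fn new_signed(call: Call, who: Address, sig: Signature, ext: Extension) -> Self {
        Self::Signed(call, who, sig, ext)
    }
}

fn main() {
    let _inherent = ToyExtrinsic::new_bare("timestamp::set".into());
    let _general = ToyExtrinsic::new_transaction("system::remark".into(), (0, 0));
    let _signed = ToyExtrinsic::new_signed("balances::transfer".into(), 42, [0u8; 64], (1, 0));
}
```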
-pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; pub type Hash = ::Hash; pub type Extrinsic = ::Extrinsic; diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index 31c04c26ece7b9db20eb525a29f1fec92a21b7b1..fcb5719de895513e0b11f71f5d210903c5d96e04 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -277,6 +277,7 @@ runtime-benchmarks = [ "pallet-state-trie-migration/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", diff --git a/polkadot/runtime/westend/src/genesis_config_presets.rs b/polkadot/runtime/westend/src/genesis_config_presets.rs index 621ef38f0b75c8cde37714745ccef6a937dddb51..a9e22898ddb45ebd15dade033c39a98489f33b2e 100644 --- a/polkadot/runtime/westend/src/genesis_config_presets.rs +++ b/polkadot/runtime/westend/src/genesis_config_presets.rs @@ -23,6 +23,7 @@ use crate::{ #[cfg(not(feature = "std"))] use alloc::format; use alloc::{vec, vec::Vec}; +use frame_support::build_struct_json_patch; use pallet_staking::{Forcing, StakerStatus}; use polkadot_primitives::{AccountId, AssignmentId, SchedulerParams, ValidatorId}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; @@ -170,7 +171,7 @@ fn westend_testnet_genesis( const ENDOWMENT: u128 = 1_000_000 * WND; const STASH: u128 = 100 * WND; - let config = RuntimeGenesisConfig { + build_struct_json_patch!(RuntimeGenesisConfig { balances: BalancesConfig { balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), }, @@ -192,7 +193,6 @@ fn westend_testnet_genesis( ) }) .collect::>(), - ..Default::default() }, staking: StakingConfig { minimum_validator_count: 1, @@ -204,19 +204,12 @@ fn westend_testnet_genesis( invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect::>(), force_era: Forcing::NotForcing, slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() }, - babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG, ..Default::default() }, + babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG }, sudo: SudoConfig { key: Some(root_key) }, configuration: ConfigurationConfig { config: default_parachains_host_configuration() }, - registrar: RegistrarConfig { - next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, - ..Default::default() - }, - ..Default::default() - }; - - serde_json::to_value(config).expect("Could not build genesis config.") + registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, + }) } // staging_testnet diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 0216ccaf4917317db42e0ce32620b2f7d8f764ea..4941d91df57d087e56681f34e6d4a7043d17294c 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -97,8 +97,8 @@ use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ - AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, - IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, + AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, IdentityLookup, Keccak256, + OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, 
TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Percent, Permill, @@ -107,7 +107,10 @@ use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use xcm::{latest::prelude::*, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}; +use xcm::{ + latest::prelude::*, VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, +}; use xcm_builder::PayOverXcm; use xcm_runtime_apis::{ @@ -218,6 +221,7 @@ impl frame_system::Config for Runtime { type Version = Version; type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; type SS58Prefix = SS58Prefix; type MaxConsumers = frame_support::traits::ConstU32<16>; } @@ -480,6 +484,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; } parameter_types! { @@ -853,18 +858,44 @@ impl pallet_grandpa::Config for Runtime { pallet_grandpa::EquivocationReportSystem; } +impl frame_system::offchain::SigningTypes for Runtime { + type Public = ::Signer; + type Signature = Signature; +} + +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; +} + +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_transaction(call, extension) + } +} + /// Submits a transaction with the node's public and signature type. Adheres to the signed extension /// format of the chain. impl frame_system::offchain::CreateSignedTransaction for Runtime where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: ::Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { use sp_runtime::traits::StaticLookup; // take the biggest period possible. let period = @@ -876,7 +907,7 @@ where // so the actual block number is `n`. 
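The `create_signed_transaction` implementations in this diff derive transaction mortality from `BlockHashCount`: the era period is half of the next power of two of the retained hash count (falling back to 2 on overflow), and the era starts at the parent of the block being built. A standalone sketch of just that arithmetic, with a placeholder `BLOCK_HASH_COUNT` value:

```rust
/// Placeholder for the runtime's `BlockHashCount` parameter.
const BLOCK_HASH_COUNT: u64 = 4096;

fn mortality_period() -> u64 {
    // `checked_next_power_of_two().map(|c| c / 2).unwrap_or(2)`, as in the runtime.
    BLOCK_HASH_COUNT
        .checked_next_power_of_two()
        .map(|c| c / 2)
        .unwrap_or(2)
}

fn era_start(current_block_number: u64) -> u64 {
    // The runtime anchors the era on the parent block, since `System::block_number`
    // already refers to the block being built.
    current_block_number.saturating_sub(1)
}

fn main() {
    let period = mortality_period();
    let start = era_start(1_000_000);
    // In the runtime these values feed `generic::Era::mortal(period, current_block)`
    // inside the `CheckMortality` extension.
    println!("mortal era: period={period}, start={start}");
}
```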
.saturating_sub(1); let tip = 0; - let extra: SignedExtra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -889,30 +920,28 @@ where frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), frame_metadata_hash_extension::CheckMetadataHash::::new(true), - ); - let raw_payload = SignedPayload::new(call, extra) + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; - let (call, extra, _) = raw_payload.deconstruct(); + let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, extra))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) } } -impl frame_system::offchain::SigningTypes for Runtime { - type Public = ::Signer; - type Signature = Signature; -} - -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateInherent for Runtime where - RuntimeCall: From, + RuntimeCall: From, { - type OverarchingCall = RuntimeCall; - type Extrinsic = UncheckedExtrinsic; + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_bare(call) + } } parameter_types! { @@ -1742,8 +1771,8 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// `BlockId` type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( +/// The extension to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -1779,12 +1808,19 @@ pub mod migrations { MaxAgentsToMigrate, >, parachains_shared::migration::MigrateToV1, + parachains_scheduler::migration::MigrateV2ToV3, + // permanent + pallet_xcm::migration::MigrateToLatestXcmVersion, ); } /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; + /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -1795,7 +1831,7 @@ pub type Executive = frame_executive::Executive< Migrations, >; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; #[cfg(feature = "runtime-benchmarks")] mod benches { @@ -1844,7 +1880,9 @@ mod benches { [pallet_staking, Staking] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] [pallet_treasury, Treasury] [pallet_utility, Utility] [pallet_vesting, Vesting] @@ -2508,6 +2546,7 @@ sp_api::impl_runtime_apis! 
{ use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; @@ -2536,6 +2575,7 @@ sp_api::impl_runtime_apis! { use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; impl pallet_session_benchmarking::Config for Runtime {} @@ -2756,4 +2796,13 @@ sp_api::impl_runtime_apis! { genesis_config_presets::preset_names() } } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_teleporter(asset, location) + } + } } diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index c1b396a4cd2f6723e851ba0e0b653fd0c3da9e11..02fd6b61496baae0a9acd266397e2b6573e1535b 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -68,7 +68,7 @@ fn sanity_check_teleport_assets_weight() { weight_limit: Unlimited, } .get_dispatch_info() - .weight; + .call_weight; assert!((weight * 50).all_lt(BlockWeights::get().max_block)); } diff --git a/polkadot/runtime/westend/src/weights/frame_system_extensions.rs b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..048f23fbcb91329eadfd721dd2ae63e1f218a89e --- /dev/null +++ b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-09-12, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot +// benchmark +// pallet +// --steps=2 +// --repeat=2 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=frame-system-extensions +// --chain=westend-dev +// --output=./polkadot/runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 75_764_000 picoseconds. + Weight::from_parts(85_402_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 118_233_000 picoseconds. + Weight::from_parts(126_539_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 118_233_000 picoseconds. + Weight::from_parts(126_539_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_885_000 picoseconds. + Weight::from_parts(12_784_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 104_237_000 picoseconds. + Weight::from_parts(110_910_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_141_000 picoseconds. + Weight::from_parts(11_502_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_192_000 picoseconds. 
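With `frame_system_extensions` benchmarked, each member of the `TxExtension` tuple now carries an explicit weight. The ref-times in this file come from a `--steps=2 --repeat=2` run of a debug build, so they are only indicative, but summing them gives a feel for the fixed per-transaction overhead of the system-extension pipeline (transaction payment is benchmarked separately). A rough sketch:

```rust
// Ref-times (picoseconds) taken from the generated `frame_system_extensions`
// weights in this file. Debug-build, low-repeat numbers: illustrative only.
const CHECK_NON_ZERO_SENDER: u64 = 12_784_000;
const CHECK_SPEC_VERSION: u64 = 11_502_000;
const CHECK_TX_VERSION: u64 = 11_481_000;
const CHECK_GENESIS: u64 = 85_402_000;
const CHECK_MORTALITY: u64 = 126_539_000;
const CHECK_NONCE: u64 = 110_910_000;
const CHECK_WEIGHT: u64 = 93_607_000;

fn main() {
    // Approximate fixed ref-time overhead of the system extensions in the
    // `TxExtension` tuple.
    let total: u64 = [
        CHECK_NON_ZERO_SENDER,
        CHECK_SPEC_VERSION,
        CHECK_TX_VERSION,
        CHECK_GENESIS,
        CHECK_MORTALITY,
        CHECK_NONCE,
        CHECK_WEIGHT,
    ]
    .iter()
    .sum();
    println!("~{} µs of ref-time per transaction", total / 1_000_000);
}
```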
+ Weight::from_parts(11_481_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 87_616_000 picoseconds. + Weight::from_parts(93_607_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index ae11f6ccba41852cb16a7dddb911cdd4cdcab19f..8c12c1adb9caf02b4802fe53118fffc2e63ce9a2 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -17,6 +17,7 @@ pub mod frame_election_provider_support; pub mod frame_system; +pub mod frame_system_extensions; pub mod pallet_asset_rate; pub mod pallet_bags_list; pub mod pallet_balances; @@ -39,6 +40,7 @@ pub mod pallet_session; pub mod pallet_staking; pub mod pallet_sudo; pub mod pallet_timestamp; +pub mod pallet_transaction_payment; pub mod pallet_treasury; pub mod pallet_utility; pub mod pallet_vesting; diff --git a/polkadot/runtime/westend/src/weights/pallet_sudo.rs b/polkadot/runtime/westend/src/weights/pallet_sudo.rs index e9ab3ad37a4cca6483dc7fdeb98562dd6f1937e6..649c43e031dc4a194bed6564e8e01dc823890c8a 100644 --- a/polkadot/runtime/westend/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/westend/src/weights/pallet_sudo.rs @@ -94,4 +94,15 @@ impl pallet_sudo::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 2_875_000 picoseconds. + Weight::from_parts(6_803_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs b/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000000000000000000000000000000..71a01b6a0c2ea8e12099765d826bd5a550affd16 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,68 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-09-12, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot +// benchmark +// pallet +// --steps=2 +// --repeat=2 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=pallet-transaction-payment +// --chain=westend-dev +// --output=./polkadot/runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_transaction_payment::WeightInfo for WeightInfo<T> { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `320` + // Estimated: `3593` + // Minimum execution time: 569_518_000 picoseconds. + Weight::from_parts(590_438_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs index 32f6f28f242621f1cf496046fbdf94f3ba7b819c..36aafc1d2f2a7f97becebd367bddffec67dcdf6f 100644 --- a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs +++ b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_paras_inherent.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `polkadot_runtime_parachains::paras_inherent` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-dr4vwrkf-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -54,10 +54,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -70,23 +72,21 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn enter_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `37553` - // Estimated: `41018` - // Minimum execution time: 237_414_000 picoseconds. - Weight::from_parts(245_039_000, 0) - .saturating_add(Weight::from_parts(0, 41018)) + // Measured: `37559` + // Estimated: `41024` + // Minimum execution time: 217_257_000 picoseconds. 
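In these proof-size summaries, `Measured` is the storage-proof size actually recorded during the benchmark, while `Estimated` is the bound charged through `Weight::from_parts(0, estimated)`; the estimate always covers the measured value, with a margin added for `Measured`-mode storage items. A tiny sanity-check sketch over the new `enter_empty` figures:

```rust
// Figures from the `enter_empty` proof-size summary above.
const MEASURED: u64 = 37_559; // bytes of storage proof recorded in the benchmark
const ESTIMATED: u64 = 41_024; // bytes charged as the PoV bound

fn main() {
    // The charged bound must cover what was actually measured; the surplus is
    // the safety margin for `Measured`-mode storage items.
    assert!(ESTIMATED >= MEASURED);
    println!("PoV margin for enter_empty: {} bytes", ESTIMATED - MEASURED);
}
```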
+ Weight::from_parts(228_878_000, 0) + .saturating_add(Weight::from_parts(0, 41024)) .saturating_add(T::DbWeight::get().reads(15)) - .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -94,10 +94,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -134,16 +136,14 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::Heads` (r:0 w:1) @@ -152,19 +152,18 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::MostRecentContext` (r:0 w:1) /// Proof: 
`Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `v` is `[10, 1024]`. + /// The range of component `v` is `[400, 1024]`. fn enter_variable_disputes(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `199504` - // Estimated: `205444 + v * (5 ±0)` - // Minimum execution time: 1_157_489_000 picoseconds. - Weight::from_parts(629_243_559, 0) - .saturating_add(Weight::from_parts(0, 205444)) - // Standard Error: 10_997 - .saturating_add(Weight::from_parts(50_752_930, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(28)) - .saturating_add(T::DbWeight::get().writes(16)) - .saturating_add(Weight::from_parts(0, 5).saturating_mul(v.into())) + // Measured: `117547` + // Estimated: `123487` + // Minimum execution time: 21_077_090_000 picoseconds. + Weight::from_parts(703_350_265, 0) + .saturating_add(Weight::from_parts(0, 123487)) + // Standard Error: 21_944 + .saturating_add(Weight::from_parts(51_197_317, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(17)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -172,10 +171,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -188,25 +189,21 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: 
`ParasShared::ActiveValidatorIndices` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn enter_bitfields() -> Weight { // Proof Size summary in bytes: - // Measured: `75131` - // Estimated: `81071` - // Minimum execution time: 466_928_000 picoseconds. - Weight::from_parts(494_342_000, 0) - .saturating_add(Weight::from_parts(0, 81071)) - .saturating_add(T::DbWeight::get().reads(17)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `74967` + // Estimated: `80907` + // Minimum execution time: 487_605_000 picoseconds. + Weight::from_parts(506_014_000, 0) + .saturating_add(Weight::from_parts(0, 80907)) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -214,10 +211,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -248,12 +247,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::ParaLifecycles` (r:1 w:0) @@ -264,6 +259,8 @@ impl 
polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParasDisputes::Included` (r:0 w:1) /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) @@ -277,15 +274,15 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// The range of component `v` is `[2, 5]`. fn enter_backed_candidates_variable(v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `76369` - // Estimated: `82309` - // Minimum execution time: 1_468_919_000 picoseconds. - Weight::from_parts(1_433_315_477, 0) - .saturating_add(Weight::from_parts(0, 82309)) - // Standard Error: 419_886 - .saturating_add(Weight::from_parts(42_880_485, 0).saturating_mul(v.into())) + // Measured: `76491` + // Estimated: `82431` + // Minimum execution time: 1_496_985_000 picoseconds. + Weight::from_parts(1_466_448_265, 0) + .saturating_add(Weight::from_parts(0, 82431)) + // Standard Error: 403_753 + .saturating_add(Weight::from_parts(44_015_233, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(29)) - .saturating_add(T::DbWeight::get().writes(16)) + .saturating_add(T::DbWeight::get().writes(15)) } /// Storage: `ParaInherent::Included` (r:1 w:1) /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) @@ -293,10 +290,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::AvailabilityCores` (r:1 w:1) - /// Proof: `ParaScheduler::AvailabilityCores` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) @@ -327,12 +326,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) - /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParaScheduler::ClaimQueue` (r:1 
w:1) - /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) - /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Paras::FutureCodeHash` (r:1 w:0) @@ -347,6 +342,8 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `Session::DisabledValidators` (r:1 w:0) /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `ParasDisputes::Included` (r:0 w:1) /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) @@ -359,12 +356,12 @@ impl polkadot_runtime_parachains::paras_inherent::Weigh /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) fn enter_backed_candidate_code_upgrade() -> Weight { // Proof Size summary in bytes: - // Measured: `76382` - // Estimated: `82322` - // Minimum execution time: 34_577_233_000 picoseconds. - Weight::from_parts(39_530_352_000, 0) - .saturating_add(Weight::from_parts(0, 82322)) + // Measured: `76504` + // Estimated: `82444` + // Minimum execution time: 40_136_167_000 picoseconds. 
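The heaviest path here, `enter_backed_candidate_code_upgrade`, now comes in at roughly 40 ms of ref-time. A quick conversion sketch, assuming a 2-second block ref-time budget as a placeholder; the real figure comes from the runtime's `BlockWeights`, which the westend tests elsewhere in this diff compare weights against via `BlockWeights::get().max_block`.

```rust
// Ref-time of the heaviest benchmarked `paras_inherent` path above, in picoseconds.
const CODE_UPGRADE_REF_TIME_PS: u64 = 40_136_167_000;

// Assumed block ref-time budget (placeholder): 2 seconds.
const BLOCK_REF_TIME_PS: u64 = 2_000_000_000_000;

fn main() {
    let ms = CODE_UPGRADE_REF_TIME_PS as f64 / 1_000_000_000.0;
    let share = CODE_UPGRADE_REF_TIME_PS as f64 / BLOCK_REF_TIME_PS as f64;
    println!("~{ms:.1} ms of ref-time, ~{:.1}% of the assumed block budget", share * 100.0);
}
```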
+ Weight::from_parts(41_572_376_000, 0) + .saturating_add(Weight::from_parts(0, 82444)) .saturating_add(T::DbWeight::get().reads(31)) - .saturating_add(T::DbWeight::get().writes(16)) + .saturating_add(T::DbWeight::get().writes(15)) } } diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs index 7cb230f6e0066fa775732ff5feebffddbc46194e..f8a1826b8ab15bf00c1bd971b1a192b856dd67b4 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs @@ -168,7 +168,7 @@ impl pallet_xcm::Config for Runtime { type UniversalLocation = UniversalLocation; // No version discovery needed const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0; - type AdvertisedXcmVersion = frame::traits::ConstU32<3>; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type AdminOrigin = frame_system::EnsureRoot; // No locking type TrustedLockers = (); diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs index a31e664d8216006982ccb0be1625c6a1ecc80a27..e7b602df7338beffb9859c2196d2b046773824d0 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs @@ -142,7 +142,7 @@ impl pallet_xcm::Config for Runtime { type UniversalLocation = UniversalLocation; // No version discovery needed const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0; - type AdvertisedXcmVersion = frame::traits::ConstU32<3>; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type AdminOrigin = frame_system::EnsureRoot; // No locking type TrustedLockers = (); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 40a7da58a687c186664404e0a00734b457b60e51..f1ec3f604d7b27158596ac8ee95b1708e1eec8a5 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -121,7 +121,7 @@ benchmarks! { let instruction = Instruction::Transact { origin_kind: OriginKind::SovereignAccount, - require_weight_at_most: noop_call.get_dispatch_info().weight, + require_weight_at_most: noop_call.get_dispatch_info().call_weight, call: double_encoded_noop_call, }; let xcm = Xcm(vec![instruction]); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index 210b8f65637797966261e69dc2e5453a6fccb467..5f8482bdcb8cf089397e8ff22c96f3cdc93e42cf 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -62,7 +62,7 @@ const SEED: u32 = 0; /// The XCM executor to use for doing stuff. pub type ExecutorOf = xcm_executor::XcmExecutor<::XcmConfig>; /// The overarching call type. -pub type OverArchingCallOf = ::RuntimeCall; +pub type RuntimeCallOf = ::RuntimeCall; /// The asset transactor of our executor pub type AssetTransactorOf = <::XcmConfig as XcmConfig>::AssetTransactor; /// The call type of executor's config. Should eventually resolve to the same overarching call type. 
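Several call sites in this diff move from `get_dispatch_info().weight` to `.call_weight`, reflecting that dispatch info now tracks the call's own weight separately from the weight attributable to transaction extensions. Below is a simplified model of that split; apart from `call_weight`, the field and method names are assumptions for illustration only.

```rust
/// Simplified stand-in for the dispatch-info split that accompanies
/// transaction extensions: the call's own weight is kept apart from the
/// weight added by the extension pipeline.
#[derive(Debug, Clone, Copy)]
struct ToyDispatchInfo {
    call_weight: u64,      // weight of the call itself (what `.call_weight` reads)
    extension_weight: u64, // assumed: weight contributed by the extensions
}

impl ToyDispatchInfo {
    /// Assumed convenience accessor. The point is that callers which only care
    /// about the call (e.g. the XCM `Transact` benchmark above) read
    /// `call_weight` rather than a combined figure.
    fn total_weight(&self) -> u64 {
        self.call_weight + self.extension_weight
    }
}

fn main() {
    let info = ToyDispatchInfo { call_weight: 1_000_000, extension_weight: 250_000 };
    assert!(info.call_weight < info.total_weight());
    println!("call = {}, total = {}", info.call_weight, info.total_weight());
}
```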
diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index d287987a96d47d68fcc1b5edb3d2e6ca0d46328f..9b8f735b478f2a99231300bc7e82ea159413d412 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -70,6 +70,7 @@ use xcm_executor::{ use xcm_runtime_apis::{ dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::Error as XcmPaymentApiError, + trusted_query::Error as TrustedQueryApiError, }; #[cfg(any(feature = "try-runtime", test))] @@ -2603,6 +2604,57 @@ impl Pallet { }) } + /// Given an Asset and a Location, returns if the provided location is a trusted reserve for the + /// given asset. + pub fn is_trusted_reserve( + asset: VersionedAsset, + location: VersionedLocation, + ) -> Result { + let location: Location = location.try_into().map_err(|e| { + tracing::debug!( + target: "xcm::pallet_xcm::is_trusted_reserve", + "Asset version conversion failed with error: {:?}", + e, + ); + TrustedQueryApiError::VersionedLocationConversionFailed + })?; + + let a: Asset = asset.try_into().map_err(|e| { + tracing::debug!( + target: "xcm::pallet_xcm::is_trusted_reserve", + "Location version conversion failed with error: {:?}", + e, + ); + TrustedQueryApiError::VersionedAssetConversionFailed + })?; + + Ok(::IsReserve::contains(&a, &location)) + } + + /// Given an Asset and a Location, returns if the asset can be teleported to provided location. + pub fn is_trusted_teleporter( + asset: VersionedAsset, + location: VersionedLocation, + ) -> Result { + let location: Location = location.try_into().map_err(|e| { + tracing::debug!( + target: "xcm::pallet_xcm::is_trusted_teleporter", + "Asset version conversion failed with error: {:?}", + e, + ); + TrustedQueryApiError::VersionedLocationConversionFailed + })?; + let a: Asset = asset.try_into().map_err(|e| { + tracing::debug!( + target: "xcm::pallet_xcm::is_trusted_teleporter", + "Location version conversion failed with error: {:?}", + e, + ); + TrustedQueryApiError::VersionedAssetConversionFailed + })?; + Ok(::IsTeleporter::contains(&a, &location)) + } + pub fn query_delivery_fees( destination: VersionedLocation, message: VersionedXcm<()>, @@ -2692,7 +2744,7 @@ impl Pallet { .invert_target(&responder) .map_err(|()| XcmError::LocationNotInvertible)?; let notify: ::RuntimeCall = notify.into(); - let max_weight = notify.get_dispatch_info().weight; + let max_weight = notify.get_dispatch_info().call_weight; let query_id = Self::new_notify_query(responder, notify, timeout, Here); let response_info = QueryResponseInfo { destination, query_id, max_weight }; let report_error = Xcm(vec![ReportError(response_info)]); @@ -2755,6 +2807,44 @@ impl Pallet { /// set. #[cfg(any(feature = "try-runtime", test))] pub fn do_try_state() -> Result<(), TryRuntimeError> { + use migration::data::NeedsMigration; + + // Take the minimum version between `SafeXcmVersion` and `latest - 1` and ensure that the + // operational data is stored at least at that version, for example, to prevent issues when + // removing older XCM versions. 
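As the comment above explains (and the code that follows implements), `do_try_state` pins the minimum acceptable XCM version of stored data to whichever is lower: one behind the latest, or the configured `SafeXcmVersion`. A standalone sketch of that selection, with a placeholder `XCM_VERSION`:

```rust
// Placeholder for `xcm::latest::VERSION`; the selection logic is the point.
const XCM_VERSION: u32 = 5;

/// Mirrors the `minimal_allowed_xcm_version` computation in `do_try_state`:
/// start from `XCM_VERSION - 1`, and never require more than `SafeXcmVersion`
/// if one is set.
fn minimal_allowed_xcm_version(safe_xcm_version: Option<u32>) -> u32 {
    match safe_xcm_version {
        Some(safe) => XCM_VERSION.saturating_sub(1).min(safe),
        None => XCM_VERSION.saturating_sub(1),
    }
}

fn main() {
    assert_eq!(minimal_allowed_xcm_version(None), 4);
    assert_eq!(minimal_allowed_xcm_version(Some(3)), 3);
    assert_eq!(minimal_allowed_xcm_version(Some(10)), 4);
    println!("ok");
}
```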
+ let minimal_allowed_xcm_version = if let Some(safe_xcm_version) = SafeXcmVersion::::get() + { + XCM_VERSION.saturating_sub(1).min(safe_xcm_version) + } else { + XCM_VERSION.saturating_sub(1) + }; + + // check `Queries` + ensure!( + !Queries::::iter_values() + .any(|data| data.needs_migration(minimal_allowed_xcm_version)), + TryRuntimeError::Other("`Queries` data should be migrated to the higher xcm version!") + ); + + // check `LockedFungibles` + ensure!( + !LockedFungibles::::iter_values() + .any(|data| data.needs_migration(minimal_allowed_xcm_version)), + TryRuntimeError::Other( + "`LockedFungibles` data should be migrated to the higher xcm version!" + ) + ); + + // check `RemoteLockedFungibles` + ensure!( + !RemoteLockedFungibles::::iter() + .any(|(key, data)| key.needs_migration(minimal_allowed_xcm_version) || + data.needs_migration(minimal_allowed_xcm_version)), + TryRuntimeError::Other( + "`RemoteLockedFungibles` data should be migrated to the higher xcm version!" + ) + ); + // if migration has been already scheduled, everything is ok and data will be eventually // migrated if CurrentMigration::::exists() { @@ -2835,7 +2925,7 @@ impl xcm_executor::traits::Enact for UnlockTicket { let mut maybe_remove_index = None; let mut locked = BalanceOf::::zero(); let mut found = false; - // We could just as well do with with an into_iter, filter_map and collect, however this way + // We could just as well do with an into_iter, filter_map and collect, however this way // avoids making an allocation. for (i, x) in locks.iter_mut().enumerate() { if x.1.try_as::<_>().defensive() == Ok(&self.unlocker) { @@ -3216,7 +3306,7 @@ impl OnResponse for Pallet { }); return Weight::zero() } - return match maybe_notify { + match maybe_notify { Some((pallet_index, call_index)) => { // This is a bit horrible, but we happen to know that the `Call` will // be built by `(pallet_index: u8, call_index: u8, QueryId, Response)`. @@ -3226,7 +3316,7 @@ impl OnResponse for Pallet { ::RuntimeCall::decode(&mut bytes) }) { Queries::::remove(query_id); - let weight = call.get_dispatch_info().weight; + let weight = call.get_dispatch_info().call_weight; if weight.any_gt(max_weight) { let e = Event::NotifyOverweight { query_id, diff --git a/polkadot/xcm/pallet-xcm/src/migration.rs b/polkadot/xcm/pallet-xcm/src/migration.rs index 2c5b2620f53595452befa6694f88b6d6caa55289..80154f57ddfbf900a87150744b104bb36f12cfb7 100644 --- a/polkadot/xcm/pallet-xcm/src/migration.rs +++ b/polkadot/xcm/pallet-xcm/src/migration.rs @@ -15,7 +15,8 @@ // along with Polkadot. If not, see . use crate::{ - pallet::CurrentMigration, Config, Pallet, VersionMigrationStage, VersionNotifyTargets, + pallet::CurrentMigration, Config, CurrentXcmVersion, Pallet, VersionMigrationStage, + VersionNotifyTargets, }; use frame_support::{ pallet_prelude::*, @@ -25,6 +26,307 @@ use frame_support::{ const DEFAULT_PROOF_SIZE: u64 = 64 * 1024; +/// Utilities for handling XCM version migration for the relevant data. +pub mod data { + use crate::*; + + /// A trait for handling XCM versioned data migration for the requested `XcmVersion`. + pub(crate) trait NeedsMigration { + type MigratedData; + + /// Returns true if data does not match `minimal_allowed_xcm_version`. + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool; + + /// Attempts to migrate data. `Ok(None)` means no migration is needed. + /// `Ok(Some(Self::MigratedData))` should contain the migrated data. 
+ fn try_migrate(self, to_xcm_version: XcmVersion) -> Result, ()>; + } + + /// Implementation of `NeedsMigration` for `LockedFungibles` data. + impl NeedsMigration for BoundedVec<(B, VersionedLocation), M> { + type MigratedData = Self; + + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool { + self.iter() + .any(|(_, unlocker)| unlocker.identify_version() < minimal_allowed_xcm_version) + } + + fn try_migrate( + mut self, + to_xcm_version: XcmVersion, + ) -> Result, ()> { + let mut was_modified = false; + for locked in self.iter_mut() { + if locked.1.identify_version() < to_xcm_version { + let Ok(new_unlocker) = locked.1.clone().into_version(to_xcm_version) else { + return Err(()) + }; + locked.1 = new_unlocker; + was_modified = true; + } + } + + if was_modified { + Ok(Some(self)) + } else { + Ok(None) + } + } + } + + /// Implementation of `NeedsMigration` for `Queries` data. + impl NeedsMigration for QueryStatus { + type MigratedData = Self; + + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool { + match &self { + QueryStatus::Pending { responder, maybe_match_querier, .. } => + responder.identify_version() < minimal_allowed_xcm_version || + maybe_match_querier + .as_ref() + .map(|v| v.identify_version() < minimal_allowed_xcm_version) + .unwrap_or(false), + QueryStatus::VersionNotifier { origin, .. } => + origin.identify_version() < minimal_allowed_xcm_version, + QueryStatus::Ready { response, .. } => + response.identify_version() < minimal_allowed_xcm_version, + } + } + + fn try_migrate(self, to_xcm_version: XcmVersion) -> Result, ()> { + if !self.needs_migration(to_xcm_version) { + return Ok(None) + } + + // do migration + match self { + QueryStatus::Pending { responder, maybe_match_querier, maybe_notify, timeout } => { + let Ok(responder) = responder.into_version(to_xcm_version) else { + return Err(()) + }; + let Ok(maybe_match_querier) = + maybe_match_querier.map(|mmq| mmq.into_version(to_xcm_version)).transpose() + else { + return Err(()) + }; + Ok(Some(QueryStatus::Pending { + responder, + maybe_match_querier, + maybe_notify, + timeout, + })) + }, + QueryStatus::VersionNotifier { origin, is_active } => origin + .into_version(to_xcm_version) + .map(|origin| Some(QueryStatus::VersionNotifier { origin, is_active })), + QueryStatus::Ready { response, at } => response + .into_version(to_xcm_version) + .map(|response| Some(QueryStatus::Ready { response, at })), + } + } + } + + /// Implementation of `NeedsMigration` for `RemoteLockedFungibles` key type. + impl NeedsMigration for (XcmVersion, A, VersionedAssetId) { + type MigratedData = Self; + + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool { + self.0 < minimal_allowed_xcm_version || + self.2.identify_version() < minimal_allowed_xcm_version + } + + fn try_migrate(self, to_xcm_version: XcmVersion) -> Result, ()> { + if !self.needs_migration(to_xcm_version) { + return Ok(None) + } + + let Ok(asset_id) = self.2.into_version(to_xcm_version) else { return Err(()) }; + Ok(Some((to_xcm_version, self.1, asset_id))) + } + } + + /// Implementation of `NeedsMigration` for `RemoteLockedFungibles` data. 
+ impl> NeedsMigration + for RemoteLockedFungibleRecord + { + type MigratedData = Self; + + fn needs_migration(&self, minimal_allowed_xcm_version: XcmVersion) -> bool { + self.owner.identify_version() < minimal_allowed_xcm_version || + self.locker.identify_version() < minimal_allowed_xcm_version + } + + fn try_migrate(self, to_xcm_version: XcmVersion) -> Result, ()> { + if !self.needs_migration(to_xcm_version) { + return Ok(None) + } + + let RemoteLockedFungibleRecord { amount, owner, locker, consumers } = self; + + let Ok(owner) = owner.into_version(to_xcm_version) else { return Err(()) }; + let Ok(locker) = locker.into_version(to_xcm_version) else { return Err(()) }; + + Ok(Some(RemoteLockedFungibleRecord { amount, owner, locker, consumers })) + } + } + + impl Pallet { + /// Migrates relevant data to the `required_xcm_version`. + pub(crate) fn migrate_data_to_xcm_version( + weight: &mut Weight, + required_xcm_version: XcmVersion, + ) { + const LOG_TARGET: &str = "runtime::xcm::pallet_xcm::migrate_data_to_xcm_version"; + + // check and migrate `Queries` + let queries_to_migrate = Queries::::iter().filter_map(|(id, data)| { + weight.saturating_add(T::DbWeight::get().reads(1)); + match data.try_migrate(required_xcm_version) { + Ok(Some(new_data)) => Some((id, new_data)), + Ok(None) => None, + Err(_) => { + tracing::error!( + target: LOG_TARGET, + ?id, + ?required_xcm_version, + "`Queries` cannot be migrated!" + ); + None + }, + } + }); + for (id, new_data) in queries_to_migrate { + tracing::info!( + target: LOG_TARGET, + query_id = ?id, + ?new_data, + "Migrating `Queries`" + ); + Queries::::insert(id, new_data); + weight.saturating_add(T::DbWeight::get().writes(1)); + } + + // check and migrate `LockedFungibles` + let locked_fungibles_to_migrate = + LockedFungibles::::iter().filter_map(|(id, data)| { + weight.saturating_add(T::DbWeight::get().reads(1)); + match data.try_migrate(required_xcm_version) { + Ok(Some(new_data)) => Some((id, new_data)), + Ok(None) => None, + Err(_) => { + tracing::error!( + target: LOG_TARGET, + ?id, + ?required_xcm_version, + "`LockedFungibles` cannot be migrated!" + ); + None + }, + } + }); + for (id, new_data) in locked_fungibles_to_migrate { + tracing::info!( + target: LOG_TARGET, + account_id = ?id, + ?new_data, + "Migrating `LockedFungibles`" + ); + LockedFungibles::::insert(id, new_data); + weight.saturating_add(T::DbWeight::get().writes(1)); + } + + // check and migrate `RemoteLockedFungibles` - 1. step - just data + let remote_locked_fungibles_to_migrate = + RemoteLockedFungibles::::iter().filter_map(|(id, data)| { + weight.saturating_add(T::DbWeight::get().reads(1)); + match data.try_migrate(required_xcm_version) { + Ok(Some(new_data)) => Some((id, new_data)), + Ok(None) => None, + Err(_) => { + tracing::error!( + target: LOG_TARGET, + ?id, + ?required_xcm_version, + "`RemoteLockedFungibles` data cannot be migrated!" + ); + None + }, + } + }); + for (id, new_data) in remote_locked_fungibles_to_migrate { + tracing::info!( + target: LOG_TARGET, + key = ?id, + amount = ?new_data.amount, + locker = ?new_data.locker, + owner = ?new_data.owner, + consumers_count = ?new_data.consumers.len(), + "Migrating `RemoteLockedFungibles` data" + ); + RemoteLockedFungibles::::insert(id, new_data); + weight.saturating_add(T::DbWeight::get().writes(1)); + } + + // check and migrate `RemoteLockedFungibles` - 2. 
step - key + let remote_locked_fungibles_keys_to_migrate = RemoteLockedFungibles::::iter_keys() + .filter_map(|key| { + if key.needs_migration(required_xcm_version) { + let old_key = key.clone(); + match key.try_migrate(required_xcm_version) { + Ok(Some(new_key)) => Some((old_key, new_key)), + Ok(None) => None, + Err(_) => { + tracing::error!( + target: LOG_TARGET, + id = ?old_key, + ?required_xcm_version, + "`RemoteLockedFungibles` key cannot be migrated!" + ); + None + }, + } + } else { + None + } + }); + for (old_key, new_key) in remote_locked_fungibles_keys_to_migrate { + weight.saturating_add(T::DbWeight::get().reads(1)); + // make sure, that we don't override accidentally other data + if RemoteLockedFungibles::::get(&new_key).is_some() { + tracing::error!( + target: LOG_TARGET, + ?old_key, + ?new_key, + "`RemoteLockedFungibles` already contains data for a `new_key`!" + ); + // let's just skip for now, could be potentially caused with missing this + // migration before (manual clean-up?). + continue; + } + + tracing::info!( + target: LOG_TARGET, + ?old_key, + ?new_key, + "Migrating `RemoteLockedFungibles` key" + ); + + // now we can swap the keys + RemoteLockedFungibles::::swap::< + ( + NMapKey, + NMapKey, + NMapKey, + ), + _, + _, + >(&old_key, &new_key); + weight.saturating_add(T::DbWeight::get().writes(1)); + } + } + } +} + pub mod v1 { use super::*; use crate::{CurrentMigration, VersionMigrationStage}; @@ -84,7 +386,80 @@ pub mod v1 { pub struct MigrateToLatestXcmVersion(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToLatestXcmVersion { fn on_runtime_upgrade() -> Weight { + let mut weight = T::DbWeight::get().reads(1); + + // trigger expensive/lazy migration (kind of multi-block) CurrentMigration::::put(VersionMigrationStage::default()); - T::DbWeight::get().writes(1) + weight.saturating_accrue(T::DbWeight::get().writes(1)); + + // migrate other operational data to the latest XCM version in-place + let latest = CurrentXcmVersion::get(); + Pallet::::migrate_data_to_xcm_version(&mut weight, latest); + + weight + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: alloc::vec::Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use data::NeedsMigration; + const LOG_TARGET: &str = "runtime::xcm::pallet_xcm::migrate_to_latest"; + + let latest = CurrentXcmVersion::get(); + + let number_of_queries_to_migrate = crate::Queries::::iter() + .filter(|(id, data)| { + let needs_migration = data.needs_migration(latest); + if needs_migration { + tracing::warn!( + target: LOG_TARGET, + query_id = ?id, + query = ?data, + "Query was not migrated!" + ) + } + needs_migration + }) + .count(); + + let number_of_locked_fungibles_to_migrate = crate::LockedFungibles::::iter() + .filter_map(|(id, data)| { + if data.needs_migration(latest) { + tracing::warn!( + target: LOG_TARGET, + ?id, + ?data, + "LockedFungibles item was not migrated!" + ); + Some(true) + } else { + None + } + }) + .count(); + + let number_of_remote_locked_fungibles_to_migrate = + crate::RemoteLockedFungibles::::iter() + .filter_map(|(key, data)| { + if key.needs_migration(latest) || data.needs_migration(latest) { + tracing::warn!( + target: LOG_TARGET, + ?key, + "RemoteLockedFungibles item was not migrated!" 
+ ); + Some(true) + } else { + None + } + }) + .count(); + + ensure!(number_of_queries_to_migrate == 0, "must migrate all `Queries`."); + ensure!(number_of_locked_fungibles_to_migrate == 0, "must migrate all `LockedFungibles`."); + ensure!( + number_of_remote_locked_fungibles_to_migrate == 0, + "must migrate all `RemoteLockedFungibles`." + ); + + Ok(()) } } diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index c16c1a1ba986e5c95da3ce63dd58c52393408464..e98a8f8d2ce7fba605aab9dbfab6b4072a6e251a 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -19,11 +19,15 @@ pub(crate) mod assets_transfer; use crate::{ - mock::*, pallet::SupportedVersion, AssetTraps, Config, CurrentMigration, Error, - ExecuteControllerWeightInfo, LatestVersionedLocation, Pallet, Queries, QueryStatus, - RecordedXcm, ShouldRecordXcm, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, + migration::data::NeedsMigration, + mock::*, + pallet::{LockedFungibles, RemoteLockedFungibles, SupportedVersion}, + AssetTraps, Config, CurrentMigration, Error, ExecuteControllerWeightInfo, + LatestVersionedLocation, Pallet, Queries, QueryStatus, RecordedXcm, RemoteLockedFungibleRecord, + ShouldRecordXcm, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, WeightInfo, }; +use bounded_collections::BoundedVec; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, traits::{Currency, Hooks}, @@ -1258,6 +1262,168 @@ fn multistage_migration_works() { }) } +#[test] +fn migrate_data_to_xcm_version_works() { + new_test_ext_with_balances(vec![]).execute_with(|| { + // check `try-state` + assert!(Pallet::::do_try_state().is_ok()); + + let latest_version = XCM_VERSION; + let previous_version = XCM_VERSION - 1; + + // `Queries` migration + { + let origin = VersionedLocation::from(Location::parent()); + let query_id1 = 0; + let query_id2 = 2; + let query_as_latest = + QueryStatus::VersionNotifier { origin: origin.clone(), is_active: true }; + let query_as_previous = QueryStatus::VersionNotifier { + origin: origin.into_version(previous_version).unwrap(), + is_active: true, + }; + assert_ne!(query_as_latest, query_as_previous); + assert!(!query_as_latest.needs_migration(latest_version)); + assert!(!query_as_latest.needs_migration(previous_version)); + assert!(query_as_previous.needs_migration(latest_version)); + assert!(!query_as_previous.needs_migration(previous_version)); + + // store two queries: migrated and not migrated + Queries::::insert(query_id1, query_as_latest.clone()); + Queries::::insert(query_id2, query_as_previous); + assert!(Pallet::::do_try_state().is_ok()); + + // trigger migration + Pallet::::migrate_data_to_xcm_version(&mut Weight::zero(), latest_version); + + // no change for query_id1 + assert_eq!(Queries::::get(query_id1), Some(query_as_latest.clone())); + // change for query_id2 + assert_eq!(Queries::::get(query_id2), Some(query_as_latest)); + assert!(Pallet::::do_try_state().is_ok()); + } + + // `LockedFungibles` migration + { + let account1 = AccountId::new([13u8; 32]); + let account2 = AccountId::new([58u8; 32]); + let unlocker = VersionedLocation::from(Location::parent()); + let lockeds_as_latest = BoundedVec::truncate_from(vec![(0, unlocker.clone())]); + let lockeds_as_previous = BoundedVec::truncate_from(vec![( + 0, + unlocker.into_version(previous_version).unwrap(), + )]); + assert_ne!(lockeds_as_latest, lockeds_as_previous); + 
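For context on where the `MigrateToLatestXcmVersion` hook extended above normally runs: a runtime wires it into its `Executive` migrations tuple. A brief sketch under the assumption of a standard FRAME runtime layout (`Runtime`, `Block` and `AllPalletsWithSystem` are the usual generated aliases, not defined in this PR):

```rust
/// Single-shot migrations executed on runtime upgrade; after this PR,
/// `MigrateToLatestXcmVersion` also bumps `Queries`, `LockedFungibles` and
/// `RemoteLockedFungibles` entries to the latest XCM version in place.
pub type Migrations = (
    pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
    // ... other migrations
);

pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    Migrations,
>;
```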
assert!(!lockeds_as_latest.needs_migration(latest_version)); + assert!(!lockeds_as_latest.needs_migration(previous_version)); + assert!(lockeds_as_previous.needs_migration(latest_version)); + assert!(!lockeds_as_previous.needs_migration(previous_version)); + + // store two lockeds: migrated and not migrated + LockedFungibles::::insert(&account1, lockeds_as_latest.clone()); + LockedFungibles::::insert(&account2, lockeds_as_previous); + assert!(Pallet::::do_try_state().is_ok()); + + // trigger migration + Pallet::::migrate_data_to_xcm_version(&mut Weight::zero(), latest_version); + + // no change for account1 + assert_eq!(LockedFungibles::::get(&account1), Some(lockeds_as_latest.clone())); + // change for account2 + assert_eq!(LockedFungibles::::get(&account2), Some(lockeds_as_latest)); + assert!(Pallet::::do_try_state().is_ok()); + } + + // `RemoteLockedFungibles` migration + { + let account1 = AccountId::new([13u8; 32]); + let account2 = AccountId::new([58u8; 32]); + let account3 = AccountId::new([97u8; 32]); + let asset_id = VersionedAssetId::from(AssetId(Location::parent())); + let owner = VersionedLocation::from(Location::parent()); + let locker = VersionedLocation::from(Location::parent()); + let key1_as_latest = (latest_version, account1, asset_id.clone()); + let key2_as_latest = (latest_version, account2, asset_id.clone()); + let key3_as_previous = ( + previous_version, + account3.clone(), + asset_id.clone().into_version(previous_version).unwrap(), + ); + let expected_key3_as_latest = (latest_version, account3, asset_id); + let data_as_latest = RemoteLockedFungibleRecord { + amount: Default::default(), + owner: owner.clone(), + locker: locker.clone(), + consumers: Default::default(), + }; + let data_as_previous = RemoteLockedFungibleRecord { + amount: Default::default(), + owner: owner.into_version(previous_version).unwrap(), + locker: locker.into_version(previous_version).unwrap(), + consumers: Default::default(), + }; + assert_ne!(data_as_latest.owner, data_as_previous.owner); + assert_ne!(data_as_latest.locker, data_as_previous.locker); + assert!(!key1_as_latest.needs_migration(latest_version)); + assert!(!key1_as_latest.needs_migration(previous_version)); + assert!(!key2_as_latest.needs_migration(latest_version)); + assert!(!key2_as_latest.needs_migration(previous_version)); + assert!(key3_as_previous.needs_migration(latest_version)); + assert!(!key3_as_previous.needs_migration(previous_version)); + assert!(!expected_key3_as_latest.needs_migration(latest_version)); + assert!(!expected_key3_as_latest.needs_migration(previous_version)); + assert!(!data_as_latest.needs_migration(latest_version)); + assert!(!data_as_latest.needs_migration(previous_version)); + assert!(data_as_previous.needs_migration(latest_version)); + assert!(!data_as_previous.needs_migration(previous_version)); + + // store three lockeds: + // fully migrated + RemoteLockedFungibles::::insert(&key1_as_latest, data_as_latest.clone()); + // only key migrated + RemoteLockedFungibles::::insert(&key2_as_latest, data_as_previous.clone()); + // neither key nor data migrated + RemoteLockedFungibles::::insert(&key3_as_previous, data_as_previous); + assert!(Pallet::::do_try_state().is_ok()); + + // trigger migration + Pallet::::migrate_data_to_xcm_version(&mut Weight::zero(), latest_version); + + let assert_locked_eq = + |left: Option>, + right: Option>| { + match (left, right) { + (None, Some(_)) | (Some(_), None) => + assert!(false, "Received unexpected message"), + (None, None) => (), + (Some(l), Some(r)) => { + 
assert_eq!(l.owner, r.owner); + assert_eq!(l.locker, r.locker); + }, + } + }; + + // no change + assert_locked_eq( + RemoteLockedFungibles::::get(&key1_as_latest), + Some(data_as_latest.clone()), + ); + // change - data migrated + assert_locked_eq( + RemoteLockedFungibles::::get(&key2_as_latest), + Some(data_as_latest.clone()), + ); + // fully migrated + assert_locked_eq(RemoteLockedFungibles::::get(&key3_as_previous), None); + assert_locked_eq( + RemoteLockedFungibles::::get(&expected_key3_as_latest), + Some(data_as_latest.clone()), + ); + assert!(Pallet::::do_try_state().is_ok()); + } + }) +} + #[test] fn record_xcm_works() { let balances = vec![(ALICE, INITIAL_BALANCE)]; diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 671f0181277ae905a0ab6f3d69bb13f0dc512616..eaa115740f3e4e35ce0883f941a283c8bbe5d998 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -49,6 +49,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-salary/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", diff --git a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs index 4d9809e84f8821d120bd9f83e6f48eb24f7aa607..e6fe8e45c26518540a35550001c4a47f93ec7d18 100644 --- a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs +++ b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs @@ -312,7 +312,7 @@ impl pallet_xcm::Config for Runtime { type UniversalLocation = UniversalLocation; // No version discovery needed const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 0; - type AdvertisedXcmVersion = frame_support::traits::ConstU32<3>; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type AdminOrigin = frame_system::EnsureRoot; // No locking type TrustedLockers = (); diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 9f42aee87c9fa44b0b73c93b0b4475c009dab26a..bec7b253977b6de1319cd3e974e7b74d8a36835d 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -100,13 +100,13 @@ impl Dispatchable for TestCall { impl GetDispatchInfo for TestCall { fn get_dispatch_info(&self) -> DispatchInfo { - let weight = *match self { + let call_weight = *match self { TestCall::OnlyRoot(estimate, ..) | TestCall::OnlyParachain(estimate, ..) | TestCall::OnlySigned(estimate, ..) | TestCall::Any(estimate, ..) 
=> estimate, }; - DispatchInfo { weight, ..Default::default() } + DispatchInfo { call_weight, ..Default::default() } } } diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index 9f2146fa30e86bc9a4c3e54280105c5cebfbbe09..26ea226313f068d9203bd8057790d3665bae2d11 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -26,13 +26,21 @@ use frame_support::{ }; use frame_system::{EnsureRoot, EnsureSigned}; use polkadot_primitives::{AccountIndex, BlakeTwo256, Signature}; -use polkadot_test_runtime::SignedExtra; use sp_runtime::{generic, traits::MaybeEquivalence, AccountId32, BuildStorage}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; +pub type TxExtension = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, +); pub type Address = sp_runtime::MultiAddress; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Header = generic::Header; pub type Block = generic::Block; @@ -118,7 +126,6 @@ parameter_types! { pub const AnyNetwork: Option = None; pub UniversalLocation: InteriorLocation = (ByGenesis([0; 32]), Parachain(42)).into(); pub UnitWeightCost: u64 = 1_000; - pub static AdvertisedXcmVersion: u32 = 3; pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); pub CurrencyPerSecondPerByte: (AssetId, u128, u128) = (AssetId(RelayLocation::get()), 1, 1); pub TrustedAssets: (AssetFilter, Location) = (All.into(), Here.into()); @@ -259,7 +266,7 @@ impl pallet_xcm::Config for Test { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = AdvertisedXcmVersion; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type TrustedLockers = (); type SovereignAccountOf = SovereignAccountOf; type Currency = Balances; diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index 020a4a54285e9e256b2d2aa13a27072595271121..e95473c5407eacb78c0b8937d8ffba86eb6e0879 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -81,7 +81,7 @@ fn transact_recursion_limit_works() { BuyExecution { fees: (Here, 1).into(), weight_limit: Unlimited }, Transact { origin_kind: OriginKind::Native, - require_weight_at_most: call.get_dispatch_info().weight, + require_weight_at_most: call.get_dispatch_info().call_weight, call: call.encode().into(), }, ]) diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index 71985360b7d3d09829b71c9a9b65c184a5dc0eda..e3addfa3e794a2b045ee0361af62bb721472c525 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -861,7 +861,7 @@ impl XcmExecutor { "Dispatching with origin", ); - let weight = message_call.get_dispatch_info().weight; + let weight = message_call.get_dispatch_info().call_weight; if !weight.all_lte(require_weight_at_most) { tracing::trace!( diff --git a/polkadot/xcm/xcm-runtime-apis/src/lib.rs b/polkadot/xcm/xcm-runtime-apis/src/lib.rs index 44e518e8e7ab0919f12c5a73eff3e4a6b38afdc4..f9a857c7c4cec28c7f973258618c1aedb633c977 100644 --- a/polkadot/xcm/xcm-runtime-apis/src/lib.rs +++ b/polkadot/xcm/xcm-runtime-apis/src/lib.rs @@ 
-30,3 +30,7 @@ pub mod dry_run; /// Fee estimation API. /// Given an XCM program, it will return the fees needed to execute it properly or send it. pub mod fees; + +// Exposes runtime API for querying whether a Location is trusted as a reserve or teleporter for a +// given Asset. +pub mod trusted_query; diff --git a/polkadot/xcm/xcm-runtime-apis/src/trusted_query.rs b/polkadot/xcm/xcm-runtime-apis/src/trusted_query.rs new file mode 100644 index 0000000000000000000000000000000000000000..a2e3e1625486da363540db21b99832b077a81021 --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/src/trusted_query.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Runtime API definition for checking if given is trusted reserve or teleporter. + +use codec::{Decode, Encode}; +use frame_support::pallet_prelude::TypeInfo; +use xcm::{VersionedAsset, VersionedLocation}; + +/// Result of [`TrustedQueryApi`] functions. +pub type XcmTrustedQueryResult = Result; + +sp_api::decl_runtime_apis! { + // API for querying trusted reserves and trusted teleporters. + pub trait TrustedQueryApi { + /// Returns if the location is a trusted reserve for the asset. + /// + /// # Arguments + /// * `asset`: `VersionedAsset`. + /// * `location`: `VersionedLocation`. + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> XcmTrustedQueryResult; + /// Returns if the asset can be teleported to the location. + /// + /// # Arguments + /// * `asset`: `VersionedAsset`. + /// * `location`: `VersionedLocation`. + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> XcmTrustedQueryResult; + } +} + +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// Converting a versioned Asset structure from one version to another failed. + VersionedAssetConversionFailed, + /// Converting a versioned Location structure from one version to another failed. + VersionedLocationConversionFailed, +} diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs index 6575feccf8a3a77e1e64e01531be67eca81842fe..4a5de887500789944a7b06b227540947fbed1648 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs @@ -48,6 +48,7 @@ use xcm_runtime_apis::{ conversions::{Error as LocationToAccountApiError, LocationToAccountApi}, dry_run::{CallDryRunEffects, DryRunApi, Error as XcmDryRunApiError, XcmDryRunEffects}, fees::{Error as XcmPaymentApiError, XcmPaymentApi}, + trusted_query::{Error as TrustedQueryApiError, TrustedQueryApi}, }; construct_runtime! { @@ -59,9 +60,16 @@ construct_runtime! 
{ } } -pub type SignedExtra = (frame_system::CheckWeight,); -pub type TestXt = sp_runtime::testing::TestXt; -type Block = sp_runtime::testing::Block; +pub type TxExtension = (frame_system::CheckWeight,); + +// we only use the hash type from this, so using the mock should be fine. +pub(crate) type Extrinsic = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + sp_runtime::testing::UintAuthorityId, + TxExtension, +>; +type Block = sp_runtime::testing::Block; type Balance = u128; type AssetIdForAssetsPallet = u32; type AccountId = u64; @@ -137,7 +145,6 @@ parameter_types! { pub const MaxInstructions: u32 = 100; pub const NativeTokenPerSecondPerByte: (AssetId, u128, u128) = (AssetId(HereLocation::get()), 1, 1); pub UniversalLocation: InteriorLocation = [GlobalConsensus(NetworkId::Westend), Parachain(2000)].into(); - pub static AdvertisedXcmVersion: XcmVersion = 4; pub const HereLocation: Location = Location::here(); pub const RelayLocation: Location = Location::parent(); pub const MaxAssetsIntoHolding: u32 = 64; @@ -341,7 +348,7 @@ impl pallet_xcm::Config for TestRuntime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = AdvertisedXcmVersion; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; type AdminOrigin = EnsureRoot; type TrustedLockers = (); type SovereignAccountOf = (); @@ -414,6 +421,16 @@ impl sp_api::ProvideRuntimeApi for TestClient { } sp_api::mock_impl_runtime_apis! { + impl TrustedQueryApi for RuntimeApi { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_reserve(asset, location) + } + + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_teleporter(asset, location) + } + } + impl LocationToAccountApi for RuntimeApi { fn convert_location(location: VersionedLocation) -> Result { let location = location.try_into().map_err(|_| LocationToAccountApiError::VersionedConversionFailed)?; diff --git a/polkadot/xcm/xcm-runtime-apis/tests/trusted_query.rs b/polkadot/xcm/xcm-runtime-apis/tests/trusted_query.rs new file mode 100644 index 0000000000000000000000000000000000000000..5e3a68b9225bae868064bb3b8dde98ddb53e675a --- /dev/null +++ b/polkadot/xcm/xcm-runtime-apis/tests/trusted_query.rs @@ -0,0 +1,150 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +mod mock; + +use frame_support::sp_runtime::testing::H256; +use mock::*; +use sp_api::ProvideRuntimeApi; +use xcm::{prelude::*, v3}; +use xcm_runtime_apis::trusted_query::{Error, TrustedQueryApi}; + +#[test] +fn query_trusted_reserve() { + #[derive(Debug)] + struct TestCase { + name: &'static str, + asset: VersionedAsset, + location: VersionedLocation, + expected: Result, + } + + sp_io::TestExternalities::default().execute_with(|| { + let client = TestClient {}; + let runtime_api = client.runtime_api(); + + let test_cases: Vec = vec![ + TestCase { + // matches!(asset.id.0.unpack(), (1, [])) && matches!(origin.unpack(), (1, + // [Parachain(1000)])) + name: "Valid asset and location", + asset: Asset { id: AssetId(Location::parent()), fun: Fungible(123) }.into(), + location: (Parent, Parachain(1000)).into(), + expected: Ok(true), + }, + TestCase { + name: "Invalid location and valid asset", + asset: Asset { id: AssetId(Location::parent()), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1002)).into(), + expected: Ok(false), + }, + TestCase { + name: "Valid location and invalid asset", + asset: Asset { id: AssetId(Location::new(0, [])), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1000)).into(), + expected: Ok(false), + }, + TestCase { + name: "Invalid asset conversion", + asset: VersionedAsset::V3(v3::MultiAsset { + id: v3::AssetId::Abstract([1; 32]), + fun: v3::Fungibility::Fungible(1), + }), + location: (Parent, Parachain(1000)).into(), + expected: Err(Error::VersionedAssetConversionFailed), + }, + ]; + + for tc in test_cases { + let res = + runtime_api.is_trusted_reserve(H256::zero(), tc.asset.clone(), tc.location.clone()); + let inner_res = res.unwrap_or_else(|e| { + panic!("Test case '{}' failed with ApiError: {:?}", tc.name, e); + }); + + assert_eq!( + tc.expected, inner_res, + "Test case '{}' failed: expected {:?}, got {:?}", + tc.name, tc.expected, inner_res + ); + } + }); +} + +#[test] +fn query_trusted_teleporter() { + #[derive(Debug)] + struct TestCase { + name: &'static str, + asset: VersionedAsset, + location: VersionedLocation, + expected: Result, + } + + sp_io::TestExternalities::default().execute_with(|| { + let client = TestClient {}; + let runtime_api = client.runtime_api(); + + let test_cases: Vec = vec![ + TestCase { + // matches!(asset.id.0.unpack(), (0, [])) && matches!(origin.unpack(), (1, + // [Parachain(1000)])) + name: "Valid asset and location", + asset: Asset { id: AssetId(Location::new(0, [])), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1000)).into(), + expected: Ok(true), + }, + TestCase { + name: "Invalid location and valid asset", + asset: Asset { id: AssetId(Location::new(0, [])), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1002)).into(), + expected: Ok(false), + }, + TestCase { + name: "Valid location and invalid asset", + asset: Asset { id: AssetId(Location::new(1, [])), fun: Fungible(100) }.into(), + location: (Parent, Parachain(1002)).into(), + expected: Ok(false), + }, + TestCase { + name: "Invalid asset conversion", + asset: VersionedAsset::V3(v3::MultiAsset { + id: v3::AssetId::Abstract([1; 32]), + fun: v3::Fungibility::Fungible(1), + }), + location: (Parent, Parachain(1000)).into(), + expected: Err(Error::VersionedAssetConversionFailed), + }, + ]; + + for tc in test_cases { + let res = runtime_api.is_trusted_teleporter( + H256::zero(), + tc.asset.clone(), + tc.location.clone(), + ); + let inner_res = res.unwrap_or_else(|e| { + panic!("Test case '{}' failed with ApiError: {:?}", tc.name, e); 
+ }); + + assert_eq!( + tc.expected, inner_res, + "Test case '{}' failed: expected {:?}, got {:?}", + tc.name, tc.expected, inner_res + ); + } + }); +} diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index 616329a2f06b43f73d059a3471f49c2d7fa82e95..fc650ae55a785b08ab609feb07337d099231904c 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -44,13 +44,13 @@ use xcm_builder::{ }; use xcm_executor::{Config, XcmExecutor}; -pub type SignedExtra = (frame_system::CheckNonZeroSender,); +pub type TxExtension = (frame_system::CheckNonZeroSender,); pub type BlockNumber = u64; pub type Address = MultiAddress; pub type Header = generic::Header; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Block = generic::Block; pub type Signature = MultiSignature; diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 459d2640b6d908b14e0964aa2e2a697f95c7dbd3..58687b4785262e4c52f7c74bc9c76c5927176d3a 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -45,13 +45,13 @@ use xcm_builder::{ }; use xcm_executor::{Config, XcmExecutor}; -pub type SignedExtra = (frame_system::CheckNonZeroSender,); +pub type TxExtension = (frame_system::CheckNonZeroSender,); pub type BlockNumber = u64; pub type Address = MultiAddress; pub type Header = generic::Header; pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; pub type Block = generic::Block; pub type Signature = MultiSignature; diff --git a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl index b8b8887df85782268735f906eab771ddfcc22fcf..8f883dffa5e1091c32e44a6d319c6512ec316fff 100644 --- a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl +++ b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.zndsl @@ -5,8 +5,8 @@ Creds: config validator: reports node_roles is 4 # register paras 2 by 2 to speed up the test. registering all at once will exceed the weight limit. -validator-0: js-script ./0015-force-register-paras.js with "2000,2001" return is 0 within 600 seconds -validator-0: js-script ./0015-force-register-paras.js with "2002,2003" return is 0 within 600 seconds +validator-0: js-script ./force-register-paras.js with "2000,2001" return is 0 within 600 seconds +validator-0: js-script ./force-register-paras.js with "2002,2003" return is 0 within 600 seconds # assign core 0 to be shared by all paras. 
validator-0: js-script ./assign-core.js with "0,2000,14400,2001,14400,2002,14400,2003,14400" return is 0 within 600 seconds diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.toml b/polkadot/zombienet_tests/functional/0017-sync-backing.toml new file mode 100644 index 0000000000000000000000000000000000000000..2550054c8dadaf75b34b4c4a50f402576f3f5266 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0017-sync-backing.toml @@ -0,0 +1,48 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params] + max_candidate_depth = 0 + allowed_ancestry_len = 0 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + lookahead = 2 + group_rotation_frequency = 4 + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "alice" + args = [ "-lparachain=debug" ] + count = 10 + +[[parachains]] +id = 2000 +addToGenesis = true + + [parachains.collator] + name = "collator01" + image = "{{COL_IMAGE}}" + command = "adder-collator" + args = ["-lparachain=debug"] + +[[parachains]] +id = 2001 +cumulus_based = true + + [parachains.collator] + name = "collator02" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl b/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..a53de784b2d1349fa890ce51984b6865c2c94fd5 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl @@ -0,0 +1,22 @@ +Description: Test we are producing 12-second parachain blocks if sync backing is configured +Network: ./0017-sync-backing.toml +Creds: config + +# Check authority status. +alice: reports node_roles is 4 + +# Ensure parachains are registered. +alice: parachain 2000 is registered within 60 seconds +alice: parachain 2001 is registered within 60 seconds + +# Ensure parachains made progress. 
+alice: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds + +# This parachains should produce blocks at 12s clip, let's assume an 14s rate, allowing for +# some slots to be missed on slower machines +alice: parachain 2000 block height is at least 21 within 300 seconds +alice: parachain 2000 block height is lower than 25 within 2 seconds + +# This should already have produced the needed blocks +alice: parachain 2001 block height is at least 21 within 10 seconds +alice: parachain 2001 block height is lower than 25 within 2 seconds diff --git a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml new file mode 100644 index 0000000000000000000000000000000000000000..745c4f9e24b1bbcd79608a3ddfc20d8ea833be6e --- /dev/null +++ b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml @@ -0,0 +1,39 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] + max_validators_per_core = 2 + lookahead = 2 + num_cores = 4 + group_rotation_frequency = 4 + + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + needed_approvals = 3 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.node_groups]] + name = "validator" + args = ["-lruntime=debug,parachain=debug"] + count = 4 + +[[parachains]] +id = 2000 +register_para = false +onboard_as_parachain = false +add_to_genesis = false +chain = "glutton-westend-local-2000" + [parachains.genesis.runtimeGenesis.patch.glutton] + compute = "50000000" + storage = "2500000000" + trashDataCount = 5120 + + [parachains.collator] + name = "collator-2000" + image = "{{CUMULUS_IMAGE}}" + command = "polkadot-parachain" + args = ["-lparachain=debug"] diff --git a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.zndsl b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.zndsl new file mode 100644 index 0000000000000000000000000000000000000000..80ecf6ae1b9be635d904733ec07a2b71f652c0cd --- /dev/null +++ b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.zndsl @@ -0,0 +1,11 @@ +Description: Test that a parachain can keep producing blocks even if the other parachain with which it's sharing a core doesn't +Network: ./0018-shared-core-idle-parachain.toml +Creds: config + +validator: reports node_roles is 4 + +validator-0: js-script ./force-register-paras.js with "2000" return is 0 within 600 seconds +# assign core 0 to be shared by two paras, but only one exists +validator-0: js-script ./assign-core.js with "0,2000,28800,2001,28800" return is 0 within 600 seconds + +collator-2000: reports block height is at least 10 within 180 seconds diff --git a/polkadot/zombienet_tests/functional/0015-force-register-paras.js b/polkadot/zombienet_tests/functional/force-register-paras.js similarity index 100% rename from polkadot/zombienet_tests/functional/0015-force-register-paras.js rename to polkadot/zombienet_tests/functional/force-register-paras.js diff --git a/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl b/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl index cfb1ce7d98215f3bf6d1f01361077bb041ca90ee..b3a3b46ed7805f45a4eda2a17a9467c95793ef3b 100644 --- a/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl +++ b/polkadot/zombienet_tests/smoke/0004-coretime-smoke-test.zndsl @@ -8,6 +8,9 @@ 
coretime-collator: is up # configure relay chain alice: js-script ./0004-configure-relay.js with "" return is 0 within 600 secs +# Coretime chain should be producing blocks when the extrinsic is sent +alice: parachain 1005 block height is at least 10 within 120 seconds + # configure broker chain coretime-collator: js-script ./0004-configure-broker.js with "" return is 0 within 600 secs diff --git a/prdoc/pr_4803.prdoc b/prdoc/1.16.1/pr_4803.prdoc similarity index 100% rename from prdoc/pr_4803.prdoc rename to prdoc/1.16.1/pr_4803.prdoc diff --git a/prdoc/pr_5599.prdoc b/prdoc/1.16.1/pr_5599.prdoc similarity index 100% rename from prdoc/pr_5599.prdoc rename to prdoc/1.16.1/pr_5599.prdoc diff --git a/prdoc/pr_5753.prdoc b/prdoc/1.16.1/pr_5753.prdoc similarity index 100% rename from prdoc/pr_5753.prdoc rename to prdoc/1.16.1/pr_5753.prdoc diff --git a/prdoc/pr_5887.prdoc b/prdoc/1.16.1/pr_5887.prdoc similarity index 100% rename from prdoc/pr_5887.prdoc rename to prdoc/1.16.1/pr_5887.prdoc diff --git a/prdoc/pr_5913.prdoc b/prdoc/1.16.1/pr_5913.prdoc similarity index 100% rename from prdoc/pr_5913.prdoc rename to prdoc/1.16.1/pr_5913.prdoc diff --git a/prdoc/pr_6031.prdoc b/prdoc/1.16.1/pr_6031.prdoc similarity index 100% rename from prdoc/pr_6031.prdoc rename to prdoc/1.16.1/pr_6031.prdoc diff --git a/prdoc/pr_3685.prdoc b/prdoc/pr_3685.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..bd414c93a6feabb31692839421adf2faab0cc458 --- /dev/null +++ b/prdoc/pr_3685.prdoc @@ -0,0 +1,300 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: FRAME Reintroduce `TransactionExtension` as a replacement for `SignedExtension` + +doc: + - audience: [Runtime Dev, Runtime User, Node Operator, Node Dev] + description: | + Introduces a new trait `TransactionExtension` to replace `SignedExtension`. Introduce the + idea of transactions which obey the runtime's extensions and have according Extension data + (né Extra data) yet do not have hard-coded signatures. + + Deprecate the terminology of "Unsigned" when used for transactions/extrinsics owing to there + now being "proper" unsigned transactions which obey the extension framework and "old-style" + unsigned which do not. Instead we have `General` for the former and `Bare` for the latter. + Unsigned will be phased out as a type of transaction, and `Bare` will only be used for + Inherents. + + Types of extrinsic are now therefore + - Bare (no hardcoded signature, no Extra data; used to be known as "Unsigned") + - Bare transactions (deprecated) - Gossiped, validated with `ValidateUnsigned` + (deprecated) and the `_bare_compat` bits of `TransactionExtension` (deprecated). + - Inherents - Not gossiped, validated with `ProvideInherent`. + - Extended (Extra data) - Gossiped, validated via `TransactionExtension`. + - Signed transactions (with a hardcoded signature). + - General transactions (without a hardcoded signature). + + Notable information on `TransactionExtension` and the differences from `SignedExtension` + - `AdditionalSigned`/`additional_signed` is renamed to `Implicit`/`implicit`. It is encoded + for the entire transaction and passed in to each extension as a new argument to validate. + - `pre_dispatch` is renamed to `prepare`. + - `validate` runs transaction validation logic both off-chain and on-chain, and is + non-mutating. 
+ - `prepare` runs on-chain pre-execution logic using information extracted during validation + and is mutating. + - `validate` and `prepare` are now passed an `Origin` rather than an `AccountId`. If the + extension logic presumes an `AccountId`, consider using the trait function + `AsSystemOriginSigner::as_system_origin_signer`. + - A signature on the underlying transaction may validly not be present. + - The origin may be altered during validation. + - Validation functionality present in `validate` should not be repeated in `prepare`. + Useful information obtained during `validate` should now be passed in to `prepare` using + the new user-specifiable type `Val`. + - Unsigned logic should be temporarily migrated from the old `*_unsigned` functions into the + regular versions of the new functions where the `Origin` is `None`, until the deprecation + of `ValidateUnsigned` in phase 2 of Extrinsic Horizon. + - The `Call` type defining the runtime call is now a type parameter. + - Extensions now track the weight they consume during validation, preparation and + post-dispatch through the `TransactionExtensionBase::weight` function. + - `TestXt` was removed and its usage in tests was replaced with `UncheckedExtrinsic` + instances. + + To fix the build issues introduced by this change, use the `AsTransactionExtension` adapter + to wrap existing `SignedExtension`s by converting them using the `From` + generic implementation for `AsTransactionExtension`. More details on migrating existing + `SignedExtension` implementations to `TransactionExtension` in the PR description. + +crates: + - name: bridge-runtime-common + bump: major + - name: bp-bridge-hub-cumulus + bump: major + - name: bp-bridge-hub-rococo + bump: major + - name: bp-bridge-hub-westend + bump: major + - name: bp-kusama + bump: major + - name: bp-polkadot-bulletin + bump: major + - name: bp-polkadot + bump: major + - name: bp-rococo + bump: major + - name: bp-westend + bump: major + - name: pallet-bridge-relayers + bump: major + - name: bp-polkadot-core + bump: major + - name: bp-relayers + bump: major + - name: bp-runtime + bump: major + - name: substrate-relay-helper + bump: major + - name: snowbridge-pallet-ethereum-client + bump: major + - name: snowbridge-pallet-inbound-queue + bump: major + - name: snowbridge-pallet-outbound-queue + bump: major + - name: snowbridge-pallet-system + bump: major + - name: snowbridge-runtime-test-common + bump: major + - name: parachain-template-runtime + bump: major + - name: cumulus-pallet-parachain-system + bump: major + - name: asset-hub-rococo-runtime + bump: major + - name: asset-hub-westend-runtime + bump: major + - name: bridge-hub-rococo-runtime + bump: major + - name: bridge-hub-westend-runtime + bump: major + - name: collectives-westend-runtime + bump: major + - name: contracts-rococo-runtime + bump: major + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major + - name: glutton-westend-runtime + bump: major + - name: people-rococo-runtime + bump: major + - name: people-westend-runtime + bump: major + - name: parachains-runtimes-test-utils + bump: major + - name: penpal-runtime + bump: major + - name: rococo-parachain-runtime + bump: major + - name: polkadot-omni-node + bump: major + - name: polkadot-parachain-bin + bump: major + - name: cumulus-primitives-storage-weight-reclaim + bump: major + - name: cumulus-test-client + bump: major + - name: cumulus-test-runtime + bump: major + - name: cumulus-test-service + bump: major + - name: 
polkadot-sdk-docs + bump: major + - name: polkadot-service + bump: major + - name: polkadot-test-service + bump: major + - name: polkadot-runtime-common + bump: major + - name: polkadot-runtime-parachains + bump: major + - name: rococo-runtime + bump: major + - name: polkadot-test-runtime + bump: major + - name: westend-runtime + bump: major + - name: pallet-xcm-benchmarks + bump: major + - name: pallet-xcm + bump: major + - name: staging-xcm-builder + bump: major + - name: staging-xcm-executor + bump: major + - name: xcm-runtime-apis + bump: major + - name: xcm-simulator + bump: major + - name: minimal-template-runtime + bump: major + - name: minimal-template-node + bump: major + - name: solochain-template-runtime + bump: major + - name: staging-node-cli + bump: major + - name: kitchensink-runtime + bump: major + - name: node-testing + bump: major + - name: sc-client-api + bump: major + - name: sc-client-db + bump: major + - name: sc-network-gossip + bump: major + - name: sc-network-sync + bump: major + - name: sc-transaction-pool + bump: major + - name: polkadot-sdk-frame + bump: major + - name: pallet-alliance + bump: major + - name: pallet-assets + bump: major + - name: pallet-asset-conversion + bump: major + - name: pallet-babe + bump: major + - name: pallet-balances + bump: major + - name: pallet-beefy + bump: major + - name: pallet-collective + bump: major + - name: pallet-contracts + bump: major + - name: pallet-election-provider-multi-phase + bump: major + - name: pallet-elections-phragmen + bump: major + - name: pallet-example-basic + bump: major + - name: pallet-example-tasks + bump: major + - name: pallet-example-offchain-worker + bump: major + - name: pallet-examples + bump: major + - name: frame-executive + bump: major + - name: pallet-grandpa + bump: major + - name: pallet-im-online + bump: major + - name: pallet-lottery + bump: major + - name: frame-metadata-hash-extension + bump: major + - name: pallet-mixnet + bump: major + - name: pallet-multisig + bump: major + - name: pallet-offences + bump: major + - name: pallet-proxy + bump: major + - name: pallet-recovery + bump: major + - name: pallet-revive + bump: major + - name: pallet-sassafras + bump: major + - name: pallet-scheduler + bump: major + - name: pallet-state-trie-migration + bump: major + - name: pallet-sudo + bump: major + - name: frame-support-procedural + bump: major + - name: frame-support + bump: major + - name: frame-support-test + bump: major + - name: frame-system + bump: major + - name: frame-system-benchmarking + bump: major + - name: pallet-transaction-payment + bump: major + - name: pallet-asset-conversion-tx-payment + bump: major + - name: pallet-asset-tx-payment + bump: major + - name: pallet-skip-feeless-payment + bump: major + - name: pallet-utility + bump: major + - name: pallet-whitelist + bump: major + - name: sp-inherents + bump: major + - name: sp-metadata-ir + bump: major + - name: sp-runtime + bump: major + - name: sp-storage + bump: major + - name: sp-test-primitives + bump: major + - name: substrate-test-runtime + bump: major + - name: substrate-test-utils + bump: major + - name: frame-benchmarking-cli + bump: major + - name: frame-remote-externalities + bump: major + - name: substrate-rpc-client + bump: major + - name: minimal-template-runtime + bump: major + - name: parachain-template-runtime + bump: major + - name: solochain-template-node + bump: major + - name: polkadot-sdk + bump: major diff --git a/prdoc/pr_5194.prdoc b/prdoc/pr_5194.prdoc new file mode 100644 index 
0000000000000000000000000000000000000000..afb9d57e79e3ffc3ddd8e273119ec5c400363c2e --- /dev/null +++ b/prdoc/pr_5194.prdoc @@ -0,0 +1,11 @@ +title: "FRAME: Support instantiable pallets in tasks." + +doc: + - audience: Runtime Dev + description: | + In FRAME, tasks can now be used in instantiable pallets. Also fixes expansion with + conditional compilation in construct_runtime. + +crates: + - name: frame-support-procedural + bump: patch diff --git a/prdoc/pr_5461.prdoc b/prdoc/pr_5461.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..bf343216e29ba14c09434c557aa838c40ffce6e7 --- /dev/null +++ b/prdoc/pr_5461.prdoc @@ -0,0 +1,20 @@ +title: "runtime: remove ttl" + +doc: + - audience: [Runtime Dev, Node Dev] + description: | + Resolves https://github.com/paritytech/polkadot-sdk/issues/4776. Removes the scheduling ttl used in the relay chain + runtimes, as well as the availability timeout retries. The extrinsics for configuring these two values are also removed. + Deprecates the `ttl` and `max_availability_timeouts` fields of the `HostConfiguration` primitive. + +crates: + - name: polkadot-runtime-parachains + bump: major + - name: polkadot-primitives + bump: major + - name: rococo-runtime + bump: major + - name: westend-runtime + bump: major + - name: polkadot + bump: none diff --git a/prdoc/pr_5502.prdoc b/prdoc/pr_5502.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..ea9972f018705f1b4f0f67f6fb5bf6371c8f3b39 --- /dev/null +++ b/prdoc/pr_5502.prdoc @@ -0,0 +1,7 @@ +title: '[pallet-revive] Add pallet to AH westend' +doc: + - audience: Runtime Dev + description: 'Add pallet-revive to the Westend runtime, and configure the runtime to accept Ethereum signed transactions' +crates: +- name: asset-hub-westend-runtime + bump: major diff --git a/prdoc/pr_5686.prdoc b/prdoc/pr_5686.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..3f0da912a34c2ced80b608283b18dd9a495c0111 --- /dev/null +++ b/prdoc/pr_5686.prdoc @@ -0,0 +1,15 @@ +title: "sync: Remove checking of the extrinsics root" + +doc: + - audience: Node Dev + description: | + Remove checking the extrinsics root as part of the sync code. + With the introduction of `system_version` and the possibility to use the `V1` + layout for the trie when calculating the extrinsics root, it would require the + sync code to fetch the runtime version first before knowing which layout to use + when building the extrinsics root. + The extrinsics root is still checked when executing a block on chain. + +crates: + - name: sc-network-sync + bump: patch diff --git a/prdoc/pr_5813.prdoc b/prdoc/pr_5813.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e48f29bbfb63f7be51694018014b8bc068a19cac --- /dev/null +++ b/prdoc/pr_5813.prdoc @@ -0,0 +1,18 @@ +title: "build_struct_json_patch macro added" + +doc: + - audience: Runtime Dev + description: | + This PR adds a macro that allows constructing a RuntimeGenesisConfig preset + containing only the provided fields, while still performing validation of the + entire struct.
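As an illustration of the macro pr_5813 describes, a sketch only: the runtime types, field names and balance values below are placeholder assumptions, the macro is assumed to be exported from `frame_support`, and `serde_json` is assumed to be available.

```rust
use frame_support::build_struct_json_patch;

// Only the fields listed below end up in the returned JSON patch, but the whole
// `RuntimeGenesisConfig` struct is still checked when the macro expands.
fn balances_preset() -> serde_json::Value {
    build_struct_json_patch!(RuntimeGenesisConfig {
        balances: BalancesConfig {
            balances: vec![(AccountId::from([0u8; 32]), 1_000_000_000_000u128)],
        },
    })
}
```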
+ + Related issue: https://github.com/paritytech/polkadot-sdk/issues/5700 + +crates: + - name: frame-support + bump: minor + - name: asset-hub-rococo-runtime + bump: patch + - name: westend-runtime + bump: patch diff --git a/prdoc/pr_5866.prdoc b/prdoc/pr_5866.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..44fffe1d2129e066f19eb0ac2332dbaccbc523a7 --- /dev/null +++ b/prdoc/pr_5866.prdoc @@ -0,0 +1,23 @@ +title: "[pallet-revive] Ethereum JSON-RPC integration" + +doc: + - audience: Runtime Dev + description: | + Related PR: https://github.com/paritytech/revive-ethereum-rpc/pull/5 + + Changes Included: + - A new pallet::call eth_transact. + - A custom UncheckedExtrinsic struct to dispatch unsigned eth_transact calls from an Ethereum JSON-RPC proxy. + - Generated types and traits to support implementing a JSON-RPC Ethereum proxy. +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-fixtures + bump: patch + - name: pallet-revive-mock-network + bump: patch + - name: pallet-revive-uapi + bump: patch + - name: polkadot-sdk + bump: patch + diff --git a/prdoc/pr_5919.prdoc b/prdoc/pr_5919.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..1b48a24a9e28a2c33f44b36a031c1c8fbafbd392 --- /dev/null +++ b/prdoc/pr_5919.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "substrate-offchain: upgrade hyper to v1" + +doc: + - audience: Node Dev + description: | + Bump the `hyper` dependency of `substrate-offchain` for HTTP from `0.14` to `1`. + This changed the APIs a bit: + - `sc_offchain::OffchainWorkers::new()` now returns `std::io::Result<Self>` (previously it returned `Self`) + +crates: + - name: sc-offchain + bump: major + - name: polkadot-service + bump: patch + - name: staging-node-cli + bump: patch diff --git a/prdoc/pr_6022.prdoc b/prdoc/pr_6022.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..804d46af6613b3f92d921bb8861aa1f69524fa28 --- /dev/null +++ b/prdoc/pr_6022.prdoc @@ -0,0 +1,14 @@ +title: '[Coretime chain] Add high assignment count mitigation to testnets' +doc: +- audience: Runtime User + description: | + We can handle a maximum of 28 assignments inside one XCM, while it's possible to have 80 (if a + region is interlaced 79 times). This can be chunked on the coretime chain side but currently the + relay does not support this. This PR truncates the additional assignments on Rococo and Westend + to mitigate this until the relay is fixed. The first 27 assignments are taken, the final 28th is + used to pad with idle to complete the mask. Any other assignments are dropped. +crates: +- name: coretime-rococo-runtime + bump: patch +- name: coretime-westend-runtime + bump: patch diff --git a/prdoc/pr_6039.prdoc b/prdoc/pr_6039.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..e14ea8f3e17b8557b4d7e5a5ed8a88faa2cb8123 --- /dev/null +++ b/prdoc/pr_6039.prdoc @@ -0,0 +1,54 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Added Trusted Query API calls." + +doc: + - audience: Runtime Dev + description: | + Added is_trusted_reserve and is_trusted_teleporter API calls to all the runtimes. + Given an asset and a location, they return whether the chain trusts that location as a reserve or teleporter for that asset, respectively.
+ You can implement them on your runtime by simply calling a helper function on `pallet-xcm`. + ```rust + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } + ``` + + - audience: Runtime User + description: | + There's a new runtime API to check whether a chain trusts a Location as a reserve or teleporter for a given Asset. + It's implemented in all the relays and system parachains in Westend and Rococo. + +crates: + - name: asset-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: collectives-westend-runtime + bump: minor + - name: contracts-rococo-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor + - name: penpal-runtime + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: pallet-xcm + bump: minor + - name: xcm-runtime-apis + bump: minor diff --git a/prdoc/pr_6058.prdoc b/prdoc/pr_6058.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..5b99467b413f561b7a63f2048ea55306643e1048 --- /dev/null +++ b/prdoc/pr_6058.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: backpressure `chainhead_v1_follow` + +doc: + - audience: Node Operator + description: | + The RPC endpoint `chainHead_v1_follow` now relies on backpressure + to determine whether or not the subscription should be closed, instead of continuing to send more events + to a consumer that can't keep up. + This should significantly improve memory consumption as Substrate will keep fewer messages in memory. + +crates: + - name: sc-rpc-spec-v2 + bump: major + - name: sc-rpc + bump: major diff --git a/prdoc/pr_6080.prdoc b/prdoc/pr_6080.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..52ecd58dddde44ae797611da1412472b86efa423 --- /dev/null +++ b/prdoc/pr_6080.prdoc @@ -0,0 +1,22 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Assets in pool with native can be used in query_weight_to_asset_fee in Asset Hubs + +doc: + - audience: Runtime User + description: | + `query_weight_to_asset_fee` now works with assets in a pool with the native asset in both + Westend and Rococo asset hubs. + This means all the information you get from `query_acceptable_payment_assets` can be used + directly in `query_weight_to_asset_fee` to get the correct fees that need to be paid. + +crates: + - name: assets-common + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: emulated-integration-tests-common + bump: minor diff --git a/prdoc/pr_6087.prdoc b/prdoc/pr_6087.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..db083ba645b963b0c9e4e63ad1b6a9143a377465 --- /dev/null +++ b/prdoc/pr_6087.prdoc @@ -0,0 +1,12 @@ +title: Expose private structs in pallet_nfts and pallet_uniques.
+ +doc: + - audience: Runtime Dev + description: | + This PR makes certain structs in pallet_nfts and pallet_uniques public. It also makes two storage items (collection and asset metadata) in pallet_uniques public. + +crates: + - name: pallet-nfts + bump: patch + - name: pallet-uniques + bump: patch diff --git a/prdoc/pr_6088.prdoc b/prdoc/pr_6088.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..93e435bbd458aa0caad21d5c3ac7a08412350dbe --- /dev/null +++ b/prdoc/pr_6088.prdoc @@ -0,0 +1,14 @@ +title: "[pallet-revive] EXTCODEHASH to match EIP-1052" + +doc: + - audience: Runtime Dev + description: | + Update `ext_code_hash` to match [EIP-1052](https://eips.ethereum.org/EIPS/eip-1052) specs. + +crates: + - name: pallet-revive + bump: major + - name: pallet-revive-fixtures + bump: patch + - name: pallet-revive-uapi + bump: major diff --git a/prdoc/pr_6094.prdoc b/prdoc/pr_6094.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..23391c889155adcc236634800bf0acdfc7897714 --- /dev/null +++ b/prdoc/pr_6094.prdoc @@ -0,0 +1,21 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Polkadot OmniNode Docs + +doc: + - audience: ... + description: | + Adds documentation in https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html and rust-docs for the polkadot-omni-node project. + +crates: + - name: sp-genesis-builder + bump: patch + - name: pallet-aura + bump: patch + - name: polkadot-omni-node-lib + bump: patch + - name: polkadot-sdk-frame # since the crate is "experimental", we don't need to bump yet. + bump: major + - name: polkadot-omni-node + bump: patch diff --git a/prdoc/pr_6096.prdoc b/prdoc/pr_6096.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..c77c323a2e08cf9c45cca54e24866bc1ed9fc5be --- /dev/null +++ b/prdoc/pr_6096.prdoc @@ -0,0 +1,15 @@ +title: 'pallet-revive: Add stateful address mapping' +doc: +- audience: + - Runtime Dev + description: |- + Fixes #5576 + + This allows contracts to be used with an AccountId32 through normal extrinsics and not only through the eth compat layer. It works by adding a new extrinsic `map_account` that lets people register their AccountId32. +crates: +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive-mock-network + bump: patch +- name: pallet-revive + bump: major diff --git a/prdoc/pr_6104.prdoc b/prdoc/pr_6104.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..2b62a68c9f0ea21437e54618b7b3127a938a6019 --- /dev/null +++ b/prdoc/pr_6104.prdoc @@ -0,0 +1,10 @@ +title: "LocalTransactionPool implemented for fork aware transaction pool" + +doc: + - audience: Node Dev + description: | + The LocalTransactionPool trait is implemented for the fork-aware transaction pool. + +crates: + - name: sc-transaction-pool + bump: minor diff --git a/prdoc/pr_6129.prdoc b/prdoc/pr_6129.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..61719c213e8d56d1afda4ca2de494239f90f5748 --- /dev/null +++ b/prdoc/pr_6129.prdoc @@ -0,0 +1,32 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Improved TrustedQueryAPI signatures."
+ +doc: + - audience: Runtime Dev + description: | + Changed the return type of the API methods from `Result` to a typed alias: + `type XcmTrustedQueryResult = Result` + +crates: + - name: asset-hub-westend-runtime + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: bridge-hub-westend-runtime + bump: patch + - name: collectives-westend-runtime + bump: patch + - name: contracts-rococo-runtime + bump: patch + - name: coretime-rococo-runtime + bump: patch + - name: coretime-westend-runtime + bump: patch + - name: people-rococo-runtime + bump: patch + - name: people-westend-runtime + bump: patch + - name: penpal-runtime + bump: patch diff --git a/prdoc/pr_6141.prdoc b/prdoc/pr_6141.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d9994ac4f842f7f5ed485e58dae3b441b12b03a9 --- /dev/null +++ b/prdoc/pr_6141.prdoc @@ -0,0 +1,11 @@ +title: Improve `CheckMetadataHash` transaction extension weight and logic + +doc: + - audience: Runtime Dev + description: | + The compilation now panics if the optional compile-time environment variable `RUNTIME_METADATA_HASH` contains an invalid value. + The weight for the `CheckMetadataHash` transaction extension is more accurate, as it is almost a compile-time constant. + +crates: +- name: frame-metadata-hash-extension + bump: minor diff --git a/prdoc/pr_6148.prdoc b/prdoc/pr_6148.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..430a58dfefbb509b3b1b9242dbff6017c1227349 --- /dev/null +++ b/prdoc/pr_6148.prdoc @@ -0,0 +1,17 @@ +title: Fix migrations for pallet-xcm +doc: +- audience: Runtime Dev + description: |- + `pallet-xcm` stores some operational data that uses `Versioned*` XCM types. When we add a new XCM version (XV), we deprecate XV-2 and remove XV-3. + This PR extends the existing `MigrateToLatestXcmVersion` to include migration for the `Queries`, `LockedFungibles`, and `RemoteLockedFungibles` storage types. + Additionally, more checks were added to `try_state` for these types. + +crates: +- name: westend-runtime + bump: patch +- name: staging-xcm-builder + bump: none +- name: xcm-runtime-apis + bump: none +- name: pallet-xcm + bump: patch diff --git a/prdoc/pr_6156.prdoc b/prdoc/pr_6156.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..d20324a83a2f3a53ee0b0353fb7def5f16ab7051 --- /dev/null +++ b/prdoc/pr_6156.prdoc @@ -0,0 +1,23 @@ +title: "Use bool::then instead of then_some with function calls" +doc: +- audience: Node Dev + description: |- + Fix misusage of `bool::then_some`.
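For context on the `then_some` misuse this entry fixes: `bool::then_some(value)` evaluates its argument eagerly, even when the boolean is `false`, whereas `bool::then(|| value)` only runs the closure when the boolean is `true`. That difference matters when the argument is a function call with side effects or non-trivial cost. A minimal, self-contained illustration (not code from this patch):

```rust
fn expensive() -> u32 {
    println!("expensive() was evaluated");
    42
}

fn main() {
    let enabled = false;

    // Eager: `expensive()` runs and prints even though the result is discarded.
    let eager = enabled.then_some(expensive());

    // Lazy: the closure is never invoked because `enabled` is false.
    let lazy = enabled.then(|| expensive());

    assert_eq!(eager, None);
    assert_eq!(lazy, None);
}
```

The same pattern shows up later in this diff, where `substrate/bin/node/cli/src/service.rs` swaps `.then_some(config.database.path().map(...))` for `.then(|| config.database.path().map(...))` around the hardware-benchmark setup.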
+ +crates: +- name: polkadot-omni-node-lib + bump: patch +- name: polkadot-cli + bump: patch +- name: polkadot-collator-protocol + bump: patch +- name: sc-network + bump: patch +- name: sc-network-sync + bump: patch +- name: pallet-contracts-proc-macro + bump: patch +- name: frame-support-procedural + bump: patch +- name: frame-support + bump: patch diff --git a/prdoc/pr_6174.prdoc b/prdoc/pr_6174.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..8aa1c25012b1ad5982ebca880f69b99dc03f245d --- /dev/null +++ b/prdoc/pr_6174.prdoc @@ -0,0 +1,9 @@ +title: '[pallet-revive] fix fixture build path' +doc: +- audience: Runtime Dev + description: "Fix fixture build path" +crates: +- name: pallet-revive-fixtures + bump: patch +- name: pallet-revive + bump: patch diff --git a/prdoc/pr_6192.prdoc b/prdoc/pr_6192.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..cd9255486706a111ce521500113dafd83e2934e8 --- /dev/null +++ b/prdoc/pr_6192.prdoc @@ -0,0 +1,7 @@ +title: '[pallet-revive] fix hardcoded gas in tests' +doc: +- audience: Runtime Dev + description: Fix hardcoded gas limits in tests +crates: +- name: pallet-revive + bump: patch diff --git a/prdoc/pr_6205.prdoc b/prdoc/pr_6205.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..0874eb468db4cc96d1d0fa6544f41e547d02bc3c --- /dev/null +++ b/prdoc/pr_6205.prdoc @@ -0,0 +1,8 @@ +title: 'pallet-message-queue: Fix max message size calculation' +doc: +- audience: Runtime Dev + description: |- + The max size of a message should not depend on the weight left in a given execution context. Instead, the max message size depends on the service weights configured for the pallet. A message that does not fit into `on_idle` is not automatically overweight, because it may be executed successfully in `on_initialize` or in another block's `on_idle` when there is more weight left. +crates: +- name: pallet-message-queue + bump: patch diff --git a/prdoc/pr_6212.prdoc b/prdoc/pr_6212.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..97f522025d1e6d1fae1ded10875843b73fd7249e --- /dev/null +++ b/prdoc/pr_6212.prdoc @@ -0,0 +1,32 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Added Trusted Query API calls for Westend and Rococo chains" + +doc: + - audience: Runtime Dev + description: | + Added is_trusted_reserve and is_trusted_teleporter API calls to relay chains. + Given an asset and a location, they return whether the chain trusts that location as a reserve or teleporter for that asset, respectively. + You can implement them on your runtime by simply calling a helper function on `pallet-xcm`. + ```rust + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } + ``` + + - audience: Runtime User + description: | + There's a new runtime API to check whether a chain trusts a Location as a reserve or teleporter for a given Asset. + It's implemented in all the relays and system parachains in Westend and Rococo.
+ +crates: + - name: westend-runtime + bump: minor + - name: rococo-runtime + bump: minor diff --git a/prdoc/pr_6218.prdoc b/prdoc/pr_6218.prdoc new file mode 100644 index 0000000000000000000000000000000000000000..5c97c926f238e75ab0bbb316ece8faf3ead472ea --- /dev/null +++ b/prdoc/pr_6218.prdoc @@ -0,0 +1,9 @@ +title: Enable approval-voting-parallel by default on kusama + +doc: + - audience: Node Dev + description: | + Enable approval-voting-parallel by default on kusama +crates: + - name: polkadot-service + bump: patch diff --git a/scripts/release/build-changelogs.sh b/scripts/release/build-changelogs.sh index d73f06c8cd6bb3f8a0814fd797a19da951d51418..d1bbe136ad48f0369a5e4ca9649490eb755842cf 100755 --- a/scripts/release/build-changelogs.sh +++ b/scripts/release/build-changelogs.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash export PRODUCT=polkadot -export VERSION=${VERSION:-1.5.0} +export VERSION=${VERSION:-stable2409} export ENGINE=${ENGINE:-podman} export REF1=${REF1:-'HEAD'} export REF2=${REF2} @@ -66,33 +66,21 @@ echo "Changelog ready in $OUTPUT/relnote_commits.md" # Show the files tree -s -h -c $OUTPUT/ -ASSET_HUB_ROCOCO_DIGEST=${ASSET_HUB_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/asset-hub-rococo-srtool-digest.json"} ASSET_HUB_WESTEND_DIGEST=${ASSET_HUB_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/asset-hub-westend-srtool-digest.json"} -BRIDGE_HUB_ROCOCO_DIGEST=${BRIDGE_HUB_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/bridge-hub-rococo-srtool-digest.json"} BRIDGE_HUB_WESTEND_DIGEST=${BRIDGE_HUB_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/bridge-hub-westend-srtool-digest.json"} COLLECTIVES_WESTEND_DIGEST=${COLLECTIVES_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/collectives-westend-srtool-digest.json"} -CONTRACTS_ROCOCO_DIGEST=${CONTRACTS_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/contracts-rococo-srtool-digest.json"} -CORETIME_ROCOCO_DIGEST=${CORETIME_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/coretime-rococo-srtool-digest.json"} CORETIME_WESTEND_DIGEST=${CORETIME_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/coretime-westend-srtool-digest.json"} GLUTTON_WESTEND_DIGEST=${GLUTTON_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/glutton-westend-srtool-digest.json"} -PEOPLE_ROCOCO_DIGEST=${PEOPLE_ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/people-rococo-srtool-digest.json"} PEOPLE_WESTEND_DIGEST=${PEOPLE_WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/people-westend-srtool-digest.json"} -ROCOCO_DIGEST=${ROCOCO_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/rococo-srtool-digest.json"} WESTEND_DIGEST=${WESTEND_DIGEST:-"$PROJECT_ROOT/scripts/release/digests/westend-srtool-digest.json"} jq \ - --slurpfile srtool_asset_hub_rococo $ASSET_HUB_ROCOCO_DIGEST \ --slurpfile srtool_asset_hub_westend $ASSET_HUB_WESTEND_DIGEST \ - --slurpfile srtool_bridge_hub_rococo $BRIDGE_HUB_ROCOCO_DIGEST \ --slurpfile srtool_bridge_hub_westend $BRIDGE_HUB_WESTEND_DIGEST \ --slurpfile srtool_collectives_westend $COLLECTIVES_WESTEND_DIGEST \ - --slurpfile srtool_contracts_rococo $CONTRACTS_ROCOCO_DIGEST \ - --slurpfile srtool_coretime_rococo $CORETIME_ROCOCO_DIGEST\ --slurpfile srtool_coretime_westend $CORETIME_WESTEND_DIGEST \ --slurpfile srtool_glutton_westend $GLUTTON_WESTEND_DIGEST \ - --slurpfile srtool_people_rococ $PEOPLE_ROCOCO_DIGEST \ --slurpfile srtool_people_westend $PEOPLE_WESTEND_DIGEST \ - --slurpfile srtool_rococo $ROCOCO_DIGEST \ --slurpfile srtool_westend $WESTEND_DIGEST \ -n '{ srtool: [ @@ -102,13 
+90,7 @@ jq \ { order: 13, name: "Westend Collectives", data: $srtool_collectives_westend[0] }, { order: 14, name: "Westend Coretime", data: $srtool_coretime_westend[0] }, { order: 15, name: "Westend Glutton", data: $srtool_glutton_westend[0] }, - { order: 16, name: "Westend People", data: $srtool_people_westend[0] }, - { order: 17, name: "Rococo", data: $srtool_rococo[0] }, - { order: 18, name: "Rococo AssetHub", data: $srtool_asset_hub_rococo[0] }, - { order: 19, name: "Rococo BridgeHub", data: $srtool_bridge_hub_rococo[0] }, - { order: 20, name: "Rococo Contracts", data: $srtool_contracts_rococo[0] }, - { order: 21, name: "Rococo Coretime", data: $srtool_coretime_rococo[0] }, - { order: 22, name: "Rococo People", data: $srtool_people_rococ[0] } + { order: 16, name: "Westend People", data: $srtool_people_westend[0] } ] }' > "$PROJECT_ROOT/scripts/release/context.json" RELEASE_DIR="$PROJECT_ROOT/scripts/release/" diff --git a/scripts/release/templates/changelog.md.tera b/scripts/release/templates/changelog.md.tera index aaba761e8e47fa567db20c125ed9893c733da5dd..8d17451c8d05474a6d9e46cc25f9b96b7bf28d9c 100644 --- a/scripts/release/templates/changelog.md.tera +++ b/scripts/release/templates/changelog.md.tera @@ -1,4 +1,4 @@ -## Changelog for `{{ env.PRODUCT | capitalize }} v{{ env.VERSION }}` +## Changelog for `{{ env.PRODUCT | capitalize }} {{ env.VERSION }}` {% for file in prdoc | sort(attribute="doc_filename.number") -%} {%- set author= file.content.author | default(value="n/a") -%} diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs index ecd384a514563cfb4ecb0e9f364cdf473f5b5cec..ec9eee205cee37b534eca5187ece031daa54dd99 100644 --- a/substrate/.maintain/frame-weight-template.hbs +++ b/substrate/.maintain/frame-weight-template.hbs @@ -33,7 +33,7 @@ pub trait WeightInfo { /// Weights for `{{pallet}}` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); -{{#if (eq pallet "frame_system")}} +{{#if (or (eq pallet "frame_system") (eq pallet "frame_system_extensions"))}} impl WeightInfo for SubstrateWeight { {{else}} impl WeightInfo for SubstrateWeight { diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 2e53c18645f56821a2f2f3a663dad481e5035cf9..933406670e5cc487d781bd61cf6ebf1368730b46 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -46,6 +46,7 @@ futures = { workspace = true } log = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } +subxt-signer = { workspace = true, features = ["unstable-eth"] } # The Polkadot-SDK: polkadot-sdk = { features = [ @@ -56,8 +57,6 @@ polkadot-sdk = { features = [ "generate-bags", "mmr-gadget", "mmr-rpc", - "pallet-contracts-mock-network", - "pallet-revive-mock-network", "pallet-transaction-payment-rpc", "sc-allocator", "sc-authority-discovery", diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index de883d1051f50e1cf8a24cf83eef4f48d771073a..da82729dbec0fde85d28e56bc3069abbbd4c5a41 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -39,6 +39,7 @@ use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed} use sp_consensus::BlockOrigin; use sp_keyring::Sr25519Keyring; use sp_runtime::{ + generic, transaction_validity::{InvalidTransaction, TransactionValidityError}, AccountId32, MultiAddress, OpaqueExtrinsic, }; @@ -120,11 +121,11 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { } fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { - kitchensink_runtime::UncheckedExtrinsic { - signature: None, - function: kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now }), - } - .into() + let utx: kitchensink_runtime::UncheckedExtrinsic = generic::UncheckedExtrinsic::new_bare( + kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now }), + ) + .into(); + utx.into() } fn import_block(client: &FullClient, built: BuiltBlock) { diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index fa4da5c13d4344208c6a51067fbf78e380923a82..412b7f0ba0fca53221af2e0b803e45744d24a5df 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -31,7 +31,7 @@ use sp_core::{ storage::well_known_keys, traits::{CallContext, CodeExecutor, RuntimeCode}, }; -use sp_runtime::traits::BlakeTwo256; +use sp_runtime::{generic::ExtrinsicFormat, traits::BlakeTwo256}; use sp_state_machine::TestExternalities as CoreTestExternalities; use staging_node_cli::service::RuntimeExecutor; @@ -146,11 +146,11 @@ fn test_blocks( ) -> Vec<(Vec, Hash)> { let mut test_ext = new_test_ext(genesis_config); let mut block1_extrinsics = vec![CheckedExtrinsic { - signed: None, + format: ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: 0 }), }]; block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic { - signed: Some((alice(), signed_extra(i, 0))), + format: ExtrinsicFormat::Signed(alice(), tx_ext(i, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 1 * DOLLARS, diff --git a/substrate/bin/node/cli/src/chain_spec.rs 
b/substrate/bin/node/cli/src/chain_spec.rs index 6cd9adfd7f2586e7e0db6f95ab8d0db15cbfdcf7..362deacba5f30f0d53df027a6aefc89ac337cc59 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -20,6 +20,7 @@ use polkadot_sdk::*; +use crate::chain_spec::{sc_service::Properties, sp_runtime::AccountId32}; use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, Block, MaxNominations, SessionKeys, StakerStatus, }; @@ -291,8 +292,8 @@ fn configure_accounts( usize, Vec<(AccountId, AccountId, Balance, StakerStatus)>, ) { - let mut endowed_accounts: Vec = endowed_accounts - .unwrap_or_else(|| Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect()); + let mut endowed_accounts: Vec = + endowed_accounts.unwrap_or_else(default_endowed_accounts); // endow all authorities and nominators. initial_authorities .iter() @@ -417,12 +418,40 @@ fn development_config_genesis_json() -> serde_json::Value { ) } +fn props() -> Properties { + let mut properties = Properties::new(); + properties.insert("tokenDecimals".to_string(), 12.into()); + properties +} + +fn eth_account(from: subxt_signer::eth::Keypair) -> AccountId32 { + let mut account_id = AccountId32::new([0xEE; 32]); + >::as_mut(&mut account_id)[..20] + .copy_from_slice(&from.account_id().0); + account_id +} + +fn default_endowed_accounts() -> Vec { + Sr25519Keyring::well_known() + .map(|k| k.to_account_id()) + .chain([ + eth_account(subxt_signer::eth::dev::alith()), + eth_account(subxt_signer::eth::dev::baltathar()), + eth_account(subxt_signer::eth::dev::charleth()), + eth_account(subxt_signer::eth::dev::dorothy()), + eth_account(subxt_signer::eth::dev::ethan()), + eth_account(subxt_signer::eth::dev::faith()), + ]) + .collect() +} + /// Development config (single validator Alice). pub fn development_config() -> ChainSpec { ChainSpec::builder(wasm_binary_unwrap(), Default::default()) .with_name("Development") .with_id("dev") .with_chain_type(ChainType::Development) + .with_properties(props()) .with_genesis_config_patch(development_config_genesis_json()) .build() } diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index c0dcacb2e4b451990ea08c0c29fd1f92f89b3eaa..1d7001a5dccfc9474ec0056441153b2604d49458 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -59,6 +59,7 @@ pub enum Subcommand { Inspect(node_inspect::cli::InspectCmd), /// Sub-commands concerned with benchmarking. + /// /// The pallet benchmarking moved to the `pallet` sub-command. 
#[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 4eb1db185e9b441dfac761dd08b376becbecea84..057e0bbdcefc751c2d970633fcbc017d15e0c8ca 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -121,7 +121,7 @@ pub fn create_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: kitchensink_runtime::SignedExtra = + let tx_ext: kitchensink_runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), @@ -143,7 +143,7 @@ pub fn create_extrinsic( let raw_payload = kitchensink_runtime::SignedPayload::from_raw( function.clone(), - extra.clone(), + tx_ext.clone(), ( (), kitchensink_runtime::VERSION.spec_version, @@ -158,12 +158,13 @@ pub fn create_extrinsic( ); let signature = raw_payload.using_encoded(|e| sender.sign(e)); - kitchensink_runtime::UncheckedExtrinsic::new_signed( + generic::UncheckedExtrinsic::new_signed( function, sp_runtime::AccountId32::from(sender.public()).into(), kitchensink_runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) + .into() } /// Creates a new partial node. @@ -419,10 +420,12 @@ pub fn new_full_base::Hash>>( let enable_offchain_worker = config.offchain_worker.enabled; let hwbench = (!disable_hardware_benchmarks) - .then_some(config.database.path().map(|database_path| { - let _ = std::fs::create_dir_all(&database_path); - sc_sysinfo::gather_hwbench(Some(database_path), &SUBSTRATE_REFERENCE_HARDWARE) - })) + .then(|| { + config.database.path().map(|database_path| { + let _ = std::fs::create_dir_all(&database_path); + sc_sysinfo::gather_hwbench(Some(database_path), &SUBSTRATE_REFERENCE_HARDWARE) + }) + }) .flatten(); let sc_service::PartialComponents { @@ -787,9 +790,7 @@ pub fn new_full_base::Hash>>( ); if enable_offchain_worker { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", + let offchain_workers = sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { runtime_api_provider: client.clone(), keystore: Some(keystore_container.keystore()), @@ -803,9 +804,11 @@ pub fn new_full_base::Hash>>( custom_extensions: move |_| { vec![Box::new(statement_store.clone().as_statement_store_ext()) as Box<_>] }, - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), + })?; + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-work", + offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(), ); } @@ -866,7 +869,7 @@ mod tests { use codec::Encode; use kitchensink_runtime::{ constants::{currency::CENTS, time::SLOT_DURATION}, - Address, BalancesCall, RuntimeCall, UncheckedExtrinsic, + Address, BalancesCall, RuntimeCall, TxExtension, }; use node_primitives::{Block, DigestItem, Signature}; use polkadot_sdk::{sc_transaction_pool_api::MaintainedTransactionPool, *}; @@ -883,7 +886,7 @@ mod tests { use sp_keyring::AccountKeyring; use sp_keystore::KeystorePtr; use sp_runtime::{ - generic::{Digest, Era, SignedPayload}, + generic::{self, Digest, Era, SignedPayload}, key_types::BABE, traits::{Block as BlockT, Header as HeaderT, IdentifyAccount, Verify}, RuntimeAppPublic, @@ -989,7 +992,7 @@ mod tests { sc_consensus_babe::authorship::claim_slot(slot.into(), &epoch, &keystore) .map(|(digest, _)| digest) { - break (babe_pre_digest, epoch_descriptor) + break (babe_pre_digest, epoch_descriptor); } slot += 1; @@ -1070,7 +1073,7 @@ mod tests { 
pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), ); let metadata_hash = frame_metadata_hash_extension::CheckMetadataHash::new(false); - let extra = ( + let tx_ext: TxExtension = ( check_non_zero_sender, check_spec_version, check_tx_version, @@ -1083,7 +1086,7 @@ mod tests { ); let raw_payload = SignedPayload::from_raw( function, - extra, + tx_ext, ( (), spec_version, @@ -1097,10 +1100,18 @@ mod tests { ), ); let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); - let (function, extra, _) = raw_payload.deconstruct(); + let (function, tx_ext, _) = raw_payload.deconstruct(); index += 1; - UncheckedExtrinsic::new_signed(function, from.into(), signature.into(), extra) - .into() + let utx: kitchensink_runtime::UncheckedExtrinsic = + generic::UncheckedExtrinsic::new_signed( + function, + from.into(), + signature.into(), + tx_ext, + ) + .into(); + + utx.into() }, ); } diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index 037ddbb1e47b573df0e019b25a28c09601d95b83..8f1475fce4f89e9a38945eff6c0cf7847037e04e 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -17,11 +17,11 @@ use codec::{Decode, Encode, Joiner}; use frame_support::{ - dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo}, + dispatch::{DispatchClass, GetDispatchInfo}, traits::Currency, weights::Weight, }; -use frame_system::{self, AccountInfo, EventRecord, Phase}; +use frame_system::{self, AccountInfo, DispatchEventInfo, EventRecord, Phase}; use polkadot_sdk::*; use sp_core::{storage::well_known_keys, traits::Externalities}; use sp_runtime::{ @@ -59,17 +59,23 @@ pub fn bloaty_code_unwrap() -> &'static [u8] { /// Note that reads the multiplier from storage directly, hence to get the fee of `extrinsic` /// at block `n`, it must be called prior to executing block `n` to do the calculation with the /// correct multiplier. -fn transfer_fee(extrinsic: &E) -> Balance { - TransactionPayment::compute_fee( - extrinsic.encode().len() as u32, - &default_transfer_call().get_dispatch_info(), - 0, - ) +fn transfer_fee(extrinsic: &UncheckedExtrinsic) -> Balance { + let mut info = default_transfer_call().get_dispatch_info(); + info.extension_weight = extrinsic.0.extension_weight(); + TransactionPayment::compute_fee(extrinsic.encode().len() as u32, &info, 0) +} + +/// Default transfer fee, same as `transfer_fee`, but with a weight refund factored in. 
+fn transfer_fee_with_refund(extrinsic: &UncheckedExtrinsic, weight_refund: Weight) -> Balance { + let mut info = default_transfer_call().get_dispatch_info(); + info.extension_weight = extrinsic.0.extension_weight(); + let post_info = (Some(info.total_weight().saturating_sub(weight_refund)), info.pays_fee).into(); + TransactionPayment::compute_actual_fee(extrinsic.encode().len() as u32, &info, &post_info, 0) } fn xt() -> UncheckedExtrinsic { sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(default_transfer_call()), }) } @@ -86,11 +92,11 @@ fn changes_trie_block() -> (Vec, Hash) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, @@ -113,11 +119,11 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 69 * DOLLARS, @@ -133,18 +139,18 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { block1.1, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { - signed: Some((bob(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(bob(), tx_ext(0, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: alice().into(), value: 5 * DOLLARS, }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(1, 0)), function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: bob().into(), value: 15 * DOLLARS, @@ -168,11 +174,11 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, CheckedExtrinsic { - signed: Some((alice(), signed_extra(nonce, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(nonce, 0)), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; size] }), }, ], @@ -257,13 +263,14 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { - 
assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); } @@ -297,13 +304,14 @@ fn successful_execution_with_foreign_code_gives_ok() { let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); } @@ -316,15 +324,18 @@ fn full_native_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); + let extension_weight = xt().0.extension_weight(); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); - let transfer_weight = default_transfer_call().get_dispatch_info().weight.saturating_add( + let transfer_weight = default_transfer_call().get_dispatch_info().call_weight.saturating_add( ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic, ); let timestamp_weight = pallet_timestamp::Call::set:: { now: Default::default() } .get_dispatch_info() - .weight + .call_weight .saturating_add( ::BlockWeights::get() .get(DispatchClass::Mandatory) @@ -334,17 +345,17 @@ fn full_native_block_import_works() { executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); alice_last_known_balance = Balances::total_balance(&alice()); let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, - ..Default::default() + pays_fee: Default::default(), }, }), topics: vec![], @@ -370,21 +381,21 @@ fn full_native_block_import_works() { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), - amount: fees * 8 / 10, + amount: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, + value: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Rescinded { - amount: fees * 2 / 10, + amount: fees_after_refund * 2 / 10, }), topics: vec![], }, @@ -393,7 +404,7 @@ fn full_native_block_import_works() { event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), - actual_fee: fees, + actual_fee: fees_after_refund, tip: 0, }, ), @@ -402,7 +413,11 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: 
RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, + dispatch_info: DispatchEventInfo { + weight: transfer_weight + .saturating_add(extension_weight.saturating_sub(weight_refund)), + ..Default::default() + }, }), topics: vec![], }, @@ -412,15 +427,18 @@ fn full_native_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); let pot = t.execute_with(|| Treasury::pot()); + let extension_weight = xt().0.extension_weight(); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - fees, + alice_last_known_balance - 10 * DOLLARS - fees_after_refund, ); - assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees_after_refund); let events = vec![ EventRecord { phase: Phase::Initialization, @@ -433,10 +451,10 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, - ..Default::default() + pays_fee: Default::default(), }, }), topics: vec![], @@ -462,21 +480,21 @@ fn full_native_block_import_works() { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), - amount: fees * 8 / 10, + amount: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, + value: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Rescinded { - amount: fees - fees * 8 / 10, + amount: fees_after_refund - fees_after_refund * 8 / 10, }), topics: vec![], }, @@ -485,7 +503,7 @@ fn full_native_block_import_works() { event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: bob().into(), - actual_fee: fees, + actual_fee: fees_after_refund, tip: 0, }, ), @@ -494,7 +512,11 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, + dispatch_info: DispatchEventInfo { + weight: transfer_weight + .saturating_add(extension_weight.saturating_sub(weight_refund)), + ..Default::default() + }, }), topics: vec![], }, @@ -519,21 +541,21 @@ fn full_native_block_import_works() { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), - amount: fees * 8 / 10, + amount: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, + value: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::Balances(pallet_balances::Event::Rescinded { - amount: fees - fees * 8 / 10, + amount: fees_after_refund - 
fees_after_refund * 8 / 10, }), topics: vec![], }, @@ -542,7 +564,7 @@ fn full_native_block_import_works() { event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), - actual_fee: fees, + actual_fee: fees_after_refund, tip: 0, }, ), @@ -551,7 +573,11 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, + dispatch_info: DispatchEventInfo { + weight: transfer_weight + .saturating_add(extension_weight.saturating_sub(weight_refund)), + ..Default::default() + }, }), topics: vec![], }, @@ -567,26 +593,28 @@ fn full_wasm_block_import_works() { let (block1, block2) = blocks(); let mut alice_last_known_balance: Balance = Default::default(); - let mut fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); alice_last_known_balance = Balances::total_balance(&alice()); }); - fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - fees, + alice_last_known_balance - 10 * DOLLARS - fees_after_refund, ); - assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - 1 * fees); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - 1 * fees_after_refund); }); } @@ -700,11 +728,11 @@ fn deploying_wasm_contract_should_work() { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)), function: RuntimeCall::Contracts(pallet_contracts::Call::instantiate_with_code::< Runtime, > { @@ -717,7 +745,7 @@ fn deploying_wasm_contract_should_work() { }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)), function: RuntimeCall::Contracts(pallet_contracts::Call::call:: { dest: sp_runtime::MultiAddress::Id(addr.clone()), value: 10, @@ -828,7 +856,8 @@ fn successful_execution_gives_ok() { assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS); }); - let fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = Weight::zero(); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 @@ -839,7 +868,7 @@ fn successful_execution_gives_ok() { .expect("Extrinsic failed"); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); 
assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); } diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index b49af4c1055cd9774905c0c7f1c44c6f90b25ac3..da9d2662408eb5986ee4c6bfc790880560987814 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -55,11 +55,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(0, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)), function: RuntimeCall::Sudo(pallet_sudo::Call::sudo { call: Box::new(RuntimeCall::RootTesting( pallet_root_testing::Call::fill_block { ratio: Perbill::from_percent(60) }, @@ -78,11 +78,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { block1.1, vec![ CheckedExtrinsic { - signed: None, + format: sp_runtime::generic::ExtrinsicFormat::Bare, function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)), function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1] }), }, ], @@ -148,7 +148,7 @@ fn transaction_fee_is_correct() { let tip = 1_000_000; let xt = sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, tip))), + format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, tip)), function: RuntimeCall::Balances(default_transfer_call()), }); @@ -174,7 +174,9 @@ fn transaction_fee_is_correct() { let length_fee = TransactionByteFee::get() * (xt.clone().encode().len() as Balance); balance_alice -= length_fee; - let weight = default_transfer_call().get_dispatch_info().weight; + let mut info = default_transfer_call().get_dispatch_info(); + info.extension_weight = xt.0.extension_weight(); + let weight = info.total_weight(); let weight_fee = IdentityFee::::weight_to_fee(&weight); // we know that weight to fee multiplier is effect-less in block 1. 
diff --git a/substrate/bin/node/cli/tests/submit_transaction.rs b/substrate/bin/node/cli/tests/submit_transaction.rs index 18826e7e90a784e2b644b88024a8d83e3e15f3c1..3672432ae3426294479577e83c4bc93cf3ec9cb4 100644 --- a/substrate/bin/node/cli/tests/submit_transaction.rs +++ b/substrate/bin/node/cli/tests/submit_transaction.rs @@ -23,6 +23,7 @@ use sp_application_crypto::AppCrypto; use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt}; use sp_keyring::sr25519::Keyring::Alice; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; +use sp_runtime::generic; pub mod common; use self::common::*; @@ -44,10 +45,9 @@ fn should_submit_unsigned_transaction() { }; let call = pallet_im_online::Call::heartbeat { heartbeat: heartbeat_data, signature }; - SubmitTransaction::>::submit_unsigned_transaction( - call.into(), - ) - .unwrap(); + let xt = generic::UncheckedExtrinsic::new_bare(call.into()).into(); + SubmitTransaction::>::submit_transaction(xt) + .unwrap(); assert_eq!(state.read().transactions.len(), 1) }); @@ -131,7 +131,7 @@ fn should_submit_signed_twice_from_the_same_account() { // now check that the transaction nonces are not equal let s = state.read(); fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { - let extra = tx.signature.unwrap().2; + let extra = tx.0.preamble.to_signed().unwrap().2; extra.5 } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); @@ -180,7 +180,7 @@ fn should_submit_signed_twice_from_all_accounts() { // now check that the transaction nonces are not equal let s = state.read(); fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { - let extra = tx.signature.unwrap().2; + let extra = tx.0.preamble.to_signed().unwrap().2; extra.5 } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); @@ -237,7 +237,7 @@ fn submitted_transaction_should_be_valid() { let source = TransactionSource::External; let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap(); // add balance to the account - let author = extrinsic.signature.clone().unwrap().0; + let author = extrinsic.0.preamble.clone().to_signed().clone().unwrap().0; let address = Indices::lookup(author).unwrap(); let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; let account = frame_system::AccountInfo { providers: 1, data, ..Default::default() }; diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 6310e16d5a14d4d3e4e3b4053448ba488bb125cb..7acf4294c51bd20cea967fb730ca0d5630128add 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -27,6 +27,7 @@ scale-info = { features = ["derive", "serde"], workspace = true } static_assertions = { workspace = true, default-features = true } log = { workspace = true } serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } +sp-debug-derive = { workspace = true, features = ["force-debug"] } # pallet-asset-conversion: turn on "num-traits" feature primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } @@ -56,6 +57,7 @@ std = [ "primitive-types/std", "scale-info/std", "serde_json/std", + "sp-debug-derive/std", "substrate-wasm-builder", ] runtime-benchmarks = [ diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index ececf0d87b2d28adabe1f58917aa59bc5b8e3294..89b5bf26ce75e8e4c22a3c2c0258d9499e6b40b3 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ 
b/substrate/bin/node/runtime/src/lib.rs @@ -76,6 +76,7 @@ use pallet_identity::legacy::IdentityInfo; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_nfts::PalletFeatures; use pallet_nis::WithMaximumOf; +use pallet_revive::evm::runtime::EthExtra; use pallet_session::historical as pallet_session_historical; // Can't use `FungibleAdapter` here until Treasury pallet migrates to fungibles // @@ -102,8 +103,8 @@ use sp_runtime::{ MaybeConvert, NumberFor, OpaqueKeys, SaturatedConversion, StaticLookup, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedPointNumber, FixedU128, Perbill, Percent, Permill, Perquintill, - RuntimeDebug, + ApplyExtrinsicResult, FixedPointNumber, FixedU128, MultiSignature, MultiSigner, Perbill, + Percent, Permill, Perquintill, RuntimeDebug, }; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; @@ -574,6 +575,7 @@ impl pallet_transaction_payment::Config for Runtime { MinimumMultiplier, MaximumMultiplier, >; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_asset_conversion_tx_payment::Config for Runtime { @@ -585,6 +587,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { AssetConversion, ResolveAssetTo, >; + type WeightInfo = pallet_asset_conversion_tx_payment::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; } impl pallet_skip_feeless_payment::Config for Runtime { @@ -1410,7 +1415,7 @@ impl pallet_revive::Config for Runtime { type WeightPrice = pallet_transaction_payment::Pallet; type WeightInfo = pallet_revive::weights::SubstrateWeight; type ChainExtension = (); - type AddressMapper = pallet_revive::DefaultAddressMapper; + type AddressMapper = pallet_revive::AccountId32Mapper; type RuntimeMemory = ConstU32<{ 128 * 1024 * 1024 }>; type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>; type UnsafeUnstableInterface = ConstBool; @@ -1438,16 +1443,29 @@ parameter_types! { pub const MaxPeerInHeartbeats: u32 = 10_000; } +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic { + generic::UncheckedExtrinsic::new_transaction(call, extension).into() + } +} + impl frame_system::offchain::CreateSignedTransaction for Runtime where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { let tip = 0; // take the biggest period possible. let period = @@ -1458,7 +1476,7 @@ where // so the actual block number is `n`. 
.saturating_sub(1); let era = Era::mortal(period, current_block); - let extra = ( + let tx_ext: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -1473,15 +1491,27 @@ where ), frame_metadata_hash_extension::CheckMetadataHash::new(false), ); - let raw_payload = SignedPayload::new(call, extra) + + let raw_payload = SignedPayload::new(call, tx_ext) .map_err(|e| { log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let address = Indices::unlookup(account); - let (call, extra, _) = raw_payload.deconstruct(); - Some((call, (address, signature, extra))) + let (call, tx_ext, _) = raw_payload.deconstruct(); + let transaction = + generic::UncheckedExtrinsic::new_signed(call, address, signature, tx_ext).into(); + Some(transaction) + } +} + +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + generic::UncheckedExtrinsic::new_bare(call).into() } } @@ -1490,12 +1520,12 @@ impl frame_system::offchain::SigningTypes for Runtime { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; } impl pallet_im_online::Config for Runtime { @@ -1990,6 +2020,30 @@ impl pallet_transaction_storage::Config for Runtime { ConstU32<{ pallet_transaction_storage::DEFAULT_MAX_TRANSACTION_SIZE }>; } +#[cfg(feature = "runtime-benchmarks")] +pub struct VerifySignatureBenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_verify_signature::BenchmarkHelper + for VerifySignatureBenchmarkHelper +{ + fn create_signature(_entropy: &[u8], msg: &[u8]) -> (MultiSignature, AccountId) { + use sp_io::crypto::{sr25519_generate, sr25519_sign}; + use sp_runtime::traits::IdentifyAccount; + let public = sr25519_generate(0.into(), None); + let who_account: AccountId = MultiSigner::Sr25519(public).into_account().into(); + let signature = MultiSignature::Sr25519(sr25519_sign(0.into(), &public, msg).unwrap()); + (signature, who_account) + } +} + +impl pallet_verify_signature::Config for Runtime { + type Signature = MultiSignature; + type AccountIdentifier = MultiSigner; + type WeightInfo = pallet_verify_signature::weights::SubstrateWeight; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = VerifySignatureBenchmarkHelper; +} + impl pallet_whitelist::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -2530,8 +2584,21 @@ mod runtime { #[runtime::pallet_index(80)] pub type Revive = pallet_revive::Pallet; + + #[runtime::pallet_index(81)] + pub type VerifySignature = pallet_verify_signature::Pallet; } +impl TryFrom for pallet_revive::Call { + type Error = (); + + fn try_from(value: RuntimeCall) -> Result { + match value { + RuntimeCall::Revive(call) => Ok(call), + _ => Err(()), + } + } +} /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. @@ -2542,12 +2609,12 @@ pub type Block = generic::Block; pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. 
+/// The TransactionExtension to the basic transaction logic.
 ///
 /// When you change this, you **MUST** modify [`sign`] in `bin/node/testing/src/keyring.rs`!
 ///
 /// [`sign`]: <../../testing/src/keyring.rs.html>
-pub type SignedExtra = (
+pub type TxExtension = (
 	frame_system::CheckNonZeroSender<Runtime>,
 	frame_system::CheckSpecVersion<Runtime>,
 	frame_system::CheckTxVersion<Runtime>,
@@ -2562,13 +2629,39 @@ pub type SignedExtra = (
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
 );
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct EthExtraImpl;
+
+impl EthExtra for EthExtraImpl {
+	type Config = Runtime;
+	type Extension = TxExtension;
+
+	fn get_eth_extension(nonce: u32, tip: Balance) -> Self::Extension {
+		(
+			frame_system::CheckNonZeroSender::<Runtime>::new(),
+			frame_system::CheckSpecVersion::<Runtime>::new(),
+			frame_system::CheckTxVersion::<Runtime>::new(),
+			frame_system::CheckGenesis::<Runtime>::new(),
+			frame_system::CheckEra::from(crate::generic::Era::Immortal),
+			frame_system::CheckNonce::<Runtime>::from(nonce),
+			frame_system::CheckWeight::<Runtime>::new(),
+			pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::from(tip, None)
+				.into(),
+			frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
+		)
+	}
+}
+
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
-	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
+	pallet_revive::evm::runtime::UncheckedExtrinsic<Address, Signature, EthExtraImpl>;
+/// Unchecked signature payload type as expected by this runtime.
+pub type UncheckedSignaturePayload =
+	generic::UncheckedSignaturePayload<Address, Signature, TxExtension>;
 /// The payload being signed in transactions.
-pub type SignedPayload = generic::SignedPayload<RuntimeCall, SignedExtra>;
+pub type SignedPayload = generic::SignedPayload<RuntimeCall, TxExtension>;
 /// Extrinsic type that has already been checked.
-pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, RuntimeCall, SignedExtra>;
+pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, RuntimeCall, TxExtension>;
 /// Executive: handles dispatch to the various modules.
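// Illustrative sketch of what `VerifySignatureBenchmarkHelper` above produces: a
// `(MultiSignature, AccountId)` pair that round-trips through the standard `Verify` /
// `IdentifyAccount` machinery. Runs in a std/test context; the seed and message are
// arbitrary values chosen for the example.
fn multi_signature_sketch() {
	use sp_core::{sr25519, Pair};
	use sp_runtime::{
		traits::{IdentifyAccount, Verify},
		AccountId32, MultiSignature, MultiSigner,
	};

	let pair = sr25519::Pair::from_seed(&[42u8; 32]);
	let msg = b"example payload";

	let who: AccountId32 = MultiSigner::Sr25519(pair.public()).into_account();
	let signature = MultiSignature::Sr25519(pair.sign(msg));

	// `Verify::verify` dispatches on the sr25519 variant and checks against the account id.
	assert!(signature.verify(&msg[..], &who));
}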
pub type Executive = frame_executive::Executive< Runtime, @@ -2624,6 +2717,62 @@ mod mmr { pub type Hashing = ::Hashing; } +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + NativeOrWithId, + NativeOrWithId, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter(seed: u32) -> (NativeOrWithId, NativeOrWithId) { + (NativeOrWithId::WithId(seed), NativeOrWithId::WithId(seed)) + } + + fn setup_balances_and_pool(asset_id: NativeOrWithId, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + let NativeOrWithId::WithId(asset_idx) = asset_id.clone() else { unimplemented!() }; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_idx.into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + let _ = Balances::deposit_creating(&lp_provider, ((u64::MAX as u128) * 100).into()); + assert_ok!(Assets::mint_into( + asset_idx.into(), + &lp_provider, + ((u64::MAX as u128) * 100).into() + )); + + let token_native = alloc::boxed::Box::new(NativeOrWithId::Native); + let token_second = alloc::boxed::Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + token_second, + u64::MAX.into(), // 1 desired + u64::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + #[cfg(feature = "runtime-benchmarks")] mod benches { polkadot_sdk::frame_benchmarking::define_benchmarks!( @@ -2646,6 +2795,8 @@ mod benches { [pallet_example_tasks, TasksExample] [pallet_democracy, Democracy] [pallet_asset_conversion, AssetConversion] + [pallet_asset_conversion_tx_payment, AssetConversionTxPayment] + [pallet_transaction_payment, TransactionPayment] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [pallet_election_provider_support_benchmarking, EPSBench::] [pallet_elections_phragmen, Elections] @@ -2679,6 +2830,7 @@ mod benches { [pallet_state_trie_migration, StateTrieMigration] [pallet_sudo, Sudo] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_timestamp, Timestamp] [pallet_tips, Tips] [pallet_transaction_storage, TransactionStorage] @@ -2694,6 +2846,7 @@ mod benches { [pallet_safe_mode, SafeMode] [pallet_example_mbm, PalletExampleMbms] [pallet_asset_conversion_ops, AssetConversionMigration] + [pallet_verify_signature, VerifySignature] ); } @@ -3006,6 +3159,38 @@ impl_runtime_apis! 
{ impl pallet_revive::ReviveApi for Runtime { + fn eth_transact( + from: H160, + dest: Option, + value: Balance, + input: Vec, + gas_limit: Option, + storage_deposit_limit: Option, + ) -> pallet_revive::EthContractResult + { + use pallet_revive::AddressMapper; + let blockweights: BlockWeights = ::BlockWeights::get(); + let origin = ::AddressMapper::to_account_id(&from); + + let encoded_size = |pallet_call| { + let call = RuntimeCall::Revive(pallet_call); + let uxt: UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); + uxt.encoded_size() as u32 + }; + + Revive::bare_eth_transact( + origin, + dest, + value, + input, + gas_limit.unwrap_or(blockweights.max_block), + storage_deposit_limit.unwrap_or(u128::MAX), + encoded_size, + pallet_revive::DebugInfo::UnsafeDebug, + pallet_revive::CollectEvents::UnsafeCollect, + ) + } + fn call( origin: AccountId, dest: H160, @@ -3013,7 +3198,7 @@ impl_runtime_apis! { gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, - ) -> pallet_revive::ContractExecResult { + ) -> pallet_revive::ContractResult { Revive::bare_call( RuntimeOrigin::signed(origin), dest, @@ -3034,7 +3219,7 @@ impl_runtime_apis! { code: pallet_revive::Code, data: Vec, salt: Option<[u8; 32]>, - ) -> pallet_revive::ContractInstantiateResult + ) -> pallet_revive::ContractResult { Revive::bare_instantiate( RuntimeOrigin::signed(origin), @@ -3361,6 +3546,7 @@ impl_runtime_apis! { use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as EPSBench; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -3385,6 +3571,7 @@ impl_runtime_apis! 
{ use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as EPSBench; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index a5cec856717f676898b883954dafedecf900759f..16112386ad7cbb1894e4c94f350a97abd795794e 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -28,6 +28,7 @@ node-primitives = { workspace = true, default-features = true } kitchensink-runtime = { workspace = true } pallet-asset-conversion = { workspace = true, default-features = true } pallet-assets = { workspace = true, default-features = true } +pallet-revive = { workspace = true, default-features = true } pallet-asset-conversion-tx-payment = { workspace = true, default-features = true } pallet-asset-tx-payment = { workspace = true, default-features = true } pallet-skip-feeless-payment = { workspace = true, default-features = true } diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index fb0504f8fadef7eb107de6040d04cf591ca2ec66..3812524f0b1fee86d4001434a121b6deac37613e 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -53,6 +53,7 @@ use sp_core::{ use sp_crypto_hashing::blake2_256; use sp_inherents::InherentData; use sp_runtime::{ + generic::{self, ExtrinsicFormat, Preamble, EXTRINSIC_FORMAT_VERSION}, traits::{Block as BlockT, IdentifyAccount, Verify}, OpaqueExtrinsic, }; @@ -298,10 +299,10 @@ impl<'a> Iterator for BlockContentIterator<'a> { let signed = self.keyring.sign( CheckedExtrinsic { - signed: Some(( + format: ExtrinsicFormat::Signed( sender, - signed_extra(0, kitchensink_runtime::ExistentialDeposit::get() + 1), - )), + tx_ext(0, kitchensink_runtime::ExistentialDeposit::get() + 1), + ), function: match self.content.block_type { BlockType::RandomTransfersKeepAlive => RuntimeCall::Balances(BalancesCall::transfer_keep_alive { @@ -565,11 +566,11 @@ impl BenchKeyring { tx_version: u32, genesis_hash: [u8; 32], ) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { + match xt.format { + ExtrinsicFormat::Signed(signed, tx_ext) => { let payload = ( xt.function, - extra.clone(), + tx_ext.clone(), spec_version, tx_version, genesis_hash, @@ -585,12 +586,27 @@ impl BenchKeyring { key.sign(b) } }); - UncheckedExtrinsic { - signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), + generic::UncheckedExtrinsic { + preamble: Preamble::Signed( + sp_runtime::MultiAddress::Id(signed), + signature, + 0, + tx_ext, + ), function: payload.0, } + .into() }, - None => UncheckedExtrinsic { signature: None, function: xt.function }, + ExtrinsicFormat::Bare => generic::UncheckedExtrinsic { + preamble: Preamble::Bare(EXTRINSIC_FORMAT_VERSION), + function: xt.function, + } + .into(), + ExtrinsicFormat::General(tx_ext) => generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(0, tx_ext), + function: xt.function, + } + .into(), } } diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index 312fba8319b5c8cab6ae695e59ca9d8b1d388266..20497e85eab976aab2f87beb23a3f6f5107529b8 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ 
b/substrate/bin/node/testing/src/keyring.rs @@ -19,12 +19,12 @@ //! Test accounts. use codec::Encode; -use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; +use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, TxExtension, UncheckedExtrinsic}; use node_primitives::{AccountId, Balance, Nonce}; use sp_core::{crypto::get_public_from_string_or_panic, ecdsa, ed25519, sr25519}; use sp_crypto_hashing::blake2_256; use sp_keyring::Sr25519Keyring; -use sp_runtime::generic::Era; +use sp_runtime::generic::{self, Era, ExtrinsicFormat, EXTRINSIC_FORMAT_VERSION}; /// Alice's account id. pub fn alice() -> AccountId { @@ -73,7 +73,7 @@ pub fn session_keys_from_seed(seed: &str) -> SessionKeys { } /// Returns transaction extra. -pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { +pub fn tx_ext(nonce: Nonce, extra_fee: Balance) -> TxExtension { ( frame_system::CheckNonZeroSender::new(), frame_system::CheckSpecVersion::new(), @@ -97,11 +97,11 @@ pub fn sign( genesis_hash: [u8; 32], metadata_hash: Option<[u8; 32]>, ) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { + match xt.format { + ExtrinsicFormat::Signed(signed, tx_ext) => { let payload = ( xt.function, - extra.clone(), + tx_ext.clone(), spec_version, tx_version, genesis_hash, @@ -119,11 +119,26 @@ pub fn sign( } }) .into(); - UncheckedExtrinsic { - signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), + generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::Signed( + sp_runtime::MultiAddress::Id(signed), + signature, + 0, + tx_ext, + ), function: payload.0, } + .into() }, - None => UncheckedExtrinsic { signature: None, function: xt.function }, + ExtrinsicFormat::Bare => generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::Bare(EXTRINSIC_FORMAT_VERSION), + function: xt.function, + } + .into(), + ExtrinsicFormat::General(tx_ext) => generic::UncheckedExtrinsic { + preamble: sp_runtime::generic::Preamble::General(0, tx_ext), + function: xt.function, + } + .into(), } } diff --git a/substrate/client/api/src/notifications/tests.rs b/substrate/client/api/src/notifications/tests.rs index fba829b1cf9020034529b034f52015fb423608d8..9ad7973514b2f8f9a8d9688884c85b039fe2bf1d 100644 --- a/substrate/client/api/src/notifications/tests.rs +++ b/substrate/client/api/src/notifications/tests.rs @@ -18,7 +18,7 @@ use super::*; -use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; +use sp_runtime::testing::{Block as RawBlock, TestXt, H256 as Hash}; use std::iter::{empty, Empty}; type TestChangeSet = ( @@ -50,7 +50,7 @@ impl PartialEq for StorageChangeSet { } } -type Block = RawBlock>; +type Block = RawBlock>; #[test] fn triggering_change_should_notify_wildcard_listeners() { diff --git a/substrate/client/db/benches/state_access.rs b/substrate/client/db/benches/state_access.rs index e47559e710df1e21dab3d2a57b0c6434dd790289..7ea8e7080184b53dff1e8df17118f26f7538c8a5 100644 --- a/substrate/client/db/benches/state_access.rs +++ b/substrate/client/db/benches/state_access.rs @@ -22,12 +22,12 @@ use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBack use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sp_core::H256; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + testing::{Block as RawBlock, Header, MockCallU64, TestXt}, StateVersion, Storage, }; use tempfile::TempDir; -pub(crate) type Block = RawBlock>; +pub(crate) type 
Block = RawBlock>; fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 { let mut op = db.begin_operation().unwrap(); diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 72707c306f5823b461822a2de395a806b0870ec5..aaa1398a13bcfac3ab0cd93a14113f421def5abd 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -2567,7 +2567,7 @@ pub(crate) mod tests { use sp_blockchain::{lowest_common_ancestor, tree_route}; use sp_core::H256; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + testing::{Block as RawBlock, Header, MockCallU64, TestXt}, traits::{BlakeTwo256, Hash}, ConsensusEngineId, StateVersion, }; @@ -2575,7 +2575,8 @@ pub(crate) mod tests { const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0"; const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1"; - pub(crate) type Block = RawBlock>; + type UncheckedXt = TestXt; + pub(crate) type Block = RawBlock; pub fn insert_header( backend: &Backend, @@ -2594,7 +2595,7 @@ pub(crate) mod tests { parent_hash: H256, _changes: Option, Vec)>>, extrinsics_root: H256, - body: Vec>, + body: Vec, transaction_index: Option>, ) -> Result { use sp_runtime::testing::Digest; @@ -3680,7 +3681,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3702,11 +3703,20 @@ pub(crate) mod tests { assert_eq!(None, bc.body(blocks[0]).unwrap()); assert_eq!(None, bc.body(blocks[1]).unwrap()); assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); } else { for i in 0..5 { - assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]), + bc.body(blocks[i]).unwrap() + ); } } } @@ -3730,7 +3740,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3739,16 +3749,26 @@ pub(crate) mod tests { } // insert a fork at block 2 - let fork_hash_root = - insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None) - .unwrap(); + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + H256::random(), + vec![UncheckedXt::new_transaction(2.into(), ())], + None, + ) + .unwrap(); insert_block( &backend, 3, fork_hash_root, None, H256::random(), - vec![3.into(), 11.into()], + vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()), + ], None, ) .unwrap(); @@ -3758,7 +3778,10 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(fork_hash_root).unwrap() + ); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); @@ -3772,16 +3795,28 @@ pub(crate) mod tests { assert_eq!(None, bc.body(blocks[1]).unwrap()); assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + 
Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); } else { for i in 0..5 { - assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction((i as u64).into(), ())]), + bc.body(blocks[i]).unwrap() + ); } } if matches!(pruning, BlocksPruning::KeepAll) { - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(fork_hash_root).unwrap() + ); } else { assert_eq!(None, bc.body(fork_hash_root).unwrap()); } @@ -3802,8 +3837,16 @@ pub(crate) mod tests { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(10), 10); let make_block = |index, parent, val: u64| { - insert_block(&backend, index, parent, None, H256::random(), vec![val.into()], None) - .unwrap() + insert_block( + &backend, + index, + parent, + None, + H256::random(), + vec![UncheckedXt::new_transaction(val.into(), ())], + None, + ) + .unwrap() }; let block_0 = make_block(0, Default::default(), 0x00); @@ -3831,18 +3874,30 @@ pub(crate) mod tests { let bc = backend.blockchain(); assert_eq!(None, bc.body(block_1b).unwrap()); assert_eq!(None, bc.body(block_2b).unwrap()); - assert_eq!(Some(vec![0x00.into()]), bc.body(block_0).unwrap()); - assert_eq!(Some(vec![0x1a.into()]), bc.body(block_1a).unwrap()); - assert_eq!(Some(vec![0x2a.into()]), bc.body(block_2a).unwrap()); - assert_eq!(Some(vec![0x3a.into()]), bc.body(block_3a).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x00.into(), ())]), + bc.body(block_0).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x1a.into(), ())]), + bc.body(block_1a).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x2a.into(), ())]), + bc.body(block_2a).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0x3a.into(), ())]), + bc.body(block_3a).unwrap() + ); } #[test] fn indexed_data_block_body() { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); + let x0 = UncheckedXt::new_transaction(0.into(), ()).encode(); + let x1 = UncheckedXt::new_transaction(1.into(), ()).encode(); let x0_hash = as sp_core::Hasher>::hash(&x0[1..]); let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); let index = vec![ @@ -3863,7 +3918,10 @@ pub(crate) mod tests { Default::default(), None, Default::default(), - vec![0u64.into(), 1u64.into()], + vec![ + UncheckedXt::new_transaction(0.into(), ()), + UncheckedXt::new_transaction(1.into(), ()), + ], Some(index), ) .unwrap(); @@ -3885,8 +3943,9 @@ pub(crate) mod tests { fn index_invalid_size() { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); - let x0 = ExtrinsicWrapper::from(0u64).encode(); - let x1 = ExtrinsicWrapper::from(1u64).encode(); + let x0 = UncheckedXt::new_transaction(0.into(), ()).encode(); + let x1 = UncheckedXt::new_transaction(1.into(), ()).encode(); + let x0_hash = as sp_core::Hasher>::hash(&x0[..]); let x1_hash = as sp_core::Hasher>::hash(&x1[..]); let index = vec![ @@ -3907,7 +3966,10 @@ pub(crate) mod tests { Default::default(), None, Default::default(), - vec![0u64.into(), 1u64.into()], + vec![ + UncheckedXt::new_transaction(0.into(), ()), + UncheckedXt::new_transaction(1.into(), ()), + ], Some(index), ) .unwrap(); @@ -3921,7 
+3983,7 @@ pub(crate) mod tests { let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); - let x1 = ExtrinsicWrapper::from(0u64).encode(); + let x1 = UncheckedXt::new_transaction(0.into(), ()).encode(); let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); for i in 0..10 { let mut index = Vec::new(); @@ -3941,7 +4003,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], Some(index), ) .unwrap(); @@ -3975,7 +4037,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -3990,7 +4052,7 @@ pub(crate) mod tests { blocks[1], None, sp_core::H256::random(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4004,7 +4066,7 @@ pub(crate) mod tests { blocks[0], None, sp_core::H256::random(), - vec![42.into()], + vec![UncheckedXt::new_transaction(42.into(), ())], None, ) .unwrap(); @@ -4478,7 +4540,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4493,7 +4555,10 @@ pub(crate) mod tests { // Check that we can properly access values when there is reference count // but no value. - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); // Block 1 gets pinned three times backend.pin_block(blocks[1]).unwrap(); @@ -4510,27 +4575,42 @@ pub(crate) mod tests { // Block 0, 1, 2, 3 are pinned, so all values should be cached. // Block 4 is inside the pruning window, its value is in db. 
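// Illustrative sketch of the hashing pattern in the indexed-body tests above: the indexed
// portion of a SCALE-encoded extrinsic is hashed with the block's hasher. `BlakeTwo256` is
// used here on an arbitrary byte string; the tests go through the block's hashing type,
// which resolves to the same hasher for these test blocks (assumption).
fn indexed_hash_sketch() {
	use sp_core::Hasher;
	use sp_runtime::traits::BlakeTwo256;

	let encoded: &[u8] = b"\x04scale encoded extrinsic bytes";
	// Skip the leading byte that is not part of the indexed payload, as the tests do with
	// `&x0[1..]`, then hash the remainder.
	let digest: sp_core::H256 = <BlakeTwo256 as Hasher>::hash(&encoded[1..]);
	assert_eq!(digest.as_bytes().len(), 32);
}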
- assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0.into(), ())]), + bc.body(blocks[0]).unwrap() + ); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(1))), bc.justifications(blocks[1]).unwrap() ); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(blocks[2]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(2))), bc.justifications(blocks[2]).unwrap() ); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(3))), bc.justifications(blocks[3]).unwrap() ); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() @@ -4561,7 +4641,10 @@ pub(crate) mod tests { assert!(bc.justifications(blocks[1]).unwrap().is_none()); // Block 4 is inside the pruning window and still kept - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() @@ -4569,9 +4652,16 @@ pub(crate) mod tests { // Block tree: // 0 -> 1 -> 2 -> 3 -> 4 -> 5 - let hash = - insert_block(&backend, 5, prev_hash, None, Default::default(), vec![5.into()], None) - .unwrap(); + let hash = insert_block( + &backend, + 5, + prev_hash, + None, + Default::default(), + vec![UncheckedXt::new_transaction(5.into(), ())], + None, + ) + .unwrap(); blocks.push(hash); backend.pin_block(blocks[4]).unwrap(); @@ -4586,12 +4676,18 @@ pub(crate) mod tests { assert!(bc.body(blocks[2]).unwrap().is_none()); assert!(bc.body(blocks[3]).unwrap().is_none()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); assert_eq!( Some(Justifications::from(build_justification(4))), bc.justifications(blocks[4]).unwrap() ); - assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(5.into(), ())]), + bc.body(blocks[5]).unwrap() + ); assert!(bc.header(blocks[5]).ok().flatten().is_some()); backend.unpin_block(blocks[4]); @@ -4601,9 +4697,16 @@ pub(crate) mod tests { // Append a justification to block 5. 
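// Illustrative sketch of the `Justifications` behaviour exercised by the assertions around
// here: justifications are keyed by consensus engine id, a second engine id can be appended,
// and a duplicate engine id is rejected. Engine ids and payloads below are arbitrary.
fn justifications_sketch() {
	use sp_runtime::Justifications;

	let mut justifications = Justifications::from((*b"FRNK", vec![1, 2, 3]));
	assert!(justifications.append(([0, 0, 0, 1], vec![42])));
	assert!(!justifications.append((*b"FRNK", vec![9]))); // duplicate engine id: not inserted
	assert_eq!(justifications.get(*b"FRNK"), Some(&vec![1, 2, 3]));
}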
backend.append_justification(blocks[5], ([0, 0, 0, 1], vec![42])).unwrap(); - let hash = - insert_block(&backend, 6, blocks[5], None, Default::default(), vec![6.into()], None) - .unwrap(); + let hash = insert_block( + &backend, + 6, + blocks[5], + None, + Default::default(), + vec![UncheckedXt::new_transaction(6.into(), ())], + None, + ) + .unwrap(); blocks.push(hash); // Pin block 5 so it gets loaded into the cache on prune @@ -4616,7 +4719,10 @@ pub(crate) mod tests { op.mark_finalized(blocks[6], None).unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(5.into(), ())]), + bc.body(blocks[5]).unwrap() + ); assert!(bc.header(blocks[5]).ok().flatten().is_some()); let mut expected = Justifications::from(build_justification(5)); expected.append(([0, 0, 0, 1], vec![42])); @@ -4638,7 +4744,7 @@ pub(crate) mod tests { prev_hash, None, Default::default(), - vec![i.into()], + vec![UncheckedXt::new_transaction(i.into(), ())], None, ) .unwrap(); @@ -4654,16 +4760,26 @@ pub(crate) mod tests { // Block tree: // 0 -> 1 -> 2 -> 3 -> 4 // \ -> 2 -> 3 - let fork_hash_root = - insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None) - .unwrap(); + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + H256::random(), + vec![UncheckedXt::new_transaction(2.into(), ())], + None, + ) + .unwrap(); let fork_hash_3 = insert_block( &backend, 3, fork_hash_root, None, H256::random(), - vec![3.into(), 11.into()], + vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()), + ], None, ) .unwrap(); @@ -4684,14 +4800,35 @@ pub(crate) mod tests { } let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(0.into(), ())]), + bc.body(blocks[0]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(1.into(), ())]), + bc.body(blocks[1]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(2.into(), ())]), + bc.body(blocks[2]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(3.into(), ())]), + bc.body(blocks[3]).unwrap() + ); + assert_eq!( + Some(vec![UncheckedXt::new_transaction(4.into(), ())]), + bc.body(blocks[4]).unwrap() + ); // Check the fork hashes. assert_eq!(None, bc.body(fork_hash_root).unwrap()); - assert_eq!(Some(vec![3.into(), 11.into()]), bc.body(fork_hash_3).unwrap()); + assert_eq!( + Some(vec![ + UncheckedXt::new_transaction(3.into(), ()), + UncheckedXt::new_transaction(11.into(), ()) + ]), + bc.body(fork_hash_3).unwrap() + ); // Unpin all blocks, except the forked one. 
for block in &blocks { diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index 0b591c967e6019339e5970e4f94ae5e89abded82..a79f5ab3ac7d91854219d70f69482c70766598b3 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -613,14 +613,16 @@ impl<'a, 'b> codec::Input for JoinInput<'a, 'b> { mod tests { use super::*; use codec::Input; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; - type Block = RawBlock>; + use sp_runtime::testing::{Block as RawBlock, MockCallU64, TestXt}; + + pub type UncheckedXt = TestXt; + type Block = RawBlock; #[cfg(feature = "rocksdb")] #[test] fn database_type_subdir_migration() { use std::path::PathBuf; - type Block = RawBlock>; + type Block = RawBlock; fn check_dir_for_db_type( db_type: DatabaseType, diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs index ac3f7a1b8c74c24fbccf44906f97249739bee247..7649c8cc63700585ab6eec932088308f190c1863 100644 --- a/substrate/client/network-gossip/src/state_machine.rs +++ b/substrate/client/network-gossip/src/state_machine.rs @@ -549,7 +549,7 @@ mod tests { }; use sc_network_types::multiaddr::Multiaddr; use sp_runtime::{ - testing::{Block as RawBlock, ExtrinsicWrapper, H256}, + testing::{Block as RawBlock, MockCallU64, TestXt, H256}, traits::NumberFor, }; use std::{ @@ -558,7 +558,7 @@ mod tests { sync::{Arc, Mutex}, }; - type Block = RawBlock>; + type Block = RawBlock>; macro_rules! push_msg { ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 86c66c22701cc9af68236494a40b26d9df639276..49e0797c126c5a2adfce1e5400105a806f9cd063 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -648,7 +648,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { let mut list: LinkedHashSet<_> = self .permanent_addresses .iter() - .filter_map(|(p, a)| (*p == peer_id).then_some(a.clone())) + .filter_map(|(p, a)| (*p == peer_id).then(|| a.clone())) .collect(); if let Some(ephemeral_addresses) = self.ephemeral_addresses.get(&peer_id) { diff --git a/substrate/client/network/sync/src/blocks.rs b/substrate/client/network/sync/src/blocks.rs index af88c5245dcb0a4cddac6087abd07c073e97d241..eedba18bebe317d824fd66070325ab9bfcbbebdf 100644 --- a/substrate/client/network/sync/src/blocks.rs +++ b/substrate/client/network/sync/src/blocks.rs @@ -265,9 +265,9 @@ mod test { use sc_network_common::sync::message; use sc_network_types::PeerId; use sp_core::H256; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; + use sp_runtime::testing::{Block as RawBlock, MockCallU64, TestXt}; - type Block = RawBlock>; + type Block = RawBlock>; fn is_empty(bc: &BlockCollection) -> bool { bc.blocks.is_empty() && bc.peer_requests.is_empty() diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs index b0e28d00f64ac5608e395939d8e51b72898b1a9c..202033e8e00afc42cd3369baf6e92cd6a9716c0c 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync.rs @@ -42,7 +42,6 @@ use crate::{ LOG_TARGET, }; -use codec::Encode; use log::{debug, error, info, trace, warn}; use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64}; use sc_client_api::{blockchain::BlockGap, BlockBackend, ProofProvider}; @@ -57,8 +56,7 @@ use 
sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_consensus::{BlockOrigin, BlockStatus}; use sp_runtime::{ traits::{ - Block as BlockT, CheckedSub, Hash, HashingFor, Header as HeaderT, NumberFor, One, - SaturatedConversion, Zero, + Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, EncodedJustification, Justifications, }; @@ -2305,24 +2303,6 @@ pub fn validate_blocks( return Err(BadPeer(*peer_id, rep::BAD_BLOCK)); } } - if let (Some(header), Some(body)) = (&b.header, &b.body) { - let expected = *header.extrinsics_root(); - let got = HashingFor::::ordered_trie_root( - body.iter().map(Encode::encode).collect(), - sp_runtime::StateVersion::V0, - ); - if expected != got { - debug!( - target: LOG_TARGET, - "Bad extrinsic root for a block {} received from {}. Expected {:?}, got {:?}", - b.hash, - peer_id, - expected, - got, - ); - return Err(BadPeer(*peer_id, rep::BAD_BLOCK)); - } - } } Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number())) diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs index a04ab8be4fea5b182da5fa855b18271833f70b20..d69ab3e2d535e9605d227316b0faa02b87952d46 100644 --- a/substrate/client/network/sync/src/strategy/state.rs +++ b/substrate/client/network/sync/src/strategy/state.rs @@ -173,7 +173,7 @@ impl StateStrategy { peer_id: PeerId, announce: &BlockAnnounce, ) -> Option<(B::Hash, NumberFor)> { - is_best.then_some({ + is_best.then(|| { let best_number = *announce.header.number(); let best_hash = announce.header.hash(); if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs index cce6a93caf43dc3e2b31ff33f6b76fe422732d8c..0c71dd3c6aeeea59470f30b4dbe2176541d81285 100644 --- a/substrate/client/network/sync/src/strategy/warp.rs +++ b/substrate/client/network/sync/src/strategy/warp.rs @@ -301,7 +301,7 @@ where peer_id: PeerId, announce: &BlockAnnounce, ) -> Option<(B::Hash, NumberFor)> { - is_best.then_some({ + is_best.then(|| { let best_number = *announce.header.number(); let best_hash = announce.header.hash(); if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index bbbe7018d1060e5e87ed94dca9b1afabab978cc9..71b40211e1263e7ad808e7037191fee26a0e4909 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -22,15 +22,15 @@ codec = { features = ["derive"], workspace = true, default-features = true } fnv = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } -hyperv14 = { features = [ - "http2", - "stream", -], workspace = true, default-features = true } -hyper-rustls = { features = ["http2"], workspace = true } +http-body-util = { workspace = true } +hyper = { features = ["http1", "http2"], workspace = true, default-features = true } +hyper-rustls = { workspace = true } +hyper-util = { features = ["client-legacy", "http1", "http2"], workspace = true } num_cpus = { workspace = true } once_cell = { workspace = true } parking_lot = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } +rustls = { workspace = true } threadpool = { workspace = true } tracing = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } diff --git 
a/substrate/client/offchain/src/api.rs b/substrate/client/offchain/src/api.rs index 19ccdbcf498f4a7a53cc77cf1936da584238076d..a5981f14c093ce3c41a9e97e7e3331b6f2ff351e 100644 --- a/substrate/client/offchain/src/api.rs +++ b/substrate/client/offchain/src/api.rs @@ -326,7 +326,7 @@ mod tests { fn offchain_api() -> (Api, AsyncApi) { sp_tracing::try_init_simple(); let mock = Arc::new(TestNetwork()); - let shared_client = SharedClient::new(); + let shared_client = SharedClient::new().unwrap(); AsyncApi::new(mock, false, shared_client) } diff --git a/substrate/client/offchain/src/api/http.rs b/substrate/client/offchain/src/api/http.rs index 73407b1359d772cb28071fa0ba2f6e35b8b47f00..56f5c0230094eafe9655d87b06d6fa8b446ceae1 100644 --- a/substrate/client/offchain/src/api/http.rs +++ b/substrate/client/offchain/src/api/http.rs @@ -27,14 +27,14 @@ //! (i.e.: the socket should continue being processed) in the background even if the runtime isn't //! actively calling any function. -use hyperv14 as hyper; - use crate::api::timestamp; use bytes::buf::{Buf, Reader}; use fnv::FnvHashMap; use futures::{channel::mpsc, future, prelude::*}; -use hyper::{client, Body, Client as HyperClient}; +use http_body_util::{combinators::BoxBody, StreamBody}; +use hyper::body::Body as _; use hyper_rustls::{HttpsConnector, HttpsConnectorBuilder}; +use hyper_util::{client::legacy as client, rt::TokioExecutor}; use once_cell::sync::Lazy; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp}; @@ -48,21 +48,26 @@ use std::{ const LOG_TARGET: &str = "offchain-worker::http"; +pub type Body = BoxBody; + +type Sender = mpsc::Sender, hyper::Error>>; +type Receiver = mpsc::Receiver, hyper::Error>>; + +type HyperClient = client::Client, Body>; +type LazyClient = Lazy HyperClient + Send>>; + /// Wrapper struct used for keeping the hyper_rustls client running. #[derive(Clone)] -pub struct SharedClient(Arc, Body>>>); +pub struct SharedClient(Arc); impl SharedClient { - pub fn new() -> Self { - Self(Arc::new(Lazy::new(|| { - let connector = HttpsConnectorBuilder::new() - .with_native_roots() - .https_or_http() - .enable_http1() - .enable_http2() - .build(); - HyperClient::builder().build(connector) - }))) + pub fn new() -> std::io::Result { + let builder = HttpsConnectorBuilder::new() + .with_provider_and_native_roots(rustls::crypto::ring::default_provider())?; + Ok(Self(Arc::new(Lazy::new(Box::new(|| { + let connector = builder.https_or_http().enable_http1().enable_http2().build(); + client::Client::builder(TokioExecutor::new()).build(connector) + }))))) } } @@ -105,23 +110,23 @@ pub struct HttpApi { /// One active request within `HttpApi`. enum HttpApiRequest { /// The request object is being constructed locally and not started yet. - NotDispatched(hyper::Request, hyper::body::Sender), + NotDispatched(hyper::Request, Sender), /// The request has been dispatched and we're in the process of sending out the body (if the /// field is `Some`) or waiting for a response (if the field is `None`). - Dispatched(Option), + Dispatched(Option), /// Received a response. Response(HttpApiRequestRp), /// A request has been dispatched but the worker notified us of an error. We report this /// failure to the user as an `IoError` and remove the request from the list as soon as /// possible. - Fail(hyper::Error), + Fail(client::Error), } /// A request within `HttpApi` that has received a response. 
struct HttpApiRequestRp { /// We might still be writing the request's body when the response comes. /// This field allows to continue writing that body. - sending_body: Option, + sending_body: Option, /// Status code of the response. status_code: hyper::StatusCode, /// Headers of the response. @@ -132,7 +137,7 @@ struct HttpApiRequestRp { /// Elements extracted from the channel are first put into `current_read_chunk`. /// If the channel produces an error, then that is translated into an `IoError` and the request /// is removed from the list. - body: stream::Fuse>>, + body: stream::Fuse, /// Chunk that has been extracted from the channel and that is currently being read. /// Reading data from the response should read from this field in priority. current_read_chunk: Option>, @@ -144,7 +149,9 @@ impl HttpApi { // Start by building the prototype of the request. // We do this first so that we don't touch anything in `self` if building the prototype // fails. - let (body_sender, body) = hyper::Body::channel(); + let (body_sender, receiver) = mpsc::channel(0); + let body = StreamBody::new(receiver); + let body = BoxBody::new(body); let mut request = hyper::Request::new(body); *request.method_mut() = hyper::Method::from_bytes(method.as_bytes()).map_err(|_| ())?; *request.uri_mut() = hyper::Uri::from_maybe_shared(uri.to_owned()).map_err(|_| ())?; @@ -158,7 +165,7 @@ impl HttpApi { target: LOG_TARGET, "Overflow in offchain worker HTTP request ID assignment" ); - return Err(()) + return Err(()); }, }; self.requests @@ -213,7 +220,7 @@ impl HttpApi { // Closure that writes data to a sender, taking the deadline into account. Can return `Ok` // (if the body has been written), or `DeadlineReached`, or `IoError`. // If `IoError` is returned, don't forget to remove the request from the list. - let mut poll_sender = move |sender: &mut hyper::body::Sender| -> Result<(), HttpError> { + let mut poll_sender = move |sender: &mut Sender| -> Result<(), HttpError> { let mut when_ready = future::maybe_done(future::poll_fn(|cx| sender.poll_ready(cx))); futures::executor::block_on(future::select(&mut when_ready, &mut deadline)); match when_ready { @@ -221,12 +228,15 @@ impl HttpApi { future::MaybeDone::Done(Err(_)) => return Err(HttpError::IoError), future::MaybeDone::Future(_) | future::MaybeDone::Gone => { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); - return Err(HttpError::DeadlineReached) + return Err(HttpError::DeadlineReached); }, }; futures::executor::block_on( - sender.send_data(hyper::body::Bytes::from(chunk.to_owned())), + async { + future::poll_fn(|cx| sender.poll_ready(cx)).await?; + sender.start_send(Ok(hyper::body::Frame::data(hyper::body::Bytes::from(chunk.to_owned())))) + } ) .map_err(|_| { tracing::error!(target: "offchain-worker::http", "HTTP sender refused data despite being ready"); @@ -250,13 +260,13 @@ impl HttpApi { match poll_sender(&mut sender) { Err(HttpError::IoError) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, other => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, HttpApiRequest::Dispatched(Some(sender))); - return other + return other; }, } } else { @@ -265,7 +275,7 @@ impl HttpApi { // Writing an empty body is a hint that we should stop writing. Dropping // the sender. 
self.requests.insert(request_id, HttpApiRequest::Dispatched(None)); - return Ok(()) + return Ok(()); } }, @@ -281,13 +291,13 @@ impl HttpApi { ) { Err(HttpError::IoError) => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, other => { tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, HttpApiRequest::Response(response)); - return other + return other; }, } } else { @@ -302,7 +312,7 @@ impl HttpApi { ..response }), ); - return Ok(()) + return Ok(()); } }, @@ -311,7 +321,7 @@ impl HttpApi { // If the request has already failed, return without putting back the request // in the list. - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, v @ HttpApiRequest::Dispatched(None) | @@ -320,7 +330,7 @@ impl HttpApi { // We have already finished sending this body. self.requests.insert(request_id, v); - return Err(HttpError::Invalid) + return Err(HttpError::Invalid); }, } } @@ -340,7 +350,7 @@ impl HttpApi { Some(HttpApiRequest::Dispatched(sending_body)) | Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. })) => { let _ = sending_body.take(); - continue + continue; }, _ => continue, }; @@ -405,7 +415,7 @@ impl HttpApi { }, } } - return output + return output; } } @@ -418,7 +428,7 @@ impl HttpApi { msg } else { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); - continue + continue; } }; @@ -458,7 +468,7 @@ impl HttpApi { None => { tracing::error!(target: "offchain-worker::http", "Worker has crashed"); - return ids.iter().map(|_| HttpRequestStatus::IoError).collect() + return ids.iter().map(|_| HttpRequestStatus::IoError).collect(); }, } } @@ -498,14 +508,14 @@ impl HttpApi { // and we still haven't received a response. Some(rq @ HttpApiRequest::Dispatched(_)) => { self.requests.insert(request_id, rq); - return Err(HttpError::DeadlineReached) + return Err(HttpError::DeadlineReached); }, // The request has failed. Some(HttpApiRequest::Fail { .. }) => return Err(HttpError::IoError), // Request hasn't been dispatched yet; reading the body is invalid. Some(rq @ HttpApiRequest::NotDispatched(_, _)) => { self.requests.insert(request_id, rq); - return Err(HttpError::Invalid) + return Err(HttpError::Invalid); }, None => return Err(HttpError::Invalid), }; @@ -526,12 +536,12 @@ impl HttpApi { ..response }), ); - return Ok(n) + return Ok(n); }, Err(err) => { // This code should never be reached unless there's a logic error somewhere. tracing::error!(target: "offchain-worker::http", "Failed to read from current read chunk: {:?}", err); - return Err(HttpError::IoError) + return Err(HttpError::IoError); }, } } @@ -544,7 +554,10 @@ impl HttpApi { if let future::MaybeDone::Done(next_body) = next_body { match next_body { - Some(Ok(chunk)) => response.current_read_chunk = Some(chunk.reader()), + Some(Ok(chunk)) => + if let Ok(chunk) = chunk.into_data() { + response.current_read_chunk = Some(chunk.reader()); + }, Some(Err(_)) => return Err(HttpError::IoError), None => return Ok(0), // eof } @@ -552,7 +565,7 @@ impl HttpApi { if let future::MaybeDone::Done(_) = deadline { self.requests.insert(request_id, HttpApiRequest::Response(response)); - return Err(HttpError::DeadlineReached) + return Err(HttpError::DeadlineReached); } } } @@ -587,7 +600,7 @@ enum ApiToWorker { /// ID to send back when the response comes back. 
id: HttpRequestId, /// Request to start executing. - request: hyper::Request, + request: hyper::Request, }, } @@ -608,14 +621,14 @@ enum WorkerToApi { /// the next item. /// Can also be used to send an error, in case an error happened on the HTTP socket. After /// an error is sent, the channel will close. - body: mpsc::Receiver>, + body: Receiver, }, /// A request has failed because of an error. The request is then no longer valid. Fail { /// The ID that was passed to the worker. id: HttpRequestId, /// Error that happened. - error: hyper::Error, + error: client::Error, }, } @@ -626,7 +639,7 @@ pub struct HttpWorker { /// Used to receive messages from the `HttpApi`. from_api: TracingUnboundedReceiver, /// The engine that runs HTTP requests. - http_client: Arc, Body>>>, + http_client: Arc, /// HTTP requests that are being worked on by the engine. requests: Vec<(HttpRequestId, HttpWorkerRequest)>, } @@ -634,13 +647,13 @@ pub struct HttpWorker { /// HTTP request being processed by the worker. enum HttpWorkerRequest { /// Request has been dispatched and is waiting for a response from the Internet. - Dispatched(hyper::client::ResponseFuture), + Dispatched(client::ResponseFuture), /// Progressively reading the body of the response and sending it to the channel. ReadBody { /// Body to read `Chunk`s from. Only used if the channel is ready to accept data. - body: hyper::Body, + body: Body, /// Channel to the [`HttpApi`] where we send the chunks to. - tx: mpsc::Sender>, + tx: Sender, }, } @@ -663,12 +676,12 @@ impl Future for HttpWorker { let response = match Future::poll(Pin::new(&mut future), cx) { Poll::Pending => { me.requests.push((id, HttpWorkerRequest::Dispatched(future))); - continue + continue; }, Poll::Ready(Ok(response)) => response, Poll::Ready(Err(error)) => { let _ = me.to_api.unbounded_send(WorkerToApi::Fail { id, error }); - continue // don't insert the request back + continue; // don't insert the request back }, }; @@ -684,9 +697,12 @@ impl Future for HttpWorker { body: body_rx, }); - me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx: body_tx })); + me.requests.push(( + id, + HttpWorkerRequest::ReadBody { body: Body::new(body), tx: body_tx }, + )); cx.waker().wake_by_ref(); // reschedule in order to poll the new future - continue + continue; }, HttpWorkerRequest::ReadBody { mut body, mut tx } => { @@ -697,12 +713,11 @@ impl Future for HttpWorker { Poll::Ready(Err(_)) => continue, // don't insert the request back Poll::Pending => { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); - continue + continue; }, } - // `tx` is ready. Read a chunk from the socket and send it to the channel. - match Stream::poll_next(Pin::new(&mut body), cx) { + match Pin::new(&mut body).poll_frame(cx) { Poll::Ready(Some(Ok(chunk))) => { let _ = tx.start_send(Ok(chunk)); me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); @@ -762,19 +777,22 @@ mod tests { }; use crate::api::timestamp; use core::convert::Infallible; - use futures::{future, StreamExt}; + use futures::future; + use http_body_util::BodyExt; use sp_core::offchain::{Duration, Externalities, HttpError, HttpRequestId, HttpRequestStatus}; use std::sync::LazyLock; // Using LazyLock to avoid spawning lots of different SharedClients, // as spawning a SharedClient is CPU-intensive and opens lots of fds. 
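// Illustrative sketch of the hyper 1.x body plumbing introduced above: `hyper::Body::channel()`
// is gone, so a futures mpsc channel is paired with `http_body_util::StreamBody` and boxed
// into a `BoxBody`. The error type is simplified to `Infallible` here; the real code uses
// `hyper::Error`.
fn channel_body_sketch() {
	use futures::{channel::mpsc, SinkExt};
	use http_body_util::{combinators::BoxBody, StreamBody};
	use hyper::body::{Bytes, Frame};
	use std::convert::Infallible;

	let (mut tx, rx) = mpsc::channel::<Result<Frame<Bytes>, Infallible>>(0);
	let body: BoxBody<Bytes, Infallible> = BoxBody::new(StreamBody::new(rx));

	futures::executor::block_on(async move {
		// The sender side streams data frames into the request body; dropping it ends the body.
		tx.send(Ok(Frame::data(Bytes::from_static(b"chunk")))).await.unwrap();
	});

	// The boxed body is what gets handed to the request constructor.
	let _request = hyper::Request::new(body);
}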
- static SHARED_CLIENT: LazyLock = LazyLock::new(|| SharedClient::new()); + static SHARED_CLIENT: LazyLock = LazyLock::new(|| SharedClient::new().unwrap()); // Returns an `HttpApi` whose worker is ran in the background, and a `SocketAddr` to an HTTP // server that runs in the background as well. macro_rules! build_api_server { () => { - build_api_server!(hyper::Response::new(hyper::Body::from("Hello World!"))) + build_api_server!(hyper::Response::new(http_body_util::Full::new( + hyper::body::Bytes::from("Hello World!") + ))) }; ( $response:expr ) => {{ let hyper_client = SHARED_CLIENT.clone(); @@ -785,21 +803,32 @@ mod tests { let rt = tokio::runtime::Runtime::new().unwrap(); let worker = rt.spawn(worker); let server = rt.spawn(async move { - let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()).serve( - hyper::service::make_service_fn(|_| async move { - Ok::<_, Infallible>(hyper::service::service_fn( - move |req: hyper::Request| async move { - // Wait until the complete request was received and processed, - // otherwise the tests are flaky. - let _ = req.into_body().collect::>().await; - - Ok::<_, Infallible>($response) - }, - )) - }), - ); - let _ = addr_tx.send(server.local_addr()); - server.await.map_err(drop) + let addr = std::net::SocketAddr::from(([127, 0, 0, 1], 0)); + let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); + let _ = addr_tx.send(listener.local_addr().unwrap()); + loop { + let (stream, _) = listener.accept().await.unwrap(); + let io = hyper_util::rt::TokioIo::new(stream); + tokio::task::spawn(async move { + if let Err(err) = hyper::server::conn::http1::Builder::new() + .serve_connection( + io, + hyper::service::service_fn( + move |req: hyper::Request| async move { + // Wait until the complete request was received and + // processed, otherwise the tests are flaky. + let _ = req.into_body().collect().await; + + Ok::<_, Infallible>($response) + }, + ), + ) + .await + { + eprintln!("Error serving connection: {:?}", err); + } + }); + } }); let _ = rt.block_on(future::join(worker, server)); }); @@ -839,7 +868,7 @@ mod tests { let (mut api, addr) = build_api_server!(hyper::Response::builder() .version(hyper::Version::HTTP_2) - .body(hyper::Body::from("Hello World!")) + .body(http_body_util::Full::new(hyper::body::Bytes::from("Hello World!"))) .unwrap()); let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); @@ -1097,7 +1126,7 @@ mod tests { #[test] fn shared_http_client_is_only_initialized_on_access() { - let shared_client = SharedClient::new(); + let shared_client = SharedClient::new().unwrap(); { let mock = Arc::new(TestNetwork()); @@ -1112,7 +1141,7 @@ mod tests { // Check that the http client wasn't initialized, because it wasn't used. 
assert!(Lazy::into_value(Arc::try_unwrap(shared_client.0).unwrap()).is_err()); - let shared_client = SharedClient::new(); + let shared_client = SharedClient::new().unwrap(); { let mock = Arc::new(TestNetwork()); diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs index 3d5728aad17deededa8ece937c356e14e0486176..b0a7a66520b7ceb89345a3bc0d738f83e05db41e 100644 --- a/substrate/client/offchain/src/lib.rs +++ b/substrate/client/offchain/src/lib.rs @@ -153,14 +153,14 @@ impl OffchainWorkers { enable_http_requests, custom_extensions, }: OffchainWorkerOptions, - ) -> Self { - Self { + ) -> std::io::Result { + Ok(Self { runtime_api_provider, thread_pool: Mutex::new(ThreadPool::with_name( "offchain-worker".into(), num_cpus::get(), )), - shared_http_client: api::SharedClient::new(), + shared_http_client: api::SharedClient::new()?, enable_http_requests, keystore, offchain_db: offchain_db.map(OffchainDb::new), @@ -168,7 +168,7 @@ impl OffchainWorkers { is_validator, network_provider, custom_extensions: Box::new(custom_extensions), - } + }) } } @@ -466,7 +466,8 @@ mod tests { is_validator: false, enable_http_requests: false, custom_extensions: |_| Vec::new(), - }); + }) + .unwrap(); futures::executor::block_on(offchain.on_block_imported(&header)); // then diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index a88e7f2a0b3a40269d41459591af9842029d9dd4..61eb47d1b9abac4c2baa27ea13347adc977946c2 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -75,6 +75,8 @@ pub struct ChainHeadConfig { pub max_lagging_distance: usize, /// The maximum number of `chainHead_follow` subscriptions per connection. pub max_follow_subscriptions_per_connection: usize, + /// The maximum number of pending messages per subscription. + pub subscription_buffer_cap: usize, } /// Maximum pinned blocks across all connections. @@ -107,6 +109,7 @@ impl Default for ChainHeadConfig { subscription_max_ongoing_operations: MAX_ONGOING_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, } } } @@ -126,6 +129,8 @@ pub struct ChainHead, Block: BlockT, Client> { max_lagging_distance: usize, /// Phantom member to pin the block type. _phantom: PhantomData, + /// The maximum number of pending messages per subscription. + subscription_buffer_cap: usize, } impl, Block: BlockT, Client> ChainHead { @@ -148,6 +153,7 @@ impl, Block: BlockT, Client> ChainHead { backend, ), max_lagging_distance: config.max_lagging_distance, + subscription_buffer_cap: config.subscription_buffer_cap, _phantom: PhantomData, } } @@ -196,6 +202,7 @@ where let backend = self.backend.clone(); let client = self.client.clone(); let max_lagging_distance = self.max_lagging_distance; + let subscription_buffer_cap = self.subscription_buffer_cap; let fut = async move { // Ensure the current connection ID has enough space to accept a new subscription. 
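// Illustrative sketch: with the new field added above, server setup code can keep relying on
// the `Default` impl and only override the buffer cap. `ChainHeadConfig` is assumed to be in
// scope; 512 is an arbitrary example value, not a recommendation.
fn chain_head_config_sketch() -> ChainHeadConfig {
	ChainHeadConfig { subscription_buffer_cap: 512, ..Default::default() }
}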
@@ -231,6 +238,7 @@ where with_runtime, sub_id.clone(), max_lagging_distance, + subscription_buffer_cap, ); let result = chain_head_follow.generate_events(sink, sub_data).await; if let Err(SubscriptionManagementError::BlockDistanceTooLarge) = result { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index f2326f0156779c5f8544d37cdbce81d70d4a2105..e9975b36b4a10cd33af6d4aff544f6b75a4adc4d 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -28,9 +28,8 @@ use crate::chain_head::{ }; use futures::{ channel::oneshot, - stream::{self, Stream, StreamExt}, + stream::{self, Stream, StreamExt, TryStreamExt}, }; -use futures_util::future::Either; use log::debug; use sc_client_api::{ Backend, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, @@ -74,6 +73,8 @@ pub struct ChainHeadFollower, Block: BlockT, Client> { /// Stop all subscriptions if the distance between the leaves and the current finalized /// block is larger than this value. max_lagging_distance: usize, + /// The maximum number of pending messages per subscription. + pub subscription_buffer_cap: usize, } struct AnnouncedBlocks { @@ -148,6 +149,7 @@ impl, Block: BlockT, Client> ChainHeadFollower Self { Self { client, @@ -161,6 +163,7 @@ impl, Block: BlockT, Client> ChainHeadFollower( &mut self, startup_point: &StartupPoint, - mut stream: EventStream, + stream: EventStream, sink: Subscription, rx_stop: oneshot::Receiver<()>, ) -> Result<(), SubscriptionManagementError> where - EventStream: Stream> + Unpin, + EventStream: Stream> + Unpin + Send, { - let mut stream_item = stream.next(); - - // The stop event can be triggered by the chainHead logic when the pinned - // block guarantee cannot be hold. Or when the client is disconnected. 
- let connection_closed = sink.closed(); - tokio::pin!(connection_closed); - let mut stop_event = futures_util::future::select(rx_stop, connection_closed); - - while let Either::Left((Some(event), next_stop_event)) = - futures_util::future::select(stream_item, stop_event).await - { - let events = match event { - NotificationType::InitialEvents(events) => Ok(events), - NotificationType::NewBlock(notification) => - self.handle_import_blocks(notification, &startup_point), - NotificationType::Finalized(notification) => - self.handle_finalized_blocks(notification, &startup_point), - NotificationType::MethodResponse(notification) => Ok(vec![notification]), - }; + let buffer_cap = self.subscription_buffer_cap; + // create a channel to propagate error messages + let mut handle_events = |event| match event { + NotificationType::InitialEvents(events) => Ok(events), + NotificationType::NewBlock(notification) => + self.handle_import_blocks(notification, &startup_point), + NotificationType::Finalized(notification) => + self.handle_finalized_blocks(notification, &startup_point), + NotificationType::MethodResponse(notification) => Ok(vec![notification]), + }; - let events = match events { - Ok(events) => events, - Err(err) => { - debug!( - target: LOG_TARGET, - "[follow][id={:?}] Failed to handle stream notification {:?}", - self.sub_id, - err - ); - _ = sink.send(&FollowEvent::::Stop).await; - return Err(err) - }, - }; + let stream = stream + .map(|event| handle_events(event)) + .map_ok(|items| stream::iter(items).map(Ok)) + .try_flatten(); + + tokio::pin!(stream); + + let sink_future = + sink.pipe_from_try_stream(stream, sc_rpc::utils::BoundedVecDeque::new(buffer_cap)); - for event in events { - if let Err(err) = sink.send(&event).await { - // Failed to submit event. + let result = tokio::select! { + _ = rx_stop => Ok(()), + result = sink_future => { + if let Err(ref e) = result { debug!( target: LOG_TARGET, - "[follow][id={:?}] Failed to send event {:?}", self.sub_id, err + "[follow][id={:?}] Failed to handle stream notification {:?}", + &self.sub_id, + e ); - - let _ = sink.send(&FollowEvent::::Stop).await; - // No need to propagate this error further, the client disconnected. - return Ok(()) - } + }; + result } - - stream_item = stream.next(); - stop_event = next_stop_event; - } - - // If we got here either: - // - the substrate streams have closed - // - the `Stop` receiver was triggered internally (cannot hold the pinned block guarantee) - // - the client disconnected. + }; let _ = sink.send(&FollowEvent::::Stop).await; - Ok(()) + result } /// Generate the block events for the `chainHead_follow` method. 
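The rewritten `submit_events` above replaces the manual select loop with a flattened `TryStream`: each notification is mapped through `handle_events`, the resulting `Vec` of events is expanded with `map_ok`/`try_flatten`, and the piped sink future is raced against `rx_stop` in a `tokio::select!`, with a final `Stop` event sent in every case. A rough self-contained sketch of that stream shape, using only `futures` and `tokio`; the event values and error type are placeholders, not the chainHead types:

```rust
use futures::{
    channel::oneshot,
    stream::{self, StreamExt, TryStreamExt},
};

#[derive(Debug)]
struct DemoError;

#[tokio::main]
async fn main() {
    // Each notification expands into zero or more events, or fails as a whole,
    // like the `handle_events` closure above.
    let notifications =
        stream::iter(vec![Ok::<_, DemoError>(vec![1u32, 2, 3]), Ok(vec![4, 5])]);

    // `map_ok` turns every `Ok(Vec<T>)` into a sub-stream of `Ok(T)`;
    // `try_flatten` yields the items one by one and stops at the first `Err`.
    let events = notifications
        .map_ok(|items| stream::iter(items).map(Ok::<_, DemoError>))
        .try_flatten();
    tokio::pin!(events);

    // Internal stop signal, standing in for `rx_stop`.
    let (_stop_tx, mut stop_rx) = oneshot::channel::<()>();

    loop {
        tokio::select! {
            // An external stop request ends the subscription early.
            _ = &mut stop_rx => break,
            next = events.try_next() => match next {
                Ok(Some(event)) => println!("event: {event}"),
                Ok(None) => break,                 // stream finished
                Err(e) => {
                    eprintln!("stream failed: {e:?}");
                    break;
                },
            },
        }
    }
    // In the real code a final `FollowEvent::Stop` is sent here on every exit path.
}
```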
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 0c2486157bd033b829079afae86ca262f6742c67..c505566d887dba429c493cca3f32c4e1fc2ba9ac 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -44,7 +44,7 @@ use sp_core::{ use sp_runtime::traits::Block as BlockT; use sp_version::RuntimeVersion; use std::{ - collections::{HashMap, HashSet}, + collections::{HashMap, HashSet, VecDeque}, fmt::Debug, sync::Arc, time::Duration, @@ -86,6 +86,7 @@ pub async fn run_server() -> std::net::SocketAddr { subscription_max_ongoing_operations: MAX_OPERATIONS, max_follow_subscriptions_per_connection: 1, max_lagging_distance: MAX_LAGGING_DISTANCE, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -147,6 +148,7 @@ async fn setup_api() -> ( subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -254,6 +256,7 @@ async fn follow_subscription_produces_blocks() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -323,6 +326,7 @@ async fn follow_with_runtime() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -631,6 +635,7 @@ async fn call_runtime_without_flag() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1290,6 +1295,7 @@ async fn separate_operation_ids_for_subscriptions() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1376,6 +1382,7 @@ async fn follow_generates_initial_blocks() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1532,6 +1539,7 @@ async fn follow_exceeding_pinned_blocks() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1609,6 +1617,7 @@ async fn follow_with_unpin() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1715,6 +1724,7 @@ async fn unpin_duplicate_hashes() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: 
MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1818,6 +1828,7 @@ async fn follow_with_multiple_unpin_hashes() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -1963,6 +1974,7 @@ async fn follow_prune_best_block() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2149,6 +2161,7 @@ async fn follow_forks_pruned_block() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2309,6 +2322,7 @@ async fn follow_report_multiple_pruned_block() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2555,6 +2569,7 @@ async fn pin_block_references() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2690,6 +2705,7 @@ async fn follow_finalized_before_new_block() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2805,6 +2821,7 @@ async fn ensure_operation_limits_works() { subscription_max_ongoing_operations: 1, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -2910,6 +2927,7 @@ async fn storage_is_backpressured() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3047,6 +3065,7 @@ async fn stop_storage_operation() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3344,6 +3363,7 @@ async fn chain_head_stop_all_subscriptions() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: 5, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3557,6 +3577,7 @@ async fn chain_head_limit_reached() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: 1, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3597,6 +3618,7 @@ async fn follow_unique_pruned_blocks() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, 
max_lagging_distance: MAX_LAGGING_DISTANCE, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3766,6 +3788,7 @@ async fn follow_report_best_block_of_a_known_block() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_lagging_distance: MAX_LAGGING_DISTANCE, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -3984,6 +4007,7 @@ async fn follow_event_with_unknown_parent() { subscription_max_ongoing_operations: MAX_OPERATIONS, max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, max_lagging_distance: MAX_LAGGING_DISTANCE, + subscription_buffer_cap: MAX_PINNED_BLOCKS, }, ) .into_rpc(); @@ -4033,3 +4057,54 @@ async fn follow_event_with_unknown_parent() { // When importing the block 2, chainHead detects a gap in our blocks and stops. assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); } + +#[tokio::test] +async fn events_are_backpressured() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TokioTestExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + max_lagging_distance: MAX_LAGGING_DISTANCE, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + subscription_buffer_cap: 10, + }, + ) + .into_rpc(); + + let mut parent_hash = client.chain_info().genesis_hash; + let mut header = VecDeque::new(); + let mut sub = api.subscribe("chainHead_v1_follow", [false], 1).await.unwrap(); + + // insert more events than the user can consume + for i in 0..=5 { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(i) + .build() + .unwrap() + .build() + .unwrap() + .block; + header.push_front(block.header().clone()); + + parent_hash = block.hash(); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + } + + let mut events = Vec::new(); + + while let Some(event) = sub.next::>().await { + events.push(event); + } + + assert_eq!(events.len(), 2); + assert_matches!(events.pop().unwrap().map(|x| x.0), Ok(FollowEvent::Stop)); +} diff --git a/substrate/client/rpc/src/utils.rs b/substrate/client/rpc/src/utils.rs index e2ff04c0baf362516d8ef8da11b7ef53713e3fac..b94f062cddab6782bb5b03ed1a11006a37d8ef53 100644 --- a/substrate/client/rpc/src/utils.rs +++ b/substrate/client/rpc/src/utils.rs @@ -21,7 +21,7 @@ use crate::SubscriptionTaskExecutor; use futures::{ future::{self, Either, Fuse, FusedFuture}, - Future, FutureExt, Stream, StreamExt, + Future, FutureExt, Stream, StreamExt, TryStream, TryStreamExt, }; use jsonrpsee::{ types::SubscriptionId, DisconnectError, PendingSubscriptionSink, SubscriptionMessage, @@ -173,14 +173,27 @@ impl From for Subscription { impl Subscription { /// Feed items to the subscription from the underlying stream /// with specified buffer strategy. 
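The follower pipes its events through a caller-supplied buffer strategy (`sink.pipe_from_try_stream(stream, BoundedVecDeque::new(buffer_cap))` above, with `buffer_cap = subscription_buffer_cap`): when a slow consumer lets the buffer fill up, `push` fails and the subscription is ended with a `Stop` event, which is what the new `events_are_backpressured` test asserts. A tiny self-contained sketch of that push/pop shape; `BoundedQueue` is illustrative, not `sc_rpc::utils::BoundedVecDeque`:

```rust
// Illustrative only: a bounded buffer with the same push/pop contract as the
// buffer strategy used by the subscription pipe above.
use std::collections::VecDeque;

struct BoundedQueue<T> {
    items: VecDeque<T>,
    cap: usize,
}

impl<T> BoundedQueue<T> {
    fn new(cap: usize) -> Self {
        Self { items: VecDeque::new(), cap }
    }

    /// Fails (returning the rejected item) once the capacity is reached.
    fn push(&mut self, item: T) -> Result<(), T> {
        if self.items.len() >= self.cap {
            return Err(item);
        }
        self.items.push_back(item);
        Ok(())
    }

    fn pop(&mut self) -> Option<T> {
        self.items.pop_front()
    }
}

fn main() {
    let mut buf = BoundedQueue::new(2);
    assert!(buf.push("block-1").is_ok());
    assert!(buf.push("block-2").is_ok());
    // A third pending message exceeds the cap: the caller treats this as the
    // signal to stop the subscription rather than queueing unboundedly.
    assert!(buf.push("block-3").is_err());
    assert_eq!(buf.pop(), Some("block-1"));
}
```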
- pub async fn pipe_from_stream(self, mut stream: S, mut buf: B) + pub async fn pipe_from_stream(&self, stream: S, buf: B) where - S: Stream + Unpin + Send + 'static, - T: Serialize + Send + 'static, + S: Stream + Unpin, + T: Serialize + Send, + B: Buffer, + { + self.pipe_from_try_stream(stream.map(Ok::), buf) + .await + .expect("No Err will be ever encountered.qed"); + } + + /// Feed items to the subscription from the underlying stream + /// with specified buffer strategy. + pub async fn pipe_from_try_stream(&self, mut stream: S, mut buf: B) -> Result<(), E> + where + S: TryStream + Unpin, + T: Serialize + Send, B: Buffer, { let mut next_fut = Box::pin(Fuse::terminated()); - let mut next_item = stream.next(); + let mut next_item = stream.try_next(); let closed = self.0.closed(); futures::pin_mut!(closed); @@ -201,7 +214,7 @@ impl Subscription { next_fut = Box::pin(Fuse::terminated()); }, // New item from the stream - Either::Right((Either::Right((Some(v), n)), c)) => { + Either::Right((Either::Right((Ok(Some(v)), n)), c)) => { if buf.push(v).is_err() { log::debug!( target: "rpc", @@ -209,31 +222,35 @@ impl Subscription { self.0.method_name(), self.0.connection_id().0 ); - return + return Ok(()); } next_fut = n; closed = c; - next_item = stream.next(); + next_item = stream.try_next(); }, + // Error occured while processing the stream. + // + // terminate the stream. + Either::Right((Either::Right((Err(e), _)), _)) => return Err(e), // Stream "finished". // // Process remaining items and terminate. - Either::Right((Either::Right((None, pending_fut)), _)) => { + Either::Right((Either::Right((Ok(None), pending_fut)), _)) => { if !pending_fut.is_terminated() && pending_fut.await.is_err() { - return; + return Ok(()); } while let Some(v) = buf.pop() { if self.send(&v).await.is_err() { - return; + return Ok(()); } } - return; + return Ok(()); }, // Subscription was closed. - Either::Left(_) => return, + Either::Left(_) => return Ok(()), } } } diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 2db34bc3f32f48a741cd6bb9b7edbe23093e07ef..0d8c1cbba9b414f9222248d7873fe61fcc496acc 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -91,6 +91,15 @@ impl ChainApi for TestApi { }))) } + fn validate_transaction_blocking( + &self, + _at: ::Hash, + _source: TransactionSource, + _uxt: Arc<::Extrinsic>, + ) -> sc_transaction_pool_api::error::Result { + unimplemented!(); + } + fn block_id_to_number( &self, at: &BlockId, diff --git a/substrate/client/transaction-pool/src/common/api.rs b/substrate/client/transaction-pool/src/common/api.rs index a5185ba606ef4d6b603a7b5da0cd574bd2b29010..e16c0f2efa5113fbb1cb8244bb66d4ab2c329de7 100644 --- a/substrate/client/transaction-pool/src/common/api.rs +++ b/substrate/client/transaction-pool/src/common/api.rs @@ -162,6 +162,18 @@ where .boxed() } + /// Validates a transaction by calling into the runtime. + /// + /// Same as `validate_transaction` but blocks the current thread when performing validation. 
+ fn validate_transaction_blocking( + &self, + at: Block::Hash, + source: TransactionSource, + uxt: graph::ExtrinsicFor, + ) -> error::Result { + validate_transaction_blocking(&*self.client, at, source, uxt) + } + fn block_id_to_number( &self, at: &BlockId, @@ -272,28 +284,3 @@ where result } - -impl FullChainApi -where - Block: BlockT, - Client: ProvideRuntimeApi - + BlockBackend - + BlockIdTo - + HeaderBackend - + HeaderMetadata, - Client: Send + Sync + 'static, - Client::Api: TaggedTransactionQueue, -{ - /// Validates a transaction by calling into the runtime, same as - /// `validate_transaction` but blocks the current thread when performing - /// validation. Only implemented for `FullChainApi` since we can call into - /// the runtime locally. - pub fn validate_transaction_blocking( - &self, - at: Block::Hash, - source: TransactionSource, - uxt: graph::ExtrinsicFor, - ) -> error::Result { - validate_transaction_blocking(&*self.client, at, source, uxt) - } -} diff --git a/substrate/client/transaction-pool/src/common/tests.rs b/substrate/client/transaction-pool/src/common/tests.rs index 1cbabf8b5fde52e92d01069db8c10302264435fd..b00cf5fbfede903b3adeeaf13bc8aa28281cd633 100644 --- a/substrate/client/transaction-pool/src/common/tests.rs +++ b/substrate/client/transaction-pool/src/common/tests.rs @@ -156,6 +156,15 @@ impl ChainApi for TestApi { futures::future::ready(Ok(res)) } + fn validate_transaction_blocking( + &self, + _at: ::Hash, + _source: TransactionSource, + _uxt: Arc<::Extrinsic>, + ) -> error::Result { + unimplemented!(); + } + /// Returns a block number given the block id. fn block_id_to_number( &self, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 404225167e576b9b8fd671f575270417c38f1249..7e72b44adf38b7ebb245874a50ea6c4067a9178c 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -53,7 +53,7 @@ use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Extrinsic, NumberFor}, + traits::{Block as BlockT, NumberFor}, }; use std::{ collections::{HashMap, HashSet}, @@ -599,7 +599,7 @@ where log::debug!(target: LOG_TARGET, "fatp::submit_at count:{} views:{}", xts.len(), self.active_views_count()); log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.tx_hash(xt)), "[{:?}] fatp::submit_at"); let xts = xts.into_iter().map(Arc::from).collect::>(); - let mempool_result = self.mempool.extend_unwatched(source, xts.clone()); + let mempool_result = self.mempool.extend_unwatched(source, &xts); if view_store.is_empty() { return future::ready(Ok(mempool_result)).boxed() @@ -838,16 +838,16 @@ where fn submit_local( &self, _at: Block::Hash, - _xt: sc_transaction_pool_api::LocalTransactionFor, + xt: sc_transaction_pool_api::LocalTransactionFor, ) -> Result { - //todo [#5493] - //looks like view_store / view needs non async submit_local method ?. 
- let e = Err(sc_transaction_pool_api::error::Error::Unactionable.into()); - log::warn!( - target: LOG_TARGET, - "LocalTransactionPool::submit_local is not implemented for ForkAwareTxPool, returning error: {e:?}", - ); - e + log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); + let xt = Arc::from(xt); + let result = self + .mempool + .extend_unwatched(TransactionSource::Local, &[xt.clone()]) + .remove(0)?; + + self.view_store.submit_local(xt).or_else(|_| Ok(result)) } } @@ -1194,8 +1194,7 @@ where None }) .unwrap_or_default() - .into_iter() - .filter(|tx| tx.is_signed().unwrap_or(true)); + .into_iter(); let mut resubmitted_to_report = 0; diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 86ea27dcf4517bf4f0f6af55f1316d15ad0216e6..989c7e8ef356e74af9c565f54ae4513034ea649c 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -237,11 +237,11 @@ where pub(super) fn extend_unwatched( &self, source: TransactionSource, - xts: Vec>, + xts: &[ExtrinsicFor], ) -> Vec, ChainApi::Error>> { let mut transactions = self.transactions.write(); let result = xts - .into_iter() + .iter() .map(|xt| { let hash = self.api.hash_and_length(&xt).0; self.try_insert( @@ -437,7 +437,7 @@ mod tx_mem_pool_tests { let xts = (0..max + 1).map(|x| Arc::from(uxt(x as _))).collect::>(); - let results = mempool.extend_unwatched(TransactionSource::External, xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts); assert!(results.iter().take(max).all(Result::is_ok)); assert!(matches!( results.into_iter().last().unwrap().unwrap_err(), @@ -455,7 +455,7 @@ mod tx_mem_pool_tests { let mut xts = (0..max - 1).map(|x| Arc::from(uxt(x as _))).collect::>(); xts.push(xts.iter().last().unwrap().clone()); - let results = mempool.extend_unwatched(TransactionSource::External, xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts); assert!(results.iter().take(max - 1).all(Result::is_ok)); assert!(matches!( results.into_iter().last().unwrap().unwrap_err(), @@ -471,7 +471,7 @@ mod tx_mem_pool_tests { let xts = (0..max).map(|x| Arc::from(uxt(x as _))).collect::>(); - let results = mempool.extend_unwatched(TransactionSource::External, xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts); assert!(results.iter().all(Result::is_ok)); let xt = Arc::from(uxt(98)); @@ -481,7 +481,7 @@ mod tx_mem_pool_tests { sc_transaction_pool_api::error::Error::ImmediatelyDropped )); let xt = Arc::from(uxt(99)); - let mut result = mempool.extend_unwatched(TransactionSource::External, vec![xt]); + let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt]); assert!(matches!( result.pop().unwrap().unwrap_err(), sc_transaction_pool_api::error::Error::ImmediatelyDropped @@ -498,7 +498,7 @@ mod tx_mem_pool_tests { let xt0 = xts.iter().last().unwrap().clone(); let xt1 = xts.iter().next().unwrap().clone(); - let results = mempool.extend_unwatched(TransactionSource::External, xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts); assert!(results.iter().all(Result::is_ok)); let result = mempool.push_watched(TransactionSource::External, xt0); @@ -506,7 +506,7 @@ mod tx_mem_pool_tests { result.unwrap_err(), sc_transaction_pool_api::error::Error::AlreadyImported(_) )); - let mut result = 
mempool.extend_unwatched(TransactionSource::External, vec![xt1]); + let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt1]); assert!(matches!( result.pop().unwrap().unwrap_err(), sc_transaction_pool_api::error::Error::AlreadyImported(_) @@ -521,7 +521,7 @@ mod tx_mem_pool_tests { let xts0 = (0..10).map(|x| Arc::from(uxt(x as _))).collect::>(); - let results = mempool.extend_unwatched(TransactionSource::External, xts0); + let results = mempool.extend_unwatched(TransactionSource::External, &xts0); assert!(results.iter().all(Result::is_ok)); let xts1 = (0..5).map(|x| Arc::from(uxt(2 * x))).collect::>(); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index fd5bfa8312c074764162dd780546927a18daa75b..99095d88cb0accaa0bdec86f3b594b20a25eb4ba 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -33,10 +33,11 @@ use crate::{ LOG_TARGET, }; use parking_lot::Mutex; -use sc_transaction_pool_api::{PoolStatus, TransactionSource}; +use sc_transaction_pool_api::{error::Error as TxPoolError, PoolStatus, TransactionSource}; use sp_blockchain::HashAndNumber; use sp_runtime::{ - traits::Block as BlockT, transaction_validity::TransactionValidityError, SaturatedConversion, + generic::BlockId, traits::Block as BlockT, transaction_validity::TransactionValidityError, + SaturatedConversion, }; use std::{collections::HashMap, sync::Arc, time::Instant}; @@ -178,6 +179,50 @@ where self.pool.submit_and_watch(&self.at, source, xt).await } + /// Synchronously imports single unvalidated extrinsics into the view. + pub(super) fn submit_local( + &self, + xt: ExtrinsicFor, + ) -> Result, ChainApi::Error> { + let (hash, length) = self.pool.validated_pool().api().hash_and_length(&xt); + log::trace!(target: LOG_TARGET, "[{:?}] view::submit_local at:{}", hash, self.at.hash); + + let validity = self + .pool + .validated_pool() + .api() + .validate_transaction_blocking( + self.at.hash, + TransactionSource::Local, + Arc::from(xt.clone()), + )? + .map_err(|e| { + match e { + TransactionValidityError::Invalid(i) => TxPoolError::InvalidTransaction(i), + TransactionValidityError::Unknown(u) => TxPoolError::UnknownTransaction(u), + } + .into() + })?; + + let block_number = self + .pool + .validated_pool() + .api() + .block_id_to_number(&BlockId::hash(self.at.hash))? + .ok_or_else(|| TxPoolError::InvalidBlockId(format!("{:?}", self.at.hash)))?; + + let validated = ValidatedTransaction::valid_at( + block_number.saturated_into::(), + hash, + TransactionSource::Local, + Arc::from(xt), + length, + validity, + ); + + self.pool.validated_pool().submit(vec![validated]).remove(0) + } + /// Status of the pool associated with the view. 
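`View::submit_local` above is a synchronous entry point, which is why a blocking validation hook now sits alongside the async `validate_transaction` on the pool's chain API. The sketch below only illustrates that sync/async split with placeholder types; here the blocking variant just drives the async future to completion, whereas `FullChainApi` calls straight into the runtime via the `validate_transaction_blocking` helper shown earlier in this diff:

```rust
// Not the Substrate API: `DemoChainApi` stands in for `ChainApi`.
use futures::executor::block_on;
use std::future::Future;
use std::pin::Pin;

type Validity = Result<(), String>;

trait DemoChainApi {
    fn validate_transaction(
        &self,
        tx: &str,
    ) -> Pin<Box<dyn Future<Output = Validity> + Send + '_>>;

    /// Synchronous counterpart, usable from non-async call sites such as a
    /// `submit_local`-style method.
    fn validate_transaction_blocking(&self, tx: &str) -> Validity {
        block_on(self.validate_transaction(tx))
    }
}

struct MockApi;

impl DemoChainApi for MockApi {
    fn validate_transaction(
        &self,
        tx: &str,
    ) -> Pin<Box<dyn Future<Output = Validity> + Send + '_>> {
        let verdict: Validity =
            if tx.is_empty() { Err("empty extrinsic".to_string()) } else { Ok(()) };
        Box::pin(async move { verdict })
    }
}

// A synchronous submission path can now validate inline, without an executor
// at the call site.
fn submit_local(api: &impl DemoChainApi, tx: &str) -> Validity {
    api.validate_transaction_blocking(tx)
}

fn main() {
    assert!(submit_local(&MockApi, "0xdeadbeef").is_ok());
    assert!(submit_local(&MockApi, "").is_err());
}
```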
pub(super) fn status(&self) -> PoolStatus { self.pool.validated_pool().status() @@ -243,9 +288,7 @@ where let validation_result = (api.validate_transaction(self.at.hash, tx.source, tx.data.clone()).await, tx.hash, tx); validation_results.push(validation_result); } else { - { - self.revalidation_worker_channels.lock().as_mut().map(|ch| ch.remove_sender()); - } + self.revalidation_worker_channels.lock().as_mut().map(|ch| ch.remove_sender()); should_break = true; } } => {} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 953d6d860338688cd94627ee2a2c3a8090bcde35..413fca223242a46fde881b848bb39164250a641b 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -29,6 +29,7 @@ use crate::{ ReadyIteratorFor, LOG_TARGET, }; use futures::prelude::*; +use itertools::Itertools; use parking_lot::RwLock; use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus, TransactionSource}; use sp_blockchain::TreeRoute; @@ -110,6 +111,37 @@ where HashMap::<_, _>::from_iter(results.into_iter()) } + /// Synchronously imports single unverified extrinsics into every active view. + pub(super) fn submit_local( + &self, + xt: ExtrinsicFor, + ) -> Result, ChainApi::Error> { + let active_views = self + .active_views + .read() + .iter() + .map(|(_, view)| view.clone()) + .collect::>(); + + let tx_hash = self.api.hash_and_length(&xt).0; + + let result = active_views + .iter() + .map(|view| { + self.dropped_stream_controller + .add_initial_views(std::iter::once(tx_hash), view.at.hash); + view.submit_local(xt.clone()) + }) + .find_or_first(Result::is_ok); + + if let Some(Err(err)) = result { + log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); + return Err(err) + }; + + Ok(tx_hash) + } + /// Import a single extrinsic and starts to watch its progress in the pool. /// /// The extrinsic is imported to every view, and the individual streams providing the progress @@ -155,12 +187,8 @@ where let maybe_error = futures::future::join_all(submit_and_watch_futures) .await .into_iter() - .reduce(|mut r, v| { - if r.is_err() && v.is_ok() { - r = v; - } - r - }); + .find_or_first(Result::is_ok); + if let Some(Err(err)) = maybe_error { log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); return Err((err, Some(external_watcher))); diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 6d08a0f0b93c0ed57b9e825e146cf1d0622d99be..2dd8de352c6bf3e640ccc2e229d7a008cc931331 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -73,7 +73,7 @@ pub trait ChainApi: Send + Sync { + Send + 'static; - /// Verify extrinsic at given block. + /// Asynchronously verify extrinsic at given block. fn validate_transaction( &self, at: ::Hash, @@ -81,6 +81,17 @@ pub trait ChainApi: Send + Sync { uxt: ExtrinsicFor, ) -> Self::ValidationFuture; + /// Synchronously verify given extrinsic at given block. + /// + /// Validates a transaction by calling into the runtime. Same as `validate_transaction` but + /// blocks the current thread when performing validation. + fn validate_transaction_blocking( + &self, + at: ::Hash, + source: TransactionSource, + uxt: ExtrinsicFor, + ) -> Result; + /// Returns a block number given the block id. 
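Both `submit_local` and `submit_and_watch` in `view_store.rs` now collapse the per-view results with `Itertools::find_or_first(Result::is_ok)`: return the first success if there is one, otherwise the very first result, so an error is only propagated when every view rejected the transaction. A runnable illustration of exactly that semantics, with demo values:

```rust
use itertools::Itertools;

fn main() {
    // One view accepted the transaction: the Ok wins even though it is not first.
    let per_view: Vec<Result<u32, &str>> =
        vec![Err("view-1: rejected"), Ok(42), Err("view-3: rejected")];
    assert_eq!(per_view.into_iter().find_or_first(Result::is_ok), Some(Ok(42)));

    // Every view failed: fall back to the first error.
    let all_errors: Vec<Result<u32, &str>> = vec![Err("a"), Err("b")];
    assert_eq!(all_errors.into_iter().find_or_first(Result::is_ok), Some(Err("a")));

    // No views at all: nothing to report.
    let empty: Vec<Result<u32, &str>> = vec![];
    assert_eq!(empty.into_iter().find_or_first(Result::is_ok), None);
}
```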
fn block_id_to_number( &self, diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index 6b4feca44bf8b8a94b6715ada848ab07ff637d4f..0826b95cf07063247c015dd1434c98823a8fcc95 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -46,7 +46,7 @@ use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, - traits::{AtLeast32Bit, Block as BlockT, Extrinsic, Header as HeaderT, NumberFor, Zero}, + traits::{AtLeast32Bit, Block as BlockT, Header as HeaderT, NumberFor, Zero}, }; use std::{ collections::{HashMap, HashSet}, @@ -675,8 +675,7 @@ where None }) .unwrap_or_default() - .into_iter() - .filter(|tx| tx.is_signed().unwrap_or(true)); + .into_iter(); let mut resubmitted_to_report = 0; diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 41ece6c9a27fef834eac8468731b90b6f8ed5b9d..2d0daf82997d90e7d21ff7f603cce328800f393a 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -44,8 +44,10 @@ sp-offchain = { optional = true, workspace = true } sp-session = { optional = true, workspace = true } sp-consensus-aura = { optional = true, workspace = true } sp-consensus-grandpa = { optional = true, workspace = true } +sp-genesis-builder = { optional = true, workspace = true } sp-inherents = { optional = true, workspace = true } sp-storage = { optional = true, workspace = true } +sp-keyring = { optional = true, workspace = true } frame-executive = { optional = true, workspace = true } frame-system-rpc-runtime-api = { optional = true, workspace = true } @@ -67,19 +69,20 @@ pallet-examples = { workspace = true } default = ["runtime", "std"] experimental = ["frame-support/experimental"] runtime = [ + "frame-executive", + "frame-system-rpc-runtime-api", "sp-api", "sp-block-builder", "sp-consensus-aura", "sp-consensus-grandpa", + "sp-genesis-builder", "sp-inherents", + "sp-keyring", "sp-offchain", "sp-session", "sp-storage", "sp-transaction-pool", "sp-version", - - "frame-executive", - "frame-system-rpc-runtime-api", ] std = [ "codec/std", @@ -98,8 +101,10 @@ std = [ "sp-consensus-aura?/std", "sp-consensus-grandpa?/std", "sp-core/std", + "sp-genesis-builder?/std", "sp-inherents?/std", "sp-io/std", + "sp-keyring?/std", "sp-offchain?/std", "sp-runtime/std", "sp-session?/std", diff --git a/substrate/frame/alliance/src/tests.rs b/substrate/frame/alliance/src/tests.rs index ec31ebf6a47ae8eaf93025079069efe054dd02e8..2397ebfe7db41a98fa7856991f557f8761bb790f 100644 --- a/substrate/frame/alliance/src/tests.rs +++ b/substrate/frame/alliance/src/tests.rs @@ -244,7 +244,7 @@ fn vote_works() { fn close_works() { new_test_ext().execute_with(|| { let (proposal, proposal_len, hash) = make_remark_proposal(42); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; assert_ok!(Alliance::propose( RuntimeOrigin::signed(1), 3, @@ -645,8 +645,8 @@ fn remove_unscrupulous_items_works() { #[test] fn weights_sane() { let info = crate::Call::::join_alliance {}.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::join_alliance(), info.weight); + assert_eq!(<() as crate::WeightInfo>::join_alliance(), info.call_weight); let info = crate::Call::::nominate_ally { who: 10 }.get_dispatch_info(); - assert_eq!(<() as 
crate::WeightInfo>::nominate_ally(), info.weight); + assert_eq!(<() as crate::WeightInfo>::nominate_ally(), info.call_weight); } diff --git a/substrate/frame/asset-conversion/src/weights.rs b/substrate/frame/asset-conversion/src/weights.rs index 9aea19dbf57c6adf2936d4141987d198cd944fb7..f6e025520d71bfe029bd8f9f838c5ef38fefdad3 100644 --- a/substrate/frame/asset-conversion/src/weights.rs +++ b/substrate/frame/asset-conversion/src/weights.rs @@ -37,6 +37,7 @@ // --chain=dev // --header=./substrate/HEADER-APACHE2 // --output=./substrate/frame/asset-conversion/src/weights.rs +// --header=./substrate/HEADER-APACHE2 // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index af605c5a3c6403a60c5a768b754c1e900a875083..75a6139702c65e3f0dace0b94ecdaa8b0b2bcafe 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -1785,10 +1785,10 @@ fn multiple_transfer_alls_work_ok() { #[test] fn weights_sane() { let info = crate::Call::::create { id: 10, admin: 4, min_balance: 3 }.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::create(), info.weight); + assert_eq!(<() as crate::WeightInfo>::create(), info.call_weight); let info = crate::Call::::finish_destroy { id: 10 }.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::finish_destroy(), info.weight); + assert_eq!(<() as crate::WeightInfo>::finish_destroy(), info.call_weight); } #[test] diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs index f829578fb285132812f6ce341cf4f5efcb950101..c74e864ea0d998f6f3c2f3bb889df7aab93cd97b 100644 --- a/substrate/frame/aura/src/lib.rs +++ b/substrate/frame/aura/src/lib.rs @@ -400,7 +400,9 @@ impl OnTimestampSet for Pallet { assert_eq!( CurrentSlot::::get(), timestamp_slot, - "Timestamp slot must match `CurrentSlot`" + "Timestamp slot must match `CurrentSlot`. This likely means that the configured block \ + time in the node and/or rest of the runtime is not compatible with Aura's \ + `SlotDuration`", ); } } diff --git a/substrate/frame/babe/src/equivocation.rs b/substrate/frame/babe/src/equivocation.rs index 4be07bdae1f0154c2ca1331bbdd62b6540dae665..524ad23e58ee119aca8122aae79c770e48698a35 100644 --- a/substrate/frame/babe/src/equivocation.rs +++ b/substrate/frame/babe/src/equivocation.rs @@ -100,7 +100,7 @@ impl Offence for EquivocationOffence { /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactionTypes`. +/// `offchain::CreateTransactionBase`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. 
@@ -110,7 +110,7 @@ impl OffenceReportSystem, (EquivocationProof>, T::KeyOwnerProof)> for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -132,7 +132,8 @@ where equivocation_proof: Box::new(equivocation_proof), key_owner_proof, }; - let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); + let xt = T::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report"), Err(e) => error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e), diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index d416e31b25f71ae759cdfef987100ac994085ef0..c2e24c73a7bda46271c5d90dfda2776b3b1c7ceb 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -68,14 +68,23 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = TestXt; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + TestXt::new_bare(call) + } +} + impl_opaque_keys! { pub struct MockSessionKeys { pub babe_authority: super::Pallet, diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index b9a214ca105c846fae4be018149c7aba55c521e8..eca958160239d801480e57c1a79ca3c4922779b9 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -906,7 +906,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // it should have non-zero weight and the fee has to be paid. // TODO: account for proof size weight - assert!(info.weight.ref_time() > 0); + assert!(info.call_weight.ref_time() > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. 
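The equivocation-report path above now asks the runtime to construct the unsigned extrinsic (`T::create_inherent(call.into())`) and then submits the ready-made extrinsic with `submit_transaction`, instead of having the submission helper assemble an unsigned transaction itself; the test mocks implement the new hook with `TestXt::new_bare`. A sketch of that division of labour with stand-in types; none of these are the FRAME traits:

```rust
// Stand-in types only (not the FRAME API): the runtime decides how the
// inherent/unsigned extrinsic is built, the submitter only forwards it.
trait CreateInherentLike {
    type Call;
    type Extrinsic;
    fn create_inherent(call: Self::Call) -> Self::Extrinsic;
}

#[derive(Debug)]
struct DemoExtrinsic {
    call: &'static str,
    // `None` means a bare (unsigned) extrinsic, like `TestXt::new_bare` above.
    signature: Option<&'static str>,
}

struct DemoRuntime;

impl CreateInherentLike for DemoRuntime {
    type Call = &'static str;
    type Extrinsic = DemoExtrinsic;

    fn create_inherent(call: Self::Call) -> Self::Extrinsic {
        DemoExtrinsic { call, signature: None }
    }
}

/// Stand-in for the pool-facing submitter: it takes an already-built
/// extrinsic rather than assembling an unsigned one itself.
fn submit_transaction(xt: DemoExtrinsic) -> Result<(), ()> {
    println!("submitting {xt:?}");
    Ok(())
}

fn main() {
    let xt = DemoRuntime::create_inherent("report_equivocation_unsigned");
    submit_transaction(xt).expect("demo submission succeeds");
}
```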
diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index 44899e5b7d8da87bc46e4dbde24840c61546a254..f0117555c37ea5c4ec3d3fee6741be421e58914a 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -52,6 +52,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index a4984b34f6dbf96671cef77e63c74e2e69caefa2..7fcc49d50aa54a3aa74a236828e4041e3318cc22 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -30,6 +30,7 @@ use frame_support::{ StorageNoopGuard, }; use frame_system::Event as SysEvent; +use sp_runtime::traits::DispatchTransaction; const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; @@ -258,17 +259,17 @@ fn lock_should_work_reserve() { TokenError::Frozen ); assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(1), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, ) .is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(0), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, @@ -289,17 +290,17 @@ fn lock_should_work_tx_fee() { TokenError::Frozen ); assert_noop!(Balances::reserve(&1, 1), Error::::LiquidityRestrictions,); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(1), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, ) .is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert!(ChargeTransactionPayment::::validate_and_prepare( ChargeTransactionPayment::from(0), - &1, + Some(1).into(), CALL, &info_from_weight(Weight::from_parts(1, 0)), 1, diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index ba0cdabdabbbdc8459fcb551ae3a1b704d479262..bf49ad9f0a1f08f12f16236d36b237c345c70842 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -37,7 +37,7 @@ use scale_info::TypeInfo; use sp_core::hexdisplay::HexDisplay; use sp_io; use sp_runtime::{ - traits::{BadOrigin, SignedExtension, Zero}, + traits::{BadOrigin, Zero}, ArithmeticError, BuildStorage, DispatchError, DispatchResult, FixedPointNumber, RuntimeDebug, TokenError, }; @@ -104,7 +104,6 @@ impl pallet_transaction_payment::Config for Test { type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; - type FeeMultiplierUpdate = (); } parameter_types! { @@ -275,7 +274,7 @@ pub fn events() -> Vec { /// create a transaction info struct from weight. Handy to avoid building the whole struct. pub fn info_from_weight(w: Weight) -> DispatchInfo { - DispatchInfo { weight: w, ..Default::default() } + DispatchInfo { call_weight: w, ..Default::default() } } /// Check that the total-issuance matches the sum of all accounts' total balances. 
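The balances tests above replace `<ChargeTransactionPayment<Test> as SignedExtension>::pre_dispatch(...)` with `ChargeTransactionPayment::<Test>::validate_and_prepare(...)`. The sketch below is only a toy model of a combined "validate, then prepare" step with hypothetical types and signatures (the actual FRAME transaction-extension API is richer); it illustrates the control flow those assertions exercise:

```rust
// Hypothetical shape only, not the FRAME transaction-extension API.
trait DemoTxExtension {
    type Pre;

    fn validate(&self, origin: Option<u64>, call: &str) -> Result<(), String>;
    fn prepare(self, origin: Option<u64>, call: &str) -> Result<Self::Pre, String>;

    /// Run validation and, only on success, preparation (fee withdrawal etc.).
    fn validate_and_prepare(
        self,
        origin: Option<u64>,
        call: &str,
    ) -> Result<Self::Pre, String>
    where
        Self: Sized,
    {
        self.validate(origin, call)?;
        self.prepare(origin, call)
    }
}

/// Toy fee-charging extension; the inner value plays the role of the tip.
struct ChargeFee(u64);

impl DemoTxExtension for ChargeFee {
    type Pre = u64;

    fn validate(&self, origin: Option<u64>, _call: &str) -> Result<(), String> {
        // A signed origin is required to pay the fee.
        origin.map(|_| ()).ok_or_else(|| "unsigned".to_string())
    }

    fn prepare(self, _origin: Option<u64>, _call: &str) -> Result<Self::Pre, String> {
        Ok(self.0)
    }
}

fn main() {
    assert!(ChargeFee(1).validate_and_prepare(Some(1), "transfer").is_ok());
    assert!(ChargeFee(1).validate_and_prepare(None, "transfer").is_err());
}
```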
@@ -298,10 +297,10 @@ pub fn ensure_ti_valid() { #[test] fn weights_sane() { let info = crate::Call::::transfer_allow_death { dest: 10, value: 4 }.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::transfer_allow_death(), info.weight); + assert_eq!(<() as crate::WeightInfo>::transfer_allow_death(), info.call_weight); let info = crate::Call::::force_unreserve { who: 10, amount: 4 }.get_dispatch_info(); - assert_eq!(<() as crate::WeightInfo>::force_unreserve(), info.weight); + assert_eq!(<() as crate::WeightInfo>::force_unreserve(), info.call_weight); } #[test] diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs index 15345e6ae1997ccd0e768edd56afcecb11e21f35..3a49b9e169ce904d0185408fa8095d933ac730ad 100644 --- a/substrate/frame/beefy/src/equivocation.rs +++ b/substrate/frame/beefy/src/equivocation.rs @@ -118,7 +118,7 @@ where /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactionTypes`. +/// `offchain::CreateTransactionBase`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the authorship pallet. @@ -262,7 +262,7 @@ impl EquivocationEvidenceFor { impl OffenceReportSystem, EquivocationEvidenceFor> for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -278,7 +278,8 @@ where use frame_system::offchain::SubmitTransaction; let call: Call = evidence.into(); - let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); + let xt = T::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report."), Err(e) => error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e), diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 5c79d8f7d7d7fb8933909452cc7ca8a1aefdc576..2b75c4107414e908fa3e92d5cf614234bc54cd62 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -75,14 +75,23 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = TestXt; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + TestXt::new_bare(call) + } +} + #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct MockAncestryProofContext { pub is_valid: bool, diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs index 625da2a24bd0ad1d9375bdffeaeca87b0beb9671..6e21356e9d47afc1bbec2f1785bd4fac38b62969 100644 --- a/substrate/frame/benchmarking/src/lib.rs +++ b/substrate/frame/benchmarking/src/lib.rs @@ -311,6 +311,83 @@ pub use v1::*; /// } /// } /// ``` +/// +/// ## Migrate from v1 to v2 +/// +/// To migrate your code from benchmarking v1 to benchmarking v2, you 
may follow these +/// steps: +/// 1. Change the import from `frame_benchmarking::v1::` to `frame_benchmarking::v2::*`, or +/// `frame::benchmarking::prelude::*` under the umbrella crate; +/// 2. Move the code inside the v1 `benchmarks! { ... }` block to the v2 benchmarks module `mod +/// benchmarks { ... }` under the benchmarks macro (`#[benchmarks]` for a regular module, or +/// `#[instance_benchmarks]` to set up the module in instance benchmarking mode); +/// 3. Turn each v1 benchmark into a function inside the v2 benchmarks module with the same name, +/// having either a blank return type or a return type compatible with `Result<(), +/// BenchmarkError>`. For instance, `foo { ... }` can become `fn foo() -> Result<(), +/// BenchmarkError>`. More in detail: +/// 1. Move all the v1 complexity parameters as [ParamRange](`v2::ParamRange`) arguments to the +/// v2 function, and their setup code to the body of the function. For instance, `let y in 0 +/// .. 10 => setup(y)?;` from v1 will give a `y: Linear<0, 10>` argument to the corresponding +/// function in v2, while `setup(y)?;` will be moved to the body of the function; +/// 2. Move all the v1 setup code to the body of the v2 function; +/// 3. Move the benchmarked code to the body of the v2 function under the appropriate macro +/// attribute: `#[extrinsic_call]` for extrinsic pallet calls and `#[block]` for blocks of +/// code; +/// 4. Move the v1 verify code block to the body of the v2 function, after the +/// `#[extrinsic_call]` or `#[block]` attribute. +/// 5. If the function returns a `Result<(), BenchmarkError>`, end with `Ok(())`. +/// +/// As for tests, the code is the same as v1 (see [Benchmark Tests](#benchmark-tests)). +/// +/// As an example migration, the following v1 code +/// +/// ```ignore +/// #![cfg(feature = "runtime-benchmarks")] +/// +/// use frame_benchmarking::v1::*; +/// +/// benchmarks! { +/// +/// // first dispatchable: this is a user dispatchable and operates on a `u8` vector of +/// // size `l` +/// foo { +/// let caller = funded_account::(b"caller", 0); +/// let l in 1 .. 10_000 => initialize_l(l); +/// }: { +/// _(RuntimeOrigin::Signed(caller), vec![0u8; l]) +/// } verify { +/// assert_last_event::(Event::FooExecuted { result: Ok(()) }.into()); +/// } +/// } +/// ``` +/// +/// would become the following v2 code: +/// +/// ```ignore +/// #![cfg(feature = "runtime-benchmarks")] +/// +/// use frame_benchmarking::v2::*; +/// +/// #[benchmarks] +/// mod benchmarks { +/// use super::*; +/// +/// // first dispatchable: foo; this is a user dispatchable and operates on a `u8` vector of +/// // size `l` +/// #[benchmark] +/// fn foo(l: Linear<1 .. 10_000>) -> Result<(), BenchmarkError> { +/// let caller = funded_account::(b"caller", 0); +/// initialize_l(l); +/// +/// #[extrinsic_call] +/// _(RuntimeOrigin::Signed(caller), vec![0u8; l]); +/// +/// // Everything onwards will be treated as test. 
+/// assert_last_event::(Event::FooExecuted { result: Ok(()) }.into()); +/// Ok(()) +/// } +/// } +/// ``` pub mod v2 { pub use super::*; pub use frame_support_procedural::{ diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index 79428689caaf7d2999754fe3786464fb97940fb9..8e533a7b290432b17eed5f142e9f234f3d1d9d5c 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -629,7 +629,7 @@ pub mod pallet { T::WeightInfo::execute( *length_bound, // B T::MaxMembers::get(), // M - ).saturating_add(proposal.get_dispatch_info().weight), // P + ).saturating_add(proposal.get_dispatch_info().call_weight), // P DispatchClass::Operational ))] pub fn execute( @@ -681,7 +681,7 @@ pub mod pallet { T::WeightInfo::propose_execute( *length_bound, // B T::MaxMembers::get(), // M - ).saturating_add(proposal.get_dispatch_info().weight) // P1 + ).saturating_add(proposal.get_dispatch_info().call_weight) // P1 } else { T::WeightInfo::propose_proposed( *length_bound, // B @@ -915,7 +915,7 @@ impl, I: 'static> Pallet { ) -> Result<(u32, DispatchResultWithPostInfo), DispatchError> { let proposal_len = proposal.encoded_size(); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; ensure!( proposal_weight.all_lte(T::MaxProposalWeight::get()), Error::::WrongProposalWeight @@ -942,7 +942,7 @@ impl, I: 'static> Pallet { ) -> Result<(u32, u32), DispatchError> { let proposal_len = proposal.encoded_size(); ensure!(proposal_len <= length_bound as usize, Error::::WrongProposalLength); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; ensure!( proposal_weight.all_lte(T::MaxProposalWeight::get()), Error::::WrongProposalWeight @@ -1130,7 +1130,7 @@ impl, I: 'static> Pallet { storage::read(&key, &mut [0; 0], 0).ok_or(Error::::ProposalMissing)?; ensure!(proposal_len <= length_bound, Error::::WrongProposalLength); let proposal = ProposalOf::::get(hash).ok_or(Error::::ProposalMissing)?; - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; ensure!(proposal_weight.all_lte(weight_bound), Error::::WrongProposalWeight); Ok((proposal, proposal_len as usize)) } @@ -1157,7 +1157,7 @@ impl, I: 'static> Pallet { ) -> (Weight, u32) { Self::deposit_event(Event::Approved { proposal_hash }); - let dispatch_weight = proposal.get_dispatch_info().weight; + let dispatch_weight = proposal.get_dispatch_info().call_weight; let origin = RawOrigin::Members(yes_votes, seats).into(); let result = proposal.dispatch(origin); Self::deposit_event(Event::Executed { diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index 70ce221f10d0e5ab5bbe63b73835d2542209251a..c4ed17821ae89e2b66246b6937e5f169c644bd3b 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -36,7 +36,7 @@ use sp_runtime::{ }; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test @@ -316,7 +316,7 @@ fn close_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() 
as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( @@ -388,7 +388,7 @@ fn proposal_weight_limit_works_on_approve() { old_count: MaxMembers::get(), }); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); // Set 1 as prime voter Prime::::set(Some(1)); @@ -430,7 +430,7 @@ fn proposal_weight_limit_ignored_on_disapprove() { old_count: MaxMembers::get(), }); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( @@ -456,7 +456,7 @@ fn close_with_prime_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members( RuntimeOrigin::root(), @@ -524,7 +524,7 @@ fn close_with_voting_prime_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members( RuntimeOrigin::root(), @@ -594,7 +594,7 @@ fn close_with_no_prime_but_majority_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(CollectiveMajority::set_members( RuntimeOrigin::root(), @@ -874,7 +874,7 @@ fn correct_validate_and_get_proposal() { )); let hash = BlakeTwo256::hash_of(&proposal); - let weight = proposal.get_dispatch_info().weight; + let weight = proposal.get_dispatch_info().call_weight; assert_noop!( Collective::validate_and_get_proposal( &BlakeTwo256::hash_of(&vec![3; 4]), @@ -1073,7 +1073,7 @@ fn motions_all_first_vote_free_works() { // Test close() Extrinsics | Check DispatchResultWithPostInfo with Pay Info - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let close_rval: DispatchResultWithPostInfo = Collective::close(RuntimeOrigin::signed(2), hash, 0, proposal_weight, proposal_len); assert_eq!(close_rval.unwrap().pays_fee, Pays::No); @@ -1091,7 +1091,7 @@ fn motions_reproposing_disapproved_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), @@ -1123,7 +1123,7 @@ fn 
motions_approval_with_enough_votes_and_lower_voting_threshold_works() { ExtBuilder::default().build_and_execute(|| { let proposal = RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {}); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); // The voting threshold is 2, but the required votes for `ExternalMajorityOrigin` is 3. // The proposal will be executed regardless of the voting threshold @@ -1253,7 +1253,7 @@ fn motions_disapproval_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), @@ -1312,7 +1312,7 @@ fn motions_approval_works() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), @@ -1373,7 +1373,7 @@ fn motion_with_no_votes_closes_with_disapproval() { ExtBuilder::default().build_and_execute(|| { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( RuntimeOrigin::signed(1), diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index 84ea7de00a2f1d3a3e64404dede2cde1556d039a..4aba1d24dbd5889e925fb9bbab53cf600642ab77 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -522,7 +522,7 @@ fn expand_docs(def: &EnvDef) -> TokenStream2 { /// `expand_impls()`). fn expand_env(def: &EnvDef, docs: bool) -> TokenStream2 { let impls = expand_impls(def); - let docs = docs.then_some(expand_docs(def)).unwrap_or(TokenStream2::new()); + let docs = docs.then(|| expand_docs(def)).unwrap_or(TokenStream2::new()); let stable_api_count = def.host_funcs.iter().filter(|f| f.is_stable).count(); quote! 
{ diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 984e5712ae061e4d7dc1becf6d458c042a20b177..39f846ac43197b6122b3f19032e05913c28a866b 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -522,7 +522,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { run: impl FnOnce(&mut Self) -> DispatchResultWithPostInfo, ) -> Result { use frame_support::dispatch::extract_actual_weight; - let charged = self.charge_gas(runtime_cost(dispatch_info.weight))?; + let charged = self.charge_gas(runtime_cost(dispatch_info.call_weight))?; let result = run(self); let actual_weight = extract_actual_weight(&result, &dispatch_info); self.adjust_gas(charged, runtime_cost(actual_weight)); @@ -2347,7 +2347,7 @@ pub mod env { let execute_weight = <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); - let dispatch_info = DispatchInfo { weight, ..Default::default() }; + let dispatch_info = DispatchInfo { call_weight: weight, ..Default::default() }; ctx.call_dispatchable::( dispatch_info, diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 072cfe176b61991081d64e3f83fc1f353d4ae0c6..06cb2963d762d4613790cdf97464e9c969af791c 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -245,7 +245,7 @@ use frame_support::{ weights::Weight, DefaultNoBound, EqNoBound, PartialEqNoBound, }; -use frame_system::{ensure_none, offchain::SendTransactionTypes, pallet_prelude::BlockNumberFor}; +use frame_system::{ensure_none, offchain::CreateInherent, pallet_prelude::BlockNumberFor}; use scale_info::TypeInfo; use sp_arithmetic::{ traits::{CheckedAdd, Zero}, @@ -576,7 +576,7 @@ pub mod pallet { use sp_runtime::traits::Convert; #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config + CreateInherent> { type RuntimeEvent: From> + IsType<::RuntimeEvent> + TryInto>; diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 32a099e1a26f43057dc1898e582d4ac9004b677d..2e5ac2527203fc9373883947a906b6f1aad966dc 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -421,14 +421,23 @@ impl Convert> for Runtime { } } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + pub type Extrinsic = sp_runtime::testing::TestXt; parameter_types! 
{ diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index 4c56f02db526b9f984a787e5fac6846a265a72f5..191131ed3acc3ba11ba74490e49276e880d38858 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -31,7 +31,10 @@ use frame_support::{ traits::{DefensiveResult, Get}, BoundedVec, }; -use frame_system::{offchain::SubmitTransaction, pallet_prelude::BlockNumberFor}; +use frame_system::{ + offchain::{CreateInherent, SubmitTransaction}, + pallet_prelude::BlockNumberFor, +}; use scale_info::TypeInfo; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, ElectionResult, @@ -179,7 +182,7 @@ fn ocw_solution_exists() -> bool { matches!(StorageValueRef::persistent(OFFCHAIN_CACHED_CALL).get::>(), Ok(Some(_))) } -impl Pallet { +impl>> Pallet { /// Mine a new npos solution. /// /// The Npos Solver type, `S`, must have the same AccountId and Error type as the @@ -277,7 +280,8 @@ impl Pallet { fn submit_call(call: Call) -> Result<(), MinerError> { log!(debug, "miner submitting a solution as an unsigned transaction"); - SubmitTransaction::>::submit_unsigned_transaction(call.into()) + let xt = T::create_inherent(call.into()); + SubmitTransaction::>::submit_transaction(xt) .map_err(|_| MinerError::PoolSubmissionFailed) } @@ -1818,7 +1822,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); - let call = extrinsic.call; + let call = extrinsic.function; assert!(matches!(call, RuntimeCall::MultiPhase(Call::submit_unsigned { .. }))); }) } @@ -1835,7 +1839,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic = Extrinsic::decode(&mut &*encoded).unwrap(); - let call = match extrinsic.call { + let call = match extrinsic.function { RuntimeCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, _ => panic!("bad call: unexpected submission"), }; diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index f20e3983b09d1b12770de142694ddcfabc07f9c3..360f14913fcc6745297081688a45f3136036b12b 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -61,7 +61,7 @@ pub const INIT_TIMESTAMP: BlockNumber = 30_000; pub const BLOCK_TIME: BlockNumber = 1000; type Block = frame_system::mocking::MockBlockU32; -type Extrinsic = testing::TestXt; +type Extrinsic = sp_runtime::testing::TestXt; frame_support::construct_runtime!( pub enum Runtime { @@ -308,14 +308,23 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + pub struct OnChainSeqPhragmen; parameter_types! 
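// Illustrative sketch, not part of the diff: the mock-runtime hunks above swap
// `SendTransactionTypes` for the new offchain traits. With the generic parameters
// restored (the diff text has lost its angle brackets), the migration pattern is:
impl<LocalCall> frame_system::offchain::CreateTransactionBase<LocalCall> for Runtime
where
    RuntimeCall: From<LocalCall>,
{
    // `OverarchingCall` is renamed to `RuntimeCall`.
    type RuntimeCall = RuntimeCall;
    type Extrinsic = Extrinsic;
}
impl<LocalCall> frame_system::offchain::CreateInherent<LocalCall> for Runtime
where
    RuntimeCall: From<LocalCall>,
{
    // Unsigned submissions are now built explicitly as bare extrinsics.
    fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic {
        Extrinsic::new_bare(call)
    }
}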
{ @@ -687,7 +696,7 @@ pub fn roll_to_with_ocw(n: BlockNumber, pool: Arc>, delay_solu for encoded in &pool.read().transactions { let extrinsic = Extrinsic::decode(&mut &encoded[..]).unwrap(); - let _ = match extrinsic.call { + let _ = match extrinsic.function { RuntimeCall::ElectionProviderMultiPhase( call @ Call::submit_unsigned { .. }, ) => { diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index a1c5f69e1b65abcd2b77e78fd5fe73a9231de7c9..effbb6e786c0d0b948c843656e72978bbc8eb3ec 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -1408,7 +1408,7 @@ mod tests { pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index ee0f8df29cf581db99a4935e2b2ccd6de122df51..0c6ad5ef0978eb05b2466bf063a219db274dc4c9 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -25,12 +25,14 @@ pallet-example-offchain-worker = { workspace = true } pallet-example-split = { workspace = true } pallet-example-single-block-migrations = { workspace = true } pallet-example-tasks = { workspace = true } +pallet-example-authorization-tx-extension = { workspace = true } [features] default = ["std"] std = [ "pallet-default-config-example/std", "pallet-dev-mode/std", + "pallet-example-authorization-tx-extension/std", "pallet-example-basic/std", "pallet-example-frame-crate/std", "pallet-example-kitchensink/std", @@ -42,6 +44,7 @@ std = [ try-runtime = [ "pallet-default-config-example/try-runtime", "pallet-dev-mode/try-runtime", + "pallet-example-authorization-tx-extension/try-runtime", "pallet-example-basic/try-runtime", "pallet-example-kitchensink/try-runtime", "pallet-example-offchain-worker/try-runtime", diff --git a/substrate/frame/examples/authorization-tx-extension/Cargo.toml b/substrate/frame/examples/authorization-tx-extension/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..9b51fc6c1e636b1b00eef479ba7a4b2c6bb132da --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "pallet-example-authorization-tx-extension" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "MIT-0" +homepage.workspace = true +repository.workspace = true +description = "FRAME example authorization transaction extension pallet" +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +docify = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } + +frame-benchmarking = { optional = true, workspace = true } +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } + +sp-io = { workspace = true } +sp-runtime = { workspace = true } + +[dev-dependencies] +pallet-verify-signature = { workspace = true } +sp-core = { workspace = true } +sp-keyring = { workspace = true, default-features = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-verify-signature/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + 
"sp-runtime/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-verify-signature/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-verify-signature/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/examples/authorization-tx-extension/src/extensions.rs b/substrate/frame/examples/authorization-tx-extension/src/extensions.rs new file mode 100644 index 0000000000000000000000000000000000000000..d1e56916d3a205af6b6c0fabfb34f2b8a1223957 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/src/extensions.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use core::{fmt, marker::PhantomData}; + +use codec::{Decode, Encode}; +use frame_support::{traits::OriginTrait, Parameter}; +use scale_info::TypeInfo; +use sp_runtime::{ + impl_tx_ext_default, + traits::{ + DispatchInfoOf, DispatchOriginOf, IdentifyAccount, TransactionExtension, ValidateResult, + Verify, + }, + transaction_validity::{InvalidTransaction, ValidTransaction}, +}; + +use crate::pallet_coownership::{Config, Origin}; + +/// Helper struct to organize the data needed for signature verification of both parties involved. +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct AuthCredentials { + first: (Signer, Signature), + second: (Signer, Signature), +} + +/// Extension that, if activated by providing a pair of signers and signatures, will authorize a +/// coowner origin of the two signers. Both signers have to construct their signatures on all of the +/// data that follows this extension in the `TransactionExtension` pipeline, their implications and +/// the call. Essentially re-sign the transaction from this point onwards in the pipeline by using +/// the `inherited_implication`, as shown below. +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct AuthorizeCoownership { + inner: Option>, + _phantom: PhantomData, +} + +impl Default for AuthorizeCoownership { + fn default() -> Self { + Self { inner: None, _phantom: Default::default() } + } +} + +impl AuthorizeCoownership { + /// Creates an active extension that will try to authorize the coownership origin. 
+ pub fn new(first: (Signer, Signature), second: (Signer, Signature)) -> Self { + Self { inner: Some(AuthCredentials { first, second }), _phantom: Default::default() } + } +} + +impl fmt::Debug for AuthorizeCoownership { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "AuthorizeCoownership") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + Ok(()) + } +} + +impl TransactionExtension + for AuthorizeCoownership +where + Signer: IdentifyAccount + Parameter + Send + Sync + 'static, + Signature: Verify + Parameter + Send + Sync + 'static, +{ + const IDENTIFIER: &'static str = "AuthorizeCoownership"; + type Implicit = (); + type Val = (); + type Pre = (); + + fn validate( + &self, + mut origin: DispatchOriginOf, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _self_implicit: Self::Implicit, + inherited_implication: &impl codec::Encode, + ) -> ValidateResult { + // If the extension is inactive, just move on in the pipeline. + let Some(auth) = &self.inner else { + return Ok((ValidTransaction::default(), (), origin)); + }; + let first_account = auth.first.0.clone().into_account(); + let second_account = auth.second.0.clone().into_account(); + + // Construct the payload to sign using the `inherited_implication`. + let msg = inherited_implication.using_encoded(sp_io::hashing::blake2_256); + + // Both parties' signatures must be correct for the origin to be authorized. + // In a prod environment, we're just return a `InvalidTransaction::BadProof` if the + // signature isn't valid, but we return these custom errors to be able to assert them in + // tests. + if !auth.first.1.verify(&msg[..], &first_account) { + Err(InvalidTransaction::Custom(100))? + } + if !auth.second.1.verify(&msg[..], &second_account) { + Err(InvalidTransaction::Custom(200))? + } + // Construct a `pallet_coownership::Origin`. + let local_origin = Origin::Coowners(first_account, second_account); + // Turn it into a local `PalletsOrigin`. + let local_origin = ::PalletsOrigin::from(local_origin); + // Then finally into a pallet `RuntimeOrigin`. + let local_origin = ::RuntimeOrigin::from(local_origin); + // Which the `set_caller_from` function will convert into the overarching `RuntimeOrigin` + // created by `construct_runtime!`. + origin.set_caller_from(local_origin); + // Make sure to return the new origin. + Ok((ValidTransaction::default(), (), origin)) + } + // We're not doing any special logic in `TransactionExtension::prepare`, so just impl a default. + impl_tx_ext_default!(T::RuntimeCall; weight prepare); +} diff --git a/substrate/frame/examples/authorization-tx-extension/src/lib.rs b/substrate/frame/examples/authorization-tx-extension/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..9105155a94d0d39ef7d2af234c153ef615419801 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/src/lib.rs @@ -0,0 +1,158 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Authorization Transaction Extension Example Pallet +//! +//! **This pallet serves as an example and is not meant to be used in production.** +//! +//! FRAME Transaction Extension reference implementation, origin mutation, origin authorization and +//! integration in a `TransactionExtension` pipeline. +//! +//! The [TransactionExtension](sp_runtime::traits::TransactionExtension) used in this example is +//! [AuthorizeCoownership](extensions::AuthorizeCoownership). If activated, the extension will +//! authorize 2 signers as coowners, with a [coowner origin](pallet_coownership::Origin) specific to +//! the [coownership example pallet](pallet_coownership), by validating a signature of the rest of +//! the transaction from each party. This means any extensions after ours in the pipeline, their +//! implicits and the actual call. The extension pipeline used in our example checks the genesis +//! hash, transaction version and mortality of the transaction after the `AuthorizeCoownership` runs +//! as we want these transactions to run regardless of what origin passes through them and/or we +//! want their implicit data in any signature authorization happening earlier in the pipeline. +//! +//! In this example, aside from the [AuthorizeCoownership](extensions::AuthorizeCoownership) +//! extension, we use the following pallets: +//! - [pallet_coownership] - provides a coowner origin and the functionality to authorize it. +//! - [pallet_assets] - a dummy asset pallet that tracks assets, identified by an +//! [AssetId](pallet_assets::AssetId), and their respective owners, which can be either an +//! [account](pallet_assets::Owner::Single) or a [pair of owners](pallet_assets::Owner::Double). +//! +//! Assets are created in [pallet_assets] using the +//! [create_asset](pallet_assets::Call::create_asset) call, which accepts traditionally signed +//! origins (a single account) or coowner origins, authorized through the +//! [CoownerOrigin](pallet_assets::Config::CoownerOrigin) type. +//! +//! ### Example runtime setup +#![doc = docify::embed!("src/mock.rs", example_runtime)] +//! +//! ### Example usage +#![doc = docify::embed!("src/tests.rs", create_coowned_asset_works)] +//! +//! This example does not focus on any pallet logic or syntax, but rather on `TransactionExtension` +//! functionality. The pallets used are just skeletons to provide storage state and custom origin +//! choices and requirements, as shown in the examples. Any weight and/or +//! transaction fee is out of scope for this example. + +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod extensions; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +extern crate alloc; + +use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; + +#[frame_support::pallet(dev_mode)] +pub mod pallet_coownership { + use super::*; + use frame_support::traits::OriginTrait; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The aggregated origin which the dispatch will take. 
+ type RuntimeOrigin: OriginTrait + + From + + IsType<::RuntimeOrigin>; + + /// The caller origin, overarching type of all pallets origins. + type PalletsOrigin: From> + TryInto, Error = Self::PalletsOrigin>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + /// Origin that this pallet can authorize. For the purposes of this example, it's just two + /// accounts that own something together. + #[pallet::origin] + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub enum Origin { + Coowners(T::AccountId, T::AccountId), + } +} + +#[frame_support::pallet(dev_mode)] +pub mod pallet_assets { + use super::*; + + pub type AssetId = u32; + + /// Type that describes possible owners of a particular asset. + #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] + pub enum Owner { + Single(AccountId), + Double(AccountId, AccountId), + } + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Type that can authorize an account pair coowner origin. + type CoownerOrigin: EnsureOrigin< + Self::RuntimeOrigin, + Success = (Self::AccountId, Self::AccountId), + >; + } + + /// Map that holds the owner information for each asset it manages. + #[pallet::storage] + pub type AssetOwners = + StorageMap<_, Blake2_128Concat, AssetId, Owner<::AccountId>>; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::error] + pub enum Error { + /// Asset already exists. + AlreadyExists, + } + + #[pallet::call] + impl Pallet { + /// Simple call that just creates an asset with a specific `AssetId`. This call will fail if + /// there is already an asset with the same `AssetId`. + /// + /// The origin is either a single account (traditionally signed origin) or a coowner origin. + #[pallet::call_index(0)] + pub fn create_asset(origin: OriginFor, asset_id: AssetId) -> DispatchResult { + let owner: Owner = match T::CoownerOrigin::try_origin(origin) { + Ok((first, second)) => Owner::Double(first, second), + Err(origin) => ensure_signed(origin).map(|account| Owner::Single(account))?, + }; + AssetOwners::::try_mutate(asset_id, |maybe_owner| { + if maybe_owner.is_some() { + return Err(Error::::AlreadyExists); + } + *maybe_owner = Some(owner); + Ok(()) + })?; + Ok(()) + } + } +} diff --git a/substrate/frame/examples/authorization-tx-extension/src/mock.rs b/substrate/frame/examples/authorization-tx-extension/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..aa70d12d7d84ca9a13742a316e21acc7e1498dd0 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/src/mock.rs @@ -0,0 +1,142 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
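// Illustrative sketch, not part of the diff: to activate `AuthorizeCoownership`, each
// co-owner signs the extension's "inherited implication", i.e. everything that follows
// it in the pipeline: the remaining extensions, their implicit data and the call.
// Mirroring the tests further below (`InnerTxExtension`, the keyrings and the call are
// the names used there):
let inner_ext: InnerTxExtension = (
    frame_system::CheckGenesis::<Runtime>::new(),
    frame_system::CheckTxVersion::<Runtime>::new(),
    frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::immortal()),
);
// The payload both co-owners sign: (call, later extensions, their implicit data).
let inner_payload = (&create_asset_call, &inner_ext, inner_ext.implicit().unwrap());
let alice_inner_sig = MultiSignature::Sr25519(
    inner_payload.using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))),
);
// On chain, `validate` recomputes the same bytes from `inherited_implication` and
// checks both signatures against blake2_256 of that encoding.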
+ +use crate::*; +pub(crate) use example_runtime::*; +use extensions::AuthorizeCoownership; +use frame_support::derive_impl; +use frame_system::{CheckEra, CheckGenesis, CheckNonce, CheckTxVersion}; +use pallet_verify_signature::VerifySignature; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, + BuildStorage, MultiSignature, MultiSigner, +}; + +#[docify::export] +mod example_runtime { + use super::*; + + /// Our `TransactionExtension` fit for general transactions. + pub type TxExtension = ( + // Validate the signature of regular account transactions (substitutes the old signed + // transaction). + VerifySignature, + // Nonce check (and increment) for the caller. + CheckNonce, + // If activated, will mutate the origin to a `pallet_coownership` origin of 2 accounts that + // own something. + AuthorizeCoownership, + // Some other extensions that we want to run for every possible origin and we want captured + // in any and all signature and authorization schemes (such as the traditional account + // signature or the double signature in `pallet_coownership`). + CheckGenesis, + CheckTxVersion, + CheckEra, + ); + /// Convenience type to more easily construct the signature to be signed in case + /// `AuthorizeCoownership` is activated. + pub type InnerTxExtension = (CheckGenesis, CheckTxVersion, CheckEra); + pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; + pub type Header = generic::Header; + pub type Block = generic::Block; + pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + pub type Signature = MultiSignature; + pub type BlockNumber = u32; + + // For testing the pallet, we construct a mock runtime. + frame_support::construct_runtime!( + pub enum Runtime + { + System: frame_system, + VerifySignaturePallet: pallet_verify_signature, + + Assets: pallet_assets, + Coownership: pallet_coownership, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] + impl frame_system::Config for Runtime { + type AccountId = AccountId; + type Block = Block; + type Lookup = IdentityLookup; + } + + #[cfg(feature = "runtime-benchmarks")] + pub struct BenchmarkHelper; + #[cfg(feature = "runtime-benchmarks")] + impl pallet_verify_signature::BenchmarkHelper for BenchmarkHelper { + fn create_signature(_entropy: &[u8], msg: &[u8]) -> (MultiSignature, AccountId) { + use sp_io::crypto::{sr25519_generate, sr25519_sign}; + use sp_runtime::traits::IdentifyAccount; + let public = sr25519_generate(0.into(), None); + let who_account: AccountId = MultiSigner::Sr25519(public).into_account().into(); + let signature = MultiSignature::Sr25519(sr25519_sign(0.into(), &public, msg).unwrap()); + (signature, who_account) + } + } + + impl pallet_verify_signature::Config for Runtime { + type Signature = MultiSignature; + type AccountIdentifier = MultiSigner; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = BenchmarkHelper; + } + + /// Type that enables any pallet to ask for a coowner origin. 
+ pub struct EnsureCoowner; + impl EnsureOrigin for EnsureCoowner { + type Success = (AccountId, AccountId); + + fn try_origin(o: RuntimeOrigin) -> Result { + match o.clone().into() { + Ok(pallet_coownership::Origin::::Coowners(first, second)) => + Ok((first, second)), + _ => Err(o), + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + unimplemented!() + } + } + + impl pallet_assets::Config for Runtime { + type CoownerOrigin = EnsureCoowner; + } + + impl pallet_coownership::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type PalletsOrigin = OriginCaller; + } +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = RuntimeGenesisConfig { + // We use default for brevity, but you can configure as desired if needed. + system: Default::default(), + } + .build_storage() + .unwrap(); + t.into() +} diff --git a/substrate/frame/examples/authorization-tx-extension/src/tests.rs b/substrate/frame/examples/authorization-tx-extension/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..7ede549a2f1388afdeb3ec067bd13c693c6aac81 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/src/tests.rs @@ -0,0 +1,269 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-example-authorization-tx-extension. + +use codec::Encode; +use frame_support::{ + assert_noop, + dispatch::GetDispatchInfo, + pallet_prelude::{InvalidTransaction, TransactionValidityError}, +}; +use pallet_verify_signature::VerifySignature; +use sp_keyring::AccountKeyring; +use sp_runtime::{ + traits::{Applyable, Checkable, IdentityLookup, TransactionExtension}, + MultiSignature, MultiSigner, +}; + +use crate::{extensions::AuthorizeCoownership, mock::*, pallet_assets}; + +#[test] +fn create_asset_works() { + new_test_ext().execute_with(|| { + let alice_keyring = AccountKeyring::Alice; + let alice_account = AccountId::from(alice_keyring.public()); + // Simple call to create asset with Id `42`. + let create_asset_call = + RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 }); + // Create extension that will be used for dispatch. + let initial_nonce = 23; + let tx_ext = ( + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::default(), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the transaction signature, to be used in the top level `VerifyMultiSignature` + // extension. + let tx_sign = MultiSignature::Sr25519( + (&create_asset_call, &tx_ext, tx_ext.implicit().unwrap()) + .using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Add the signature to the extension. 
+ let tx_ext = ( + VerifySignature::new_with_signature(tx_sign, alice_account.clone()), + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::default(), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the transaction and we're ready for dispatch. + let uxt = UncheckedExtrinsic::new_transaction(create_asset_call, tx_ext); + // Check Extrinsic validity and apply it. + let uxt_info = uxt.get_dispatch_info(); + let uxt_len = uxt.using_encoded(|e| e.len()); + // Manually pay for Alice's nonce. + frame_system::Account::::mutate(&alice_account, |info| { + info.nonce = initial_nonce; + info.providers = 1; + }); + // Check should pass. + let xt = >>::check( + uxt, + &Default::default(), + ) + .unwrap(); + // Apply the extrinsic. + let res = xt.apply::(&uxt_info, uxt_len).unwrap(); + + // Asserting the results. + assert_eq!(frame_system::Account::::get(&alice_account).nonce, initial_nonce + 1); + assert_eq!( + pallet_assets::AssetOwners::::get(42), + Some(pallet_assets::Owner::::Single(alice_account)) + ); + assert!(res.is_ok()); + }); +} + +#[docify::export] +#[test] +fn create_coowned_asset_works() { + new_test_ext().execute_with(|| { + let alice_keyring = AccountKeyring::Alice; + let bob_keyring = AccountKeyring::Bob; + let charlie_keyring = AccountKeyring::Charlie; + let alice_account = AccountId::from(alice_keyring.public()); + let bob_account = AccountId::from(bob_keyring.public()); + let charlie_account = AccountId::from(charlie_keyring.public()); + // Simple call to create asset with Id `42`. + let create_asset_call = + RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 }); + // Create the inner transaction extension, to be signed by our coowners, Alice and Bob. + let inner_ext: InnerTxExtension = ( + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the payload Alice and Bob need to sign. + let inner_payload = (&create_asset_call, &inner_ext, inner_ext.implicit().unwrap()); + // Create Alice's signature. + let alice_inner_sig = MultiSignature::Sr25519( + inner_payload.using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Create Bob's signature. + let bob_inner_sig = MultiSignature::Sr25519( + inner_payload.using_encoded(|e| bob_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Create the transaction extension, to be signed by the submitter of the extrinsic, let's + // have it be Charlie. + let initial_nonce = 23; + let tx_ext = ( + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::new( + (alice_keyring.into(), alice_inner_sig.clone()), + (bob_keyring.into(), bob_inner_sig.clone()), + ), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create Charlie's transaction signature, to be used in the top level + // `VerifyMultiSignature` extension. + let tx_sign = MultiSignature::Sr25519( + (&create_asset_call, &tx_ext, tx_ext.implicit().unwrap()) + .using_encoded(|e| charlie_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Add the signature to the extension. 
+ let tx_ext = ( + VerifySignature::new_with_signature(tx_sign, charlie_account.clone()), + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::new( + (alice_keyring.into(), alice_inner_sig), + (bob_keyring.into(), bob_inner_sig), + ), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the transaction and we're ready for dispatch. + let uxt = UncheckedExtrinsic::new_transaction(create_asset_call, tx_ext); + // Check Extrinsic validity and apply it. + let uxt_info = uxt.get_dispatch_info(); + let uxt_len = uxt.using_encoded(|e| e.len()); + // Manually pay for Charlie's nonce. + frame_system::Account::::mutate(&charlie_account, |info| { + info.nonce = initial_nonce; + info.providers = 1; + }); + // Check should pass. + let xt = >>::check( + uxt, + &Default::default(), + ) + .unwrap(); + // Apply the extrinsic. + let res = xt.apply::(&uxt_info, uxt_len).unwrap(); + + // Asserting the results. + assert!(res.is_ok()); + assert_eq!(frame_system::Account::::get(charlie_account).nonce, initial_nonce + 1); + assert_eq!( + pallet_assets::AssetOwners::::get(42), + Some(pallet_assets::Owner::::Double(alice_account, bob_account)) + ); + }); +} + +#[test] +fn inner_authorization_works() { + new_test_ext().execute_with(|| { + let alice_keyring = AccountKeyring::Alice; + let bob_keyring = AccountKeyring::Bob; + let charlie_keyring = AccountKeyring::Charlie; + let charlie_account = AccountId::from(charlie_keyring.public()); + // Simple call to create asset with Id `42`. + let create_asset_call = + RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 }); + // Create the inner transaction extension, to be signed by our coowners, Alice and Bob. They + // are going to sign this transaction as a mortal one. + let inner_ext: InnerTxExtension = ( + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + // Sign with mortal era check. + frame_system::CheckEra::::from(sp_runtime::generic::Era::mortal(4, 0)), + ); + // Create the payload Alice and Bob need to sign. + let inner_payload = (&create_asset_call, &inner_ext, inner_ext.implicit().unwrap()); + // Create Alice's signature. + let alice_inner_sig = MultiSignature::Sr25519( + inner_payload.using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Create Bob's signature. + let bob_inner_sig = MultiSignature::Sr25519( + inner_payload.using_encoded(|e| bob_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Create the transaction extension, to be signed by the submitter of the extrinsic, let's + // have it be Charlie. + let initial_nonce = 23; + let tx_ext = ( + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::new( + (alice_keyring.into(), alice_inner_sig.clone()), + (bob_keyring.into(), bob_inner_sig.clone()), + ), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + // Construct the transaction as immortal with a different era check. + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create Charlie's transaction signature, to be used in the top level + // `VerifyMultiSignature` extension. + let tx_sign = MultiSignature::Sr25519( + (&create_asset_call, &tx_ext, tx_ext.implicit().unwrap()) + .using_encoded(|e| charlie_keyring.sign(&sp_io::hashing::blake2_256(e))), + ); + // Add the signature to the extension that Charlie signed. 
+ let tx_ext = ( + VerifySignature::new_with_signature(tx_sign, charlie_account.clone()), + frame_system::CheckNonce::::from(initial_nonce), + AuthorizeCoownership::::new( + (alice_keyring.into(), alice_inner_sig), + (bob_keyring.into(), bob_inner_sig), + ), + frame_system::CheckGenesis::::new(), + frame_system::CheckTxVersion::::new(), + // Construct the transaction as immortal with a different era check. + frame_system::CheckEra::::from(sp_runtime::generic::Era::immortal()), + ); + // Create the transaction and we're ready for dispatch. + let uxt = UncheckedExtrinsic::new_transaction(create_asset_call, tx_ext); + // Check Extrinsic validity and apply it. + let uxt_info = uxt.get_dispatch_info(); + let uxt_len = uxt.using_encoded(|e| e.len()); + // Manually pay for Charlie's nonce. + frame_system::Account::::mutate(charlie_account, |info| { + info.nonce = initial_nonce; + info.providers = 1; + }); + // Check should pass. + let xt = >>::check( + uxt, + &Default::default(), + ) + .unwrap(); + // The extrinsic should fail as the signature for the `AuthorizeCoownership` doesn't work + // for the provided payload with the changed transaction mortality. + assert_noop!( + xt.apply::(&uxt_info, uxt_len), + TransactionValidityError::Invalid(InvalidTransaction::Custom(100)) + ); + }); +} diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index fea04cb447a0790ef99768dd034b59af9cb73105..2f1b32d964e4da5869c2ff3513180bc62f2f5221 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -46,9 +46,10 @@ //! use the [`Config::WeightInfo`] trait to calculate call weights. This can also be overridden, //! as demonstrated by [`Call::set_dummy`]. //! - A private function that performs a storage update. -//! - A simple signed extension implementation (see: [`sp_runtime::traits::SignedExtension`]) which -//! increases the priority of the [`Call::set_dummy`] if it's present and drops any transaction -//! with an encoded length higher than 200 bytes. +//! - A simple transaction extension implementation (see: +//! [`sp_runtime::traits::TransactionExtension`]) which increases the priority of the +//! [`Call::set_dummy`] if it's present and drops any transaction with an encoded length higher +//! than 200 bytes. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -67,10 +68,12 @@ use frame_system::ensure_signed; use log::info; use scale_info::TypeInfo; use sp_runtime::{ - traits::{Bounded, DispatchInfoOf, SaturatedConversion, Saturating, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + impl_tx_ext_default, + traits::{ + Bounded, DispatchInfoOf, DispatchOriginOf, SaturatedConversion, Saturating, + TransactionExtension, ValidateResult, }, + transaction_validity::{InvalidTransaction, ValidTransaction}, }; // Re-export pallet items so that they can be accessed from the crate namespace. @@ -440,42 +443,43 @@ impl Pallet { } } -// Similar to other FRAME pallets, your pallet can also define a signed extension and perform some -// checks and [pre/post]processing [before/after] the transaction. A signed extension can be any -// decodable type that implements `SignedExtension`. See the trait definition for the full list of -// bounds. 
As a convention, you can follow this approach to create an extension for your pallet: +// Similar to other FRAME pallets, your pallet can also define a transaction extension and perform +// some checks and [pre/post]processing [before/after] the transaction. A transaction extension can +// be any decodable type that implements `TransactionExtension`. See the trait definition for the +// full list of bounds. As a convention, you can follow this approach to create an extension for +// your pallet: // - If the extension does not carry any data, then use a tuple struct with just a `marker` // (needed for the compiler to accept `T: Config`) will suffice. // - Otherwise, create a tuple struct which contains the external data. Of course, for the entire // struct to be decodable, each individual item also needs to be decodable. // -// Note that a signed extension can also indicate that a particular data must be present in the -// _signing payload_ of a transaction by providing an implementation for the `additional_signed` -// method. This example will not cover this type of extension. See `CheckSpecVersion` in -// [FRAME System](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/system#signed-extensions) +// Note that a transaction extension can also indicate that a particular data must be present in the +// _signing payload_ of a transaction by providing an implementation for the `implicit` method. This +// example will not cover this type of extension. See `CheckSpecVersion` in [FRAME +// System](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/frame/system#signed-extensions) // for an example. // // Using the extension, you can add some hooks to the life cycle of each transaction. Note that by // default, an extension is applied to all `Call` functions (i.e. all transactions). the `Call` enum -// variant is given to each function of `SignedExtension`. Hence, you can filter based on pallet or -// a particular call if needed. +// variant is given to each function of `TransactionExtension`. Hence, you can filter based on +// pallet or a particular call if needed. // // Some extra information, such as encoded length, some static dispatch info like weight and the // sender of the transaction (if signed) are also provided. // -// The full list of hooks that can be added to a signed extension can be found -// [here](https://paritytech.github.io/polkadot-sdk/master/sp_runtime/traits/trait.SignedExtension.html). +// The full list of hooks that can be added to a transaction extension can be found in the +// `TransactionExtension` trait definition. // -// The signed extensions are aggregated in the runtime file of a substrate chain. All extensions -// should be aggregated in a tuple and passed to the `CheckedExtrinsic` and `UncheckedExtrinsic` -// types defined in the runtime. Lookup `pub type SignedExtra = (...)` in `node/runtime` and -// `node-template` for an example of this. +// The transaction extensions are aggregated in the runtime file of a substrate chain. All +// extensions should be aggregated in a tuple and passed to the `CheckedExtrinsic` and +// `UncheckedExtrinsic` types defined in the runtime. Lookup `pub type TxExtension = (...)` in +// `node/runtime` and `node-template` for an example of this. -/// A simple signed extension that checks for the `set_dummy` call. In that case, it increases the -/// priority and prints some log. +/// A simple transaction extension that checks for the `set_dummy` call. 
In that case, it increases +/// the priority and prints some log. /// /// Additionally, it drops any transaction with an encoded length higher than 200 bytes. No -/// particular reason why, just to demonstrate the power of signed extensions. +/// particular reason why, just to demonstrate the power of transaction extensions. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct WatchDummy(PhantomData); @@ -486,52 +490,42 @@ impl core::fmt::Debug for WatchDummy { } } -impl SignedExtension for WatchDummy +impl TransactionExtension<::RuntimeCall> + for WatchDummy where ::RuntimeCall: IsSubType>, { const IDENTIFIER: &'static str = "WatchDummy"; - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); type Pre = (); - - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + type Val = (); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: DispatchOriginOf<::RuntimeCall>, + call: &::RuntimeCall, + _info: &DispatchInfoOf<::RuntimeCall>, len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult::RuntimeCall> { // if the transaction is too big, just drop it. if len > 200 { - return InvalidTransaction::ExhaustsResources.into() + return Err(InvalidTransaction::ExhaustsResources.into()) } // check for `set_dummy` - match call.is_sub_type() { + let validity = match call.is_sub_type() { Some(Call::set_dummy { .. }) => { sp_runtime::print("set_dummy was received."); let valid_tx = ValidTransaction { priority: Bounded::max_value(), ..Default::default() }; - Ok(valid_tx) + valid_tx }, - _ => Ok(Default::default()), - } + _ => Default::default(), + }; + Ok((validity, (), origin)) } + impl_tx_ext_default!(::RuntimeCall; weight prepare); } diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index d7095eb3c944ffb4532c7ec0efeaca3e6d95a4b3..8e33d3d0a3487e4e210fa14ac6ccad1072368511 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -27,7 +27,7 @@ use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, DispatchTransaction, IdentityLookup}, BuildStorage, }; // Reexport crate as its pallet name for construct_runtime. @@ -146,13 +146,16 @@ fn signed_ext_watch_dummy_works() { assert_eq!( WatchDummy::(PhantomData) - .validate(&1, &call, &info, 150) + .validate_only(Some(1).into(), &call, &info, 150) .unwrap() + .0 .priority, u64::MAX, ); assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 250), + WatchDummy::(PhantomData) + .validate_only(Some(1).into(), &call, &info, 250) + .unwrap_err(), InvalidTransaction::ExhaustsResources.into(), ); }) @@ -174,13 +177,13 @@ fn weights_work() { let info1 = default_call.get_dispatch_info(); // aka. 
`let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` // TODO: account for proof size weight - assert!(info1.weight.ref_time() > 0); - assert_eq!(info1.weight, ::WeightInfo::accumulate_dummy()); + assert!(info1.call_weight.ref_time() > 0); + assert_eq!(info1.call_weight, ::WeightInfo::accumulate_dummy()); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less. let custom_call = pallet_example_basic::Call::::set_dummy { new_value: 20 }; let info2 = custom_call.get_dispatch_info(); // TODO: account for proof size weight - assert!(info1.weight.ref_time() > info2.weight.ref_time()); + assert!(info1.call_weight.ref_time() > info2.call_weight.ref_time()); } diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index add014f6b34a96971eca3ed4f0570bea9a797565..b3fdb6ea1897ac1471abb66fd676203c58ea0c7a 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -53,8 +53,8 @@ use frame_support::traits::Get; use frame_system::{ self as system, offchain::{ - AppCrypto, CreateSignedTransaction, SendSignedTransaction, SendUnsignedTransaction, - SignedPayload, Signer, SigningTypes, SubmitTransaction, + AppCrypto, CreateInherent, CreateSignedTransaction, SendSignedTransaction, + SendUnsignedTransaction, SignedPayload, Signer, SigningTypes, SubmitTransaction, }, pallet_prelude::BlockNumberFor, }; @@ -124,7 +124,9 @@ pub mod pallet { /// This pallet's configuration trait #[pallet::config] - pub trait Config: CreateSignedTransaction> + frame_system::Config { + pub trait Config: + CreateSignedTransaction> + CreateInherent> + frame_system::Config + { /// The identifier type for an offchain worker. type AuthorityId: AppCrypto; @@ -501,7 +503,8 @@ impl Pallet { // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam // attack vectors. See validation logic docs for more details. 
// - SubmitTransaction::>::submit_unsigned_transaction(call.into()) + let xt = T::create_inherent(call.into()); + SubmitTransaction::>::submit_transaction(xt) .map_err(|()| "Unable to submit unsigned transaction.")?; Ok(()) diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index 741adbe6d26ac4b0f8fb0157b4668949c0cca2c9..755beb8b82ece5f521ff7786390e4782c97fae26 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -31,7 +31,7 @@ use sp_core::{ use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use sp_runtime::{ testing::TestXt, - traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify}, + traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, RuntimeAppPublic, }; @@ -80,25 +80,47 @@ impl frame_system::offchain::SigningTypes for Test { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateTransaction for Test +where + RuntimeCall: From, +{ + type Extension = (); + + fn create_transaction(call: RuntimeCall, _extension: Self::Extension) -> Extrinsic { + Extrinsic::new_transaction(call, ()) + } +} + impl frame_system::offchain::CreateSignedTransaction for Test where RuntimeCall: From, { - fn create_transaction>( + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, _public: ::Signer, _account: AccountId, nonce: u64, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { - Some((call, (nonce, ()))) + ) -> Option { + Some(Extrinsic::new_signed(call, nonce, (), ())) + } +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) } } @@ -218,8 +240,8 @@ fn should_submit_signed_transaction_on_chain() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); + assert!(matches!(tx.preamble, sp_runtime::generic::Preamble::Signed(0, (), 0, (),))); + assert_eq!(tx.function, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); }); } @@ -258,11 +280,11 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { // then let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, - }) = tx.call + }) = tx.function { assert_eq!(body, price_payload); @@ -313,11 +335,11 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { // then let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, - }) = tx.call + }) = tx.function { assert_eq!(body, price_payload); @@ -354,9 +376,9 @@ fn 
should_submit_raw_unsigned_transaction_on_chain() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + assert!(tx.is_inherent()); assert_eq!( - tx.call, + tx.function, RuntimeCall::Example(crate::Call::submit_price_unsigned { block_number: 1, price: 15523 diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index dee23a41379fc8b732252b9fe4db39bf809699fd..d0d30830f2f04d1bfd4c4afa5a39be91ee549a54 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -40,12 +40,16 @@ //! - [`pallet_example_split`]: A simple example of a FRAME pallet demonstrating the ability to //! split sections across multiple files. //! -//! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be -//! built using only the `frame` umbrella crate. +//! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be built using only the +//! `frame` umbrella crate. //! //! - [`pallet_example_single_block_migrations`]: An example pallet demonstrating best-practices for //! writing storage migrations. //! //! - [`pallet_example_tasks`]: This pallet demonstrates the use of `Tasks` to execute service work. //! +//! - [`pallet_example_authorization_tx_extension`]: An example `TransactionExtension` that +//! authorizes a custom origin through signature validation, along with two support pallets to +//! showcase the usage. +//! //! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. diff --git a/substrate/frame/examples/tasks/src/lib.rs b/substrate/frame/examples/tasks/src/lib.rs index 1908a235ba15868c41f1fe62c3edd43532e329cf..7d51617497d65fcf6ba68aa441edff574cd942e4 100644 --- a/substrate/frame/examples/tasks/src/lib.rs +++ b/substrate/frame/examples/tasks/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::dispatch::DispatchResult; -use frame_system::offchain::SendTransactionTypes; +use frame_system::offchain::CreateInherent; #[cfg(feature = "experimental")] use frame_system::offchain::SubmitTransaction; // Re-export pallet items so that they can be accessed from the crate namespace. 
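// Illustrative sketch, not part of the diff: `submit_unsigned_transaction` is gone;
// an offchain worker now builds a bare extrinsic via `CreateInherent` and submits it
// (generics restored; `submit_price_unsigned` is the example pallet's call as in the
// hunks above):
let call = Call::submit_price_unsigned { block_number, price };
let xt = T::create_inherent(call.into());
SubmitTransaction::<T, Call<T>>::submit_transaction(xt)
    .map_err(|()| "Unable to submit unsigned transaction.")?;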
@@ -77,22 +77,21 @@ pub mod pallet { let call = frame_system::Call::::do_task { task: runtime_task.into() }; // Submit the task as an unsigned transaction - let res = - SubmitTransaction::>::submit_unsigned_transaction( - call.into(), - ); + let xt = >>::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => log::info!(target: LOG_TARGET, "Submitted the task."), Err(e) => log::error!(target: LOG_TARGET, "Error submitting task: {:?}", e), } } } + + #[cfg(not(feature = "experimental"))] + fn offchain_worker(_block_number: BlockNumberFor) {} } #[pallet::config] - pub trait Config: - SendTransactionTypes> + frame_system::Config - { + pub trait Config: CreateInherent> + frame_system::Config { type RuntimeTask: frame_support::traits::Task + IsType<::RuntimeTask> + From>; diff --git a/substrate/frame/examples/tasks/src/mock.rs b/substrate/frame/examples/tasks/src/mock.rs index 9a1112946f69f387172d067fd2dcc6690b62d93e..3dc9153c94a06f4da07311c78a7e69fc3e5d700e 100644 --- a/substrate/frame/examples/tasks/src/mock.rs +++ b/substrate/frame/examples/tasks/src/mock.rs @@ -40,14 +40,23 @@ impl frame_system::Config for Runtime { type Block = Block; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + impl pallet_example_tasks::Config for Runtime { type RuntimeTask = RuntimeTask; type WeightInfo = (); diff --git a/substrate/frame/examples/tasks/src/tests.rs b/substrate/frame/examples/tasks/src/tests.rs index 6c8acb0194bde6cba27e4e2706f78676468c0057..4b31849c2ea90d7da792cc33b396aa1b8b8bfb97 100644 --- a/substrate/frame/examples/tasks/src/tests.rs +++ b/substrate/frame/examples/tasks/src/tests.rs @@ -157,6 +157,7 @@ fn task_with_offchain_worker() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); + use sp_runtime::traits::ExtrinsicLike; + assert!(tx.is_bare()); }); } diff --git a/substrate/frame/examples/tasks/src/weights.rs b/substrate/frame/examples/tasks/src/weights.rs index 793af6e962201fd9f92e0260ea0e24f5bc39753d..c9ddea6f9a8ab4ed19d12f8db89fa3dff59cca3f 100644 --- a/substrate/frame/examples/tasks/src/weights.rs +++ b/substrate/frame/examples/tasks/src/weights.rs @@ -15,30 +15,31 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for `pallet_example_tasks` +//! Autogenerated weights for `tasks_example` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-02, STEPS: `20`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `MacBook.local`, CPU: `` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/release/node-template +// ./target/production/substrate-node // benchmark // pallet -// --chain -// dev -// --pallet -// pallet_example_tasks -// --extrinsic -// * -// --steps -// 20 -// --repeat -// 10 -// --output -// frame/examples/tasks/src/weights.rs +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=tasks_example +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/examples/tasks/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,37 +49,42 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_template. +/// Weight functions needed for `tasks_example`. pub trait WeightInfo { fn add_number_into_total() -> Weight; } -/// Weight functions for `pallet_example_kitchensink`. +/// Weights for `tasks_example` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Kitchensink OtherFoo (r:0 w:1) - /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TasksExample::Numbers` (r:1 w:1) + /// Proof: `TasksExample::Numbers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `TasksExample::Total` (r:1 w:1) + /// Proof: `TasksExample::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn add_number_into_total() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) + // Measured: `149` + // Estimated: `3614` + // Minimum execution time: 5_776_000 picoseconds. + Weight::from_parts(6_178_000, 3614) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } } +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Kitchensink OtherFoo (r:0 w:1) - /// Proof Skipped: Kitchensink OtherFoo (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `TasksExample::Numbers` (r:1 w:1) + /// Proof: `TasksExample::Numbers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `TasksExample::Total` (r:1 w:1) + /// Proof: `TasksExample::Total` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn add_number_into_total() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(RocksDbWeight::get().writes(1)) + // Measured: `149` + // Estimated: `3614` + // Minimum execution time: 5_776_000 picoseconds. 
+ Weight::from_parts(6_178_000, 3614) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } } diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs index 69a970a89d93070fec01a3b80867e1ff529d014c..3841b010325b28c832ed197ac55c479c724c9afc 100644 --- a/substrate/frame/executive/src/tests.rs +++ b/substrate/frame/executive/src/tests.rs @@ -24,7 +24,7 @@ use sp_core::H256; use sp_runtime::{ generic::{DigestItem, Era}, testing::{Block, Digest, Header}, - traits::{Block as BlockT, Header as HeaderT}, + traits::{Block as BlockT, Header as HeaderT, TransactionExtension}, transaction_validity::{ InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction, }, @@ -309,6 +309,34 @@ parameter_types! { }; } +pub struct MockExtensionsWeights; +impl frame_system::ExtensionsWeightInfo for MockExtensionsWeights { + fn check_genesis() -> Weight { + Weight::zero() + } + fn check_mortality_mortal_transaction() -> Weight { + Weight::from_parts(10, 0) + } + fn check_mortality_immortal_transaction() -> Weight { + Weight::from_parts(10, 0) + } + fn check_non_zero_sender() -> Weight { + Weight::zero() + } + fn check_nonce() -> Weight { + Weight::from_parts(10, 0) + } + fn check_spec_version() -> Weight { + Weight::zero() + } + fn check_tx_version() -> Weight { + Weight::zero() + } + fn check_weight() -> Weight { + Weight::from_parts(10, 0) + } +} + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type BlockWeights = BlockWeights; @@ -323,6 +351,7 @@ impl frame_system::Config for Runtime { type PostInherents = MockedSystemCallbacks; type PostTransactions = MockedSystemCallbacks; type MultiBlockMigrator = MockedModeGetter; + type ExtensionsWeightInfo = MockExtensionsWeights; } #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, MaxEncodedLen, TypeInfo, RuntimeDebug)] @@ -336,15 +365,60 @@ impl VariantCount for FreezeReasonId { type Balance = u64; +pub struct BalancesWeights; +impl pallet_balances::WeightInfo for BalancesWeights { + fn transfer_allow_death() -> Weight { + Weight::from_parts(25, 0) + } + fn transfer_keep_alive() -> Weight { + Weight::zero() + } + fn force_set_balance_creating() -> Weight { + Weight::zero() + } + fn force_set_balance_killing() -> Weight { + Weight::zero() + } + fn force_transfer() -> Weight { + Weight::zero() + } + fn transfer_all() -> Weight { + Weight::zero() + } + fn force_unreserve() -> Weight { + Weight::zero() + } + fn upgrade_accounts(_u: u32) -> Weight { + Weight::zero() + } + fn force_adjust_total_issuance() -> Weight { + Weight::zero() + } + fn burn_allow_death() -> Weight { + Weight::zero() + } + fn burn_keep_alive() -> Weight { + Weight::zero() + } +} + #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Runtime { type Balance = Balance; type AccountStore = System; + type WeightInfo = BalancesWeights; type RuntimeFreezeReason = FreezeReasonId; type FreezeIdentifier = FreezeReasonId; type MaxFreezes = VariantCountOf; } +pub struct MockTxPaymentWeights; +impl pallet_transaction_payment::WeightInfo for MockTxPaymentWeights { + fn charge_transaction_payment() -> Weight { + Weight::from_parts(10, 0) + } +} + parameter_types! 
{ pub const TransactionByteFee: Balance = 0; } @@ -355,6 +429,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = IdentityFee; type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = (); + type WeightInfo = MockTxPaymentWeights; } impl custom::Config for Runtime {} @@ -372,14 +447,19 @@ parameter_types! { Default::default(); } -type SignedExtra = ( +type TxExtension = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); -type TestXt = sp_runtime::testing::TestXt; -type TestBlock = Block; +type UncheckedXt = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + sp_runtime::testing::UintAuthorityId, + TxExtension, +>; +type TestBlock = Block; // Will contain `true` when the custom runtime logic was called. const CUSTOM_ON_RUNTIME_KEY: &[u8] = b":custom:on_runtime"; @@ -399,7 +479,7 @@ impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { type Executive = super::Executive< Runtime, - Block, + Block, ChainContext, Runtime, AllPalletsWithSystem, @@ -474,17 +554,14 @@ impl MultiStepMigrator for MockedModeGetter { } } -fn extra(nonce: u64, fee: Balance) -> SignedExtra { +fn tx_ext(nonce: u64, fee: Balance) -> TxExtension { ( frame_system::CheckEra::from(Era::Immortal), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), pallet_transaction_payment::ChargeTransactionPayment::from(fee), ) -} - -fn sign_extra(who: u64, nonce: u64, fee: Balance) -> Option<(u64, SignedExtra)> { - Some((who, extra(nonce, fee))) + .into() } fn call_transfer(dest: u64, value: u64) -> RuntimeCall { @@ -497,8 +574,8 @@ fn balance_transfer_dispatch_works() { pallet_balances::GenesisConfig:: { balances: vec![(1, 211)] } .assimilate_storage(&mut t) .unwrap(); - let xt = TestXt::new(call_transfer(2, 69), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight + + let xt = UncheckedXt::new_signed(call_transfer(2, 69), 1, 1.into(), tx_ext(0, 0)); + let weight = xt.get_dispatch_info().total_weight() + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; @@ -608,7 +685,7 @@ fn block_import_of_bad_extrinsic_root_fails() { fn bad_extrinsic_not_inserted() { let mut t = new_test_ext(1); // bad nonce check! 
- let xt = TestXt::new(call_transfer(33, 69), sign_extra(1, 30, 0)); + let xt = UncheckedXt::new_signed(call_transfer(33, 69), 1, 1.into(), tx_ext(30, 0)); t.execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); assert_err!( @@ -622,35 +699,47 @@ fn bad_extrinsic_not_inserted() { #[test] fn block_weight_limit_enforced() { let mut t = new_test_ext(10000); - // given: TestXt uses the encoded len as fixed Len: - let xt = TestXt::new( - RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), - ); - let encoded = xt.encode(); - let encoded_len = encoded.len() as u64; + let transfer_weight = + <::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death(); + let extension_weight = tx_ext(0u32.into(), 0) + .weight(&RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 })); // on_initialize weight + base block execution weight let block_weights = ::BlockWeights::get(); let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block; let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; - let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); + let num_to_exhaust_block = + limit.ref_time() / (transfer_weight.ref_time() + extension_weight.ref_time() + 5); t.execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); // Base block execution weight + `on_initialize` weight from the custom module. assert_eq!(>::block_weight().total(), base_block_weight); for nonce in 0..=num_to_exhaust_block { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, nonce.into(), 0), + 1, + 1.into(), + tx_ext(nonce.into(), 0), ); + let encoded = xt.encode(); + let encoded_len = encoded.len() as u64; let res = Executive::apply_extrinsic(xt); if nonce != num_to_exhaust_block { assert!(res.is_ok()); assert_eq!( >::block_weight().total(), - //--------------------- on_initialize + block_execution + extrinsic_base weight + extrinsic len - Weight::from_parts((encoded_len + 5) * (nonce + 1), (nonce + 1)* encoded_len) + base_block_weight, + //--------------------- + // on_initialize + // + block_execution + // + extrinsic_base weight + // + call weight + // + extension weight + // + extrinsic len + Weight::from_parts( + (transfer_weight.ref_time() + extension_weight.ref_time() + 5) * + (nonce + 1), + (nonce + 1) * encoded_len + ) + base_block_weight, ); assert_eq!( >::extrinsic_index(), @@ -665,20 +754,28 @@ fn block_weight_limit_enforced() { #[test] fn block_weight_and_size_is_stored_per_tx() { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); - let x1 = TestXt::new( + let x1 = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 1, 0), + 1, + 1.into(), + tx_ext(1, 0), ); - let x2 = TestXt::new( + let x2 = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 2, 0), + 1, + 1.into(), + tx_ext(2, 0), ); let len = xt.clone().encode().len() as u32; - let mut t = new_test_ext(1); + let extension_weight = xt.extension_weight(); + let transfer_weight = <::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death(); + let mut t = new_test_ext(2); t.execute_with(|| { 
// Block execution weight + on_initialize weight from custom module let base_block_weight = Weight::from_parts(175, 0) + @@ -693,8 +790,8 @@ fn block_weight_and_size_is_stored_per_tx() { assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); - // default weight for `TestXt` == encoded length. - let extrinsic_weight = Weight::from_parts(len as u64, 0) + + let extrinsic_weight = transfer_weight + + extension_weight + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; @@ -720,8 +817,8 @@ fn block_weight_and_size_is_stored_per_tx() { #[test] fn validate_unsigned() { - let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); - let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); + let valid = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::allowed_unsigned {})); + let invalid = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::unallowed_unsigned {})); let mut t = new_test_ext(1); t.execute_with(|| { @@ -762,9 +859,11 @@ fn can_not_pay_for_tx_fee_on_full_lock() { 110, ) .unwrap(); - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::System(frame_system::Call::remark { remark: vec![1u8] }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); Executive::initialize_block(&Header::new_from_number(1)); @@ -889,9 +988,11 @@ fn event_from_runtime_upgrade_is_included() { /// used through the `ExecuteBlock` trait. #[test] fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); let header = new_test_ext(1).execute_with(|| { @@ -919,7 +1020,10 @@ fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); - >>::execute_block(Block::new(header, vec![xt])); + >>::execute_block(Block::new( + header, + vec![xt], + )); assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); @@ -985,7 +1089,7 @@ fn offchain_worker_works_as_expected() { #[test] fn calculating_storage_root_twice_works() { let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); - let xt = TestXt::new(call, sign_extra(1, 0, 0)); + let xt = UncheckedXt::new_signed(call, 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1004,11 +1108,13 @@ fn calculating_storage_root_twice_works() { #[test] #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] fn invalid_inherent_position_fail() { - let xt1 = TestXt::new( + let xt1 = UncheckedXt::new_signed( RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }), - sign_extra(1, 0, 0), + 1, + 1.into(), + tx_ext(0, 0), ); - let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt2 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. 
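// The reworked assertions above account for extrinsic weight as three parts instead of
// "encoded length as weight": the benchmarked call weight, the weight of the `TxExtension`
// pipeline, and the per-class `base_extrinsic` overhead; `total_weight()` as used above
// combines the call and extension parts. A minimal illustrative helper (not part of the
// patch) spelling out the sum asserted in `block_weight_and_size_is_stored_per_tx`:
use frame_support::weights::Weight;

fn expected_extrinsic_weight(
	call_weight: Weight,      // e.g. `pallet_balances` `transfer_allow_death()` benchmark
	extension_weight: Weight, // e.g. `tx_ext(0, 0).weight(&call)` or `xt.extension_weight()`
	base_extrinsic: Weight,   // `BlockWeights::get().get(DispatchClass::Normal).base_extrinsic`
) -> Weight {
	call_weight.saturating_add(extension_weight).saturating_add(base_extrinsic)
}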
@@ -1027,8 +1133,8 @@ fn invalid_inherent_position_fail() { #[test] fn valid_inherents_position_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1048,7 +1154,12 @@ fn valid_inherents_position_works() { #[test] #[should_panic(expected = "A call was labelled as mandatory, but resulted in an Error.")] fn invalid_inherents_fail_block_execution() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom(custom::Call::inherent {}), + 1, + 1.into(), + tx_ext(0, 0), + ); new_test_ext(1).execute_with(|| { Executive::execute_block(Block::new( @@ -1061,7 +1172,7 @@ fn invalid_inherents_fail_block_execution() { // Inherents are created by the runtime and don't need to be validated. #[test] fn inherents_fail_validate_block() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); new_test_ext(1).execute_with(|| { assert_eq!( @@ -1075,7 +1186,7 @@ fn inherents_fail_validate_block() { /// Inherents still work while `initialize_block` forbids transactions. #[test] fn inherents_ok_while_exts_forbidden_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1095,8 +1206,8 @@ fn inherents_ok_while_exts_forbidden_works() { #[test] #[should_panic = "Only inherents are allowed in this block"] fn transactions_in_only_inherents_block_errors() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1116,8 +1227,8 @@ fn transactions_in_only_inherents_block_errors() { /// Same as above but no error. 
#[test] fn transactions_in_normal_block_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1137,8 +1248,8 @@ fn transactions_in_normal_block_works() { #[test] #[cfg(feature = "try-runtime")] fn try_execute_block_works() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { Executive::initialize_block(&Header::new_from_number(1)); @@ -1165,8 +1276,8 @@ fn try_execute_block_works() { #[cfg(feature = "try-runtime")] #[should_panic = "Only inherents allowed"] fn try_execute_tx_forbidden_errors() { - let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1193,9 +1304,9 @@ fn try_execute_tx_forbidden_errors() { /// Check that `ensure_inherents_are_first` reports the correct indices. #[test] fn ensure_inherents_are_first_works() { - let in1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None); - let in2 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); - let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); + let in2 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); + let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); // Mocked empty header: let header = new_test_ext(1).execute_with(|| { @@ -1273,18 +1384,20 @@ fn callbacks_in_block_execution_works_inner(mbms_active: bool) { for i in 0..n_in { let xt = if i % 2 == 0 { - TestXt::new(RuntimeCall::Custom(custom::Call::inherent {}), None) + UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})) } else { - TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None) + UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})) }; Executive::apply_extrinsic(xt.clone()).unwrap().unwrap(); extrinsics.push(xt); } for t in 0..n_tx { - let xt = TestXt::new( + let xt = UncheckedXt::new_signed( RuntimeCall::Custom2(custom2::Call::some_call {}), - sign_extra(1, t as u64, 0), + 1, + 1.into(), + tx_ext(t as u64, 0), ); // Extrinsics can be applied even when MBMs are active. Only the `execute_block` // will reject it. 
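// For readers skimming the many mechanical changes above: the old `TestXt::new(call, None)`
// and `TestXt::new(call, sign_extra(..))` constructions map onto `new_bare` and `new_signed`
// on the generic `UncheckedExtrinsic`. A small sketch using the `UncheckedXt` alias and the
// `tx_ext` helper defined earlier in this file's diff (the function itself is illustrative):
fn example_extrinsics(call: RuntimeCall) -> (UncheckedXt, UncheckedXt) {
	// Bare extrinsic: inherents and unsigned transactions (formerly `TestXt::new(call, None)`).
	let bare = UncheckedXt::new_bare(call.clone());
	// Signed extrinsic: signer `1`, test signature `1.into()`, plus the `TxExtension` tuple
	// (formerly `TestXt::new(call, sign_extra(1, 0, 0))`).
	let signed = UncheckedXt::new_signed(call, 1, 1.into(), tx_ext(0, 0));
	(bare, signed)
}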
@@ -1324,8 +1437,13 @@ fn callbacks_in_block_execution_works_inner(mbms_active: bool) { #[test] fn post_inherent_called_after_all_inherents() { - let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); - let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom2(custom2::Call::some_call {}), + 1, + 1.into(), + tx_ext(0, 0), + ); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1359,8 +1477,13 @@ fn post_inherent_called_after_all_inherents() { /// Regression test for AppSec finding #40. #[test] fn post_inherent_called_after_all_optional_inherents() { - let in1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); - let xt1 = TestXt::new(RuntimeCall::Custom2(custom2::Call::some_call {}), sign_extra(1, 0, 0)); + let in1 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})); + let xt1 = UncheckedXt::new_signed( + RuntimeCall::Custom2(custom2::Call::some_call {}), + 1, + 1.into(), + tx_ext(0, 0), + ); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1393,14 +1516,14 @@ fn post_inherent_called_after_all_optional_inherents() { #[test] fn is_inherent_works() { - let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::inherent {}), None); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); assert!(Runtime::is_inherent(&ext)); - let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::optional_inherent {}), None); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::optional_inherent {})); assert!(Runtime::is_inherent(&ext)); - let ext = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); + let ext = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); assert!(!Runtime::is_inherent(&ext)); - let ext = TestXt::new(RuntimeCall::Custom2(custom2::Call::allowed_unsigned {}), None); + let ext = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::allowed_unsigned {})); assert!(!Runtime::is_inherent(&ext), "Unsigned ext are not automatically inherents"); } diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs index b213c1ceb7219f8ae4c860ba85c23ef1033dde93..2366c957e9ab17e335ed5c830af64134ad39f033 100644 --- a/substrate/frame/grandpa/src/equivocation.rs +++ b/substrate/frame/grandpa/src/equivocation.rs @@ -110,7 +110,7 @@ impl Offence for EquivocationOffence { /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactionTypes`. +/// `offchain::CreateTransactionBase`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. 
@@ -122,7 +122,7 @@ impl (EquivocationProof>, T::KeyOwnerProof), > for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -144,7 +144,8 @@ where equivocation_proof: Box::new(equivocation_proof), key_owner_proof, }; - let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); + let xt = T::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report"), Err(e) => error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e), diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index caac4107cfb711813cd2b98c85ed5b0dc54c0466..cf4c29003a715b8217ca3799ed3dc7d2679c2ccd 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -72,14 +72,23 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = TestXt; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + TestXt::new_bare(call) + } +} + parameter_types! { pub const Period: u64 = 1; pub const Offset: u64 = 0; diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 8b12d63adaadb27c3835f29d8345bd6e6a5b6997..e1e963ce564a76a75e815ed22d52791f964a4786 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -882,7 +882,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.any_gt(Weight::zero())); + assert!(info.call_weight.any_gt(Weight::zero())); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index ee2a8451d6fb7935f4ae06e34a9c9fde039722ca..74d3bc6484dd47217be4bdc31d2a68f97e687a8e 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -95,7 +95,7 @@ use frame_support::{ BoundedSlice, WeakBoundedVec, }; use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, + offchain::{CreateInherent, SubmitTransaction}, pallet_prelude::*, }; pub use pallet::*; @@ -261,7 +261,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: SendTransactionTypes> + frame_system::Config { + pub trait Config: CreateInherent> + frame_system::Config { /// The identifier type for an authority. 
type AuthorityId: Member + Parameter @@ -642,7 +642,8 @@ impl Pallet { call, ); - SubmitTransaction::>::submit_unsigned_transaction(call.into()) + let xt = T::create_inherent(call.into()); + SubmitTransaction::>::submit_transaction(xt) .map_err(|_| OffchainErr::SubmitTransaction)?; Ok(()) diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 882581702ea11341146c908d90205361dca0db30..a5d9a6e20e616945d00016944eea16d15b2d050d 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -25,11 +25,7 @@ use frame_support::{ weights::Weight, }; use pallet_session::historical as pallet_session_historical; -use sp_runtime::{ - testing::{TestXt, UintAuthorityId}, - traits::ConvertInto, - BuildStorage, Permill, -}; +use sp_runtime::{testing::UintAuthorityId, traits::ConvertInto, BuildStorage, Permill}; use sp_staking::{ offence::{OffenceError, ReportOffence}, SessionIndex, @@ -77,7 +73,7 @@ impl pallet_session::historical::SessionManager for TestSessionManager } /// An extrinsic type used for tests. -pub type Extrinsic = TestXt; +pub type Extrinsic = sp_runtime::testing::TestXt; type IdentificationTuple = (u64, u64); type Offence = crate::UnresponsivenessOffence; @@ -191,14 +187,23 @@ impl Config for Runtime { type MaxPeerInHeartbeats = ConstU32<10_000>; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + pub fn advance_session() { let now = System::block_number().max(1); System::set_block_number(now + 1); diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index 12333d59ef8959d26836330bb4a3a1be93ac49ab..b9a2772da689c05d9c95d9bb6cfc1434fe880b31 100644 --- a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -225,7 +225,7 @@ fn should_generate_heartbeats() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { + let heartbeat = match ex.function { crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, e => panic!("Unexpected call: {:?}", e), @@ -339,7 +339,7 @@ fn should_not_send_a_report_if_already_online() { assert_eq!(pool_state.read().transactions.len(), 0); // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { + let heartbeat = match ex.function { crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. 
}) => heartbeat, e => panic!("Unexpected call: {:?}", e), diff --git a/substrate/frame/lottery/src/lib.rs b/substrate/frame/lottery/src/lib.rs index 0071b258fc45c0fc7539e0394d4c95104d2a94ce..6a15de55ebd761afce02da60ff7c1c0c4f7aa235 100644 --- a/substrate/frame/lottery/src/lib.rs +++ b/substrate/frame/lottery/src/lib.rs @@ -300,7 +300,7 @@ pub mod pallet { #[pallet::call_index(0)] #[pallet::weight( T::WeightInfo::buy_ticket() - .saturating_add(call.get_dispatch_info().weight) + .saturating_add(call.get_dispatch_info().call_weight) )] pub fn buy_ticket( origin: OriginFor, diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 31402f2a9d81d87c14f99e690eec69b88aa430ad..04620fa88d85b8106cbff3e9fac4fa13054d0eb3 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -868,13 +868,26 @@ impl Pallet { } } - /// The maximal weight that a single message can consume. + /// The maximal weight that a single message ever can consume. /// /// Any message using more than this will be marked as permanently overweight and not /// automatically re-attempted. Returns `None` if the servicing of a message cannot begin. /// `Some(0)` means that only messages with no weight may be served. fn max_message_weight(limit: Weight) -> Option { - limit.checked_sub(&Self::single_msg_overhead()) + let service_weight = T::ServiceWeight::get().unwrap_or_default(); + let on_idle_weight = T::IdleMaxServiceWeight::get().unwrap_or_default(); + + // Whatever weight is set, the one with the biggest one is used as the maximum weight. If a + // message is tried in one context and fails, it will be retried in the other context later. + let max_message_weight = + if service_weight.any_gt(on_idle_weight) { service_weight } else { on_idle_weight }; + + if max_message_weight.is_zero() { + // If no service weight is set, we need to use the given limit as max message weight. + limit.checked_sub(&Self::single_msg_overhead()) + } else { + max_message_weight.checked_sub(&Self::single_msg_overhead()) + } } /// The overhead of servicing a single message. 
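// A standalone model (plain `u64` instead of the two-dimensional `Weight`) of the new
// `max_message_weight` selection above, to make the branching easy to follow. `overhead`
// stands in for `single_msg_overhead()`; the real code uses `any_gt`, `is_zero` and
// `checked_sub` on `Weight`.
fn max_message_weight_model(
	limit: u64,
	service_weight: Option<u64>,
	on_idle_weight: Option<u64>,
	overhead: u64,
) -> Option<u64> {
	let service = service_weight.unwrap_or_default();
	let on_idle = on_idle_weight.unwrap_or_default();
	// Whichever servicing context grants more weight bounds the largest message that can
	// ever be executed; a message that fails in one context is retried in the other.
	let max = service.max(on_idle);
	if max == 0 {
		// Neither limit is configured: fall back to the limit handed to `service_queues`.
		limit.checked_sub(overhead)
	} else {
		max.checked_sub(overhead)
	}
}

#[test]
fn max_message_weight_model_works() {
	// ServiceWeight = 100, IdleMaxServiceWeight = 50, per-message overhead = 10.
	assert_eq!(max_message_weight_model(1_000, Some(100), Some(50), 10), Some(90));
	// Nothing configured: only the caller-provided limit applies.
	assert_eq!(max_message_weight_model(1_000, None, None, 10), Some(990));
}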
@@ -896,6 +909,8 @@ impl Pallet { fn do_integrity_test() -> Result<(), String> { ensure!(!MaxMessageLenOf::::get().is_zero(), "HeapSize too low"); + let max_block = T::BlockWeights::get().max_block; + if let Some(service) = T::ServiceWeight::get() { if Self::max_message_weight(service).is_none() { return Err(format!( @@ -904,6 +919,31 @@ impl Pallet { Self::single_msg_overhead(), )) } + + if service.any_gt(max_block) { + return Err(format!( + "ServiceWeight {service} is bigger than max block weight {max_block}" + )) + } + } + + if let Some(on_idle) = T::IdleMaxServiceWeight::get() { + if on_idle.any_gt(max_block) { + return Err(format!( + "IdleMaxServiceWeight {on_idle} is bigger than max block weight {max_block}" + )) + } + } + + if let (Some(service_weight), Some(on_idle)) = + (T::ServiceWeight::get(), T::IdleMaxServiceWeight::get()) + { + if !(service_weight.all_gt(on_idle) || + on_idle.all_gt(service_weight) || + service_weight == on_idle) + { + return Err("One of `ServiceWeight` or `IdleMaxServiceWeight` needs to be `all_gt` or both need to be equal.".into()) + } } Ok(()) @@ -1531,7 +1571,7 @@ impl Pallet { let mut weight = WeightMeter::with_limit(weight_limit); // Get the maximum weight that processing a single message may take: - let max_weight = Self::max_message_weight(weight_limit).unwrap_or_else(|| { + let overweight_limit = Self::max_message_weight(weight_limit).unwrap_or_else(|| { if matches!(context, ServiceQueuesContext::OnInitialize) { defensive!("Not enough weight to service a single message."); } @@ -1549,7 +1589,8 @@ impl Pallet { let mut last_no_progress = None; loop { - let (progressed, n) = Self::service_queue(next.clone(), &mut weight, max_weight); + let (progressed, n) = + Self::service_queue(next.clone(), &mut weight, overweight_limit); next = match n { Some(n) => if !progressed { diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index d3f719c623565d7adbe71e201e1e2032296d8282..f1d341d1a5db125c45a5dec61df090dcadaa73c2 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -42,7 +42,7 @@ impl frame_system::Config for Test { type Block = Block; } parameter_types! { - pub const HeapSize: u32 = 24; + pub const HeapSize: u32 = 40; pub const MaxStale: u32 = 2; pub const ServiceWeight: Option = Some(Weight::from_parts(100, 100)); } diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index b75764b67bea5f991b1f702781814598092683a6..c81e486a40dfa8b0bb82661dac456473bb9f01fb 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -177,7 +177,7 @@ fn service_queues_failing_messages_works() { MessageQueue::enqueue_message(msg("stacklimitreached"), Here); MessageQueue::enqueue_message(msg("yield"), Here); // Starts with four pages. - assert_pages(&[0, 1, 2, 3, 4]); + assert_pages(&[0, 1, 2]); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( @@ -209,7 +209,7 @@ fn service_queues_failing_messages_works() { assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_eq!(System::events().len(), 4); // Last page with the `yield` stays in. - assert_pages(&[4]); + assert_pages(&[2]); }); } @@ -313,7 +313,7 @@ fn reap_page_permanent_overweight_works() { // Create 10 pages more than the stale limit. 
let n = (MaxStale::get() + 10) as usize; for _ in 0..n { - MessageQueue::enqueue_message(msg("weight=2"), Here); + MessageQueue::enqueue_message(msg("weight=200 datadatadata"), Here); } assert_eq!(Pages::::iter().count(), n); assert_eq!(MessageQueue::footprint(Here).pages, n as u32); @@ -334,7 +334,7 @@ fn reap_page_permanent_overweight_works() { break } assert_ok!(MessageQueue::do_reap_page(&Here, i)); - assert_eq!(QueueChanges::take(), vec![(Here, b.message_count - 1, b.size - 8)]); + assert_eq!(QueueChanges::take(), vec![(Here, b.message_count - 1, b.size - 23)]); } // Cannot reap any more pages. @@ -353,20 +353,20 @@ fn reaping_overweight_fails_properly() { build_and_execute::(|| { // page 0 - MessageQueue::enqueue_message(msg("weight=4"), Here); + MessageQueue::enqueue_message(msg("weight=200 datadata"), Here); MessageQueue::enqueue_message(msg("a"), Here); // page 1 - MessageQueue::enqueue_message(msg("weight=4"), Here); + MessageQueue::enqueue_message(msg("weight=200 datadata"), Here); MessageQueue::enqueue_message(msg("b"), Here); // page 2 - MessageQueue::enqueue_message(msg("weight=4"), Here); + MessageQueue::enqueue_message(msg("weight=200 datadata"), Here); MessageQueue::enqueue_message(msg("c"), Here); // page 3 - MessageQueue::enqueue_message(msg("bigbig 1"), Here); + MessageQueue::enqueue_message(msg("bigbig 1 datadata"), Here); // page 4 - MessageQueue::enqueue_message(msg("bigbig 2"), Here); + MessageQueue::enqueue_message(msg("bigbig 2 datadata"), Here); // page 5 - MessageQueue::enqueue_message(msg("bigbig 3"), Here); + MessageQueue::enqueue_message(msg("bigbig 3 datadata"), Here); // Double-check that exactly these pages exist. assert_pages(&[0, 1, 2, 3, 4, 5]); @@ -385,7 +385,7 @@ fn reaping_overweight_fails_properly() { // 3 stale now: can take something 4 pages in history. assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); - assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 1"), Here)]); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 1 datadata"), Here)]); // Nothing reapable yet, because we haven't hit the stale limit. for (o, i, _) in Pages::::iter() { @@ -394,7 +394,7 @@ fn reaping_overweight_fails_properly() { assert_pages(&[0, 1, 2, 4, 5]); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); - assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 2"), Here)]); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 2 datadata"), Here)]); assert_pages(&[0, 1, 2, 5]); // First is now reapable as it is too far behind the first ready page (5). 
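// Note on the renamed payloads in these test hunks ("weight=2" -> "weight=200", extra
// "datadata" padding): the mock encodes the weight a message needs directly in its body,
// and with the maximum message weight now capped by `ServiceWeight`/`IdleMaxServiceWeight`
// (100 in the mock) rather than by the per-call service limit, a message must request
// noticeably more than that to be treated as permanently overweight; the extra padding
// presumably keeps the page/byte accounting non-trivial under the enlarged `HeapSize` of 40.
// A purely hypothetical parser in the spirit of that convention (not the mock's actual helper):
fn required_weight(msg: &[u8]) -> u64 {
	// "weight=200 datadata" -> 200; messages without the prefix default to 1.
	core::str::from_utf8(msg)
		.ok()
		.and_then(|s| s.strip_prefix("weight="))
		.and_then(|s| s.split_whitespace().next())
		.and_then(|n| n.parse().ok())
		.unwrap_or(1)
}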
@@ -406,7 +406,7 @@ fn reaping_overweight_fails_properly() { assert_pages(&[1, 2, 5]); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); - assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 3"), Here)]); + assert_eq!(MessagesProcessed::take(), vec![(vmsg("bigbig 3 datadata"), Here)]); assert_noop!(MessageQueue::do_reap_page(&Here, 0), Error::::NoPage); assert_noop!(MessageQueue::do_reap_page(&Here, 3), Error::::NoPage); @@ -1062,29 +1062,29 @@ fn footprint_on_swept_works() { fn footprint_num_pages_works() { use MessageOrigin::*; build_and_execute::(|| { - MessageQueue::enqueue_message(msg("weight=2"), Here); - MessageQueue::enqueue_message(msg("weight=3"), Here); + MessageQueue::enqueue_message(msg("weight=200"), Here); + MessageQueue::enqueue_message(msg("weight=300"), Here); - assert_eq!(MessageQueue::footprint(Here), fp(2, 2, 2, 16)); + assert_eq!(MessageQueue::footprint(Here), fp(1, 1, 2, 20)); // Mark the messages as overweight. assert_eq!(MessageQueue::service_queues(1.into_weight()), 0.into_weight()); assert_eq!(System::events().len(), 2); // `ready_pages` decreases but `page` count does not. - assert_eq!(MessageQueue::footprint(Here), fp(2, 0, 2, 16)); + assert_eq!(MessageQueue::footprint(Here), fp(1, 0, 2, 20)); // Now execute the second message. assert_eq!( - ::execute_overweight(3.into_weight(), (Here, 1, 0)) + ::execute_overweight(300.into_weight(), (Here, 0, 1)) .unwrap(), - 3.into_weight() + 300.into_weight() ); - assert_eq!(MessageQueue::footprint(Here), fp(1, 0, 1, 8)); + assert_eq!(MessageQueue::footprint(Here), fp(1, 0, 1, 10)); // And the first one: assert_eq!( - ::execute_overweight(2.into_weight(), (Here, 0, 0)) + ::execute_overweight(200.into_weight(), (Here, 0, 0)) .unwrap(), - 2.into_weight() + 200.into_weight() ); assert_eq!(MessageQueue::footprint(Here), Default::default()); assert_eq!(MessageQueue::footprint(Here), fp(0, 0, 0, 0)); @@ -1104,7 +1104,7 @@ fn execute_overweight_works() { // Enqueue a message let origin = MessageOrigin::Here; - MessageQueue::enqueue_message(msg("weight=6"), origin); + MessageQueue::enqueue_message(msg("weight=200"), origin); // Load the current book let book = BookStateFor::::get(origin); assert_eq!(book.message_count, 1); @@ -1112,10 +1112,10 @@ fn execute_overweight_works() { // Mark the message as permanently overweight. assert_eq!(MessageQueue::service_queues(4.into_weight()), 4.into_weight()); - assert_eq!(QueueChanges::take(), vec![(origin, 1, 8)]); + assert_eq!(QueueChanges::take(), vec![(origin, 1, 10)]); assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"weight=6"), + id: blake2_256(b"weight=200"), origin: MessageOrigin::Here, message_index: 0, page_index: 0, @@ -1132,9 +1132,9 @@ fn execute_overweight_works() { assert_eq!(Pages::::iter().count(), 1); assert!(QueueChanges::take().is_empty()); let consumed = - ::execute_overweight(7.into_weight(), (origin, 0, 0)) + ::execute_overweight(200.into_weight(), (origin, 0, 0)) .unwrap(); - assert_eq!(consumed, 6.into_weight()); + assert_eq!(consumed, 200.into_weight()); assert_eq!(QueueChanges::take(), vec![(origin, 0, 0)]); // There is no message left in the book. 
let book = BookStateFor::::get(origin); @@ -1162,7 +1162,7 @@ fn permanently_overweight_book_unknits() { set_weight("service_queue_base", 1.into_weight()); set_weight("service_page_base_completion", 1.into_weight()); - MessageQueue::enqueue_messages([msg("weight=9")].into_iter(), Here); + MessageQueue::enqueue_messages([msg("weight=200")].into_iter(), Here); // It is the only ready book. assert_ring(&[Here]); @@ -1170,7 +1170,7 @@ fn permanently_overweight_book_unknits() { assert_eq!(MessageQueue::service_queues(8.into_weight()), 4.into_weight()); assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"weight=9"), + id: blake2_256(b"weight=200"), origin: Here, message_index: 0, page_index: 0, @@ -1201,19 +1201,19 @@ fn permanently_overweight_book_unknits_multiple() { set_weight("service_page_base_completion", 1.into_weight()); MessageQueue::enqueue_messages( - [msg("weight=1"), msg("weight=9"), msg("weight=9")].into_iter(), + [msg("weight=1"), msg("weight=200"), msg("weight=200")].into_iter(), Here, ); assert_ring(&[Here]); // Process the first message. assert_eq!(MessageQueue::service_queues(4.into_weight()), 4.into_weight()); - assert_eq!(num_overweight_enqueued_events(), 0); + assert_eq!(num_overweight_enqueued_events(), 1); assert_eq!(MessagesProcessed::take().len(), 1); // Book is still ready since it was not marked as overweight yet. assert_ring(&[Here]); - assert_eq!(MessageQueue::service_queues(8.into_weight()), 5.into_weight()); + assert_eq!(MessageQueue::service_queues(8.into_weight()), 4.into_weight()); assert_eq!(num_overweight_enqueued_events(), 2); assert_eq!(MessagesProcessed::take().len(), 0); // Now it is overweight. @@ -1566,12 +1566,12 @@ fn service_queues_suspend_works() { fn execute_overweight_respects_suspension() { build_and_execute::(|| { let origin = MessageOrigin::Here; - MessageQueue::enqueue_message(msg("weight=5"), origin); + MessageQueue::enqueue_message(msg("weight=200"), origin); // Mark the message as permanently overweight. MessageQueue::service_queues(4.into_weight()); assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"weight=5"), + id: blake2_256(b"weight=200"), origin, message_index: 0, page_index: 0, @@ -1598,9 +1598,9 @@ fn execute_overweight_respects_suspension() { assert_last_event::( Event::Processed { - id: blake2_256(b"weight=5").into(), + id: blake2_256(b"weight=200").into(), origin, - weight_used: 5.into_weight(), + weight_used: 200.into_weight(), success: true, } .into(), @@ -1768,7 +1768,7 @@ fn recursive_overweight_while_service_is_forbidden() { // Check that the message was permanently overweight. assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"weight=10"), + id: blake2_256(b"weight=200"), origin: There, message_index: 0, page_index: 0, @@ -1786,13 +1786,13 @@ fn recursive_overweight_while_service_is_forbidden() { Ok(()) })); - MessageQueue::enqueue_message(msg("weight=10"), There); + MessageQueue::enqueue_message(msg("weight=200"), There); MessageQueue::enqueue_message(msg("callback=0"), Here); // Mark it as permanently overweight. MessageQueue::service_queues(5.into_weight()); assert_ok!(::execute_overweight( - 10.into_weight(), + 200.into_weight(), (There, 0, 0) )); }); @@ -1812,7 +1812,7 @@ fn recursive_reap_page_is_forbidden() { // Create 10 pages more than the stale limit. 
let n = (MaxStale::get() + 10) as usize; for _ in 0..n { - MessageQueue::enqueue_message(msg("weight=2"), Here); + MessageQueue::enqueue_message(msg("weight=200"), Here); } // Mark all pages as stale since their message is permanently overweight. @@ -1886,6 +1886,11 @@ fn process_enqueued_on_idle_requires_enough_weight() { // Not enough weight to process on idle. Pallet::::on_idle(1, Weight::from_parts(0, 0)); assert_eq!(MessagesProcessed::take(), vec![]); + + assert!(!System::events().into_iter().any(|e| matches!( + e.event, + RuntimeEvent::MessageQueue(Event::::OverweightEnqueued { .. }) + ))); }) } @@ -1923,12 +1928,12 @@ fn execute_overweight_keeps_stack_ov_message() { // We need to create a mocked message that first reports insufficient weight, and then // `StackLimitReached`: IgnoreStackOvError::set(true); - MessageQueue::enqueue_message(msg("stacklimitreached"), Here); + MessageQueue::enqueue_message(msg("weight=200 stacklimitreached"), Here); MessageQueue::service_queues(0.into_weight()); assert_last_event::( Event::OverweightEnqueued { - id: blake2_256(b"stacklimitreached"), + id: blake2_256(b"weight=200 stacklimitreached"), origin: MessageOrigin::Here, message_index: 0, page_index: 0, @@ -1952,7 +1957,7 @@ fn execute_overweight_keeps_stack_ov_message() { ); assert_last_event::( Event::ProcessingFailed { - id: blake2_256(b"stacklimitreached").into(), + id: blake2_256(b"weight=200 stacklimitreached").into(), origin: MessageOrigin::Here, error: ProcessMessageError::StackLimitReached, } @@ -1964,16 +1969,16 @@ fn execute_overweight_keeps_stack_ov_message() { // Now let's process it normally: IgnoreStackOvError::set(true); assert_eq!( - ::execute_overweight(1.into_weight(), (Here, 0, 0)) + ::execute_overweight(200.into_weight(), (Here, 0, 0)) .unwrap(), - 1.into_weight() + 200.into_weight() ); assert_last_event::( Event::Processed { - id: blake2_256(b"stacklimitreached").into(), + id: blake2_256(b"weight=200 stacklimitreached").into(), origin: MessageOrigin::Here, - weight_used: 1.into_weight(), + weight_used: 200.into_weight(), success: true, } .into(), diff --git a/substrate/frame/metadata-hash-extension/Cargo.toml b/substrate/frame/metadata-hash-extension/Cargo.toml index 10d90bba0911bc1830a29ede8c9dbe78e9e45716..bca2c3ffb198692608d3844d92d873a38430dc8a 100644 --- a/substrate/frame/metadata-hash-extension/Cargo.toml +++ b/substrate/frame/metadata-hash-extension/Cargo.toml @@ -17,6 +17,7 @@ frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } docify = { workspace = true } +const-hex = { workspace = true } [dev-dependencies] substrate-wasm-builder = { features = ["metadata-hash"], workspace = true, default-features = true } @@ -31,6 +32,7 @@ sp-tracing = { workspace = true, default-features = true } default = ["std"] std = [ "codec/std", + "const-hex/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/substrate/frame/metadata-hash-extension/src/lib.rs b/substrate/frame/metadata-hash-extension/src/lib.rs index d09acbfb3df22e6c3e6706019ede1f16621ef767..0b45f5a7e51524e63e10f5d8ae44650c0eacd168 100644 --- a/substrate/frame/metadata-hash-extension/src/lib.rs +++ b/substrate/frame/metadata-hash-extension/src/lib.rs @@ -17,14 +17,14 @@ #![cfg_attr(not(feature = "std"), no_std)] -//! The [`CheckMetadataHash`] signed extension. +//! The [`CheckMetadataHash`] transaction extension. //! //! The extension for optionally checking the metadata hash. For information how it works and what //! 
it does exactly, see the docs of [`CheckMetadataHash`]. //! //! # Integration //! -//! As any signed extension you will need to add it to your runtime signed extensions: +//! As any transaction extension you will need to add it to your runtime transaction extensions: #![doc = docify::embed!("src/tests.rs", add_metadata_hash_extension)] //! As the extension requires the `RUNTIME_METADATA_HASH` environment variable to be present at //! compile time, it requires a little bit more setup. To have this environment variable available @@ -39,11 +39,12 @@ extern crate alloc; extern crate self as frame_metadata_hash_extension; use codec::{Decode, Encode}; -use frame_support::DebugNoBound; +use frame_support::{pallet_prelude::Weight, DebugNoBound}; use frame_system::Config; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, + traits::TransactionExtension, transaction_validity::{TransactionValidityError, UnknownTransaction}, }; @@ -67,12 +68,24 @@ enum MetadataHash { Custom([u8; 32]), } +const RUNTIME_METADATA: Option<[u8; 32]> = if let Some(hex) = option_env!("RUNTIME_METADATA_HASH") { + match const_hex::const_decode_to_array(hex.as_bytes()) { + Ok(hex) => Some(hex), + Err(_) => panic!( + "Invalid RUNTIME_METADATA_HASH environment variable: it must be a 32 \ + bytes value in hexadecimal: e.g. 0x123ABCabd...123ABCabc. Upper case or lower case, \ + 0x prefix is optional." + ), + } +} else { + None +}; + impl MetadataHash { /// Returns the metadata hash. fn hash(&self) -> Option<[u8; 32]> { match self { - Self::FetchFromEnv => - option_env!("RUNTIME_METADATA_HASH").map(array_bytes::hex2array_unchecked), + Self::FetchFromEnv => RUNTIME_METADATA, Self::Custom(hash) => Some(*hash), } } @@ -85,15 +98,15 @@ impl MetadataHash { /// This metadata hash should give users the confidence that what they build with an online wallet /// is the same they are signing with their offline wallet and then applying on chain. To ensure /// that the online wallet is not tricking the offline wallet into decoding and showing an incorrect -/// extrinsic, the offline wallet will include the metadata hash into the additional signed data and +/// extrinsic, the offline wallet will include the metadata hash into the extension implicit and /// the runtime will then do the same. If the metadata hash doesn't match, the signature /// verification will fail and thus, the transaction will be rejected. The RFC contains more details /// on how it works. /// /// The extension adds one byte (the `mode`) to the size of the extrinsic. This one byte is -/// controlling if the metadata hash should be added to the signed data or not. Mode `0` means that -/// the metadata hash is not added and thus, `None` is added to the signed data. Mode `1` means that -/// the metadata hash is added and thus, `Some(metadata_hash)` is added to the signed data. Further +/// controlling if the metadata hash should be added to the implicit or not. Mode `0` means that +/// the metadata hash is not added and thus, `None` is added to the implicit. Mode `1` means that +/// the metadata hash is added and thus, `Some(metadata_hash)` is added to the implicit. Further /// values of `mode` are reserved for future changes. /// /// The metadata hash is read from the environment variable `RUNTIME_METADATA_HASH`. This @@ -110,7 +123,7 @@ pub struct CheckMetadataHash { } impl CheckMetadataHash { - /// Creates new `SignedExtension` to check metadata hash. + /// Creates new `TransactionExtension` to check metadata hash. 
pub fn new(enable: bool) -> Self { Self { _phantom: core::marker::PhantomData, @@ -131,14 +144,10 @@ impl CheckMetadataHash { } } -impl SignedExtension for CheckMetadataHash { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = Option<[u8; 32]>; - type Pre = (); +impl TransactionExtension for CheckMetadataHash { const IDENTIFIER: &'static str = "CheckMetadataHash"; - - fn additional_signed(&self) -> Result { + type Implicit = Option<[u8; 32]>; + fn implicit(&self) -> Result { let signed = match self.mode { Mode::Disabled => None, Mode::Enabled => match self.metadata_hash.hash() { @@ -149,20 +158,20 @@ impl SignedExtension for CheckMetadataHash { log::debug!( target: "runtime::metadata-hash", - "CheckMetadataHash::additional_signed => {:?}", + "CheckMetadataHash::implicit => {:?}", signed.as_ref().map(|h| array_bytes::bytes2hex("0x", h)), ); Ok(signed) } + type Val = (); + type Pre = (); - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self, _: &T::RuntimeCall) -> Weight { + // The weight is the weight of implicit, it consists of a few match operation, it is + // negligible. + Weight::zero() } + + impl_tx_ext_default!(T::RuntimeCall; validate prepare); } diff --git a/substrate/frame/metadata-hash-extension/src/tests.rs b/substrate/frame/metadata-hash-extension/src/tests.rs index f13eecfd94bfb5f78224e7dc40f2b08e1ce7c034..11a3345ee15ce2b7217f6070704f6bfbdabf78b9 100644 --- a/substrate/frame/metadata-hash-extension/src/tests.rs +++ b/substrate/frame/metadata-hash-extension/src/tests.rs @@ -25,7 +25,7 @@ use frame_support::{ use merkleized_metadata::{generate_metadata_digest, ExtraInfo}; use sp_api::{Metadata, ProvideRuntimeApi}; use sp_runtime::{ - traits::{Extrinsic as _, SignedExtension}, + traits::{ExtrinsicLike, TransactionExtension}, transaction_validity::{TransactionSource, UnknownTransaction}, }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; @@ -51,7 +51,7 @@ impl frame_system::Config for Test { #[test] fn rejects_when_no_metadata_hash_was_passed() { let ext = CheckMetadataHash::::decode(&mut &1u8.encode()[..]).unwrap(); - assert_eq!(Err(UnknownTransaction::CannotLookup.into()), ext.additional_signed()); + assert_eq!(Err(UnknownTransaction::CannotLookup.into()), ext.implicit()); } #[test] @@ -92,7 +92,7 @@ fn ensure_check_metadata_works_on_real_extrinsics() { .metadata_hash(generate_metadata_hash(metadata)) .build(); // Ensure that the transaction is signed. - assert!(valid_transaction.is_signed().unwrap()); + assert!(!valid_transaction.is_bare()); runtime_api .validate_transaction(best_hash, TransactionSource::External, valid_transaction, best_hash) @@ -104,7 +104,7 @@ fn ensure_check_metadata_works_on_real_extrinsics() { .metadata_hash([10u8; 32]) .build(); // Ensure that the transaction is signed. - assert!(invalid_transaction.is_signed().unwrap()); + assert!(!invalid_transaction.is_bare()); assert_eq!( TransactionValidityError::from(InvalidTransaction::BadProof), @@ -132,8 +132,8 @@ mod docs { } } - /// The `SignedExtension` to the basic transaction logic. - pub type SignedExtra = ( + /// The `TransactionExtension` to the basic transaction logic. + pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -153,7 +153,7 @@ mod docs { /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; } // Put here to not have it in the docs as well. diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs index c0505a4f010577cbb791d116526f119b603bbe76..6579ed678ae7a6a10200c80284a749acdb4bfbae 100644 --- a/substrate/frame/mixnet/src/lib.rs +++ b/substrate/frame/mixnet/src/lib.rs @@ -31,7 +31,7 @@ use frame_support::{ BoundedVec, }; use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, + offchain::{CreateInherent, SubmitTransaction}, pallet_prelude::BlockNumberFor, }; pub use pallet::*; @@ -178,7 +178,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config + CreateInherent> { /// The maximum number of authorities per session. #[pallet::constant] type MaxAuthorities: Get; @@ -531,7 +531,8 @@ impl Pallet { return false }; let call = Call::register { registration, signature }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + let xt = T::create_inherent(call.into()); + match SubmitTransaction::>::submit_transaction(xt) { Ok(()) => true, Err(()) => { log::debug!( diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs index 51c36773bdad34b4f1bf0f7760998de1fdcbc7fc..8faae73c7161e88a3df54fcc6a585989f30202a2 100644 --- a/substrate/frame/multisig/src/lib.rs +++ b/substrate/frame/multisig/src/lib.rs @@ -273,7 +273,7 @@ pub mod pallet { T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32)) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(dispatch_info.weight), + .saturating_add(dispatch_info.call_weight), dispatch_info.class, ) })] @@ -554,7 +554,7 @@ impl Pallet { if let Some(call) = maybe_call.filter(|_| approvals >= threshold) { // verify weight ensure!( - call.get_dispatch_info().weight.all_lte(max_weight), + call.get_dispatch_info().call_weight.all_lte(max_weight), Error::::MaxWeightTooLow ); diff --git a/substrate/frame/multisig/src/tests.rs b/substrate/frame/multisig/src/tests.rs index cfdd33f7dfcc3d3f5a49c302c0784718a70dfaca..4f8a7a44243cfcd2f41daca62093c4351540202c 100644 --- a/substrate/frame/multisig/src/tests.rs +++ b/substrate/frame/multisig/src/tests.rs @@ -104,7 +104,7 @@ fn multisig_deposit_is_taken_and_returned() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, @@ -225,7 +225,7 @@ fn multisig_2_of_3_works() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), @@ -258,7 +258,7 @@ fn multisig_3_of_3_works() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), @@ -328,7 +328,7 @@ fn 
multisig_2_of_3_as_multi_works() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, @@ -360,9 +360,9 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call1 = call_transfer(6, 10); - let call1_weight = call1.get_dispatch_info().weight; + let call1_weight = call1.get_dispatch_info().call_weight; let call2 = call_transfer(7, 5); - let call2_weight = call2.get_dispatch_info().weight; + let call2_weight = call2.get_dispatch_info().call_weight; assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), @@ -411,7 +411,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 10); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let hash = blake2_256(&call.encode()); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), @@ -652,7 +652,7 @@ fn multisig_handles_no_preimage_after_all_approve() { assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), diff --git a/substrate/frame/nfts/src/types.rs b/substrate/frame/nfts/src/types.rs index 60d7c639c88cb8f1c64af75f0042d3a50fb3e87c..d67fb404ea792d3f5037ad80d411d0aa56b1d51b 100644 --- a/substrate/frame/nfts/src/types.rs +++ b/substrate/frame/nfts/src/types.rs @@ -31,49 +31,48 @@ use frame_system::pallet_prelude::BlockNumberFor; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; /// A type alias for handling balance deposits. -pub(super) type DepositBalanceOf = +pub type DepositBalanceOf = <>::Currency as Currency<::AccountId>>::Balance; /// A type alias representing the details of a collection. -pub(super) type CollectionDetailsFor = +pub type CollectionDetailsFor = CollectionDetails<::AccountId, DepositBalanceOf>; /// A type alias for keeping track of approvals used by a single item. -pub(super) type ApprovalsOf = BoundedBTreeMap< +pub type ApprovalsOf = BoundedBTreeMap< ::AccountId, Option>, >::ApprovalsLimit, >; /// A type alias for keeping track of approvals for an item's attributes. -pub(super) type ItemAttributesApprovals = +pub type ItemAttributesApprovals = BoundedBTreeSet<::AccountId, >::ItemAttributesApprovalsLimit>; /// A type that holds the deposit for a single item. -pub(super) type ItemDepositOf = - ItemDeposit, ::AccountId>; +pub type ItemDepositOf = ItemDeposit, ::AccountId>; /// A type that holds the deposit amount for an item's attribute. -pub(super) type AttributeDepositOf = +pub type AttributeDepositOf = AttributeDeposit, ::AccountId>; /// A type that holds the deposit amount for an item's metadata. -pub(super) type ItemMetadataDepositOf = +pub type ItemMetadataDepositOf = ItemMetadataDeposit, ::AccountId>; /// A type that holds the details of a single item. -pub(super) type ItemDetailsFor = +pub type ItemDetailsFor = ItemDetails<::AccountId, ItemDepositOf, ApprovalsOf>; /// A type alias for an accounts balance. 
-pub(super) type BalanceOf = +pub type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; /// A type alias to represent the price of an item. -pub(super) type ItemPrice = BalanceOf; +pub type ItemPrice = BalanceOf; /// A type alias for the tips held by a single item. -pub(super) type ItemTipOf = ItemTip< +pub type ItemTipOf = ItemTip< >::CollectionId, >::ItemId, ::AccountId, BalanceOf, >; /// A type alias for the settings configuration of a collection. -pub(super) type CollectionConfigFor = +pub type CollectionConfigFor = CollectionConfig, BlockNumberFor, >::CollectionId>; /// A type alias for the pre-signed minting configuration for a specified collection. -pub(super) type PreSignedMintOf = PreSignedMint< +pub type PreSignedMintOf = PreSignedMint< >::CollectionId, >::ItemId, ::AccountId, @@ -81,7 +80,7 @@ pub(super) type PreSignedMintOf = PreSignedMint< BalanceOf, >; /// A type alias for the pre-signed minting configuration on the attribute level of an item. -pub(super) type PreSignedAttributesOf = PreSignedAttributes< +pub type PreSignedAttributesOf = PreSignedAttributes< >::CollectionId, >::ItemId, ::AccountId, @@ -92,18 +91,18 @@ pub(super) type PreSignedAttributesOf = PreSignedAttributes< #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { /// Collection's owner. - pub(super) owner: AccountId, + pub owner: AccountId, /// The total balance deposited by the owner for all the storage data associated with this /// collection. Used by `destroy`. - pub(super) owner_deposit: DepositBalance, + pub owner_deposit: DepositBalance, /// The total number of outstanding items of this collection. - pub(super) items: u32, + pub items: u32, /// The total number of outstanding item metadata of this collection. - pub(super) item_metadatas: u32, + pub item_metadatas: u32, /// The total number of outstanding item configs of this collection. - pub(super) item_configs: u32, + pub item_configs: u32, /// The total number of attributes for this collection. - pub(super) attributes: u32, + pub attributes: u32, } /// Witness data for the destroy transactions. @@ -143,21 +142,21 @@ pub struct MintWitness { #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] pub struct ItemDetails { /// The owner of this item. - pub(super) owner: AccountId, + pub owner: AccountId, /// The approved transferrer of this item, if one is set. - pub(super) approvals: Approvals, + pub approvals: Approvals, /// The amount held in the pallet's default account for this item. Free-hold items will have /// this as zero. - pub(super) deposit: Deposit, + pub deposit: Deposit, } /// Information about the reserved item deposit. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ItemDeposit { /// A depositor account. - pub(super) account: AccountId, + pub account: AccountId, /// An amount that gets reserved. - pub(super) amount: DepositBalance, + pub amount: DepositBalance, } /// Information about the collection's metadata. @@ -168,11 +167,11 @@ pub struct CollectionMetadata> { /// The balance deposited for this metadata. /// /// This pays for the data stored in this struct. - pub(super) deposit: Deposit, + pub deposit: Deposit, /// General information concerning this collection. Limited in length by `StringLimit`. This /// will generally be either a JSON dump or the hash of some JSON which can be found on a /// hash-addressable global publication system such as IPFS. 
- pub(super) data: BoundedVec, + pub data: BoundedVec, } /// Information about the item's metadata. @@ -182,11 +181,11 @@ pub struct ItemMetadata> { /// The balance deposited for this metadata. /// /// This pays for the data stored in this struct. - pub(super) deposit: Deposit, + pub deposit: Deposit, /// General information concerning this item. Limited in length by `StringLimit`. This will - /// generally be either a JSON dump or the hash of some JSON which can be found on a + /// generally be either a JSON dump or the hash of some JSON which can be found on /// hash-addressable global publication system such as IPFS. - pub(super) data: BoundedVec, + pub data: BoundedVec, } /// Information about the tip. @@ -206,31 +205,31 @@ pub struct ItemTip { #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] pub struct PendingSwap { /// The collection that contains the item that the user wants to receive. - pub(super) desired_collection: CollectionId, + pub desired_collection: CollectionId, /// The item the user wants to receive. - pub(super) desired_item: Option, + pub desired_item: Option, /// A price for the desired `item` with the direction. - pub(super) price: Option, + pub price: Option, /// A deadline for the swap. - pub(super) deadline: Deadline, + pub deadline: Deadline, } /// Information about the reserved attribute deposit. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct AttributeDeposit { /// A depositor account. - pub(super) account: Option, + pub account: Option, /// An amount that gets reserved. - pub(super) amount: DepositBalance, + pub amount: DepositBalance, } /// Information about the reserved item's metadata deposit. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ItemMetadataDeposit { /// A depositor account, None means the deposit is collection's owner. - pub(super) account: Option, + pub account: Option, /// An amount that gets reserved. - pub(super) amount: DepositBalance, + pub amount: DepositBalance, } /// Specifies whether the tokens will be sent or received. diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index e243ad0e718eb85fadca375dbf9e883635653972..efaec49a65b3d2e905deb5891d127ec6934287db 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -112,8 +112,6 @@ parameter_types! 
{ pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } -pub type Extrinsic = sp_runtime::testing::TestXt; - pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { type System = Test; @@ -157,12 +155,21 @@ impl pallet_offences::Config for Test { type OnOffenceHandler = Staking; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type Extrinsic = Extrinsic; - type OverarchingCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } impl crate::Config for Test {} diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs index 016f2cf225e0512bc8150708a924d6e84bb232eb..c041880a59df29cb13e88bd49ddbaecbb839b3ad 100644 --- a/substrate/frame/proxy/src/lib.rs +++ b/substrate/frame/proxy/src/lib.rs @@ -195,7 +195,7 @@ pub mod pallet { (T::WeightInfo::proxy(T::MaxProxies::get()) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(di.weight), + .saturating_add(di.call_weight), di.class) })] pub fn proxy( @@ -487,7 +487,7 @@ pub mod pallet { (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get()) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(di.weight), + .saturating_add(di.call_weight), di.class) })] pub fn proxy_announced( diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs index 69be4df971bc47bde7d2cd367e03f241eba887b0..f8622880538efceb7b377e64b9dfe26b994bd7fb 100644 --- a/substrate/frame/recovery/src/lib.rs +++ b/substrate/frame/recovery/src/lib.rs @@ -378,7 +378,7 @@ pub mod pallet { #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( - T::WeightInfo::as_recovered().saturating_add(dispatch_info.weight), + T::WeightInfo::as_recovered().saturating_add(dispatch_info.call_weight), dispatch_info.class, )})] pub fn as_recovered( diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index e896d9e8fa26b636f7177d08a6c4a083dddcc86d..8dbad5ffd8b6bde8bd5ddb17233489c90ec7bb20 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -19,18 +19,22 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] environmental = { workspace = true } paste = { workspace = true } -polkavm = { version = "0.12.0", default-features = false } -polkavm-common = { version = "0.12.0", default-features = false } +polkavm = { version = "0.13.0", default-features = false } +polkavm-common = { version = "0.13.0", default-features = false } bitflags = { workspace = true } -codec = { features = [ - "derive", - "max-encoded-len", -], workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } scale-info = { features = ["derive"], workspace = true } log = { workspace = true } -serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +serde = { features = [ + "alloc", + "derive", +], workspace = true, default-features = false } impl-trait-for-tuples = { workspace = true } rlp = { workspace = true } +derive_more = { workspace = true } +hex = { workspace = true } +jsonrpsee = { workspace = 
true, features = ["full"], optional = true } +ethereum-types = { workspace = true, features = ["codec", "rlp", "serialize"] } # Polkadot SDK Dependencies frame-benchmarking = { optional = true, workspace = true } @@ -40,20 +44,28 @@ pallet-balances = { optional = true, workspace = true } pallet-revive-fixtures = { workspace = true, default-features = false } pallet-revive-uapi = { workspace = true, default-features = true } pallet-revive-proc-macro = { workspace = true, default-features = true } +pallet-transaction-payment = { workspace = true } sp-api = { workspace = true } +sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } +sp-weights = { workspace = true } xcm = { workspace = true } xcm-builder = { workspace = true } +subxt-signer = { workspace = true, optional = true, features = [ + "unstable-eth", +] } [dev-dependencies] array-bytes = { workspace = true, default-features = true } assert_matches = { workspace = true } pretty_assertions = { workspace = true } -wat = { workspace = true } pallet-revive-fixtures = { workspace = true, default-features = true } +secp256k1 = { workspace = true, features = ["recovery"] } +serde_json = { workspace = true } +hex-literal = { workspace = true } # Polkadot SDK Dependencies pallet-balances = { workspace = true, default-features = true } @@ -75,26 +87,35 @@ riscv = ["pallet-revive-fixtures/riscv"] std = [ "codec/std", "environmental/std", + "ethereum-types/std", "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "hex/std", + "jsonrpsee", "log/std", "pallet-balances?/std", "pallet-proxy/std", "pallet-revive-fixtures/std", "pallet-timestamp/std", + "pallet-transaction-payment/std", "pallet-utility/std", "polkavm-common/std", "polkavm/std", "rlp/std", "scale-info/std", - "serde", + "secp256k1/std", + "serde/std", + "serde_json/std", "sp-api/std", + "sp-arithmetic/std", "sp-core/std", "sp-io/std", "sp-keystore/std", "sp-runtime/std", "sp-std/std", + "sp-weights/std", + "subxt-signer", "xcm-builder/std", "xcm/std", ] @@ -107,6 +128,7 @@ runtime-benchmarks = [ "pallet-message-queue/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", @@ -119,6 +141,7 @@ try-runtime = [ "pallet-message-queue/try-runtime", "pallet-proxy/try-runtime", "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", "pallet-utility/try-runtime", "sp-runtime/try-runtime", ] diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml index 75b23fdd44d1f65adaaccedb38064378db35f1f4..1d89db002b721d42a1ef52f1748b3067cc9fa020 100644 --- a/substrate/frame/revive/fixtures/Cargo.toml +++ b/substrate/frame/revive/fixtures/Cargo.toml @@ -21,7 +21,7 @@ log = { workspace = true } parity-wasm = { workspace = true } tempfile = { workspace = true } toml = { workspace = true } -polkavm-linker = { version = "0.12.0" } +polkavm-linker = { version = "0.13.0" } anyhow = { workspace = true, default-features = true } [features] @@ -31,11 +31,4 @@ default = ["std"] # we will remove this once there is an upstream toolchain riscv = [] # only when std is enabled all fixtures are available -std = [ - "anyhow", - "frame-system", - "log/std", - "sp-core", - "sp-io", - "sp-runtime", -] +std = ["anyhow", "frame-system", "log/std", 
"sp-core", "sp-io", "sp-runtime"] diff --git a/substrate/frame/revive/fixtures/build.rs b/substrate/frame/revive/fixtures/build.rs index 3178baf6bbe425fac7867354cfb03224d094a5b0..ee7db4203cc95d1c91315a43db26894b398af313 100644 --- a/substrate/frame/revive/fixtures/build.rs +++ b/substrate/frame/revive/fixtures/build.rs @@ -65,7 +65,6 @@ mod build { } /// Collect all contract entries from the given source directory. - /// Contracts that have already been compiled are filtered out. fn collect_entries(contracts_dir: &Path) -> Vec { fs::read_dir(contracts_dir) .expect("src dir exists; qed") @@ -183,12 +182,14 @@ mod build { pub fn run() -> Result<()> { let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); let contracts_dir = fixtures_dir.join("contracts"); - let uapi_dir = fixtures_dir.parent().expect("uapi dir exits; qed").join("uapi"); let out_dir: PathBuf = env::var("OUT_DIR")?.into(); // the fixtures have a dependency on the uapi crate println!("cargo::rerun-if-changed={}", fixtures_dir.display()); - println!("cargo::rerun-if-changed={}", uapi_dir.display()); + let uapi_dir = fixtures_dir.parent().expect("parent dir exits; qed").join("uapi"); + if uapi_dir.exists() { + println!("cargo::rerun-if-changed={}", uapi_dir.display()); + } let entries = collect_entries(&contracts_dir); if entries.is_empty() { @@ -202,6 +203,17 @@ mod build { invoke_build(tmp_dir_path)?; write_output(tmp_dir_path, &out_dir, entries)?; + + #[cfg(unix)] + if let Ok(symlink_dir) = env::var("CARGO_WORKSPACE_ROOT_DIR") { + let symlink_dir: PathBuf = symlink_dir.into(); + let symlink_dir: PathBuf = symlink_dir.join("target").join("pallet-revive-fixtures"); + if symlink_dir.is_symlink() { + fs::remove_file(&symlink_dir)? + } + std::os::unix::fs::symlink(&out_dir, &symlink_dir)?; + } + Ok(()) } } diff --git a/substrate/frame/revive/fixtures/build/Cargo.toml b/substrate/frame/revive/fixtures/build/Cargo.toml index 948d7438cf98ee93c463214e95711be2a6a856c8..c4aaf131148e929927e22697be034636d178dc2e 100644 --- a/substrate/frame/revive/fixtures/build/Cargo.toml +++ b/substrate/frame/revive/fixtures/build/Cargo.toml @@ -11,7 +11,7 @@ edition = "2021" [dependencies] uapi = { package = 'pallet-revive-uapi', path = "", default-features = false } common = { package = 'pallet-revive-fixtures-common', path = "" } -polkavm-derive = { version = "0.12.0" } +polkavm-derive = { version = "0.13.0" } [profile.release] opt-level = 3 diff --git a/substrate/frame/revive/fixtures/contracts/code_hash.rs b/substrate/frame/revive/fixtures/contracts/code_hash.rs new file mode 100644 index 0000000000000000000000000000000000000000..b598a485a8c72a6805c844b2563001dccee964ed --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/code_hash.rs @@ -0,0 +1,40 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_std] +#![no_main] + +use common::input; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!( + address: &[u8; 20], + expected_code_hash: &[u8; 32], + ); + + let mut code_hash = [0u8; 32]; + api::code_hash(address, &mut code_hash); + + assert!(&code_hash == expected_code_hash); +} diff --git a/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs b/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs index 2efacb4e683ffedbe1dce29b07130fd2b26fed87..54c7c7f3d5e20ddd9c995dd7f360028cac61409d 100644 --- a/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs +++ b/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs @@ -23,7 +23,7 @@ use common::input; use uapi::{HostFn, HostFnImpl as api}; -const ETH_ALICE: [u8; 20] = [1u8; 20]; +const ALICE_FALLBACK: [u8; 20] = [1u8; 20]; /// Load input data and perform the action specified by the input. /// If `delegate_call` is true, then delegate call into the contract. @@ -44,7 +44,7 @@ fn load_input(delegate_call: bool) { }, // 3 = Terminate 3 => { - api::terminate(&ETH_ALICE); + api::terminate(&ALICE_FALLBACK); }, // Everything else is a noop _ => {}, diff --git a/substrate/frame/revive/fixtures/contracts/self_destruct.rs b/substrate/frame/revive/fixtures/contracts/self_destruct.rs index 524979991ec7ab1020073d48022d773ca809c3b8..2f37706634bd126f465ce6ea579cc0878d951bdd 100644 --- a/substrate/frame/revive/fixtures/contracts/self_destruct.rs +++ b/substrate/frame/revive/fixtures/contracts/self_destruct.rs @@ -21,7 +21,7 @@ use common::input; use uapi::{HostFn, HostFnImpl as api}; -const ETH_DJANGO: [u8; 20] = [4u8; 20]; +const DJANGO_FALLBACK: [u8; 20] = [4u8; 20]; #[no_mangle] #[polkavm_derive::polkavm_export] @@ -52,6 +52,6 @@ pub extern "C" fn call() { .unwrap(); } else { // Try to terminate and give balance to django. - api::terminate(&ETH_DJANGO); + api::terminate(&DJANGO_FALLBACK); } } diff --git a/substrate/frame/revive/fixtures/contracts/terminate_and_send_to_eve.rs b/substrate/frame/revive/fixtures/contracts/terminate_and_send_to_eve.rs new file mode 100644 index 0000000000000000000000000000000000000000..c078f9d46c1d71bfe079051bd34a2d01eb270328 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/terminate_and_send_to_eve.rs @@ -0,0 +1,33 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +#![no_std] +#![no_main] + +extern crate common; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + let eve = [5u8; 20]; + api::terminate(&eve); +} diff --git a/substrate/frame/revive/mock-network/Cargo.toml b/substrate/frame/revive/mock-network/Cargo.toml index 85656a57b49ccf22c2f6bdd0e97f5f4dc557ff54..12de634b0b4a65aee557baace66676e30384354d 100644 --- a/substrate/frame/revive/mock-network/Cargo.toml +++ b/substrate/frame/revive/mock-network/Cargo.toml @@ -87,3 +87,17 @@ runtime-benchmarks = [ "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", ] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-assets/try-runtime", + "pallet-balances/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-proxy/try-runtime", + "pallet-revive/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-utility/try-runtime", + "pallet-xcm/try-runtime", + "polkadot-runtime-parachains/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/revive/mock-network/src/parachain/contracts_config.rs b/substrate/frame/revive/mock-network/src/parachain/contracts_config.rs index c13c337d1667077301acef8a8e5d8e5537bc60f1..a2fa7cbf706811657f35c51dde2d473ff759f652 100644 --- a/substrate/frame/revive/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/revive/mock-network/src/parachain/contracts_config.rs @@ -20,7 +20,7 @@ use frame_support::derive_impl; #[derive_impl(pallet_revive::config_preludes::TestDefaultConfig)] impl pallet_revive::Config for Runtime { - type AddressMapper = pallet_revive::DefaultAddressMapper; + type AddressMapper = pallet_revive::AccountId32Mapper; type Currency = Balances; type Time = super::Timestamp; type Xcm = pallet_xcm::Pallet; diff --git a/substrate/frame/revive/src/address.rs b/substrate/frame/revive/src/address.rs index c51940ba771e6ba81cb8fadb27a23a4eed655bd7..45b5bf822dc91dd94691c00425d6c3ef3f826a2f 100644 --- a/substrate/frame/revive/src/address.rs +++ b/substrate/frame/revive/src/address.rs @@ -17,61 +17,173 @@ //! Functions that deal contract addresses. +use crate::{ensure, AddressSuffix, Config, Error, HoldReason}; use alloc::vec::Vec; -use sp_core::H160; +use core::marker::PhantomData; +use frame_support::traits::{fungible::MutateHold, tokens::Precision}; +use sp_core::{Get, H160}; use sp_io::hashing::keccak_256; -use sp_runtime::AccountId32; +use sp_runtime::{AccountId32, DispatchResult, SaturatedConversion, Saturating}; /// Map between the native chain account id `T` and an Ethereum [`H160`]. /// /// This trait exists only to emulate specialization for different concrete /// native account ids. **Not** to make the mapping user configurable. Hence -/// the trait is `Sealed` and only one mandatory implementor [`DefaultAddressMapper`] -/// exists. +/// the trait is `Sealed` and depending on your runtime configuration you need +/// to pick either [`AccountId32Mapper`] or [`H160Mapper`]. Picking the wrong +/// one will result in a compilation error. No footguns here. /// /// Please note that we assume that the native account is at least 20 bytes and /// only implement this type for a `T` where this is the case. Luckily, this is the -/// case for all existing runtimes as of right now. Reasing is that this will allow +/// case for all existing runtimes as of right now. 
Reasoning is that this will allow /// us to reverse an address -> account_id mapping by just stripping the prefix. -pub trait AddressMapper: private::Sealed { +/// +/// We require the mapping to be reversible. Since we are potentially dealing with types of +/// different sizes one direction of the mapping is necessarily lossy. This requires the mapping to +/// make use of the [`AddressSuffix`] storage item to reverse the mapping. +pub trait AddressMapper: private::Sealed { /// Convert an account id to an ethereum adress. - /// - /// This mapping is **not** required to be reversible. - fn to_address(account_id: &T) -> H160; + fn to_address(account_id: &T::AccountId) -> H160; /// Convert an ethereum address to a native account id. + fn to_account_id(address: &H160) -> T::AccountId; + + /// Same as [`Self::to_account_id`] but always returns the fallback account. + /// + /// This skips the query into [`AddressSuffix`] and always returns the stateless + /// fallback account. This is useful when we know for a fact that the `address` + /// in question is originally a `H160`. This is usually only the case when we + /// generated a new contract address. + fn to_fallback_account_id(address: &H160) -> T::AccountId; + + /// Create a stateful mapping for `account_id` + /// + /// This will enable `to_account_id` to map back to the original + /// `account_id` instead of the fallback account id. + fn map(account_id: &T::AccountId) -> DispatchResult; + + /// Remove the mapping in order to reclaim the deposit. /// - /// This mapping is **required** to be reversible. - fn to_account_id(address: &H160) -> T; + /// There is no reason why one would unmap their `account_id` except + /// for reclaiming the deposit. + fn unmap(account_id: &T::AccountId) -> DispatchResult; - /// Same as [`Self::to_account_id`] but when we know the address is a contract. + /// Returns true if the `account_id` is useable as an origin. /// - /// This is only the case when we just generated the new address. - fn to_account_id_contract(address: &H160) -> T; + /// This means either the `account_id` doesn't require a stateful mapping + /// or a stateful mapping exists. + fn is_mapped(account_id: &T::AccountId) -> bool; } mod private { pub trait Sealed {} - impl Sealed for super::DefaultAddressMapper {} + impl Sealed for super::AccountId32Mapper {} + impl Sealed for super::H160Mapper {} } -/// The only implementor for `AddressMapper`. -pub enum DefaultAddressMapper {} +/// The mapper to be used if the account id is `AccountId32`. +/// +/// It converts between addresses by either truncating the last 12 bytes or +/// suffixing them. The suffix is queried from [`AddressSuffix`] and will fall +/// back to all `0xEE` if no suffix was registered. This means contracts and +/// plain wallets controlled by an `secp256k1` always have a `0xEE` suffixed +/// account. +pub struct AccountId32Mapper(PhantomData); + +/// The mapper to be used if the account id is `H160`. +/// +/// It just trivially returns its inputs and doesn't make use of any state. 
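[Editor's sketch] To make the truncate/suffix scheme described in the doc comments above concrete, here is a dependency-free round trip; plain byte arrays stand in for `AccountId32`, `H160` and the `AddressSuffix` lookup, so this is an illustration of the idea, not the pallet's implementation.

fn to_address(account: &[u8; 32]) -> [u8; 20] {
    // one direction is lossy: the last 12 bytes are dropped
    account[..20].try_into().expect("20 out of 32 bytes always fit; qed")
}

fn to_fallback_account(address: &[u8; 20]) -> [u8; 32] {
    // stateless fallback: pad with 0xEE, as used for contracts and eth-controlled wallets
    let mut account = [0xEE; 32];
    account[..20].copy_from_slice(address);
    account
}

fn to_account(address: &[u8; 20], stored_suffix: Option<[u8; 12]>) -> [u8; 32] {
    // a suffix registered via `map` (kept in `AddressSuffix`) restores the original
    // account id; without one the 0xEE-padded fallback is returned
    let mut account = to_fallback_account(address);
    if let Some(suffix) = stored_suffix {
        account[20..].copy_from_slice(&suffix);
    }
    account
}

fn main() {
    let original = [42u8; 32];
    let address = to_address(&original);
    assert_ne!(to_account(&address, None), original); // lossy without state
    assert_eq!(to_account(&address, Some([42u8; 12])), original); // reversible with the stored suffix
}

The hold taken in `map` below then amounts to `DepositPerItem + 32 * DepositPerByte`, since key and value of one mapping entry together store a full `AccountId32`.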
+pub struct H160Mapper(PhantomData); -impl AddressMapper for DefaultAddressMapper { +impl AddressMapper for AccountId32Mapper +where + T: Config, +{ fn to_address(account_id: &AccountId32) -> H160 { H160::from_slice(&>::as_ref(&account_id)[..20]) } fn to_account_id(address: &H160) -> AccountId32 { + if let Some(suffix) = >::get(address) { + let mut account_id = Self::to_fallback_account_id(address); + let account_bytes: &mut [u8; 32] = account_id.as_mut(); + account_bytes[20..].copy_from_slice(suffix.as_slice()); + account_id + } else { + Self::to_fallback_account_id(address) + } + } + + fn to_fallback_account_id(address: &H160) -> AccountId32 { let mut account_id = AccountId32::new([0xEE; 32]); - >::as_mut(&mut account_id)[..20] - .copy_from_slice(address.as_bytes()); + let account_bytes: &mut [u8; 32] = account_id.as_mut(); + account_bytes[..20].copy_from_slice(address.as_bytes()); account_id } - fn to_account_id_contract(address: &H160) -> AccountId32 { - Self::to_account_id(address) + fn map(account_id: &T::AccountId) -> DispatchResult { + ensure!(!Self::is_mapped(account_id), >::AccountAlreadyMapped); + + let account_bytes: &[u8; 32] = account_id.as_ref(); + + // each mapping entry stores one AccountId32 distributed between key and value + let deposit = T::DepositPerByte::get() + .saturating_mul(account_bytes.len().saturated_into()) + .saturating_add(T::DepositPerItem::get()); + + let suffix: [u8; 12] = account_bytes[20..] + .try_into() + .expect("Skipping 20 byte of a an 32 byte array will fit into 12 bytes; qed"); + T::Currency::hold(&HoldReason::AddressMapping.into(), account_id, deposit)?; + >::insert(Self::to_address(account_id), suffix); + Ok(()) + } + + fn unmap(account_id: &T::AccountId) -> DispatchResult { + // will do nothing if address is not mapped so no check required + >::remove(Self::to_address(account_id)); + T::Currency::release_all( + &HoldReason::AddressMapping.into(), + account_id, + Precision::BestEffort, + )?; + Ok(()) + } + + fn is_mapped(account_id: &T::AccountId) -> bool { + let account_bytes: &[u8; 32] = account_id.as_ref(); + &account_bytes[20..] 
== &[0xEE; 12] || + >::contains_key(Self::to_address(account_id)) + } +} + +impl AddressMapper for H160Mapper +where + T: Config, + crate::AccountIdOf: AsRef<[u8; 20]> + From, +{ + fn to_address(account_id: &T::AccountId) -> H160 { + H160::from_slice(account_id.as_ref()) + } + + fn to_account_id(address: &H160) -> T::AccountId { + Self::to_fallback_account_id(address) + } + + fn to_fallback_account_id(address: &H160) -> T::AccountId { + (*address).into() + } + + fn map(_account_id: &T::AccountId) -> DispatchResult { + Ok(()) + } + + fn unmap(_account_id: &T::AccountId) -> DispatchResult { + Ok(()) + } + + fn is_mapped(_account_id: &T::AccountId) -> bool { + true } } @@ -102,7 +214,16 @@ pub fn create2(deployer: &H160, code: &[u8], input_data: &[u8], salt: &[u8; 32]) #[cfg(test)] mod test { use super::*; - use crate::test_utils::ALICE_ADDR; + use crate::{ + test_utils::*, + tests::{ExtBuilder, Test}, + AddressMapper, Error, + }; + use frame_support::{ + assert_err, + traits::fungible::{InspectHold, Mutate}, + }; + use pretty_assertions::assert_eq; use sp_core::{hex2array, H160}; #[test] @@ -125,4 +246,123 @@ mod test { H160(hex2array!("7f31e795e5836a19a8f919ab5a9de9a197ecd2b6")), ) } + + #[test] + fn fallback_map_works() { + assert!(::AddressMapper::is_mapped(&ALICE)); + assert_eq!( + ALICE_FALLBACK, + ::AddressMapper::to_fallback_account_id(&ALICE_ADDR) + ); + assert_eq!(ALICE_ADDR, ::AddressMapper::to_address(&ALICE_FALLBACK)); + } + + #[test] + fn map_works() { + ExtBuilder::default().build().execute_with(|| { + ::Currency::set_balance(&EVE, 1_000_000); + // before mapping the fallback account is returned + assert!(!::AddressMapper::is_mapped(&EVE)); + assert_eq!(EVE_FALLBACK, ::AddressMapper::to_account_id(&EVE_ADDR)); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ), + 0 + ); + + // when mapped the full account id is returned + ::AddressMapper::map(&EVE).unwrap(); + assert!(::AddressMapper::is_mapped(&EVE)); + assert_eq!(EVE, ::AddressMapper::to_account_id(&EVE_ADDR)); + assert!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ) > 0 + ); + }); + } + + #[test] + fn map_fallback_account_fails() { + ExtBuilder::default().build().execute_with(|| { + assert!(::AddressMapper::is_mapped(&ALICE)); + // alice is an e suffixed account and hence cannot be mapped + assert_err!( + ::AddressMapper::map(&ALICE), + >::AccountAlreadyMapped, + ); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &ALICE + ), + 0 + ); + }); + } + + #[test] + fn double_map_fails() { + ExtBuilder::default().build().execute_with(|| { + assert!(!::AddressMapper::is_mapped(&EVE)); + ::Currency::set_balance(&EVE, 1_000_000); + ::AddressMapper::map(&EVE).unwrap(); + assert!(::AddressMapper::is_mapped(&EVE)); + let deposit = ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE, + ); + assert_err!( + ::AddressMapper::map(&EVE), + >::AccountAlreadyMapped, + ); + assert!(::AddressMapper::is_mapped(&EVE)); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ), + deposit + ); + }); + } + + #[test] + fn unmap_works() { + ExtBuilder::default().build().execute_with(|| { + ::Currency::set_balance(&EVE, 1_000_000); + ::AddressMapper::map(&EVE).unwrap(); + assert!(::AddressMapper::is_mapped(&EVE)); + assert!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ) > 0 + ); + + ::AddressMapper::unmap(&EVE).unwrap(); + 
assert!(!::AddressMapper::is_mapped(&EVE)); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ), + 0 + ); + + // another unmap is a noop + ::AddressMapper::unmap(&EVE).unwrap(); + assert!(!::AddressMapper::is_mapped(&EVE)); + assert_eq!( + ::Currency::balance_on_hold( + &HoldReason::AddressMapping.into(), + &EVE + ), + 0 + ); + }); + } } diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs index ebafb6c7054ae7c1208c819f9533eca328d06134..8c9bf2cf70f0201539658e0b2495c243455d630b 100644 --- a/substrate/frame/revive/src/benchmarking/mod.rs +++ b/substrate/frame/revive/src/benchmarking/mod.rs @@ -63,6 +63,7 @@ const UNBALANCED_TRIE_LAYERS: u32 = 20; struct Contract { caller: T::AccountId, account_id: T::AccountId, + address: H160, } impl Contract @@ -71,11 +72,6 @@ where BalanceOf: Into + TryFrom, MomentOf: Into, { - /// Returns the address of the contract. - fn address(&self) -> H160 { - T::AddressMapper::to_address(&self.account_id) - } - /// Create new contract and use a default account id as instantiator. fn new(module: WasmModule, data: Vec) -> Result, &'static str> { Self::with_index(0, module, data) @@ -98,9 +94,12 @@ where ) -> Result, &'static str> { T::Currency::set_balance(&caller, caller_funding::()); let salt = Some([0xffu8; 32]); + let origin: T::RuntimeOrigin = RawOrigin::Signed(caller.clone()).into(); + + Contracts::::map_account(origin.clone()).unwrap(); let outcome = Contracts::::bare_instantiate( - RawOrigin::Signed(caller.clone()).into(), + origin, 0u32.into(), Weight::MAX, default_deposit_limit::(), @@ -112,8 +111,8 @@ where ); let address = outcome.result?.addr; - let account_id = T::AddressMapper::to_account_id_contract(&address); - let result = Contract { caller, account_id: account_id.clone() }; + let account_id = T::AddressMapper::to_fallback_account_id(&address); + let result = Contract { caller, address, account_id }; ContractInfoOf::::insert(&address, result.info()?); @@ -143,7 +142,7 @@ where info.write(&Key::Fix(item.0), Some(item.1.clone()), None, false) .map_err(|_| "Failed to write storage to restoration dest")?; } - >::insert(T::AddressMapper::to_address(&self.account_id), info); + >::insert(&self.address, info); Ok(()) } @@ -223,6 +222,7 @@ fn default_deposit_limit() -> BalanceOf { T: Config + pallet_balances::Config, MomentOf: Into, ::RuntimeEvent: From>, + ::RuntimeCall: From>, as Currency>::Balance: From>, )] mod benchmarks { @@ -263,13 +263,12 @@ mod benchmarks { let instance = Contract::::with_caller(whitelisted_caller(), WasmModule::sized(c), vec![])?; let value = Pallet::::min_balance(); - let callee = T::AddressMapper::to_address(&instance.account_id); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] call( RawOrigin::Signed(instance.caller.clone()), - callee, + instance.address, value, Weight::MAX, storage_deposit, @@ -293,9 +292,10 @@ mod benchmarks { T::Currency::set_balance(&caller, caller_funding::()); let WasmModule { code, .. 
} = WasmModule::sized(c); let origin = RawOrigin::Signed(caller.clone()); + Contracts::::map_account(origin.clone().into()).unwrap(); let deployer = T::AddressMapper::to_address(&caller); let addr = crate::address::create2(&deployer, &code, &input, &salt); - let account_id = T::AddressMapper::to_account_id_contract(&addr); + let account_id = T::AddressMapper::to_fallback_account_id(&addr); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] _(origin, value, Weight::MAX, storage_deposit, code, input, Some(salt)); @@ -305,9 +305,14 @@ mod benchmarks { // uploading the code reserves some balance in the callers account let code_deposit = T::Currency::balance_on_hold(&HoldReason::CodeUploadDepositReserve.into(), &caller); + let mapping_deposit = + T::Currency::balance_on_hold(&HoldReason::AddressMapping.into(), &caller); assert_eq!( T::Currency::balance(&caller), - caller_funding::() - value - deposit - code_deposit - Pallet::::min_balance(), + caller_funding::() - + value - deposit - + code_deposit - mapping_deposit - + Pallet::::min_balance(), ); // contract has the full value assert_eq!(T::Currency::balance(&account_id), value + Pallet::::min_balance()); @@ -323,33 +328,31 @@ mod benchmarks { let caller = whitelisted_caller(); T::Currency::set_balance(&caller, caller_funding::()); let origin = RawOrigin::Signed(caller.clone()); + Contracts::::map_account(origin.clone().into()).unwrap(); let WasmModule { code, .. } = WasmModule::dummy(); let storage_deposit = default_deposit_limit::(); let deployer = T::AddressMapper::to_address(&caller); let addr = crate::address::create2(&deployer, &code, &input, &salt); - let hash = - Contracts::::bare_upload_code(origin.into(), code, storage_deposit)?.code_hash; - let account_id = T::AddressMapper::to_account_id_contract(&addr); + let hash = Contracts::::bare_upload_code(origin.clone().into(), code, storage_deposit)? 
+ .code_hash; + let account_id = T::AddressMapper::to_fallback_account_id(&addr); #[extrinsic_call] - _( - RawOrigin::Signed(caller.clone()), - value, - Weight::MAX, - storage_deposit, - hash, - input, - Some(salt), - ); + _(origin, value, Weight::MAX, storage_deposit, hash, input, Some(salt)); let deposit = T::Currency::balance_on_hold(&HoldReason::StorageDepositReserve.into(), &account_id); let code_deposit = T::Currency::balance_on_hold(&HoldReason::CodeUploadDepositReserve.into(), &account_id); + let mapping_deposit = + T::Currency::balance_on_hold(&HoldReason::AddressMapping.into(), &account_id); // value was removed from the caller assert_eq!( T::Currency::total_balance(&caller), - caller_funding::() - value - deposit - code_deposit - Pallet::::min_balance(), + caller_funding::() - + value - deposit - + code_deposit - mapping_deposit - + Pallet::::min_balance(), ); // contract has the full value assert_eq!(T::Currency::balance(&account_id), value + Pallet::::min_balance()); @@ -371,11 +374,10 @@ mod benchmarks { Contract::::with_caller(whitelisted_caller(), WasmModule::dummy(), vec![])?; let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); - let callee = T::AddressMapper::to_address(&instance.account_id); let before = T::Currency::balance(&instance.account_id); let storage_deposit = default_deposit_limit::(); #[extrinsic_call] - _(origin, callee, value, Weight::MAX, storage_deposit, data); + _(origin, instance.address, value, Weight::MAX, storage_deposit, data); let deposit = T::Currency::balance_on_hold( &HoldReason::StorageDepositReserve.into(), &instance.account_id, @@ -384,10 +386,15 @@ mod benchmarks { &HoldReason::CodeUploadDepositReserve.into(), &instance.caller, ); + let mapping_deposit = + T::Currency::balance_on_hold(&HoldReason::AddressMapping.into(), &instance.caller); // value and value transferred via call should be removed from the caller assert_eq!( T::Currency::balance(&instance.caller), - caller_funding::() - value - deposit - code_deposit - Pallet::::min_balance(), + caller_funding::() - + value - deposit - + code_deposit - mapping_deposit - + Pallet::::min_balance() ); // contract should have received the value assert_eq!(T::Currency::balance(&instance.account_id), before + value); @@ -447,14 +454,46 @@ mod benchmarks { let storage_deposit = default_deposit_limit::(); let hash = >::bare_upload_code(origin.into(), code, storage_deposit)?.code_hash; - let callee = T::AddressMapper::to_address(&instance.account_id); assert_ne!(instance.info()?.code_hash, hash); #[extrinsic_call] - _(RawOrigin::Root, callee, hash); + _(RawOrigin::Root, instance.address, hash); assert_eq!(instance.info()?.code_hash, hash); Ok(()) } + #[benchmark(pov_mode = Measured)] + fn map_account() { + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + let origin = RawOrigin::Signed(caller.clone()); + assert!(!T::AddressMapper::is_mapped(&caller)); + #[extrinsic_call] + _(origin); + assert!(T::AddressMapper::is_mapped(&caller)); + } + + #[benchmark(pov_mode = Measured)] + fn unmap_account() { + let caller = whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + let origin = RawOrigin::Signed(caller.clone()); + >::map_account(origin.clone().into()).unwrap(); + assert!(T::AddressMapper::is_mapped(&caller)); + #[extrinsic_call] + _(origin); + assert!(!T::AddressMapper::is_mapped(&caller)); + } + + #[benchmark(pov_mode = Measured)] + fn dispatch_as_fallback_account() { + let caller = 
whitelisted_caller(); + T::Currency::set_balance(&caller, caller_funding::()); + let origin = RawOrigin::Signed(caller.clone()); + let dispatchable = frame_system::Call::remark { remark: vec![] }.into(); + #[extrinsic_call] + _(origin, Box::new(dispatchable)); + } + #[benchmark(pov_mode = Measured)] fn noop_host_fn(r: Linear<0, API_BENCHMARK_RUNS>) { let mut setup = CallSetup::::new(WasmModule::noop()); @@ -636,7 +675,7 @@ mod benchmarks { build_runtime!(runtime, contract, memory: [(len as u32).encode(), vec![0u8; len],]); >::insert::<_, BoundedVec<_, _>>( - contract.address(), + contract.address, immutable_data.clone().try_into().unwrap(), ); @@ -669,10 +708,7 @@ mod benchmarks { } assert_ok!(result); - assert_eq!( - &memory[..], - &>::get(setup.contract().address()).unwrap()[..] - ); + assert_eq!(&memory[..], &>::get(setup.contract().address).unwrap()[..]); } #[benchmark(pov_mode = Measured)] @@ -835,7 +871,7 @@ mod benchmarks { assert_eq!( record.event, - crate::Event::ContractEmitted { contract: instance.address(), data, topics }.into(), + crate::Event::ContractEmitted { contract: instance.address, data, topics }.into(), ); } @@ -1542,7 +1578,7 @@ mod benchmarks { let salt = [42u8; 32]; let deployer = T::AddressMapper::to_address(&account_id); let addr = crate::address::create2(&deployer, &code.code, &input, &salt); - let account_id = T::AddressMapper::to_account_id_contract(&addr); + let account_id = T::AddressMapper::to_fallback_account_id(&addr); let mut memory = memory!(hash_bytes, deposit_bytes, value_bytes, input, salt,); let mut offset = { diff --git a/substrate/frame/revive/src/evm.rs b/substrate/frame/revive/src/evm.rs new file mode 100644 index 0000000000000000000000000000000000000000..c3495fc0559d220a7ccaaeaac488a12963b98a79 --- /dev/null +++ b/substrate/frame/revive/src/evm.rs @@ -0,0 +1,22 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//!Types, and traits to integrate pallet-revive with EVM. +#![warn(missing_docs)] + +mod api; +pub use api::*; +pub mod runtime; diff --git a/substrate/frame/revive/src/evm/api.rs b/substrate/frame/revive/src/evm/api.rs new file mode 100644 index 0000000000000000000000000000000000000000..fe18c8735bed4d8dc435f8517d86f67aa9c70393 --- /dev/null +++ b/substrate/frame/revive/src/evm/api.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +//! JSON-RPC methods and types, for Ethereum. + +mod byte; +pub use byte::*; + +mod rlp_codec; +pub use rlp; + +mod type_id; +pub use type_id::*; + +mod rpc_types; +mod rpc_types_gen; +pub use rpc_types_gen::*; + +#[cfg(feature = "std")] +mod account; + +#[cfg(feature = "std")] +pub use account::*; + +mod signature; diff --git a/substrate/frame/revive/src/evm/api/account.rs b/substrate/frame/revive/src/evm/api/account.rs new file mode 100644 index 0000000000000000000000000000000000000000..c2217defc31f60f5241f4a0658becadce3674742 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/account.rs @@ -0,0 +1,51 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Utilities for working with Ethereum accounts. +use crate::{ + evm::{TransactionLegacySigned, TransactionLegacyUnsigned}, + H160, +}; +use rlp::Encodable; + +/// A simple account that can sign transactions +pub struct Account(subxt_signer::eth::Keypair); + +impl Default for Account { + fn default() -> Self { + Self(subxt_signer::eth::dev::alith()) + } +} + +impl From for Account { + fn from(kp: subxt_signer::eth::Keypair) -> Self { + Self(kp) + } +} + +impl Account { + /// Get the [`H160`] address of the account. + pub fn address(&self) -> H160 { + H160::from_slice(&self.0.account_id().as_ref()) + } + + /// Sign a transaction. + pub fn sign_transaction(&self, tx: TransactionLegacyUnsigned) -> TransactionLegacySigned { + let rlp_encoded = tx.rlp_bytes(); + let signature = self.0.sign(&rlp_encoded); + TransactionLegacySigned::from(tx, signature.as_ref()) + } +} diff --git a/substrate/frame/revive/src/evm/api/byte.rs b/substrate/frame/revive/src/evm/api/byte.rs new file mode 100644 index 0000000000000000000000000000000000000000..df4ed1740ecdb75a8a027fc17fdc6b6f222b0ae9 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/byte.rs @@ -0,0 +1,154 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! 
Define Byte wrapper types for encoding and decoding hex strings +use alloc::{vec, vec::Vec}; +use codec::{Decode, Encode}; +use core::{ + fmt::{Debug, Display, Formatter, Result as FmtResult}, + str::FromStr, +}; +use hex_serde::HexCodec; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +mod hex_serde { + #[cfg(not(feature = "std"))] + use alloc::{format, string::String, vec::Vec}; + use serde::{Deserialize, Deserializer, Serializer}; + + pub trait HexCodec: Sized { + type Error; + fn to_hex(&self) -> String; + fn from_hex(s: String) -> Result; + } + + impl HexCodec for u8 { + type Error = core::num::ParseIntError; + fn to_hex(&self) -> String { + format!("0x{:x}", self) + } + fn from_hex(s: String) -> Result { + u8::from_str_radix(s.trim_start_matches("0x"), 16) + } + } + + impl HexCodec for [u8; T] { + type Error = hex::FromHexError; + fn to_hex(&self) -> String { + format!("0x{}", hex::encode(self)) + } + fn from_hex(s: String) -> Result { + let data = hex::decode(s.trim_start_matches("0x"))?; + data.try_into().map_err(|_| hex::FromHexError::InvalidStringLength) + } + } + + impl HexCodec for Vec { + type Error = hex::FromHexError; + fn to_hex(&self) -> String { + format!("0x{}", hex::encode(self)) + } + fn from_hex(s: String) -> Result { + hex::decode(s.trim_start_matches("0x")) + } + } + + pub fn serialize(value: &T, serializer: S) -> Result + where + S: Serializer, + T: HexCodec, + { + let s = value.to_hex(); + serializer.serialize_str(&s) + } + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result + where + D: Deserializer<'de>, + T: HexCodec, + ::Error: core::fmt::Debug, + { + let s = String::deserialize(deserializer)?; + let value = T::from_hex(s).map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?; + Ok(value) + } +} + +impl FromStr for Bytes { + type Err = hex::FromHexError; + fn from_str(s: &str) -> Result { + let data = hex::decode(s.trim_start_matches("0x"))?; + Ok(Bytes(data)) + } +} + +macro_rules! 
impl_hex { + ($type:ident, $inner:ty, $default:expr) => { + #[derive(Encode, Decode, Eq, PartialEq, TypeInfo, Clone, Serialize, Deserialize)] + #[doc = concat!("`", stringify!($inner), "`", " wrapper type for encoding and decoding hex strings")] + pub struct $type(#[serde(with = "hex_serde")] pub $inner); + + impl Default for $type { + fn default() -> Self { + $type($default) + } + } + + impl From<$inner> for $type { + fn from(inner: $inner) -> Self { + $type(inner) + } + } + + impl Debug for $type { + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + write!(f, concat!(stringify!($type), "({})"), self.0.to_hex()) + } + } + + impl Display for $type { + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + write!(f, "{}", self.0.to_hex()) + } + } + }; +} + +impl_hex!(Byte, u8, 0u8); +impl_hex!(Bytes, Vec, vec![]); +impl_hex!(Bytes8, [u8; 8], [0u8; 8]); +impl_hex!(Bytes256, [u8; 256], [0u8; 256]); + +#[test] +fn serialize_works() { + let a = Byte(42); + let s = serde_json::to_string(&a).unwrap(); + assert_eq!(s, "\"0x2a\""); + let b = serde_json::from_str::(&s).unwrap(); + assert_eq!(a, b); + + let a = Bytes(b"bello world".to_vec()); + let s = serde_json::to_string(&a).unwrap(); + assert_eq!(s, "\"0x62656c6c6f20776f726c64\""); + let b = serde_json::from_str::(&s).unwrap(); + assert_eq!(a, b); + + let a = Bytes256([42u8; 256]); + let s = serde_json::to_string(&a).unwrap(); + let b = serde_json::from_str::(&s).unwrap(); + assert_eq!(a, b); +} diff --git a/substrate/frame/revive/src/evm/api/rlp_codec.rs b/substrate/frame/revive/src/evm/api/rlp_codec.rs new file mode 100644 index 0000000000000000000000000000000000000000..e5f24c28a482d78f8f066e0c289c8252f7d811d3 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/rlp_codec.rs @@ -0,0 +1,219 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! RLP encoding and decoding for Ethereum transactions. +//! See for more information about RLP encoding. + +use super::*; +use alloc::vec::Vec; +use rlp::{Decodable, Encodable}; + +impl TransactionLegacyUnsigned { + /// Get the rlp encoded bytes of a signed transaction with a dummy 65 bytes signature. 
+ pub fn dummy_signed_payload(&self) -> Vec { + let mut s = rlp::RlpStream::new(); + s.append(self); + const DUMMY_SIGNATURE: [u8; 65] = [0u8; 65]; + s.append_raw(&DUMMY_SIGNATURE.as_ref(), 1); + s.out().to_vec() + } +} + +/// See +impl Encodable for TransactionLegacyUnsigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + if let Some(chain_id) = self.chain_id { + s.begin_list(9); + s.append(&self.nonce); + s.append(&self.gas_price); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + s.append(&chain_id); + s.append(&0_u8); + s.append(&0_u8); + } else { + s.begin_list(6); + s.append(&self.nonce); + s.append(&self.gas_price); + s.append(&self.gas); + match self.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.value); + s.append(&self.input.0); + } + } +} + +/// See +impl Decodable for TransactionLegacyUnsigned { + fn decode(rlp: &rlp::Rlp) -> Result { + Ok(TransactionLegacyUnsigned { + nonce: rlp.val_at(0)?, + gas_price: rlp.val_at(1)?, + gas: rlp.val_at(2)?, + to: { + let to = rlp.at(3)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) + } + }, + value: rlp.val_at(4)?, + input: Bytes(rlp.val_at(5)?), + chain_id: { + if let Ok(chain_id) = rlp.val_at(6) { + Some(chain_id) + } else { + None + } + }, + ..Default::default() + }) + } +} + +impl Encodable for TransactionLegacySigned { + fn rlp_append(&self, s: &mut rlp::RlpStream) { + s.begin_list(9); + s.append(&self.transaction_legacy_unsigned.nonce); + s.append(&self.transaction_legacy_unsigned.gas_price); + s.append(&self.transaction_legacy_unsigned.gas); + match self.transaction_legacy_unsigned.to { + Some(ref to) => s.append(to), + None => s.append_empty_data(), + }; + s.append(&self.transaction_legacy_unsigned.value); + s.append(&self.transaction_legacy_unsigned.input.0); + + s.append(&self.v); + s.append(&self.r); + s.append(&self.s); + } +} + +/// See +impl Decodable for TransactionLegacySigned { + fn decode(rlp: &rlp::Rlp) -> Result { + let v: U256 = rlp.val_at(6)?; + + let extract_chain_id = |v: U256| { + if v.ge(&35u32.into()) { + Some((v - 35) / 2) + } else { + None + } + }; + + Ok(TransactionLegacySigned { + transaction_legacy_unsigned: { + TransactionLegacyUnsigned { + nonce: rlp.val_at(0)?, + gas_price: rlp.val_at(1)?, + gas: rlp.val_at(2)?, + to: { + let to = rlp.at(3)?; + if to.is_empty() { + None + } else { + Some(to.as_val()?) 
+ } + }, + value: rlp.val_at(4)?, + input: Bytes(rlp.val_at(5)?), + chain_id: extract_chain_id(v).map(|v| v.into()), + r#type: Type0 {}, + } + }, + v, + r: rlp.val_at(7)?, + s: rlp.val_at(8)?, + }) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn encode_decode_legacy_transaction_works() { + let tx = TransactionLegacyUnsigned { + chain_id: Some(596.into()), + gas: U256::from(21000), + nonce: U256::from(1), + gas_price: U256::from("0x640000006a"), + to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), + value: U256::from(123123), + input: Bytes(vec![]), + r#type: Type0, + }; + + let rlp_bytes = rlp::encode(&tx); + let decoded = rlp::decode::(&rlp_bytes).unwrap(); + assert_eq!(&tx, &decoded); + + let tx = Account::default().sign_transaction(tx); + let rlp_bytes = rlp::encode(&tx); + let decoded = rlp::decode::(&rlp_bytes).unwrap(); + assert_eq!(&tx, &decoded); + } + + #[test] + fn dummy_signed_payload_works() { + let tx = TransactionLegacyUnsigned { + chain_id: Some(596.into()), + gas: U256::from(21000), + nonce: U256::from(1), + gas_price: U256::from("0x640000006a"), + to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), + value: U256::from(123123), + input: Bytes(vec![]), + r#type: Type0, + }; + + let signed_tx = Account::default().sign_transaction(tx.clone()); + let rlp_bytes = rlp::encode(&signed_tx); + assert_eq!(tx.dummy_signed_payload().len(), rlp_bytes.len()); + } + + #[test] + fn recover_address_works() { + let account = Account::default(); + + let unsigned_tx = TransactionLegacyUnsigned { + value: 200_000_000_000_000_000_000u128.into(), + gas_price: 100_000_000_200u64.into(), + gas: 100_107u32.into(), + nonce: 3.into(), + to: Some(Account::from(subxt_signer::eth::dev::baltathar()).address()), + chain_id: Some(596.into()), + ..Default::default() + }; + + let tx = account.sign_transaction(unsigned_tx.clone()); + let recovered_address = tx.recover_eth_address().unwrap(); + + assert_eq!(account.address(), recovered_address); + } +} diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs new file mode 100644 index 0000000000000000000000000000000000000000..b15a0a53cd0788c1280d538f43d415b22c8121a8 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/rpc_types.rs @@ -0,0 +1,32 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Utility impl for the RPC types. +use super::{ReceiptInfo, TransactionInfo, TransactionSigned}; + +impl TransactionInfo { + /// Create a new [`TransactionInfo`] from a receipt and a signed transaction. 
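+	///
+	/// Block related fields are taken from the receipt, while the signed transaction
+	/// itself is flattened into the resulting object. A minimal sketch, illustrative
+	/// only (`receipt` and `signed` stand for values fetched elsewhere):
+	///
+	/// ```ignore
+	/// let receipt: ReceiptInfo = Default::default();
+	/// let signed: TransactionSigned = Default::default();
+	/// // `block_hash`, `block_number`, `from`, `transaction_hash` and
+	/// // `transaction_index` are copied over from the receipt.
+	/// let info = TransactionInfo::new(receipt, signed);
+	/// ```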
+ pub fn new(receipt: ReceiptInfo, transaction_signed: TransactionSigned) -> Self { + Self { + block_hash: receipt.block_hash, + block_number: receipt.block_number, + from: receipt.from, + hash: receipt.transaction_hash, + transaction_index: receipt.transaction_index, + transaction_signed, + } + } +} diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4663a82232c4ac94ae240d38b8df830c0e364b5 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -0,0 +1,682 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Generated JSON-RPC types. +#![allow(missing_docs)] + +use super::{byte::*, Type0, Type1, Type2}; +use alloc::vec::Vec; +use codec::{Decode, Encode}; +use derive_more::{From, TryInto}; +pub use ethereum_types::*; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +/// Block object +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Block { + /// Base fee per gas + #[serde(rename = "baseFeePerGas", skip_serializing_if = "Option::is_none")] + pub base_fee_per_gas: Option, + /// Blob gas used + #[serde(rename = "blobGasUsed", skip_serializing_if = "Option::is_none")] + pub blob_gas_used: Option, + /// Difficulty + #[serde(skip_serializing_if = "Option::is_none")] + pub difficulty: Option, + /// Excess blob gas + #[serde(rename = "excessBlobGas", skip_serializing_if = "Option::is_none")] + pub excess_blob_gas: Option, + /// Extra data + #[serde(rename = "extraData")] + pub extra_data: Bytes, + /// Gas limit + #[serde(rename = "gasLimit")] + pub gas_limit: U256, + /// Gas used + #[serde(rename = "gasUsed")] + pub gas_used: U256, + /// Hash + pub hash: H256, + /// Bloom filter + #[serde(rename = "logsBloom")] + pub logs_bloom: Bytes256, + /// Coinbase + pub miner: Address, + /// Mix hash + #[serde(rename = "mixHash")] + pub mix_hash: H256, + /// Nonce + pub nonce: Bytes8, + /// Number + pub number: U256, + /// Parent Beacon Block Root + #[serde(rename = "parentBeaconBlockRoot", skip_serializing_if = "Option::is_none")] + pub parent_beacon_block_root: Option, + /// Parent block hash + #[serde(rename = "parentHash")] + pub parent_hash: H256, + /// Receipts root + #[serde(rename = "receiptsRoot")] + pub receipts_root: H256, + /// Ommers hash + #[serde(rename = "sha3Uncles")] + pub sha_3_uncles: H256, + /// Block size + pub size: U256, + /// State root + #[serde(rename = "stateRoot")] + pub state_root: H256, + /// Timestamp + pub timestamp: U256, + /// Total difficulty + #[serde(rename = "totalDifficulty", skip_serializing_if = "Option::is_none")] + pub total_difficulty: Option, + pub transactions: H256OrTransactionInfo, + /// Transactions root + #[serde(rename = "transactionsRoot")] + pub transactions_root: H256, + /// Uncles + 
pub uncles: Vec, + /// Withdrawals + #[serde(skip_serializing_if = "Option::is_none")] + pub withdrawals: Option>, + /// Withdrawals root + #[serde(rename = "withdrawalsRoot", skip_serializing_if = "Option::is_none")] + pub withdrawals_root: Option, +} + +/// Block number or tag +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum BlockNumberOrTag { + /// Block number + U256(U256), + /// Block tag + BlockTag(BlockTag), +} +impl Default for BlockNumberOrTag { + fn default() -> Self { + BlockNumberOrTag::U256(Default::default()) + } +} + +/// Block number, tag, or block hash +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum BlockNumberOrTagOrHash { + /// Block number + U256(U256), + /// Block tag + BlockTag(BlockTag), + /// Block hash + H256(H256), +} +impl Default for BlockNumberOrTagOrHash { + fn default() -> Self { + BlockNumberOrTagOrHash::U256(Default::default()) + } +} + +/// Transaction object generic to all types +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct GenericTransaction { + /// accessList + /// EIP-2930 access list + #[serde(rename = "accessList", skip_serializing_if = "Option::is_none")] + pub access_list: Option, + /// blobVersionedHashes + /// List of versioned blob hashes associated with the transaction's EIP-4844 data blobs. + #[serde(rename = "blobVersionedHashes", skip_serializing_if = "Option::is_none")] + pub blob_versioned_hashes: Option>, + /// blobs + /// Raw blob data. + #[serde(skip_serializing_if = "Option::is_none")] + pub blobs: Option>, + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId", skip_serializing_if = "Option::is_none")] + pub chain_id: Option, + /// from address + #[serde(skip_serializing_if = "Option::is_none")] + pub from: Option
, + /// gas limit + #[serde(skip_serializing_if = "Option::is_none")] + pub gas: Option, + /// gas price + /// The gas price willing to be paid by the sender in wei + #[serde(rename = "gasPrice", skip_serializing_if = "Option::is_none")] + pub gas_price: Option, + /// input data + #[serde(alias = "data", skip_serializing_if = "Option::is_none")] + pub input: Option, + /// max fee per blob gas + /// The maximum total fee per gas the sender is willing to pay for blob gas in wei + #[serde(rename = "maxFeePerBlobGas", skip_serializing_if = "Option::is_none")] + pub max_fee_per_blob_gas: Option, + /// max fee per gas + /// The maximum total fee per gas the sender is willing to pay (includes the network / base fee + /// and miner / priority fee) in wei + #[serde(rename = "maxFeePerGas", skip_serializing_if = "Option::is_none")] + pub max_fee_per_gas: Option, + /// max priority fee per gas + /// Maximum fee per gas the sender is willing to pay to miners in wei + #[serde(rename = "maxPriorityFeePerGas", skip_serializing_if = "Option::is_none")] + pub max_priority_fee_per_gas: Option, + /// nonce + #[serde(skip_serializing_if = "Option::is_none")] + pub nonce: Option, + /// to address + pub to: Option
, + /// type + #[serde(skip_serializing_if = "Option::is_none")] + pub r#type: Option, + /// value + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, +} + +/// Receipt information +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct ReceiptInfo { + /// blob gas price + /// The actual value per gas deducted from the sender's account for blob gas. Only specified + /// for blob transactions as defined by EIP-4844. + #[serde(rename = "blobGasPrice", skip_serializing_if = "Option::is_none")] + pub blob_gas_price: Option, + /// blob gas used + /// The amount of blob gas used for this specific transaction. Only specified for blob + /// transactions as defined by EIP-4844. + #[serde(rename = "blobGasUsed", skip_serializing_if = "Option::is_none")] + pub blob_gas_used: Option, + /// block hash + #[serde(rename = "blockHash")] + pub block_hash: H256, + /// block number + #[serde(rename = "blockNumber")] + pub block_number: U256, + /// contract address + /// The contract address created, if the transaction was a contract creation, otherwise null. + #[serde(rename = "contractAddress")] + pub contract_address: Option
, + /// cumulative gas used + /// The sum of gas used by this transaction and all preceding transactions in the same block. + #[serde(rename = "cumulativeGasUsed")] + pub cumulative_gas_used: U256, + /// effective gas price + /// The actual value per gas deducted from the sender's account. Before EIP-1559, this is equal + /// to the transaction's gas price. After, it is equal to baseFeePerGas + min(maxFeePerGas - + /// baseFeePerGas, maxPriorityFeePerGas). + #[serde(rename = "effectiveGasPrice")] + pub effective_gas_price: U256, + /// from + pub from: Address, + /// gas used + /// The amount of gas used for this specific transaction alone. + #[serde(rename = "gasUsed")] + pub gas_used: U256, + /// logs + pub logs: Vec, + /// logs bloom + #[serde(rename = "logsBloom")] + pub logs_bloom: Bytes256, + /// state root + /// The post-transaction state root. Only specified for transactions included before the + /// Byzantium upgrade. + #[serde(skip_serializing_if = "Option::is_none")] + pub root: Option, + /// status + /// Either 1 (success) or 0 (failure). Only specified for transactions included after the + /// Byzantium upgrade. + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + /// to + /// Address of the receiver or null in a contract creation transaction. + pub to: Option
, + /// transaction hash + #[serde(rename = "transactionHash")] + pub transaction_hash: H256, + /// transaction index + #[serde(rename = "transactionIndex")] + pub transaction_index: U256, + /// type + #[serde(skip_serializing_if = "Option::is_none")] + pub r#type: Option, +} + +/// Syncing status +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum SyncingStatus { + /// Syncing progress + SyncingProgress(SyncingProgress), + /// Not syncing + /// Should always return false if not syncing. + Bool(bool), +} +impl Default for SyncingStatus { + fn default() -> Self { + SyncingStatus::SyncingProgress(Default::default()) + } +} + +/// Transaction information +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct TransactionInfo { + /// block hash + #[serde(rename = "blockHash")] + pub block_hash: H256, + /// block number + #[serde(rename = "blockNumber")] + pub block_number: U256, + /// from address + pub from: Address, + /// transaction hash + pub hash: H256, + /// transaction index + #[serde(rename = "transactionIndex")] + pub transaction_index: U256, + #[serde(flatten)] + pub transaction_signed: TransactionSigned, +} + +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum TransactionUnsigned { + Transaction4844Unsigned(Transaction4844Unsigned), + Transaction1559Unsigned(Transaction1559Unsigned), + Transaction2930Unsigned(Transaction2930Unsigned), + TransactionLegacyUnsigned(TransactionLegacyUnsigned), +} +impl Default for TransactionUnsigned { + fn default() -> Self { + TransactionUnsigned::Transaction4844Unsigned(Default::default()) + } +} + +/// Access list +pub type AccessList = Vec; + +/// Block tag +/// `earliest`: The lowest numbered block the client has available; `finalized`: The most recent +/// crypto-economically secure block, cannot be re-orged outside of manual intervention driven by +/// community coordination; `safe`: The most recent block that is safe from re-orgs under honest +/// majority and certain synchronicity assumptions; `latest`: The most recent block in the canonical +/// chain observed by the client, this block may be re-orged out of the canonical chain even under +/// healthy/normal conditions; `pending`: A sample next block built by the client on top of `latest` +/// and containing the set of transactions usually taken from local mempool. 
Before the merge +/// transition is finalized, any call querying for `finalized` or `safe` block MUST be responded to +/// with `-39001: Unknown block` error +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub enum BlockTag { + #[serde(rename = "earliest")] + #[default] + Earliest, + #[serde(rename = "finalized")] + Finalized, + #[serde(rename = "safe")] + Safe, + #[serde(rename = "latest")] + Latest, + #[serde(rename = "pending")] + Pending, +} + +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum H256OrTransactionInfo { + /// Transaction hashes + H256s(Vec), + /// Full transactions + TransactionInfos(Vec), +} +impl Default for H256OrTransactionInfo { + fn default() -> Self { + H256OrTransactionInfo::H256s(Default::default()) + } +} + +/// log +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Log { + /// address + #[serde(skip_serializing_if = "Option::is_none")] + pub address: Option
, + /// block hash + #[serde(rename = "blockHash", skip_serializing_if = "Option::is_none")] + pub block_hash: Option, + /// block number + #[serde(rename = "blockNumber", skip_serializing_if = "Option::is_none")] + pub block_number: Option, + /// data + #[serde(skip_serializing_if = "Option::is_none")] + pub data: Option, + /// log index + #[serde(rename = "logIndex", skip_serializing_if = "Option::is_none")] + pub log_index: Option, + /// removed + #[serde(skip_serializing_if = "Option::is_none")] + pub removed: Option, + /// topics + #[serde(skip_serializing_if = "Option::is_none")] + pub topics: Option>, + /// transaction hash + #[serde(rename = "transactionHash")] + pub transaction_hash: H256, + /// transaction index + #[serde(rename = "transactionIndex", skip_serializing_if = "Option::is_none")] + pub transaction_index: Option, +} + +/// Syncing progress +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct SyncingProgress { + /// Current block + #[serde(rename = "currentBlock", skip_serializing_if = "Option::is_none")] + pub current_block: Option, + /// Highest block + #[serde(rename = "highestBlock", skip_serializing_if = "Option::is_none")] + pub highest_block: Option, + /// Starting block + #[serde(rename = "startingBlock", skip_serializing_if = "Option::is_none")] + pub starting_block: Option, +} + +/// EIP-1559 transaction. +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction1559Unsigned { + /// accessList + /// EIP-2930 access list + #[serde(rename = "accessList")] + pub access_list: AccessList, + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId")] + pub chain_id: U256, + /// gas limit + pub gas: U256, + /// gas price + /// The effective gas price paid by the sender in wei. For transactions not yet included in a + /// block, this value should be set equal to the max fee per gas. This field is DEPRECATED, + /// please transition to using effectiveGasPrice in the receipt object going forward. + #[serde(rename = "gasPrice")] + pub gas_price: U256, + /// input data + pub input: Bytes, + /// max fee per gas + /// The maximum total fee per gas the sender is willing to pay (includes the network / base fee + /// and miner / priority fee) in wei + #[serde(rename = "maxFeePerGas")] + pub max_fee_per_gas: U256, + /// max priority fee per gas + /// Maximum fee per gas the sender is willing to pay to miners in wei + #[serde(rename = "maxPriorityFeePerGas")] + pub max_priority_fee_per_gas: U256, + /// nonce + pub nonce: U256, + /// to address + pub to: Option
, + /// type + pub r#type: Type2, + /// value + pub value: U256, +} + +/// EIP-2930 transaction. +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction2930Unsigned { + /// accessList + /// EIP-2930 access list + #[serde(rename = "accessList")] + pub access_list: AccessList, + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId")] + pub chain_id: U256, + /// gas limit + pub gas: U256, + /// gas price + /// The gas price willing to be paid by the sender in wei + #[serde(rename = "gasPrice")] + pub gas_price: U256, + /// input data + pub input: Bytes, + /// nonce + pub nonce: U256, + /// to address + pub to: Option
, + /// type + pub r#type: Type1, + /// value + pub value: U256, +} + +/// EIP-4844 transaction. +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction4844Unsigned { + /// accessList + /// EIP-2930 access list + #[serde(rename = "accessList")] + pub access_list: AccessList, + /// blobVersionedHashes + /// List of versioned blob hashes associated with the transaction's EIP-4844 data blobs. + #[serde(rename = "blobVersionedHashes")] + pub blob_versioned_hashes: Vec, + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId")] + pub chain_id: U256, + /// gas limit + pub gas: U256, + /// input data + pub input: Bytes, + /// max fee per blob gas + /// The maximum total fee per gas the sender is willing to pay for blob gas in wei + #[serde(rename = "maxFeePerBlobGas")] + pub max_fee_per_blob_gas: U256, + /// max fee per gas + /// The maximum total fee per gas the sender is willing to pay (includes the network / base fee + /// and miner / priority fee) in wei + #[serde(rename = "maxFeePerGas")] + pub max_fee_per_gas: U256, + /// max priority fee per gas + /// Maximum fee per gas the sender is willing to pay to miners in wei + #[serde(rename = "maxPriorityFeePerGas")] + pub max_priority_fee_per_gas: U256, + /// nonce + pub nonce: U256, + /// to address + pub to: Address, + /// type + pub r#type: Byte, + /// value + pub value: U256, +} + +/// Legacy transaction. +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct TransactionLegacyUnsigned { + /// chainId + /// Chain ID that this transaction is valid on. + #[serde(rename = "chainId", skip_serializing_if = "Option::is_none")] + pub chain_id: Option, + /// gas limit + pub gas: U256, + /// gas price + /// The gas price willing to be paid by the sender in wei + #[serde(rename = "gasPrice")] + pub gas_price: U256, + /// input data + pub input: Bytes, + /// nonce + pub nonce: U256, + /// to address + pub to: Option
, + /// type + pub r#type: Type0, + /// value + pub value: U256, +} + +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum TransactionSigned { + Transaction4844Signed(Transaction4844Signed), + Transaction1559Signed(Transaction1559Signed), + Transaction2930Signed(Transaction2930Signed), + TransactionLegacySigned(TransactionLegacySigned), +} +impl Default for TransactionSigned { + fn default() -> Self { + TransactionSigned::Transaction4844Signed(Default::default()) + } +} + +/// Validator withdrawal +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Withdrawal { + /// recipient address for withdrawal value + pub address: Address, + /// value contained in withdrawal + pub amount: U256, + /// index of withdrawal + pub index: U256, + /// index of validator that generated withdrawal + #[serde(rename = "validatorIndex")] + pub validator_index: U256, +} + +/// Access list entry +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct AccessListEntry { + pub address: Address, + #[serde(rename = "storageKeys")] + pub storage_keys: Vec, +} + +/// Signed 1559 Transaction +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction1559Signed { + #[serde(flatten)] + pub transaction_1559_unsigned: Transaction1559Unsigned, + /// r + pub r: U256, + /// s + pub s: U256, + /// v + /// For backwards compatibility, `v` is optionally provided as an alternative to `yParity`. + /// This field is DEPRECATED and all use of it should migrate to `yParity`. + #[serde(skip_serializing_if = "Option::is_none")] + pub v: Option, + /// yParity + /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. + #[serde(rename = "yParity", skip_serializing_if = "Option::is_none")] + pub y_parity: Option, +} + +/// Signed 2930 Transaction +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction2930Signed { + #[serde(flatten)] + pub transaction_2930_unsigned: Transaction2930Unsigned, + /// r + pub r: U256, + /// s + pub s: U256, + /// v + /// For backwards compatibility, `v` is optionally provided as an alternative to `yParity`. + /// This field is DEPRECATED and all use of it should migrate to `yParity`. + #[serde(skip_serializing_if = "Option::is_none")] + pub v: Option, + /// yParity + /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. + #[serde(rename = "yParity")] + pub y_parity: U256, +} + +/// Signed 4844 Transaction +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Transaction4844Signed { + #[serde(flatten)] + pub transaction_4844_unsigned: Transaction4844Unsigned, + /// r + pub r: U256, + /// s + pub s: U256, + /// yParity + /// The parity (0 for even, 1 for odd) of the y-value of the secp256k1 signature. 
+ #[serde(rename = "yParity", skip_serializing_if = "Option::is_none")] + pub y_parity: Option, +} + +/// Signed Legacy Transaction +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct TransactionLegacySigned { + #[serde(flatten)] + pub transaction_legacy_unsigned: TransactionLegacyUnsigned, + /// r + pub r: U256, + /// s + pub s: U256, + /// v + pub v: U256, +} diff --git a/substrate/frame/revive/src/evm/api/signature.rs b/substrate/frame/revive/src/evm/api/signature.rs new file mode 100644 index 0000000000000000000000000000000000000000..957d50c8e324164b404c390056581a3fd71f4946 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/signature.rs @@ -0,0 +1,80 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Ethereum signature utilities +use super::{TransactionLegacySigned, TransactionLegacyUnsigned}; +use rlp::Encodable; +use sp_core::{H160, U256}; +use sp_io::{crypto::secp256k1_ecdsa_recover, hashing::keccak_256}; + +impl TransactionLegacyUnsigned { + /// Recover the Ethereum address, from an RLP encoded transaction and a 65 bytes signature. + pub fn recover_eth_address(rlp_encoded: &[u8], signature: &[u8; 65]) -> Result { + let hash = keccak_256(rlp_encoded); + let mut addr = H160::default(); + let pk = secp256k1_ecdsa_recover(&signature, &hash).map_err(|_| ())?; + addr.assign_from_slice(&keccak_256(&pk[..])[12..]); + + Ok(addr) + } +} + +impl TransactionLegacySigned { + /// Create a signed transaction from an [`TransactionLegacyUnsigned`] and a signature. + pub fn from( + transaction_legacy_unsigned: TransactionLegacyUnsigned, + signature: &[u8; 65], + ) -> TransactionLegacySigned { + let r = U256::from_big_endian(&signature[..32]); + let s = U256::from_big_endian(&signature[32..64]); + let recovery_id = signature[64] as u32; + let v = transaction_legacy_unsigned + .chain_id + .map(|chain_id| chain_id * 2 + 35 + recovery_id) + .unwrap_or_else(|| U256::from(27) + recovery_id); + + TransactionLegacySigned { transaction_legacy_unsigned, r, s, v } + } + + /// Get the raw 65 bytes signature from the signed transaction. + pub fn raw_signature(&self) -> Result<[u8; 65], ()> { + let mut s = [0u8; 65]; + self.r.write_as_big_endian(s[0..32].as_mut()); + self.s.write_as_big_endian(s[32..64].as_mut()); + s[64] = self.extract_recovery_id().ok_or(())?; + Ok(s) + } + + /// Get the recovery ID from the signed transaction. + /// See https://eips.ethereum.org/EIPS/eip-155 + fn extract_recovery_id(&self) -> Option { + if let Some(chain_id) = self.transaction_legacy_unsigned.chain_id { + // self.v - chain_id * 2 - 35 + let v: u64 = self.v.try_into().ok()?; + let chain_id: u64 = chain_id.try_into().ok()?; + let r = v.checked_sub(chain_id.checked_mul(2)?)?.checked_sub(35)?; + r.try_into().ok() + } else { + self.v.try_into().ok() + } + } + + /// Recover the Ethereum address from the signed transaction. 
+ pub fn recover_eth_address(&self) -> Result { + let rlp_encoded = self.transaction_legacy_unsigned.rlp_bytes(); + TransactionLegacyUnsigned::recover_eth_address(&rlp_encoded, &self.raw_signature()?) + } +} diff --git a/substrate/frame/revive/src/evm/api/type_id.rs b/substrate/frame/revive/src/evm/api/type_id.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d75d53500b61cfd0b2abb4ddbe22aa49fc4c8d3 --- /dev/null +++ b/substrate/frame/revive/src/evm/api/type_id.rs @@ -0,0 +1,95 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//! Ethereum Typed Transaction types +use super::Byte; +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +/// A macro to generate Transaction type identifiers +/// See +macro_rules! transaction_type { + ($name:ident, $value:literal) => { + #[doc = concat!("Transaction type identifier: ", $value)] + #[derive(Clone, Default, Debug, Eq, PartialEq)] + pub struct $name; + + impl $name { + /// Convert to Byte + pub fn as_byte(&self) -> Byte { + Byte::from($value) + } + + /// Try to convert from Byte + pub fn try_from_byte(byte: Byte) -> Result { + if byte.0 == $value { + Ok(Self {}) + } else { + Err(byte) + } + } + } + + impl Encode for $name { + fn using_encoded R>(&self, f: F) -> R { + f(&[$value]) + } + } + impl Decode for $name { + fn decode(input: &mut I) -> Result { + if $value == input.read_byte()? { + Ok(Self {}) + } else { + Err(codec::Error::from(concat!("expected ", $value))) + } + } + } + + impl TypeInfo for $name { + type Identity = u8; + fn type_info() -> scale_info::Type { + ::type_info() + } + } + + impl Serialize for $name { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(concat!("0x", $value)) + } + } + impl<'de> Deserialize<'de> for $name { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s: &str = Deserialize::deserialize(deserializer)?; + if s == concat!("0x", $value) { + Ok($name {}) + } else { + Err(serde::de::Error::custom(concat!("expected ", $value))) + } + } + } + }; +} + +transaction_type!(Type0, 0); +transaction_type!(Type1, 1); +transaction_type!(Type2, 2); diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..80406a49bca2c63b11ed3b5bf10b195431db03c2 --- /dev/null +++ b/substrate/frame/revive/src/evm/runtime.rs @@ -0,0 +1,700 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//! Runtime types for integrating `pallet-revive` with the EVM.
+use crate::{
+	evm::api::{TransactionLegacySigned, TransactionLegacyUnsigned},
+	AccountIdOf, AddressMapper, BalanceOf, MomentOf, Weight, LOG_TARGET,
+};
+use codec::{Decode, Encode};
+use frame_support::{
+	dispatch::{DispatchInfo, GetDispatchInfo},
+	traits::{ExtrinsicCall, InherentBuilder, SignedTransactionBuilder},
+};
+use pallet_transaction_payment::OnChargeTransaction;
+use scale_info::TypeInfo;
+use sp_arithmetic::Percent;
+use sp_core::{Get, U256};
+use sp_runtime::{
+	generic::{self, CheckedExtrinsic, ExtrinsicFormat},
+	traits::{
+		self, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata, IdentifyAccount, Member,
+		TransactionExtension,
+	},
+	transaction_validity::{InvalidTransaction, TransactionValidityError},
+	OpaqueExtrinsic, RuntimeDebug, Saturating,
+};
+
+use alloc::vec::Vec;
+
+type CallOf<T> = <T as frame_system::Config>::RuntimeCall;
+
+/// The EVM gas price.
+/// This constant is used by the proxy to advertise it via the `eth_gas_price` RPC.
+///
+/// We use a fixed value for the gas price.
+/// This lets us calculate the gas estimate for a transaction with the formula:
+/// `estimate_gas = substrate_fee / gas_price`.
+pub const GAS_PRICE: u32 = 1_000u32;
+
+/// Wraps [`generic::UncheckedExtrinsic`] to support checking unsigned
+/// [`crate::Call::eth_transact`] extrinsic.
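+///
+/// A rough sketch of the intended flow, illustrative only (`EthExtraImpl` and `lookup`
+/// are placeholders for the runtime's [`EthExtra`] implementation and lookup context):
+///
+/// ```ignore
+/// type Extrinsic = UncheckedExtrinsic<MultiAddress<AccountId32, u32>, MultiSignature, EthExtraImpl>;
+/// // A bare (unsigned) `eth_transact` extrinsic is turned into a signed `CheckedExtrinsic`
+/// // by recovering the signer from the embedded Ethereum signature during `check`.
+/// let uxt: Extrinsic = generic::UncheckedExtrinsic::new_bare(call).into();
+/// let checked = uxt.check(&lookup)?;
+/// ```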
+#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[scale_info(skip_type_params(E))] +pub struct UncheckedExtrinsic( + pub generic::UncheckedExtrinsic, Signature, E::Extension>, +); + +impl + From, Signature, E::Extension>> + for UncheckedExtrinsic +{ + fn from( + utx: generic::UncheckedExtrinsic, Signature, E::Extension>, + ) -> Self { + Self(utx) + } +} + +impl ExtrinsicLike + for UncheckedExtrinsic +{ + fn is_bare(&self) -> bool { + ExtrinsicLike::is_bare(&self.0) + } +} + +impl ExtrinsicMetadata + for UncheckedExtrinsic +{ + const VERSION: u8 = + generic::UncheckedExtrinsic::, Signature, E::Extension>::VERSION; + type TransactionExtensions = E::Extension; +} + +impl ExtrinsicCall + for UncheckedExtrinsic +{ + type Call = CallOf; + + fn call(&self) -> &Self::Call { + self.0.call() + } +} + +use sp_runtime::traits::MaybeDisplay; +type OnChargeTransactionBalanceOf = <::OnChargeTransaction as OnChargeTransaction>::Balance; + +impl Checkable + for UncheckedExtrinsic +where + E: EthExtra, + Self: Encode, + ::Nonce: TryFrom, + ::RuntimeCall: Dispatchable, + OnChargeTransactionBalanceOf: Into>, + BalanceOf: Into + TryFrom, + MomentOf: Into, + CallOf: From> + TryInto>, + + // required by Checkable for `generic::UncheckedExtrinsic` + LookupSource: Member + MaybeDisplay, + CallOf: Encode + Member + Dispatchable, + Signature: Member + traits::Verify, + ::Signer: IdentifyAccount>, + E::Extension: Encode + TransactionExtension>, + Lookup: traits::Lookup>, +{ + type Checked = CheckedExtrinsic, CallOf, E::Extension>; + + fn check(self, lookup: &Lookup) -> Result { + if !self.0.is_signed() { + if let Ok(call) = self.0.function.clone().try_into() { + if let crate::Call::eth_transact { payload, gas_limit, storage_deposit_limit } = + call + { + let checked = E::try_into_checked_extrinsic( + payload, + gas_limit, + storage_deposit_limit, + self.encoded_size(), + )?; + return Ok(checked) + }; + } + } + self.0.check(lookup) + } + + #[cfg(feature = "try-runtime")] + fn unchecked_into_checked_i_know_what_i_am_doing( + self, + lookup: &Lookup, + ) -> Result { + self.0.unchecked_into_checked_i_know_what_i_am_doing(lookup) + } +} + +impl GetDispatchInfo for UncheckedExtrinsic +where + CallOf: GetDispatchInfo + Dispatchable, +{ + fn get_dispatch_info(&self) -> DispatchInfo { + self.0.get_dispatch_info() + } +} + +impl serde::Serialize + for UncheckedExtrinsic +{ + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.0.serialize(seq) + } +} + +impl<'a, Address: Decode, Signature: Decode, E: EthExtra> serde::Deserialize<'a> + for UncheckedExtrinsic +{ + fn deserialize(de: D) -> Result + where + D: serde::Deserializer<'a>, + { + let r = sp_core::bytes::deserialize(de)?; + Decode::decode(&mut &r[..]) + .map_err(|e| serde::de::Error::custom(sp_runtime::format!("Decode error: {}", e))) + } +} + +impl SignedTransactionBuilder + for UncheckedExtrinsic +where + Address: TypeInfo, + CallOf: TypeInfo, + Signature: TypeInfo, + E::Extension: TypeInfo, +{ + type Address = Address; + type Signature = Signature; + type Extension = E::Extension; + + fn new_signed_transaction( + call: Self::Call, + signed: Address, + signature: Signature, + tx_ext: E::Extension, + ) -> Self { + generic::UncheckedExtrinsic::new_signed(call, signed, signature, tx_ext).into() + } +} + +impl InherentBuilder for UncheckedExtrinsic +where + Address: TypeInfo, + CallOf: TypeInfo, + Signature: TypeInfo, + E::Extension: TypeInfo, +{ + fn new_inherent(call: Self::Call) -> Self { + 
generic::UncheckedExtrinsic::new_bare(call).into() + } +} + +impl From> + for OpaqueExtrinsic +where + Address: Encode, + Signature: Encode, + CallOf: Encode, + E::Extension: Encode, +{ + fn from(extrinsic: UncheckedExtrinsic) -> Self { + Self::from_bytes(extrinsic.encode().as_slice()).expect( + "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ + raw Vec encoding; qed", + ) + } +} + +/// EthExtra convert an unsigned [`crate::Call::eth_transact`] into a [`CheckedExtrinsic`]. +pub trait EthExtra { + /// The Runtime configuration. + type Config: crate::Config + pallet_transaction_payment::Config; + + /// The Runtime's transaction extension. + /// It should include at least: + /// - [`frame_system::CheckNonce`] to ensure that the nonce from the Ethereum transaction is + /// correct. + type Extension: TransactionExtension>; + + /// Get the transaction extension to apply to an unsigned [`crate::Call::eth_transact`] + /// extrinsic. + /// + /// # Parameters + /// - `nonce`: The nonce extracted from the Ethereum transaction. + /// - `tip`: The transaction tip calculated from the Ethereum transaction. + fn get_eth_extension( + nonce: ::Nonce, + tip: BalanceOf, + ) -> Self::Extension; + + /// Convert the unsigned [`crate::Call::eth_transact`] into a [`CheckedExtrinsic`]. + /// and ensure that the fees from the Ethereum transaction correspond to the fees computed from + /// the encoded_len, the injected gas_limit and storage_deposit_limit. + /// + /// # Parameters + /// - `payload`: The RLP-encoded Ethereum transaction. + /// - `gas_limit`: The gas limit for the extrinsic + /// - `storage_deposit_limit`: The storage deposit limit for the extrinsic, + /// - `encoded_len`: The encoded length of the extrinsic. + fn try_into_checked_extrinsic( + payload: Vec, + gas_limit: Weight, + storage_deposit_limit: BalanceOf, + encoded_len: usize, + ) -> Result< + CheckedExtrinsic, CallOf, Self::Extension>, + InvalidTransaction, + > + where + ::Nonce: TryFrom, + BalanceOf: Into + TryFrom, + MomentOf: Into, + ::RuntimeCall: Dispatchable, + OnChargeTransactionBalanceOf: Into>, + CallOf: From>, + { + let tx = rlp::decode::(&payload).map_err(|err| { + log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}"); + InvalidTransaction::Call + })?; + + let signer = tx.recover_eth_address().map_err(|err| { + log::debug!(target: LOG_TARGET, "Failed to recover signer: {err:?}"); + InvalidTransaction::BadProof + })?; + + let signer = + ::AddressMapper::to_fallback_account_id(&signer); + let TransactionLegacyUnsigned { nonce, chain_id, to, value, input, gas, gas_price, .. 
} = + tx.transaction_legacy_unsigned; + + if chain_id.unwrap_or_default() != ::ChainId::get().into() { + log::debug!(target: LOG_TARGET, "Invalid chain_id {chain_id:?}"); + return Err(InvalidTransaction::Call); + } + + let call = if let Some(dest) = to { + crate::Call::call:: { + dest, + value: value.try_into().map_err(|_| InvalidTransaction::Call)?, + gas_limit, + storage_deposit_limit, + data: input.0, + } + } else { + let blob = match polkavm::ProgramBlob::blob_length(&input.0) { + Some(blob_len) => blob_len + .try_into() + .ok() + .and_then(|blob_len| (input.0.split_at_checked(blob_len))), + _ => None, + }; + + let Some((code, data)) = blob else { + log::debug!(target: LOG_TARGET, "Failed to extract polkavm code & data"); + return Err(InvalidTransaction::Call); + }; + + crate::Call::instantiate_with_code:: { + value: value.try_into().map_err(|_| InvalidTransaction::Call)?, + gas_limit, + storage_deposit_limit, + code: code.to_vec(), + data: data.to_vec(), + salt: None, + } + }; + + let nonce = nonce.try_into().map_err(|_| InvalidTransaction::Call)?; + + // Fees calculated with the fixed `GAS_PRICE` that should be used to estimate the gas. + let eth_fee_no_tip = U256::from(GAS_PRICE) + .saturating_mul(gas) + .try_into() + .map_err(|_| InvalidTransaction::Call)?; + + // Fees with the actual gas_price from the transaction. + let eth_fee: BalanceOf = U256::from(gas_price) + .saturating_mul(gas) + .try_into() + .map_err(|_| InvalidTransaction::Call)?; + + let info = call.get_dispatch_info(); + let function: CallOf = call.into(); + + // Fees calculated from the extrinsic, without the tip. + let actual_fee: BalanceOf = + pallet_transaction_payment::Pallet::::compute_fee( + encoded_len as u32, + &info, + Default::default(), + ) + .into(); + log::debug!(target: LOG_TARGET, "try_into_checked_extrinsic: encoded_len: {encoded_len:?} actual_fee: {actual_fee:?} eth_fee: {eth_fee:?}"); + + if eth_fee < actual_fee { + log::debug!(target: LOG_TARGET, "fees {eth_fee:?} too low for the extrinsic {actual_fee:?}"); + return Err(InvalidTransaction::Payment.into()) + } + + let min = actual_fee.min(eth_fee_no_tip); + let max = actual_fee.max(eth_fee_no_tip); + let diff = Percent::from_rational(max - min, min); + if diff > Percent::from_percent(10) { + log::debug!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?} should be no more than 10% got {diff:?}"); + return Err(InvalidTransaction::Call.into()) + } else { + log::debug!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?}: {diff:?}"); + } + + let tip = eth_fee.saturating_sub(eth_fee_no_tip); + log::debug!(target: LOG_TARGET, "Created checked Ethereum transaction with nonce {nonce:?} and tip: {tip:?}"); + Ok(CheckedExtrinsic { + format: ExtrinsicFormat::Signed(signer.into(), Self::get_eth_extension(nonce, tip)), + function, + }) + } +} + +#[cfg(feature = "riscv")] +#[cfg(test)] +mod test { + use super::*; + use crate::{ + evm::*, + test_utils::*, + tests::{ExtBuilder, RuntimeCall, RuntimeOrigin, Test}, + }; + use frame_support::{error::LookupError, traits::fungible::Mutate}; + use pallet_revive_fixtures::compile_module; + use rlp::Encodable; + use sp_runtime::{ + traits::{Checkable, DispatchTransaction}, + MultiAddress, MultiSignature, + }; + type AccountIdOf = ::AccountId; + + /// A simple account that can sign transactions + pub struct Account(subxt_signer::eth::Keypair); + + impl Default for Account { + fn default() -> 
Self { + Self(subxt_signer::eth::dev::alith()) + } + } + + impl From for Account { + fn from(kp: subxt_signer::eth::Keypair) -> Self { + Self(kp) + } + } + + impl Account { + /// Get the [`AccountId`] of the account. + pub fn account_id(&self) -> AccountIdOf { + let address = self.address(); + ::AddressMapper::to_fallback_account_id(&address) + } + + /// Get the [`H160`] address of the account. + pub fn address(&self) -> H160 { + H160::from_slice(&self.0.account_id().as_ref()) + } + + /// Sign a transaction. + pub fn sign_transaction(&self, tx: TransactionLegacyUnsigned) -> TransactionLegacySigned { + let rlp_encoded = tx.rlp_bytes(); + let signature = self.0.sign(&rlp_encoded); + TransactionLegacySigned::from(tx, signature.as_ref()) + } + } + + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct Extra; + type SignedExtra = (frame_system::CheckNonce, ChargeTransactionPayment); + + use pallet_transaction_payment::ChargeTransactionPayment; + impl EthExtra for Extra { + type Config = Test; + type Extension = SignedExtra; + + fn get_eth_extension(nonce: u32, tip: BalanceOf) -> Self::Extension { + (frame_system::CheckNonce::from(nonce), ChargeTransactionPayment::from(tip)) + } + } + + type Ex = UncheckedExtrinsic, MultiSignature, Extra>; + struct TestContext; + + impl traits::Lookup for TestContext { + type Source = MultiAddress; + type Target = AccountIdOf; + fn lookup(&self, s: Self::Source) -> Result { + match s { + MultiAddress::Id(id) => Ok(id), + _ => Err(LookupError), + } + } + } + + /// A builder for creating an unchecked extrinsic, and test that the check function works. + #[derive(Clone)] + struct UncheckedExtrinsicBuilder { + tx: TransactionLegacyUnsigned, + gas_limit: Weight, + storage_deposit_limit: BalanceOf, + } + + impl UncheckedExtrinsicBuilder { + /// Create a new builder with default values. + fn new() -> Self { + Self { + tx: TransactionLegacyUnsigned { + chain_id: Some(::ChainId::get().into()), + gas_price: U256::from(GAS_PRICE), + ..Default::default() + }, + gas_limit: Weight::zero(), + storage_deposit_limit: 0, + } + } + + fn estimate_gas(&mut self) { + let dry_run = crate::Pallet::::bare_eth_transact( + Account::default().account_id(), + self.tx.to, + self.tx.value.try_into().unwrap(), + self.tx.input.clone().0, + Weight::MAX, + u64::MAX, + |call| { + let call = RuntimeCall::Contracts(call); + let uxt: Ex = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); + uxt.encoded_size() as u32 + }, + crate::DebugInfo::Skip, + crate::CollectEvents::Skip, + ); + self.tx.gas = ((dry_run.fee + GAS_PRICE as u64) / (GAS_PRICE as u64)).into(); + } + + /// Create a new builder with a call to the given address. + fn call_with(dest: H160) -> Self { + let mut builder = Self::new(); + builder.tx.to = Some(dest); + builder.estimate_gas(); + builder + } + + /// Create a new builder with an instantiate call. + fn instantiate_with(code: Vec, data: Vec) -> Self { + let mut builder = Self::new(); + builder.tx.input = Bytes(code.into_iter().chain(data.into_iter()).collect()); + builder.estimate_gas(); + builder + } + + /// Update the transaction with the given function. + fn update(mut self, f: impl FnOnce(&mut TransactionLegacyUnsigned) -> ()) -> Self { + f(&mut self.tx); + self + } + + /// Call `check` on the unchecked extrinsic, and `pre_dispatch` on the signed extension. + fn check(&self) -> Result<(RuntimeCall, SignedExtra), TransactionValidityError> { + let UncheckedExtrinsicBuilder { tx, gas_limit, storage_deposit_limit } = self.clone(); + + // Fund the account. 
+ let account = Account::default(); + let _ = ::Currency::set_balance( + &account.account_id(), + 100_000_000_000_000, + ); + + let payload = account.sign_transaction(tx).rlp_bytes().to_vec(); + let call = RuntimeCall::Contracts(crate::Call::eth_transact { + payload, + gas_limit, + storage_deposit_limit, + }); + + let encoded_len = call.encoded_size(); + let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into(); + let result: CheckedExtrinsic<_, _, _> = uxt.check(&TestContext {})?; + let (account_id, extra): (AccountId32, SignedExtra) = match result.format { + ExtrinsicFormat::Signed(signer, extra) => (signer, extra), + _ => unreachable!(), + }; + + extra.clone().validate_and_prepare( + RuntimeOrigin::signed(account_id), + &result.function, + &result.function.get_dispatch_info(), + encoded_len, + )?; + + Ok((result.function, extra)) + } + } + + #[test] + fn check_eth_transact_call_works() { + ExtBuilder::default().build().execute_with(|| { + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); + assert_eq!( + builder.check().unwrap().0, + crate::Call::call:: { + dest: builder.tx.to.unwrap(), + value: builder.tx.value.as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + data: builder.tx.input.0 + } + .into() + ); + }); + } + + #[test] + fn check_eth_transact_instantiate_works() { + ExtBuilder::default().build().execute_with(|| { + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + assert_eq!( + builder.check().unwrap().0, + crate::Call::instantiate_with_code:: { + value: builder.tx.value.as_u64(), + gas_limit: builder.gas_limit, + storage_deposit_limit: builder.storage_deposit_limit, + code, + data, + salt: None + } + .into() + ); + }); + } + + #[test] + fn check_eth_transact_nonce_works() { + ExtBuilder::default().build().execute_with(|| { + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.nonce = 1u32.into()); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Future)) + ); + + >::inc_account_nonce(Account::default().account_id()); + + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])); + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) + ); + }); + } + + #[test] + fn check_eth_transact_chain_id_works() { + ExtBuilder::default().build().execute_with(|| { + let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])) + .update(|tx| tx.chain_id = Some(42.into())); + + assert_eq!( + builder.check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); + }); + } + + #[test] + fn check_instantiate_data() { + ExtBuilder::default().build().execute_with(|| { + let code = b"invalid code".to_vec(); + let data = vec![1]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()); + + // Fail because the tx input fail to get the blob length + assert_eq!( + builder.clone().update(|tx| tx.input = Bytes(vec![1, 2, 3])).check(), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + ); + }); + } + + #[test] + fn check_transaction_fees() { + ExtBuilder::default().build().execute_with(|| { + let scenarios: [(_, Box, _); 5] = [ + ("Eth fees too low", Box::new(|tx| tx.gas_price /= 2), InvalidTransaction::Payment), + ("Gas fees too high", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), 
+ ("Gas fees too low", Box::new(|tx| tx.gas *= 2), InvalidTransaction::Call), + ( + "Diff > 10%", + Box::new(|tx| tx.gas = tx.gas * 111 / 100), + InvalidTransaction::Call, + ), + ( + "Diff < 10%", + Box::new(|tx| { + tx.gas_price *= 2; + tx.gas = tx.gas * 89 / 100 + }), + InvalidTransaction::Call, + ), + ]; + + for (msg, update_tx, err) in scenarios { + let builder = + UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx); + + assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg); + } + }); + } + + #[test] + fn check_transaction_tip() { + ExtBuilder::default().build().execute_with(|| { + let (code, _) = compile_module("dummy").unwrap(); + let data = vec![]; + let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone()) + .update(|tx| tx.gas_price = tx.gas_price * 103 / 100); + + let tx = &builder.tx; + let expected_tip = tx.gas_price * tx.gas - U256::from(GAS_PRICE) * tx.gas; + let (_, extra) = builder.check().unwrap(); + assert_eq!(U256::from(extra.1.tip()), expected_tip); + }); + } +} diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index fffc3e4f48376efed32b97a6546f0cf3bfab2c1c..9f3a75c0090dd160232ec72c590526c7a1d164f4 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -66,6 +66,10 @@ type VarSizedKey = BoundedVec>; const FRAME_ALWAYS_EXISTS_ON_INSTANTIATE: &str = "The return value is only `None` if no contract exists at the specified address. This cannot happen on instantiate or delegate; qed"; +/// Code hash of existing account without code (keccak256 hash of empty data). +pub const EMPTY_CODE_HASH: H256 = + H256(sp_core::hex2array!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")); + /// Combined key type for both fixed and variable sized storage keys. pub enum Key { /// Variant for fixed sized keys. @@ -163,6 +167,18 @@ impl Origin { Origin::Root => Err(DispatchError::RootNotAllowed), } } + + /// Make sure that this origin is mapped. + /// + /// We require an origin to be mapped in order to be used in a `Stack`. Otherwise + /// [`Stack::caller`] returns an address that can't be reverted to the original address. + fn ensure_mapped(&self) -> DispatchResult { + match self { + Self::Root => Ok(()), + Self::Signed(account_id) if T::AddressMapper::is_mapped(account_id) => Ok(()), + Self::Signed(_) => Err(>::AccountUnmapped.into()), + } + } } /// An interface that provides access to the external environment in which the @@ -272,9 +288,8 @@ pub trait Ext: sealing::Sealed { fn is_contract(&self, address: &H160) -> bool; /// Returns the code hash of the contract for the given `address`. - /// - /// Returns `None` if the `address` does not belong to a contract. - fn code_hash(&self, address: &H160) -> Option; + /// If not a contract but account exists then `keccak_256([])` is returned, otherwise `zero`. + fn code_hash(&self, address: &H160) -> H256; /// Returns the code hash of the contract being executed. fn own_code_hash(&mut self) -> &H256; @@ -749,7 +764,7 @@ where )? 
{ stack.run(executable, input_data).map(|_| stack.first_frame.last_frame_output) } else { - Self::transfer_no_contract(&origin, &dest, value) + Self::transfer_from_origin(&origin, &dest, value) } } @@ -830,6 +845,7 @@ where value: BalanceOf, debug_message: Option<&'a mut DebugBuffer>, ) -> Result, ExecError> { + origin.ensure_mapped()?; let Some((first_frame, executable)) = Self::new_frame( args, value, @@ -838,6 +854,7 @@ where storage_meter, BalanceOf::::zero(), false, + true, )? else { return Ok(None); @@ -871,6 +888,7 @@ where storage_meter: &mut storage::meter::GenericMeter, deposit_limit: BalanceOf, read_only: bool, + origin_is_caller: bool, ) -> Result, E)>, ExecError> { let (account_id, contract_info, executable, delegate_caller, entry_point) = match frame_args { @@ -902,7 +920,17 @@ where let address = if let Some(salt) = salt { address::create2(&deployer, executable.code(), input_data, salt) } else { - address::create1(&deployer, account_nonce.saturated_into()) + use sp_runtime::Saturating; + address::create1( + &deployer, + // the Nonce from the origin has been incremented pre-dispatch, so we need + // to subtract 1 to get the nonce at the time of the call. + if origin_is_caller { + account_nonce.saturating_sub(1u32.into()).saturated_into() + } else { + account_nonce.saturated_into() + }, + ) }; let contract = ContractInfo::new( &address, @@ -910,7 +938,7 @@ where *executable.code_hash(), )?; ( - T::AddressMapper::to_account_id_contract(&address), + T::AddressMapper::to_fallback_account_id(&address), contract, executable, None, @@ -973,6 +1001,7 @@ where nested_storage, deposit_limit, read_only, + false, )? { self.frames.try_push(frame).map_err(|_| Error::::MaxCallDepthReached)?; Ok(Some(executable)) @@ -1225,13 +1254,13 @@ where } /// Transfer some funds from `from` to `to`. - fn transfer(from: &T::AccountId, to: &T::AccountId, value: BalanceOf) -> DispatchResult { + fn transfer(from: &T::AccountId, to: &T::AccountId, value: BalanceOf) -> ExecResult { // this avoids events to be emitted for zero balance transfers if !value.is_zero() { T::Currency::transfer(from, to, value, Preservation::Preserve) .map_err(|_| Error::::TransferFailed)?; } - Ok(()) + Ok(Default::default()) } /// Same as `transfer` but `from` is an `Origin`. @@ -1239,28 +1268,17 @@ where from: &Origin, to: &T::AccountId, value: BalanceOf, - ) -> DispatchResult { + ) -> ExecResult { // If the from address is root there is no account to transfer from, and therefore we can't // take any `value` other than 0. let from = match from { Origin::Signed(caller) => caller, - Origin::Root if value.is_zero() => return Ok(()), - Origin::Root => return DispatchError::RootNotAllowed.into(), + Origin::Root if value.is_zero() => return Ok(Default::default()), + Origin::Root => return Err(DispatchError::RootNotAllowed.into()), }; Self::transfer(from, to, value) } - /// Same as `transfer_from_origin` but creates an `ExecReturnValue` on success. - fn transfer_no_contract( - from: &Origin, - to: &T::AccountId, - value: BalanceOf, - ) -> ExecResult { - Self::transfer_from_origin(from, to, value) - .map(|_| ExecReturnValue::default()) - .map_err(Into::into) - } - /// Reference to the current (top) frame. fn top_frame(&self) -> &Frame { top_frame!(self) @@ -1363,12 +1381,7 @@ where )? 
{ self.run(executable, input_data) } else { - Self::transfer_no_contract( - &Origin::from_account_id(self.account_id().clone()), - &dest, - value, - )?; - Ok(()) + Self::transfer(&self.account_id(), &dest, value).map(|_| ()) } }; @@ -1472,6 +1485,8 @@ where &T::AddressMapper::to_account_id(to), value.try_into().map_err(|_| Error::::BalanceConversionFailed)?, ) + .map(|_| ()) + .map_err(|error| error.error) } fn get_storage(&mut self, key: &Key) -> Option> { @@ -1536,8 +1551,15 @@ where ContractInfoOf::::contains_key(&address) } - fn code_hash(&self, address: &H160) -> Option { - >::get(&address).map(|contract| contract.code_hash) + fn code_hash(&self, address: &H160) -> H256 { + >::get(&address) + .map(|contract| contract.code_hash) + .unwrap_or_else(|| { + if System::::account_exists(&T::AddressMapper::to_account_id(address)) { + return EMPTY_CODE_HASH; + } + H256::zero() + }) } fn own_code_hash(&mut self) -> &H256 { @@ -1817,9 +1839,10 @@ mod tests { }; use assert_matches::assert_matches; use frame_support::{assert_err, assert_ok, parameter_types}; - use frame_system::{EventRecord, Phase}; + use frame_system::{AccountInfo, EventRecord, Phase}; use pallet_revive_uapi::ReturnFlags; use pretty_assertions::assert_eq; + use sp_io::hashing::keccak_256; use sp_runtime::{traits::Hash, DispatchError}; use std::{cell::RefCell, collections::hash_map::HashMap, rc::Rc}; @@ -1870,8 +1893,8 @@ mod tests { f: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static, ) -> H256 { Loader::mutate(|loader| { - // Generate code hashes as monotonically increasing values. - let hash = ::Hash::from_low_u64_be(loader.counter); + // Generate code hashes from contract index value. + let hash = H256(keccak_256(&loader.counter.to_le_bytes())); loader.counter += 1; loader.map.insert( hash, @@ -2007,7 +2030,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, success_ch); set_balance(&ALICE, 100); - let balance = get_balance(&BOB_CONTRACT_ID); + let balance = get_balance(&BOB_FALLBACK); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 0, value).unwrap(); @@ -2023,7 +2046,7 @@ mod tests { .unwrap(); assert_eq!(get_balance(&ALICE), 100 - value); - assert_eq!(get_balance(&BOB_CONTRACT_ID), balance + value); + assert_eq!(get_balance(&BOB_FALLBACK), balance + value); }); } @@ -2045,7 +2068,7 @@ mod tests { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, delegate_ch); set_balance(&ALICE, 100); - let balance = get_balance(&BOB_CONTRACT_ID); + let balance = get_balance(&BOB_FALLBACK); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap(); @@ -2061,7 +2084,7 @@ mod tests { .unwrap(); assert_eq!(get_balance(&ALICE), 100 - value); - assert_eq!(get_balance(&BOB_CONTRACT_ID), balance + value); + assert_eq!(get_balance(&BOB_FALLBACK), balance + value); }); } @@ -2302,9 +2325,10 @@ mod tests { // Record the caller for bob. WitnessedCallerBob::mutate(|caller| { let origin = ctx.ext.caller(); - *caller = Some(::AddressMapper::to_address( - &origin.account_id().unwrap(), - )); + *caller = + Some(<::AddressMapper as AddressMapper>::to_address( + &origin.account_id().unwrap(), + )); }); // Call into CHARLIE contract. @@ -2326,9 +2350,10 @@ mod tests { // Record the caller for charlie. 
WitnessedCallerCharlie::mutate(|caller| { let origin = ctx.ext.caller(); - *caller = Some(::AddressMapper::to_address( - &origin.account_id().unwrap(), - )); + *caller = + Some(<::AddressMapper as AddressMapper>::to_address( + &origin.account_id().unwrap(), + )); }); exec_success() }); @@ -2386,16 +2411,25 @@ mod tests { #[test] fn code_hash_returns_proper_values() { - let code_bob = MockLoader::insert(Call, |ctx, _| { - // ALICE is not a contract and hence they do not have a code_hash - assert!(ctx.ext.code_hash(&ALICE_ADDR).is_none()); - // BOB is a contract and hence it has a code_hash - assert!(ctx.ext.code_hash(&BOB_ADDR).is_some()); + let bob_code_hash = MockLoader::insert(Call, |ctx, _| { + // ALICE is not a contract but account exists so it returns hash of empty data + assert_eq!(ctx.ext.code_hash(&ALICE_ADDR), EMPTY_CODE_HASH); + // BOB is a contract (this function) and hence it has a code_hash. + // `MockLoader` uses contract index to generate the code hash. + assert_eq!(ctx.ext.code_hash(&BOB_ADDR), H256(keccak_256(&0u64.to_le_bytes()))); + // [0xff;20] doesn't exist and returns hash zero + assert!(ctx.ext.code_hash(&H160([0xff; 20])).is_zero()); + exec_success() }); ExtBuilder::default().build().execute_with(|| { - place_contract(&BOB, code_bob); + // add alice account info to test case EOA code hash + frame_system::Account::::insert( + ::AddressMapper::to_account_id(&ALICE_ADDR), + AccountInfo { consumers: 1, providers: 1, ..Default::default() }, + ); + place_contract(&BOB, bob_code_hash); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); // ALICE (not contract) -> BOB (contract) @@ -2415,7 +2449,7 @@ mod tests { #[test] fn own_code_hash_returns_proper_values() { let bob_ch = MockLoader::insert(Call, |ctx, _| { - let code_hash = ctx.ext.code_hash(&BOB_ADDR).unwrap(); + let code_hash = ctx.ext.code_hash(&BOB_ADDR); assert_eq!(*ctx.ext.own_code_hash(), code_hash); exec_success() }); @@ -2683,10 +2717,11 @@ mod tests { ), Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address ); - let instantiated_contract_id = - ::AddressMapper::to_account_id_contract( - &instantiated_contract_address, - ); + let instantiated_contract_id = <::AddressMapper as AddressMapper< + Test, + >>::to_fallback_account_id( + &instantiated_contract_address + ); // Check that the newly created account has the expected code hash and // there are instantiation event. @@ -2738,10 +2773,11 @@ mod tests { Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address ); - let instantiated_contract_id = - ::AddressMapper::to_account_id_contract( - &instantiated_contract_address, - ); + let instantiated_contract_id = <::AddressMapper as AddressMapper< + Test, + >>::to_fallback_account_id( + &instantiated_contract_address + ); // Check that the account has not been created. assert!(ContractInfo::::load_code_hash(&instantiated_contract_id).is_none()); @@ -2804,10 +2840,11 @@ mod tests { let instantiated_contract_address = *instantiated_contract_address.borrow().as_ref().unwrap(); - let instantiated_contract_id = - ::AddressMapper::to_account_id_contract( - &instantiated_contract_address, - ); + let instantiated_contract_id = <::AddressMapper as AddressMapper< + Test, + >>::to_fallback_account_id( + &instantiated_contract_address + ); // Check that the newly created account has the expected code hash and // there are instantiation event. 
@@ -2862,7 +2899,7 @@ mod tests { .build() .execute_with(|| { set_balance(&ALICE, 1000); - set_balance(&BOB_CONTRACT_ID, 100); + set_balance(&BOB_FALLBACK, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); @@ -2992,7 +3029,8 @@ mod tests { fn recursive_call_during_constructor_is_balance_transfer() { let code = MockLoader::insert(Constructor, |ctx, _| { let account_id = ctx.ext.account_id().clone(); - let addr = ::AddressMapper::to_address(&account_id); + let addr = + <::AddressMapper as AddressMapper>::to_address(&account_id); let balance = ctx.ext.balance(); // Calling ourselves during the constructor will trigger a balance @@ -3053,7 +3091,8 @@ mod tests { fn cannot_send_more_balance_than_available_to_self() { let code_hash = MockLoader::insert(Call, |ctx, _| { let account_id = ctx.ext.account_id().clone(); - let addr = ::AddressMapper::to_address(&account_id); + let addr = + <::AddressMapper as AddressMapper>::to_address(&account_id); let balance = ctx.ext.balance(); assert_err!( @@ -3325,7 +3364,7 @@ mod tests { EventRecord { phase: Phase::Initialization, event: MetaEvent::System(frame_system::Event::Remarked { - sender: BOB_CONTRACT_ID, + sender: BOB_FALLBACK, hash: remark_hash }), topics: vec![], @@ -3409,7 +3448,7 @@ mod tests { EventRecord { phase: Phase::Initialization, event: MetaEvent::System(frame_system::Event::Remarked { - sender: BOB_CONTRACT_ID, + sender: BOB_FALLBACK, hash: remark_hash }), topics: vec![], @@ -3473,7 +3512,10 @@ mod tests { ) .unwrap(); - let account_id = ::AddressMapper::to_account_id_contract(&addr); + let account_id = + <::AddressMapper as AddressMapper>::to_fallback_account_id( + &addr, + ); assert_eq!(System::account_nonce(&ALICE), alice_nonce); assert_eq!(System::account_nonce(ctx.ext.account_id()), 1); @@ -4266,7 +4308,7 @@ mod tests { .build() .execute_with(|| { set_balance(&ALICE, 1000); - set_balance(&BOB_CONTRACT_ID, 100); + set_balance(&BOB, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); @@ -4451,7 +4493,7 @@ mod tests { .build() .execute_with(|| { set_balance(&ALICE, 1000); - set_balance(&BOB_CONTRACT_ID, 100); + set_balance(&BOB, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); @@ -4568,7 +4610,7 @@ mod tests { .build() .execute_with(|| { set_balance(&ALICE, 1000); - set_balance(&BOB_CONTRACT_ID, 100); + set_balance(&BOB, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); @@ -4612,7 +4654,7 @@ mod tests { .build() .execute_with(|| { set_balance(&ALICE, 1000); - set_balance(&BOB_CONTRACT_ID, 100); + set_balance(&BOB, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index 9986da472c9682627e4f112c1f7485620c08081a..1967f9868d2b5644676ee917fa9376ccdd93fbdb 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -26,30 +26,29 @@ mod benchmarking; mod benchmarking_dummy; mod exec; mod gas; -mod primitives; -use crate::exec::MomentOf; -use 
frame_support::traits::IsType; -pub use primitives::*; -use sp_core::U256; - mod limits; +mod primitives; mod storage; mod transient_storage; mod wasm; +#[cfg(test)] +mod tests; + pub mod chain_extension; pub mod debug; +pub mod evm; pub mod test_utils; pub mod weights; -#[cfg(test)] -mod tests; use crate::{ + evm::{runtime::GAS_PRICE, TransactionLegacyUnsigned}, exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack}, gas::GasMeter, storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, wasm::{CodeInfo, RuntimeCosts, WasmBlob}, }; +use alloc::boxed::Box; use codec::{Codec, Decode, Encode}; use environmental::*; use frame_support::{ @@ -58,9 +57,10 @@ use frame_support::{ PostDispatchInfo, RawOrigin, }, ensure, + pallet_prelude::DispatchClass, traits::{ fungible::{Inspect, Mutate, MutateHold}, - ConstU32, ConstU64, Contains, EnsureOrigin, Get, Time, + ConstU32, ConstU64, Contains, EnsureOrigin, Get, IsType, OriginTrait, Time, }, weights::{Weight, WeightMeter}, BoundedVec, RuntimeDebugNoBound, @@ -70,18 +70,21 @@ use frame_system::{ pallet_prelude::{BlockNumberFor, OriginFor}, EventRecord, Pallet as System, }; +use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; -use sp_core::{H160, H256}; +use sp_core::{H160, H256, U256}; use sp_runtime::{ traits::{BadOrigin, Convert, Dispatchable, Saturating}, DispatchError, }; pub use crate::{ - address::{AddressMapper, DefaultAddressMapper}, + address::{create1, create2, AccountId32Mapper, AddressMapper}, debug::Tracing, + exec::MomentOf, pallet::*, }; +pub use primitives::*; pub use weights::WeightInfo; #[cfg(doc)] @@ -90,6 +93,7 @@ pub use crate::wasm::SyscallDoc; type TrieId = BoundedVec>; type BalanceOf = <::Currency as Inspect<::AccountId>>::Balance; +type OnChargeTransactionBalanceOf = <::OnChargeTransaction as OnChargeTransaction>::Balance; type CodeVec = BoundedVec>; type EventRecordOf = EventRecord<::RuntimeEvent, ::Hash>; @@ -134,7 +138,7 @@ pub mod pallet { use sp_runtime::Perbill; /// The in-code storage version. - pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -157,10 +161,9 @@ pub mod pallet { /// The overarching call type. #[pallet::no_default_bounds] - type RuntimeCall: Dispatchable - + GetDispatchInfo - + codec::Decode - + IsType<::RuntimeCall>; + type RuntimeCall: Parameter + + Dispatchable + + GetDispatchInfo; /// Overarching hold reason. #[pallet::no_default_bounds] @@ -229,9 +232,9 @@ pub mod pallet { #[pallet::constant] type CodeHashLockupDepositPercent: Get; - /// Only valid type is [`DefaultAddressMapper`]. - #[pallet::no_default_bounds] - type AddressMapper: AddressMapper>; + /// The only valid types are [`address::AccountId32Mapper`] and [`address::H160Mapper`]. + #[pallet::no_default] + type AddressMapper: AddressMapper; /// Make contract callable functions marked as `#[unstable]` available. /// @@ -357,7 +360,6 @@ pub mod pallet { #[inject_runtime_type] type RuntimeCall = (); - type AddressMapper = DefaultAddressMapper; type CallFilter = (); type ChainExtension = (); type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; @@ -558,6 +560,12 @@ pub mod pallet { /// Immutable data can only be set during deploys and only be read during calls. /// Additionally, it is only valid to set the data once and it must not be empty.
InvalidImmutableAccess, + /// An `AccountID32` account tried to interact with the pallet without having a mapping. + /// + /// Call [`Pallet::map_account`] in order to create a mapping for the account. + AccountUnmapped, + /// Tried to map an account that is already mapped. + AccountAlreadyMapped, } /// A reason for the pallet contracts placing a hold on funds. @@ -567,6 +575,8 @@ pub mod pallet { CodeUploadDepositReserve, /// The Pallet has reserved it for storage deposit. StorageDepositReserve, + /// Deposit for creating an address mapping in [`AddressSuffix`]. + AddressMapping, } /// A mapping from a contract's code hash to its code. @@ -598,6 +608,14 @@ pub mod pallet { pub(crate) type DeletionQueueCounter = StorageValue<_, DeletionQueueManager, ValueQuery>; + /// Map a Ethereum address to its original `AccountId32`. + /// + /// Stores the last 12 byte for addresses that were originally an `AccountId32` instead + /// of an `H160`. Register your `AccountId32` using [`Pallet::map_account`] in order to + /// use it with this pallet. + #[pallet::storage] + pub(crate) type AddressSuffix = StorageMap<_, Identity, H160, [u8; 12]>; + #[pallet::extra_constants] impl Pallet { #[pallet::constant_name(ApiVersion)] @@ -738,6 +756,33 @@ pub mod pallet { BalanceOf: Into + TryFrom, MomentOf: Into, { + /// A raw EVM transaction, typically dispatched by an Ethereum JSON-RPC server. + /// + /// # Parameters + /// + /// * `payload`: The RLP-encoded [`crate::evm::TransactionLegacySigned`]. + /// * `gas_limit`: The gas limit enforced during contract execution. + /// * `storage_deposit_limit`: The maximum balance that can be charged to the caller for + /// storage usage. + /// + /// # Note + /// + /// This call cannot be dispatched directly; attempting to do so will result in a failed + /// transaction. It serves as a wrapper for an Ethereum transaction. When submitted, the + /// runtime converts it into a [`sp_runtime::generic::CheckedExtrinsic`] by recovering the + /// signer and validating the transaction. + #[allow(unused_variables)] + #[pallet::call_index(0)] + #[pallet::weight(Weight::MAX)] + pub fn eth_transact( + origin: OriginFor, + payload: Vec, + gas_limit: Weight, + #[pallet::compact] storage_deposit_limit: BalanceOf, + ) -> DispatchResultWithPostInfo { + Err(frame_system::Error::CallFiltered::.into()) + } + /// Makes a call to an account, optionally transferring some balance. /// /// # Parameters @@ -754,7 +799,7 @@ pub mod pallet { /// * If the account is a regular account, any value will be transferred. /// * If no account exists and the call value is not less than `existential_deposit`, /// a regular account will be created and any value will be transferred. - #[pallet::call_index(0)] + #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] pub fn call( origin: OriginFor, @@ -764,6 +809,7 @@ pub mod pallet { #[pallet::compact] storage_deposit_limit: BalanceOf, data: Vec, ) -> DispatchResultWithPostInfo { + log::info!(target: LOG_TARGET, "Call: {:?} {:?} {:?}", dest, value, data); let mut output = Self::bare_call( origin, dest, @@ -787,7 +833,7 @@ pub mod pallet { /// This function is identical to [`Self::instantiate_with_code`] but without the /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary /// must be supplied. 
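// A minimal sketch (assumed semantics, not the pallet's code) of how the new `AddressSuffix`
// map is meant to be used: an H160 resolves to the original `AccountId32` when a 12-byte
// suffix was registered via `map_account`, and to the 0xEE-padded fallback account otherwise.
fn resolve_account_id(addr: [u8; 20], registered_suffix: Option<[u8; 12]>) -> [u8; 32] {
    let mut account = [0u8; 32];
    account[..20].copy_from_slice(&addr);
    // stored suffix wins; without one the well-known 0xEE filler is used
    account[20..].copy_from_slice(&registered_suffix.unwrap_or([0xEE; 12]));
    account
}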
- #[pallet::call_index(1)] + #[pallet::call_index(2)] #[pallet::weight( T::WeightInfo::instantiate(data.len() as u32).saturating_add(*gas_limit) )] @@ -851,7 +897,7 @@ pub mod pallet { /// - The smart-contract account is created at the computed address. /// - The `value` is transferred to the new account. /// - The `deploy` function is executed in the context of the newly-created account. - #[pallet::call_index(2)] + #[pallet::call_index(3)] #[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, data.len() as u32) .saturating_add(*gas_limit) @@ -902,7 +948,7 @@ pub mod pallet { /// To avoid this situation a constructor could employ access control so that it can /// only be instantiated by permissioned entities. The same is true when uploading /// through [`Self::instantiate_with_code`]. - #[pallet::call_index(3)] + #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::upload_code(code.len() as u32))] pub fn upload_code( origin: OriginFor, @@ -916,7 +962,7 @@ pub mod pallet { /// /// A code can only be removed by its original uploader (its owner) and only if it is /// not used by any contract. - #[pallet::call_index(4)] + #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::remove_code())] pub fn remove_code( origin: OriginFor, @@ -938,7 +984,7 @@ pub mod pallet { /// This does **not** change the address of the contract in question. This means /// that the contract address is no longer derived from its code hash after calling /// this dispatchable. - #[pallet::call_index(5)] + #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::set_code())] pub fn set_code( origin: OriginFor, @@ -963,6 +1009,51 @@ pub mod pallet { Ok(()) }) } + + /// Register the callers account id so that it can be used in contract interactions. + /// + /// This will error if the origin is already mapped or is a eth native `Address20`. It will + /// take a deposit that can be released by calling [`Self::unmap_account`]. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::map_account())] + pub fn map_account(origin: OriginFor) -> DispatchResult { + let origin = ensure_signed(origin)?; + T::AddressMapper::map(&origin) + } + + /// Unregister the callers account id in order to free the deposit. + /// + /// There is no reason to ever call this function other than freeing up the deposit. + /// This is only useful when the account should no longer be used. + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::unmap_account())] + pub fn unmap_account(origin: OriginFor) -> DispatchResult { + let origin = ensure_signed(origin)?; + T::AddressMapper::unmap(&origin) + } + + /// Dispatch an `call` with the origin set to the callers fallback address. + /// + /// Every `AccountId32` can control its corresponding fallback account. The fallback account + /// is the `AccountId20` with the last 12 bytes set to `0xEE`. This is essentially a + /// recovery function in case an `AccountId20` was used without creating a mapping first. 
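// A sketch of the deposit lifecycle behind `map_account`/`unmap_account` documented above:
// mapping stores the 12-byte suffix and places a hold (the new `HoldReason::AddressMapping`)
// for the storage it creates; unmapping removes the entry and releases the hold. All names
// and amounts below are illustrative only.
struct MappingStateSketch {
    suffix: Option<[u8; 12]>,
    held_deposit: u64,
}

fn map(state: &mut MappingStateSketch, suffix: [u8; 12], deposit: u64) -> Result<(), &'static str> {
    if state.suffix.is_some() {
        return Err("AccountAlreadyMapped");
    }
    state.suffix = Some(suffix);
    state.held_deposit = deposit; // held, not burned: freed again by unmap
    Ok(())
}

fn unmap(state: &mut MappingStateSketch) {
    state.suffix = None;
    state.held_deposit = 0; // hold released back to the account
}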
+ #[pallet::call_index(9)] + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + ( + T::WeightInfo::dispatch_as_fallback_account().saturating_add(dispatch_info.call_weight), + dispatch_info.class + ) + })] + pub fn dispatch_as_fallback_account( + origin: OriginFor, + call: Box<::RuntimeCall>, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let unmapped_account = + T::AddressMapper::to_fallback_account_id(&T::AddressMapper::to_address(&origin)); + call.dispatch(RawOrigin::Signed(unmapped_account).into()) + } } } @@ -1002,7 +1093,7 @@ where data: Vec, debug: DebugInfo, collect_events: CollectEvents, - ) -> ContractExecResult, EventRecordOf> { + ) -> ContractResult, EventRecordOf> { let mut gas_meter = GasMeter::new(gas_limit); let mut storage_deposit = Default::default(); let mut debug_message = if matches!(debug, DebugInfo::UnsafeDebug) { @@ -1031,7 +1122,7 @@ where } else { None }; - ContractExecResult { + ContractResult { result: result.map_err(|r| r.error), gas_consumed: gas_meter.gas_consumed(), gas_required: gas_meter.gas_required(), @@ -1057,7 +1148,7 @@ where salt: Option<[u8; 32]>, debug: DebugInfo, collect_events: CollectEvents, - ) -> ContractInstantiateResult, EventRecordOf> { + ) -> ContractResult, EventRecordOf> { let mut gas_meter = GasMeter::new(gas_limit); let mut storage_deposit = Default::default(); let mut debug_message = @@ -1099,7 +1190,7 @@ where } else { None }; - ContractInstantiateResult { + ContractResult { result: output .map(|(addr, result)| InstantiateReturnValue { result, addr }) .map_err(|e| e.error), @@ -1111,6 +1202,185 @@ where } } + /// A version of [`Self::eth_transact`] used to dry-run Ethereum calls. + /// + /// # Parameters + /// + /// - `origin`: The origin of the call. + /// - `dest`: The destination address of the call. + /// - `value`: The value to transfer. + /// - `input`: The input data. + /// - `gas_limit`: The gas limit enforced during contract execution. + /// - `storage_deposit_limit`: The maximum balance that can be charged to the caller for storage + /// usage. + /// - `utx_encoded_size`: A function that takes a call and returns the encoded size of the + /// unchecked extrinsic. + /// - `debug`: Debugging configuration. + /// - `collect_events`: Event collection configuration. + pub fn bare_eth_transact( + origin: T::AccountId, + dest: Option, + value: BalanceOf, + input: Vec, + gas_limit: Weight, + storage_deposit_limit: BalanceOf, + utx_encoded_size: impl Fn(Call) -> u32, + debug: DebugInfo, + collect_events: CollectEvents, + ) -> EthContractResult> + where + T: pallet_transaction_payment::Config, + ::RuntimeCall: + Dispatchable, + ::RuntimeCall: From>, + ::RuntimeCall: Encode, + OnChargeTransactionBalanceOf: Into>, + T::Nonce: Into, + { + // Get the nonce to encode in the tx. + let nonce: T::Nonce = >::account_nonce(&origin); + + // Use a big enough gas price to ensure that the encoded size is large enough. + let max_gas_fee: BalanceOf = + (pallet_transaction_payment::Pallet::::weight_to_fee(Weight::MAX) / + GAS_PRICE.into()) + .into(); + + // A contract call. + if let Some(dest) = dest { + // Dry run the call. + let result = crate::Pallet::::bare_call( + T::RuntimeOrigin::signed(origin), + dest, + value, + gas_limit, + storage_deposit_limit, + input.clone(), + debug, + collect_events, + ); + + // Get the encoded size of the transaction. 
+ let tx = TransactionLegacyUnsigned { + value: value.into(), + input: input.into(), + nonce: nonce.into(), + chain_id: Some(T::ChainId::get().into()), + gas_price: GAS_PRICE.into(), + gas: max_gas_fee.into(), + to: Some(dest), + ..Default::default() + }; + + let eth_dispatch_call = crate::Call::::eth_transact { + payload: tx.dummy_signed_payload(), + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit.charge_or_zero(), + }; + let encoded_len = utx_encoded_size(eth_dispatch_call); + + // Get the dispatch info of the call. + let dispatch_call: ::RuntimeCall = crate::Call::::call { + dest, + value, + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit.charge_or_zero(), + data: tx.input.0, + } + .into(); + let dispatch_info = dispatch_call.get_dispatch_info(); + + // Compute the fee. + let fee = pallet_transaction_payment::Pallet::::compute_fee( + encoded_len, + &dispatch_info, + 0u32.into(), + ) + .into(); + + log::debug!(target: LOG_TARGET, "bare_eth_call: len: {encoded_len:?} fee: {fee:?}"); + EthContractResult { + gas_required: result.gas_required, + storage_deposit: result.storage_deposit.charge_or_zero(), + result: result.result.map(|v| v.data), + fee, + } + // A contract deployment + } else { + // Extract code and data from the input. + let (code, data) = match polkavm::ProgramBlob::blob_length(&input) { + Some(blob_len) => blob_len + .try_into() + .ok() + .and_then(|blob_len| (input.split_at_checked(blob_len))) + .unwrap_or_else(|| (&input[..], &[][..])), + _ => { + log::debug!(target: LOG_TARGET, "Failed to extract polkavm blob length"); + (&input[..], &[][..]) + }, + }; + + // Dry run the call. + let result = crate::Pallet::::bare_instantiate( + T::RuntimeOrigin::signed(origin), + value, + gas_limit, + storage_deposit_limit, + Code::Upload(code.to_vec()), + data.to_vec(), + None, + debug, + collect_events, + ); + + // Get the encoded size of the transaction. + let tx = TransactionLegacyUnsigned { + gas: max_gas_fee.into(), + nonce: nonce.into(), + value: value.into(), + input: input.clone().into(), + gas_price: GAS_PRICE.into(), + chain_id: Some(T::ChainId::get().into()), + ..Default::default() + }; + let eth_dispatch_call = crate::Call::::eth_transact { + payload: tx.dummy_signed_payload(), + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit.charge_or_zero(), + }; + let encoded_len = utx_encoded_size(eth_dispatch_call); + + // Get the dispatch info of the call. + let dispatch_call: ::RuntimeCall = + crate::Call::::instantiate_with_code { + value, + gas_limit: result.gas_required, + storage_deposit_limit: result.storage_deposit.charge_or_zero(), + code: code.to_vec(), + data: data.to_vec(), + salt: None, + } + .into(); + let dispatch_info = dispatch_call.get_dispatch_info(); + + // Compute the fee. + let fee = pallet_transaction_payment::Pallet::::compute_fee( + encoded_len, + &dispatch_info, + 0u32.into(), + ) + .into(); + + log::debug!(target: LOG_TARGET, "Call dry run Result: dispatch_info: {dispatch_info:?} len: {encoded_len:?} fee: {fee:?}"); + EthContractResult { + gas_required: result.gas_required, + storage_deposit: result.storage_deposit.charge_or_zero(), + result: result.result.map(|v| v.result.data), + fee, + } + } + } + /// A generalized version of [`Self::upload_code`]. /// /// It is identical to [`Self::upload_code`] and only differs in the information it returns. @@ -1199,7 +1469,7 @@ sp_api::decl_runtime_apis! 
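// A simplified sketch of the fee this dry run reports: the real value comes from
// `pallet_transaction_payment::Pallet::compute_fee(encoded_len, dispatch_info, 0)`, which
// combines a per-byte length fee with a weight-based fee. The coefficients below mirror the
// test runtime configured later in this patch (`FixedFee<100, _>` for length, `IdentityFee`
// for weight) and are not production values.
fn estimated_fee(encoded_len: u32, call_ref_time: u64) -> u128 {
    let length_fee = 100u128 * encoded_len as u128; // per encoded byte
    let weight_fee = call_ref_time as u128;         // identity weight-to-fee
    length_fee + weight_fee
}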
{ gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, - ) -> ContractExecResult; + ) -> ContractResult; /// Instantiate a new contract. /// @@ -1212,7 +1482,20 @@ sp_api::decl_runtime_apis! { code: Code, data: Vec, salt: Option<[u8; 32]>, - ) -> ContractInstantiateResult; + ) -> ContractResult; + + + /// Perform an Ethereum call. + /// + /// See [`crate::Pallet::bare_eth_transact`] + fn eth_transact( + origin: H160, + dest: Option, + value: Balance, + input: Vec, + gas_limit: Option, + storage_deposit_limit: Option, + ) -> EthContractResult; /// Upload new code without instantiating a contract from it. /// diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs index 67bc144c3dd233924928695afdbe5a5057ce46a9..af0100d59cbeaad57833fafeaf46dd66a0307751 100644 --- a/substrate/frame/revive/src/primitives.rs +++ b/substrate/frame/revive/src/primitives.rs @@ -76,19 +76,24 @@ pub struct ContractResult { /// RPC calls. pub debug_message: Vec, /// The execution result of the wasm code. - pub result: R, + pub result: Result, /// The events that were emitted during execution. It is an option as event collection is /// optional. pub events: Option>, } -/// Result type of a `bare_call` call as well as `ContractsApi::call`. -pub type ContractExecResult = - ContractResult, Balance, EventRecord>; - -/// Result type of a `bare_instantiate` call as well as `ContractsApi::instantiate`. -pub type ContractInstantiateResult = - ContractResult, Balance, EventRecord>; +/// The result of the execution of a `eth_transact` call. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct EthContractResult { + /// The fee charged for the execution. + pub fee: Balance, + /// The amount of gas that was necessary to execute the transaction. + pub gas_required: Weight, + /// Storage deposit charged. + pub storage_deposit: Balance, + /// The execution result. + pub result: Result, DispatchError>, +} /// Result type of a `bare_code_upload` call. 
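// A sketch (illustrative types only) of the consolidation done in primitives.rs above: the
// former `ContractExecResult`/`ContractInstantiateResult` aliases are replaced by a single
// generic `ContractResult` whose `result` field is a `Result`, with the success type chosen
// per entry point.
struct ContractResultSketch<R, Balance> {
    gas_required: u64,
    storage_deposit: Balance,
    result: Result<R, &'static str>, // `DispatchError` in the real struct
}

// call: raw return data; instantiate: (deployed address, return data)
type BareCallResultSketch = ContractResultSketch<Vec<u8>, u64>;
type BareInstantiateResultSketch = ContractResultSketch<([u8; 20], Vec<u8>), u64>;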
pub type CodeUploadResult = Result, DispatchError>; diff --git a/substrate/frame/revive/src/test_utils.rs b/substrate/frame/revive/src/test_utils.rs index 92c21297a3ece617175358ef3f95f6d34ba3bff2..acd9a4cda38a808f55aa37bc858f45ecd0484674 100644 --- a/substrate/frame/revive/src/test_utils.rs +++ b/substrate/frame/revive/src/test_utils.rs @@ -27,38 +27,35 @@ use frame_support::weights::Weight; use sp_core::H160; pub use sp_runtime::AccountId32; -const fn ee_suffix(addr: H160) -> AccountId32 { - let mut id = [0u8; 32]; - let mut i = 0; - while i < 20 { - id[i] = addr.0[i]; +const fn ee_suffix(mut account: [u8; 32]) -> AccountId32 { + let mut i = 20; + while i < 32 { + account[i] = 0xee; i += 1; } - - let mut j = 20; - while j < 32 { - id[j] = 0xee; - j += 1; - } - - AccountId32::new(id) + AccountId32::new(account) } -pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); +pub const ALICE: AccountId32 = ee_suffix([1u8; 32]); pub const ALICE_ADDR: H160 = H160([1u8; 20]); -pub const ETH_ALICE: AccountId32 = ee_suffix(ALICE_ADDR); +pub const ALICE_FALLBACK: AccountId32 = ee_suffix([1u8; 32]); -pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); +pub const BOB: AccountId32 = ee_suffix([2u8; 32]); pub const BOB_ADDR: H160 = H160([2u8; 20]); -pub const BOB_CONTRACT_ID: AccountId32 = ee_suffix(BOB_ADDR); +pub const BOB_FALLBACK: AccountId32 = ee_suffix([2u8; 32]); -pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); +pub const CHARLIE: AccountId32 = ee_suffix([3u8; 32]); pub const CHARLIE_ADDR: H160 = H160([3u8; 20]); -pub const CHARLIE_CONTRACT_ID: AccountId32 = ee_suffix(CHARLIE_ADDR); +pub const CHARLIE_FALLBACK: AccountId32 = ee_suffix([3u8; 32]); -pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); +pub const DJANGO: AccountId32 = ee_suffix([4u8; 32]); pub const DJANGO_ADDR: H160 = H160([4u8; 20]); -pub const ETH_DJANGO: AccountId32 = ee_suffix(DJANGO_ADDR); +pub const DJANGO_FALLBACK: AccountId32 = ee_suffix([4u8; 32]); + +/// Eve is a non ee account and hence needs a stateful mapping. +pub const EVE: AccountId32 = AccountId32::new([5u8; 32]); +pub const EVE_ADDR: H160 = H160([5u8; 20]); +pub const EVE_FALLBACK: AccountId32 = ee_suffix([5u8; 32]); pub const GAS_LIMIT: Weight = Weight::from_parts(100_000_000_000, 3 * 1024 * 1024); diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs index d361590df95ae8d1b3aedaec391c4b5f772e266e..e64f588944326720af153920c86baa1e59961790 100644 --- a/substrate/frame/revive/src/test_utils/builder.rs +++ b/substrate/frame/revive/src/test_utils/builder.rs @@ -17,9 +17,8 @@ use super::{deposit_limit, GAS_LIMIT}; use crate::{ - address::AddressMapper, AccountIdOf, BalanceOf, Code, CollectEvents, Config, - ContractExecResult, ContractInstantiateResult, DebugInfo, EventRecordOf, ExecReturnValue, - InstantiateReturnValue, OriginFor, Pallet, Weight, + address::AddressMapper, AccountIdOf, BalanceOf, Code, CollectEvents, Config, ContractResult, + DebugInfo, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor, Pallet, Weight, }; use frame_support::pallet_prelude::DispatchResultWithPostInfo; use paste::paste; @@ -140,7 +139,7 @@ builder!( salt: Option<[u8; 32]>, debug: DebugInfo, collect_events: CollectEvents, - ) -> ContractInstantiateResult, EventRecordOf>; + ) -> ContractResult, EventRecordOf>; /// Build the instantiate call and unwrap the result. 
pub fn build_and_unwrap_result(self) -> InstantiateReturnValue { @@ -203,7 +202,7 @@ builder!( data: Vec, debug: DebugInfo, collect_events: CollectEvents, - ) -> ContractExecResult, EventRecordOf>; + ) -> ContractResult, EventRecordOf>; /// Build the call and unwrap the result. pub fn build_and_unwrap_result(self) -> ExecReturnValue { diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 4816e65f8f5c3951c91775fbd1726e764ea51ec1..1b5e64739d88f3ed9ff2df21431c7e8207e0eee0 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -39,9 +39,9 @@ use crate::{ tests::test_utils::{get_contract, get_contract_checked}, wasm::Memory, weights::WeightInfo, - BalanceOf, Code, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, DebugInfo, - DefaultAddressMapper, DeletionQueueCounter, Error, HoldReason, Origin, Pallet, PristineCode, - H160, + AccountId32Mapper, BalanceOf, Code, CodeInfoOf, CollectEvents, Config, ContractInfo, + ContractInfoOf, DebugInfo, DeletionQueueCounter, Error, HoldReason, Origin, Pallet, + PristineCode, H160, }; use crate::test_utils::builder::Contract; @@ -58,16 +58,17 @@ use frame_support::{ tokens::Preservation, ConstU32, ConstU64, Contains, OnIdle, OnInitialize, StorageVersion, }, - weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight, WeightMeter}, + weights::{constants::WEIGHT_REF_TIME_PER_SECOND, FixedFee, IdentityFee, Weight, WeightMeter}, }; use frame_system::{EventRecord, Phase}; use pallet_revive_fixtures::{bench::dummy_unique, compile_module}; use pallet_revive_uapi::ReturnErrorCode as RuntimeReturnCode; +use pallet_transaction_payment::{ConstFeeMultiplier, Multiplier}; use sp_io::hashing::blake2_256; use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, Convert, IdentityLookup}, + traits::{BlakeTwo256, Convert, IdentityLookup, One}, AccountId32, BuildStorage, DispatchError, Perbill, TokenError, }; @@ -82,6 +83,7 @@ frame_support::construct_runtime!( Utility: pallet_utility, Contracts: pallet_revive, Proxy: pallet_proxy, + TransactionPayment: pallet_transaction_payment, Dummy: pallet_dummy } ); @@ -112,7 +114,8 @@ pub mod test_utils { pub fn place_contract(address: &AccountIdOf, code_hash: sp_core::H256) { set_balance(address, Contracts::min_balance() * 10); >::insert(code_hash, CodeInfo::new(address.clone())); - let address = ::AddressMapper::to_address(&address); + let address = + <::AddressMapper as AddressMapper>::to_address(&address); let contract = >::new(&address, 0, code_hash).unwrap(); >::insert(address, contract); } @@ -415,6 +418,18 @@ impl pallet_proxy::Config for Test { type AnnouncementDepositFactor = ConstU64<1>; } +parameter_types! { + pub FeeMultiplier: Multiplier = Multiplier::one(); +} + +#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)] +impl pallet_transaction_payment::Config for Test { + type OnChargeTransaction = pallet_transaction_payment::FungibleAdapter; + type WeightToFee = IdentityFee<::Balance>; + type LengthToFee = FixedFee<100, ::Balance>; + type FeeMultiplierUpdate = ConstFeeMultiplier; +} + impl pallet_dummy::Config for Test {} parameter_types! { @@ -494,13 +509,13 @@ parameter_types! 
{ #[derive_impl(crate::config_preludes::TestDefaultConfig)] impl Config for Test { type Time = Timestamp; + type AddressMapper = AccountId32Mapper; type Currency = Balances; type CallFilter = TestFilter; type ChainExtension = (TestExtension, DisabledExtension, RevertingExtension, TempStorageExtension); type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; - type AddressMapper = DefaultAddressMapper; type UnsafeUnstableInterface = UnstableInterface; type UploadOrigin = EnsureAccount; type InstantiateOrigin = EnsureAccount; @@ -509,6 +524,17 @@ impl Config for Test { type ChainId = ChainId; } +impl TryFrom for crate::Call { + type Error = (); + + fn try_from(value: RuntimeCall) -> Result { + match value { + RuntimeCall::Contracts(call) => Ok(call), + _ => Err(()), + } + } +} + pub struct ExtBuilder { existential_deposit: u64, storage_version: Option, @@ -610,9 +636,9 @@ mod run_tests { ExtBuilder::default().build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 100_000_000); assert!(!>::contains_key(BOB_ADDR)); - assert_eq!(test_utils::get_balance(&BOB_CONTRACT_ID), 0); + assert_eq!(test_utils::get_balance(&BOB_FALLBACK), 0); let result = builder::bare_call(BOB_ADDR).value(42).build_and_unwrap_result(); - assert_eq!(test_utils::get_balance(&BOB_CONTRACT_ID), 42); + assert_eq!(test_utils::get_balance(&BOB_FALLBACK), 42); assert_eq!(result, Default::default()); }); } @@ -727,15 +753,16 @@ mod run_tests { )); assert_eq!(System::account_nonce(&ALICE), 0); + System::inc_account_nonce(&ALICE); - for nonce in 0..3 { + for nonce in 1..3 { let Contract { addr, .. } = builder::bare_instantiate(Code::Existing(code_hash)) .salt(None) .build_and_unwrap_contract(); assert!(ContractInfoOf::::contains_key(&addr)); assert_eq!( addr, - create1(&::AddressMapper::to_address(&ALICE), nonce) + create1(&::AddressMapper::to_address(&ALICE), nonce - 1) ); } assert_eq!(System::account_nonce(&ALICE), 3); @@ -747,7 +774,7 @@ mod run_tests { assert!(ContractInfoOf::::contains_key(&addr)); assert_eq!( addr, - create1(&::AddressMapper::to_address(&ALICE), nonce) + create1(&::AddressMapper::to_address(&ALICE), nonce - 1) ); } assert_eq!(System::account_nonce(&ALICE), 6); @@ -1276,7 +1303,7 @@ mod run_tests { let (wasm, code_hash) = compile_module("self_destruct").unwrap(); ExtBuilder::default().existential_deposit(1_000).build().execute_with(|| { let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let _ = ::Currency::set_balance(Ð_DJANGO, 1_000_000); + let _ = ::Currency::set_balance(&DJANGO_FALLBACK, 1_000_000); let min_balance = Contracts::min_balance(); // Instantiate the BOB contract. @@ -1304,7 +1331,7 @@ mod run_tests { // Check that the beneficiary (django) got remaining balance. assert_eq!( - ::Currency::free_balance(ETH_DJANGO), + ::Currency::free_balance(DJANGO_FALLBACK), 1_000_000 + 100_000 + min_balance ); @@ -1356,7 +1383,7 @@ mod run_tests { phase: Phase::Initialization, event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: contract.account_id.clone(), - to: ETH_DJANGO, + to: DJANGO_FALLBACK, amount: 100_000 + min_balance, }), topics: vec![], @@ -1519,7 +1546,7 @@ mod run_tests { // Sending at least the minimum balance should result in success but // no code called. 
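// A sketch of the conversion the test runtime adds above: recovering this pallet's own call
// variant back out of the outer runtime call enum. The enum and variant names here are
// illustrative, not the real generated types.
enum OuterCallSketch {
    Contracts(ContractsCallSketch),
    Other,
}

enum ContractsCallSketch {
    EthTransact { payload: Vec<u8> },
}

impl TryFrom<OuterCallSketch> for ContractsCallSketch {
    type Error = ();
    fn try_from(value: OuterCallSketch) -> Result<Self, Self::Error> {
        match value {
            OuterCallSketch::Contracts(call) => Ok(call),
            _ => Err(()),
        }
    }
}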
- assert_eq!(test_utils::get_balance(Ð_DJANGO), 0); + assert_eq!(test_utils::get_balance(&DJANGO_FALLBACK), 0); let result = builder::bare_call(bob.addr) .data( AsRef::<[u8]>::as_ref(&DJANGO_ADDR) @@ -1530,7 +1557,7 @@ mod run_tests { ) .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::Success); - assert_eq!(test_utils::get_balance(Ð_DJANGO), 55); + assert_eq!(test_utils::get_balance(&DJANGO_FALLBACK), 55); let django = builder::bare_instantiate(Code::Upload(callee_code)) .origin(RuntimeOrigin::signed(CHARLIE)) @@ -3706,7 +3733,7 @@ mod run_tests { // Instantiate the caller contract with the given input. let instantiate = |input: &(u32, H256)| { builder::bare_instantiate(Code::Upload(wasm_caller.clone())) - .origin(RuntimeOrigin::signed(ETH_ALICE)) + .origin(RuntimeOrigin::signed(ALICE_FALLBACK)) .data(input.encode()) .build() }; @@ -3714,13 +3741,13 @@ mod run_tests { // Call contract with the given input. let call = |addr_caller: &H160, input: &(u32, H256)| { builder::bare_call(*addr_caller) - .origin(RuntimeOrigin::signed(ETH_ALICE)) + .origin(RuntimeOrigin::signed(ALICE_FALLBACK)) .data(input.encode()) .build() }; const ED: u64 = 2000; ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { - let _ = Balances::set_balance(Ð_ALICE, 1_000_000); + let _ = Balances::set_balance(&ALICE_FALLBACK, 1_000_000); // Instantiate with lock_delegate_dependency should fail since the code is not yet on // chain. @@ -3734,7 +3761,7 @@ mod run_tests { for code in callee_codes.iter() { let CodeUploadReturnValue { deposit: deposit_per_code, .. } = Contracts::bare_upload_code( - RuntimeOrigin::signed(ETH_ALICE), + RuntimeOrigin::signed(ALICE_FALLBACK), code.clone(), deposit_limit::(), ) @@ -3764,7 +3791,7 @@ mod run_tests { // Removing the code should fail, since we have added a dependency. assert_err!( - Contracts::remove_code(RuntimeOrigin::signed(ETH_ALICE), callee_hashes[0]), + Contracts::remove_code(RuntimeOrigin::signed(ALICE_FALLBACK), callee_hashes[0]), >::CodeInUse ); @@ -3822,14 +3849,17 @@ mod run_tests { ); // Since we unlocked the dependency we should now be able to remove the code. - assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ETH_ALICE), callee_hashes[0])); + assert_ok!(Contracts::remove_code( + RuntimeOrigin::signed(ALICE_FALLBACK), + callee_hashes[0] + )); // Calling should fail since the delegated contract is not on chain anymore. assert_err!(call(&addr_caller, &noop_input).result, Error::::ContractTrapped); // Add the dependency back. Contracts::upload_code( - RuntimeOrigin::signed(ETH_ALICE), + RuntimeOrigin::signed(ALICE_FALLBACK), callee_codes[0].clone(), deposit_limit::(), ) @@ -3837,15 +3867,18 @@ mod run_tests { call(&addr_caller, &lock_delegate_dependency_input).result.unwrap(); // Call terminate should work, and return the deposit. - let balance_before = test_utils::get_balance(Ð_ALICE); + let balance_before = test_utils::get_balance(&ALICE_FALLBACK); assert_ok!(call(&addr_caller, &terminate_input).result); assert_eq!( - test_utils::get_balance(Ð_ALICE), + test_utils::get_balance(&ALICE_FALLBACK), ED + balance_before + contract.storage_base_deposit() + dependency_deposit ); // Terminate should also remove the dependency, so we can remove the code. 
- assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ETH_ALICE), callee_hashes[0])); + assert_ok!(Contracts::remove_code( + RuntimeOrigin::signed(ALICE_FALLBACK), + callee_hashes[0] + )); }); } @@ -4083,13 +4116,13 @@ mod run_tests { let (wasm, _code_hash) = compile_module("balance_of").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { let _ = Balances::set_balance(&ALICE, 1_000_000); - let _ = Balances::set_balance(Ð_ALICE, 1_000_000); + let _ = Balances::set_balance(&ALICE_FALLBACK, 1_000_000); let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm.to_vec())).build_and_unwrap_contract(); // The fixture asserts a non-zero returned free balance of the account; - // The ETH_ALICE account is endowed; + // The ALICE_FALLBACK account is endowed; // Hence we should not revert assert_ok!(builder::call(addr).data(ALICE_ADDR.0.to_vec()).build()); @@ -4442,4 +4475,106 @@ mod run_tests { ); }); } + + #[test] + fn code_hash_works() { + let (code_hash_code, self_code_hash) = compile_module("code_hash").unwrap(); + let (dummy_code, code_hash) = compile_module("dummy").unwrap(); + + ExtBuilder::default().existential_deposit(1).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code_hash_code)).build_and_unwrap_contract(); + let Contract { addr: dummy_addr, .. } = + builder::bare_instantiate(Code::Upload(dummy_code)).build_and_unwrap_contract(); + + // code hash of dummy contract + assert_ok!(builder::call(addr).data((dummy_addr, code_hash).encode()).build()); + // code has of itself + assert_ok!(builder::call(addr).data((addr, self_code_hash).encode()).build()); + + // EOA doesn't exists + assert_err!( + builder::bare_call(addr) + .data((BOB_ADDR, crate::exec::EMPTY_CODE_HASH).encode()) + .build() + .result, + Error::::ContractTrapped + ); + // non-existing will return zero + assert_ok!(builder::call(addr).data((BOB_ADDR, H256::zero()).encode()).build()); + + // create EOA + let _ = ::Currency::set_balance( + &::AddressMapper::to_account_id(&BOB_ADDR), + 1_000_000, + ); + + // EOA returns empty code hash + assert_ok!(builder::call(addr) + .data((BOB_ADDR, crate::exec::EMPTY_CODE_HASH).encode()) + .build()); + }); + } + + #[test] + fn origin_must_be_mapped() { + let (code, hash) = compile_module("dummy").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); + ::Currency::set_balance(&EVE, 1_000_000); + + let eve = RuntimeOrigin::signed(EVE); + + // alice can instantiate as she doesn't need a mapping + let Contract { addr, .. 
} = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + // without a mapping eve can neither call nor instantiate + assert_err!( + builder::bare_call(addr).origin(eve.clone()).build().result, + >::AccountUnmapped + ); + assert_err!( + builder::bare_instantiate(Code::Existing(hash)) + .origin(eve.clone()) + .build() + .result, + >::AccountUnmapped + ); + + // after mapping eve is usable as an origin + >::map_account(eve.clone()).unwrap(); + assert_ok!(builder::bare_call(addr).origin(eve.clone()).build().result); + assert_ok!(builder::bare_instantiate(Code::Existing(hash)).origin(eve).build().result); + }); + } + + #[test] + fn mapped_address_works() { + let (code, _) = compile_module("terminate_and_send_to_eve").unwrap(); + + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + ::Currency::set_balance(&ALICE, 1_000_000); + + // without a mapping everything will be send to the fallback account + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code.clone())).build_and_unwrap_contract(); + assert_eq!(::Currency::total_balance(&EVE_FALLBACK), 0); + builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(::Currency::total_balance(&EVE_FALLBACK), 100); + + // after mapping it will be sent to the real eve account + let Contract { addr, .. } = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + // need some balance to pay for the map deposit + ::Currency::set_balance(&EVE, 1_000); + >::map_account(RuntimeOrigin::signed(EVE)).unwrap(); + builder::bare_call(addr).build_and_unwrap_result(); + assert_eq!(::Currency::total_balance(&EVE_FALLBACK), 100); + assert_eq!(::Currency::total_balance(&EVE), 1_100); + }); + } } diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index e2256d7dcea77a384268c054803ab92f24c41de0..2b8022903849bf4dfa88ba4356d409a0ad28a075 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -200,7 +200,10 @@ where &self.code_info.owner, deposit, ) - .map_err(|_| >::StorageDepositNotEnoughFunds)?; + .map_err(|err| { + log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner); + >::StorageDepositNotEnoughFunds + })?; self.code_info.refcount = 0; >::insert(code_hash, &self.code); diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index 245c91278a7f80343f592a8f7dec152ae12094db..00be26aeaf8b10cf79e6b76487ee1cc27b9df99c 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -44,11 +44,6 @@ type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; -/// Encode a `U256` into a 32 byte buffer. -fn as_bytes(u: U256) -> [u8; 32] { - u.to_little_endian() -} - #[derive(Clone, Copy)] pub enum ApiVersion { /// Expose all APIs even unversioned ones. Only used for testing and benchmarking. 
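// A sketch (assumed semantics) of the gate that `origin_must_be_mapped` exercises above: an
// ee-suffixed AccountId32 (like the reworked test accounts) maps onto its H160 losslessly and
// needs no state, while any other AccountId32 must have registered a suffix via `map_account`
// or the dispatch fails with `AccountUnmapped`.
fn ensure_origin_mapped(account: [u8; 32], has_stored_suffix: bool) -> Result<(), &'static str> {
    let ee_suffixed = account[20..].iter().all(|b| *b == 0xEE);
    if ee_suffixed || has_stored_suffix {
        Ok(())
    } else {
        Err("AccountUnmapped")
    }
}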
@@ -643,7 +638,7 @@ impl<'a, E: Ext, M: ?Sized + Memory> Runtime<'a, E, M> { run: impl FnOnce(&mut Self) -> DispatchResultWithPostInfo, ) -> Result { use frame_support::dispatch::extract_actual_weight; - let charged = self.charge_gas(runtime_cost(dispatch_info.weight))?; + let charged = self.charge_gas(runtime_cost(dispatch_info.call_weight))?; let result = run(self); let actual_weight = extract_actual_weight(&result, &dispatch_info); self.adjust_gas(charged, runtime_cost(actual_weight)); @@ -1406,27 +1401,17 @@ pub mod env { /// Retrieve the code hash for a specified contract address. /// See [`pallet_revive_uapi::HostFn::code_hash`]. #[api_version(0)] - fn code_hash( - &mut self, - memory: &mut M, - addr_ptr: u32, - out_ptr: u32, - ) -> Result { + fn code_hash(&mut self, memory: &mut M, addr_ptr: u32, out_ptr: u32) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::CodeHash)?; let mut address = H160::zero(); memory.read_into_buf(addr_ptr, address.as_bytes_mut())?; - if let Some(value) = self.ext.code_hash(&address) { - self.write_fixed_sandbox_output( - memory, - out_ptr, - &value.as_bytes(), - false, - already_charged, - )?; - Ok(ReturnErrorCode::Success) - } else { - Ok(ReturnErrorCode::KeyNotFound) - } + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + &self.ext.code_hash(&address).as_bytes(), + false, + already_charged, + )?) } /// Retrieve the code hash of the currently executing contract. @@ -1555,7 +1540,7 @@ pub mod env { Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.balance()), + &self.ext.balance().to_little_endian(), false, already_charged, )?) @@ -1576,7 +1561,7 @@ pub mod env { Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.balance_of(&address)), + &self.ext.balance_of(&address).to_little_endian(), false, already_charged, )?) @@ -1589,7 +1574,7 @@ pub mod env { Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(U256::from(::ChainId::get())), + &U256::from(::ChainId::get()).to_little_endian(), false, |_| Some(RuntimeCosts::CopyToContract(32)), )?) @@ -1603,7 +1588,7 @@ pub mod env { Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.value_transferred()), + &self.ext.value_transferred().to_little_endian(), false, already_charged, )?) @@ -1617,7 +1602,7 @@ pub mod env { Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.now()), + &self.ext.now().to_little_endian(), false, already_charged, )?) @@ -1631,7 +1616,7 @@ pub mod env { Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.minimum_balance()), + &self.ext.minimum_balance().to_little_endian(), false, already_charged, )?) @@ -1685,7 +1670,7 @@ pub mod env { Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(self.ext.block_number()), + &self.ext.block_number().to_little_endian(), false, already_charged, )?) @@ -1845,7 +1830,7 @@ pub mod env { let execute_weight = <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); let weight = self.ext.gas_meter().gas_left().max(execute_weight); - let dispatch_info = DispatchInfo { weight, ..Default::default() }; + let dispatch_info = DispatchInfo { call_weight: weight, ..Default::default() }; self.call_dispatchable::( dispatch_info, @@ -2043,7 +2028,7 @@ pub mod env { Ok(self.write_fixed_sandbox_output( memory, out_ptr, - &as_bytes(U256::from(self.ext.last_frame_output().data.len())), + &U256::from(self.ext.last_frame_output().data.len()).to_little_endian(), false, |len| Some(RuntimeCosts::CopyToContract(len)), )?) 
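// A sketch of the fixed-size value encoding the host functions above now produce directly via
// `U256::to_little_endian` (replacing the removed local `as_bytes` helper): 32 little-endian
// bytes written to the guest's output pointer. Shown with u128 to keep the example
// dependency-free.
fn to_le_bytes_32(value: u128) -> [u8; 32] {
    let mut out = [0u8; 32];
    out[..16].copy_from_slice(&value.to_le_bytes()); // low 16 bytes carry the value, rest stay zero
    out
}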
diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs index 9a1b2310b4eb98c3b3e9c7206c5e60414b6ea471..3203a0cba9fb9da6fbd9303fde5107fbc25e4f5d 100644 --- a/substrate/frame/revive/src/weights.rs +++ b/substrate/frame/revive/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_revive` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-10-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-10-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-jniz7bxx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-dr4vwrkf-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -58,6 +58,9 @@ pub trait WeightInfo { fn upload_code(c: u32, ) -> Weight; fn remove_code() -> Weight; fn set_code() -> Weight; + fn map_account() -> Weight; + fn unmap_account() -> Weight; + fn dispatch_as_fallback_account() -> Weight; fn noop_host_fn(r: u32, ) -> Weight; fn seal_caller() -> Weight; fn seal_is_contract() -> Weight; @@ -126,8 +129,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1594` - // Minimum execution time: 2_712_000 picoseconds. - Weight::from_parts(2_882_000, 1594) + // Minimum execution time: 3_053_000 picoseconds. + Weight::from_parts(3_150_000, 1594) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -135,18 +138,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `392 + k * (69 ±0)` - // Estimated: `382 + k * (70 ±0)` - // Minimum execution time: 13_394_000 picoseconds. - Weight::from_parts(13_668_000, 382) - // Standard Error: 2_208 - .saturating_add(Weight::from_parts(1_340_842, 0).saturating_mul(k.into())) + // Measured: `425 + k * (69 ±0)` + // Estimated: `415 + k * (70 ±0)` + // Minimum execution time: 15_219_000 picoseconds. + Weight::from_parts(12_576_960, 415) + // Standard Error: 1_429 + .saturating_add(Weight::from_parts(1_341_896, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } + /// Storage: `Revive::AddressSuffix` (r:2 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:1) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) @@ -158,21 +163,21 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. - fn call_with_code_per_byte(c: u32, ) -> Weight { + fn call_with_code_per_byte(_c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1466` - // Estimated: `4931` - // Minimum execution time: 80_390_000 picoseconds. 
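// A sketch of how the regenerated formulas in this weights file are assembled: a constant base
// weight plus a per-item component scaled by the benchmark range parameter, tracked separately
// for ref-time and proof-size. The numbers used with it are placeholders, not benchmarked values.
#[derive(Clone, Copy)]
struct WeightSketch {
    ref_time: u64,
    proof_size: u64,
}

fn linear_weight(base: WeightSketch, per_item: WeightSketch, items: u64) -> WeightSketch {
    WeightSketch {
        ref_time: base.ref_time + per_item.ref_time * items,
        proof_size: base.proof_size + per_item.proof_size * items,
    }
}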
- Weight::from_parts(83_627_295, 4931) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `1519` + // Estimated: `7459` + // Minimum execution time: 88_906_000 picoseconds. + Weight::from_parts(93_353_224, 7459) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:1) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) @@ -185,19 +190,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. fn instantiate_with_code(_c: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `303` - // Estimated: `6232` - // Minimum execution time: 184_708_000 picoseconds. - Weight::from_parts(177_995_416, 6232) - // Standard Error: 11 - .saturating_add(Weight::from_parts(4_609, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + // Measured: `416` + // Estimated: `6360` + // Minimum execution time: 202_688_000 picoseconds. + Weight::from_parts(197_366_807, 6360) + // Standard Error: 13 + .saturating_add(Weight::from_parts(4_261, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:1 w:0) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:1) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) @@ -205,19 +212,21 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// The range of component `i` is `[0, 262144]`. fn instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1261` - // Estimated: `4706` - // Minimum execution time: 150_137_000 picoseconds. - Weight::from_parts(136_548_469, 4706) + // Measured: `1313` + // Estimated: `4779` + // Minimum execution time: 169_246_000 picoseconds. 
+ Weight::from_parts(149_480_457, 4779) // Standard Error: 16 - .saturating_add(Weight::from_parts(4_531, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(Weight::from_parts(4_041, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } + /// Storage: `Revive::AddressSuffix` (r:2 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:1) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) @@ -230,17 +239,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `1466` - // Estimated: `4931` - // Minimum execution time: 83_178_000 picoseconds. - Weight::from_parts(84_633_000, 4931) - .saturating_add(T::DbWeight::get().reads(5_u64)) + // Measured: `1519` + // Estimated: `7459` + // Minimum execution time: 91_129_000 picoseconds. + Weight::from_parts(94_220_000, 7459) + .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:0 w:1) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. @@ -248,23 +257,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 51_526_000 picoseconds. - Weight::from_parts(54_565_973, 3574) + // Minimum execution time: 54_849_000 picoseconds. + Weight::from_parts(57_508_591, 3574) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:0 w:1) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `285` // Estimated: `3750` - // Minimum execution time: 41_885_000 picoseconds. - Weight::from_parts(42_467_000, 3750) + // Minimum execution time: 45_017_000 picoseconds. 
+ Weight::from_parts(46_312_000, 3750) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -274,113 +283,153 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `492` - // Estimated: `6432` - // Minimum execution time: 24_905_000 picoseconds. - Weight::from_parts(25_483_000, 6432) + // Measured: `529` + // Estimated: `6469` + // Minimum execution time: 26_992_000 picoseconds. + Weight::from_parts(28_781_000, 6469) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } + /// Storage: `Revive::AddressSuffix` (r:1 w:1) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + fn map_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 44_031_000 picoseconds. + Weight::from_parts(45_133_000, 3574) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:0 w:1) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + fn unmap_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `56` + // Estimated: `3521` + // Minimum execution time: 35_681_000 picoseconds. + Weight::from_parts(36_331_000, 3521) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `Measured`) + fn dispatch_as_fallback_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 11_550_000 picoseconds. + Weight::from_parts(12_114_000, 3610) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } /// The range of component `r` is `[0, 1600]`. fn noop_host_fn(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_979_000 picoseconds. - Weight::from_parts(8_272_348, 0) - // Standard Error: 137 - .saturating_add(Weight::from_parts(172_489, 0).saturating_mul(r.into())) + // Minimum execution time: 7_063_000 picoseconds. + Weight::from_parts(7_671_454, 0) + // Standard Error: 105 + .saturating_add(Weight::from_parts(175_349, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 251_000 picoseconds. - Weight::from_parts(318_000, 0) + // Minimum execution time: 266_000 picoseconds. 
+ Weight::from_parts(313_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_is_contract() -> Weight { // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `3737` - // Minimum execution time: 6_966_000 picoseconds. - Weight::from_parts(7_240_000, 3737) + // Measured: `306` + // Estimated: `3771` + // Minimum execution time: 7_397_000 picoseconds. + Weight::from_parts(7_967_000, 3771) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `369` - // Estimated: `3834` - // Minimum execution time: 7_589_000 picoseconds. - Weight::from_parts(7_958_000, 3834) + // Measured: `403` + // Estimated: `3868` + // Minimum execution time: 8_395_000 picoseconds. + Weight::from_parts(8_863_000, 3868) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 235_000 picoseconds. - Weight::from_parts(285_000, 0) + // Minimum execution time: 265_000 picoseconds. + Weight::from_parts(292_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 283_000 picoseconds. - Weight::from_parts(326_000, 0) + // Minimum execution time: 298_000 picoseconds. + Weight::from_parts(334_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 266_000 picoseconds. - Weight::from_parts(298_000, 0) + // Minimum execution time: 262_000 picoseconds. + Weight::from_parts(274_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 240_000 picoseconds. - Weight::from_parts(290_000, 0) + // Minimum execution time: 277_000 picoseconds. + Weight::from_parts(297_000, 0) } fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 651_000 picoseconds. - Weight::from_parts(714_000, 0) + // Minimum execution time: 620_000 picoseconds. + Weight::from_parts(706_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `103` + // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_476_000 picoseconds. - Weight::from_parts(4_671_000, 0) + // Minimum execution time: 5_475_000 picoseconds. + Weight::from_parts(5_706_000, 0) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn seal_balance_of() -> Weight { // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3517` - // Minimum execution time: 3_800_000 picoseconds. - Weight::from_parts(3_968_000, 3517) - .saturating_add(T::DbWeight::get().reads(1_u64)) + // Measured: `264` + // Estimated: `3729` + // Minimum execution time: 9_141_000 picoseconds. 
+ Weight::from_parts(9_674_000, 3729) + .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) /// Proof: `Revive::ImmutableDataOf` (`max_values`: None, `max_size`: Some(4118), added: 6593, mode: `Measured`) /// The range of component `n` is `[1, 4096]`. fn seal_get_immutable_data(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `205 + n * (1 ±0)` - // Estimated: `3670 + n * (1 ±0)` - // Minimum execution time: 5_845_000 picoseconds. - Weight::from_parts(6_473_478, 3670) - // Standard Error: 4 - .saturating_add(Weight::from_parts(651, 0).saturating_mul(n.into())) + // Measured: `238 + n * (1 ±0)` + // Estimated: `3703 + n * (1 ±0)` + // Minimum execution time: 6_443_000 picoseconds. + Weight::from_parts(7_252_595, 3703) + // Standard Error: 12 + .saturating_add(Weight::from_parts(915, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -391,39 +440,39 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_980_000 picoseconds. - Weight::from_parts(2_324_567, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(512, 0).saturating_mul(n.into())) + // Minimum execution time: 2_745_000 picoseconds. + Weight::from_parts(3_121_250, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(627, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 259_000 picoseconds. - Weight::from_parts(285_000, 0) + // Minimum execution time: 255_000 picoseconds. + Weight::from_parts(274_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 244_000 picoseconds. - Weight::from_parts(291_000, 0) + // Minimum execution time: 235_000 picoseconds. + Weight::from_parts(261_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 252_000 picoseconds. - Weight::from_parts(291_000, 0) + // Minimum execution time: 249_000 picoseconds. + Weight::from_parts(263_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 245_000 picoseconds. - Weight::from_parts(277_000, 0) + // Minimum execution time: 287_000 picoseconds. + Weight::from_parts(300_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -431,8 +480,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 5_650_000 picoseconds. - Weight::from_parts(5_783_000, 1552) + // Minimum execution time: 6_147_000 picoseconds. + Weight::from_parts(6_562_000, 1552) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 262140]`. @@ -440,21 +489,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 427_000 picoseconds. - Weight::from_parts(351_577, 0) + // Minimum execution time: 453_000 picoseconds. 
+ Weight::from_parts(548_774, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(114, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(147, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262140]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 265_000 picoseconds. - Weight::from_parts(746_316, 0) + // Minimum execution time: 264_000 picoseconds. + Weight::from_parts(490_374, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(202, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(236, 0).saturating_mul(n.into())) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::DeletionQueueCounter` (r:1 w:1) /// Proof: `Revive::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:33 w:33) @@ -466,13 +517,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 32]`. fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `272 + n * (88 ±0)` - // Estimated: `3737 + n * (2563 ±0)` - // Minimum execution time: 15_988_000 picoseconds. - Weight::from_parts(18_796_705, 3737) - // Standard Error: 10_437 - .saturating_add(Weight::from_parts(4_338_085, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `323 + n * (88 ±0)` + // Estimated: `3788 + n * (2563 ±0)` + // Minimum execution time: 22_833_000 picoseconds. + Weight::from_parts(24_805_620, 3788) + // Standard Error: 9_498 + .saturating_add(Weight::from_parts(4_486_714, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -484,22 +535,22 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_237_000 picoseconds. - Weight::from_parts(4_128_112, 0) - // Standard Error: 2_947 - .saturating_add(Weight::from_parts(198_825, 0).saturating_mul(t.into())) - // Standard Error: 26 - .saturating_add(Weight::from_parts(1_007, 0).saturating_mul(n.into())) + // Minimum execution time: 4_969_000 picoseconds. + Weight::from_parts(4_994_916, 0) + // Standard Error: 3_727 + .saturating_add(Weight::from_parts(188_374, 0).saturating_mul(t.into())) + // Standard Error: 33 + .saturating_add(Weight::from_parts(925, 0).saturating_mul(n.into())) } /// The range of component `i` is `[0, 262144]`. fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 292_000 picoseconds. - Weight::from_parts(1_297_376, 0) + // Minimum execution time: 328_000 picoseconds. 
+ Weight::from_parts(928_905, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(724, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(753, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -507,8 +558,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `744` // Estimated: `744` - // Minimum execution time: 7_812_000 picoseconds. - Weight::from_parts(8_171_000, 744) + // Minimum execution time: 8_612_000 picoseconds. + Weight::from_parts(9_326_000, 744) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -517,8 +568,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `10754` // Estimated: `10754` - // Minimum execution time: 44_179_000 picoseconds. - Weight::from_parts(45_068_000, 10754) + // Minimum execution time: 44_542_000 picoseconds. + Weight::from_parts(45_397_000, 10754) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -527,8 +578,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `744` // Estimated: `744` - // Minimum execution time: 8_964_000 picoseconds. - Weight::from_parts(9_336_000, 744) + // Minimum execution time: 10_343_000 picoseconds. + Weight::from_parts(10_883_000, 744) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -538,8 +589,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `10754` // Estimated: `10754` - // Minimum execution time: 45_606_000 picoseconds. - Weight::from_parts(47_190_000, 10754) + // Minimum execution time: 46_835_000 picoseconds. + Weight::from_parts(47_446_000, 10754) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -551,12 +602,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + o * (1 ±0)` // Estimated: `247 + o * (1 ±0)` - // Minimum execution time: 9_077_000 picoseconds. - Weight::from_parts(9_823_489, 247) - // Standard Error: 54 - .saturating_add(Weight::from_parts(392, 0).saturating_mul(n.into())) - // Standard Error: 54 - .saturating_add(Weight::from_parts(408, 0).saturating_mul(o.into())) + // Minimum execution time: 10_604_000 picoseconds. + Weight::from_parts(11_282_849, 247) + // Standard Error: 48 + .saturating_add(Weight::from_parts(496, 0).saturating_mul(n.into())) + // Standard Error: 48 + .saturating_add(Weight::from_parts(764, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -568,10 +619,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_812_000 picoseconds. - Weight::from_parts(9_626_925, 247) - // Standard Error: 77 - .saturating_add(Weight::from_parts(269, 0).saturating_mul(n.into())) + // Minimum execution time: 10_081_000 picoseconds. 
+ Weight::from_parts(11_186_557, 247) + // Standard Error: 68 + .saturating_add(Weight::from_parts(782, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -583,10 +634,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_143_000 picoseconds. - Weight::from_parts(9_229_363, 247) - // Standard Error: 77 - .saturating_add(Weight::from_parts(1_198, 0).saturating_mul(n.into())) + // Minimum execution time: 8_758_000 picoseconds. + Weight::from_parts(9_939_492, 247) + // Standard Error: 69 + .saturating_add(Weight::from_parts(1_703, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -597,10 +648,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 7_899_000 picoseconds. - Weight::from_parts(8_591_860, 247) - // Standard Error: 56 - .saturating_add(Weight::from_parts(461, 0).saturating_mul(n.into())) + // Minimum execution time: 8_525_000 picoseconds. + Weight::from_parts(9_522_265, 247) + // Standard Error: 66 + .saturating_add(Weight::from_parts(426, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -611,10 +662,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 9_215_000 picoseconds. - Weight::from_parts(10_198_528, 247) - // Standard Error: 75 - .saturating_add(Weight::from_parts(1_521, 0).saturating_mul(n.into())) + // Minimum execution time: 10_603_000 picoseconds. + Weight::from_parts(11_817_752, 247) + // Standard Error: 82 + .saturating_add(Weight::from_parts(1_279, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -623,36 +674,36 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_406_000 picoseconds. - Weight::from_parts(1_515_000, 0) + // Minimum execution time: 1_553_000 picoseconds. + Weight::from_parts(1_615_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_782_000 picoseconds. - Weight::from_parts(1_890_000, 0) + // Minimum execution time: 1_932_000 picoseconds. + Weight::from_parts(2_064_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_380_000 picoseconds. - Weight::from_parts(1_422_000, 0) + // Minimum execution time: 1_510_000 picoseconds. + Weight::from_parts(1_545_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_504_000 picoseconds. - Weight::from_parts(1_583_000, 0) + // Minimum execution time: 1_663_000 picoseconds. 
+ Weight::from_parts(1_801_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_045_000 picoseconds. - Weight::from_parts(1_138_000, 0) + // Minimum execution time: 1_026_000 picoseconds. + Weight::from_parts(1_137_000, 0) } /// The range of component `n` is `[0, 512]`. /// The range of component `o` is `[0, 512]`. @@ -660,60 +711,63 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_039_000 picoseconds. - Weight::from_parts(2_317_406, 0) - // Standard Error: 13 - .saturating_add(Weight::from_parts(283, 0).saturating_mul(n.into())) - // Standard Error: 13 - .saturating_add(Weight::from_parts(347, 0).saturating_mul(o.into())) + // Minimum execution time: 2_446_000 picoseconds. + Weight::from_parts(2_644_525, 0) + // Standard Error: 17 + .saturating_add(Weight::from_parts(113, 0).saturating_mul(n.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(179, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 512]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_880_000 picoseconds. - Weight::from_parts(2_251_392, 0) - // Standard Error: 17 - .saturating_add(Weight::from_parts(313, 0).saturating_mul(n.into())) + // Minimum execution time: 2_085_000 picoseconds. + Weight::from_parts(2_379_853, 0) + // Standard Error: 19 + .saturating_add(Weight::from_parts(366, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_763_000 picoseconds. - Weight::from_parts(1_951_912, 0) - // Standard Error: 13 - .saturating_add(Weight::from_parts(268, 0).saturating_mul(n.into())) + // Minimum execution time: 1_876_000 picoseconds. + Weight::from_parts(2_073_689, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(376, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_536_000 picoseconds. - Weight::from_parts(1_779_085, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(148, 0).saturating_mul(n.into())) + // Minimum execution time: 1_688_000 picoseconds. + Weight::from_parts(1_914_470, 0) + // Standard Error: 15 + .saturating_add(Weight::from_parts(125, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. - fn seal_take_transient_storage(n: u32, ) -> Weight { + fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_343_000 picoseconds. - Weight::from_parts(2_587_750, 0) - // Standard Error: 22 - .saturating_add(Weight::from_parts(30, 0).saturating_mul(n.into())) + // Minimum execution time: 2_479_000 picoseconds. + Weight::from_parts(2_758_250, 0) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) fn seal_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `0` - // Minimum execution time: 9_250_000 picoseconds. 
- Weight::from_parts(9_637_000, 0) + // Measured: `352` + // Estimated: `3817` + // Minimum execution time: 15_745_000 picoseconds. + Weight::from_parts(16_300_000, 3817) + .saturating_add(T::DbWeight::get().reads(1_u64)) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) @@ -724,17 +778,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1221 + t * (103 ±0)` - // Estimated: `4686 + t * (103 ±0)` - // Minimum execution time: 33_333_000 picoseconds. - Weight::from_parts(34_378_774, 4686) - // Standard Error: 41_131 - .saturating_add(Weight::from_parts(1_756_626, 0).saturating_mul(t.into())) + // Measured: `1309 + t * (140 ±0)` + // Estimated: `4774 + t * (140 ±0)` + // Minimum execution time: 39_639_000 picoseconds. + Weight::from_parts(40_909_376, 4774) + // Standard Error: 54_479 + .saturating_add(Weight::from_parts(1_526_185, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(Weight::from_parts(4, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 103).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 140).saturating_mul(t.into())) } /// Storage: `Revive::CodeInfoOf` (r:1 w:0) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -742,10 +796,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `1048` - // Estimated: `4513` - // Minimum execution time: 27_096_000 picoseconds. - Weight::from_parts(27_934_000, 4513) + // Measured: `1081` + // Estimated: `4546` + // Minimum execution time: 29_651_000 picoseconds. + Weight::from_parts(31_228_000, 4546) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) @@ -759,12 +813,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. fn seal_instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1257` - // Estimated: `4715` - // Minimum execution time: 118_412_000 picoseconds. - Weight::from_parts(106_130_041, 4715) - // Standard Error: 12 - .saturating_add(Weight::from_parts(4_235, 0).saturating_mul(i.into())) + // Measured: `1327` + // Estimated: `4792` + // Minimum execution time: 126_995_000 picoseconds. + Weight::from_parts(114_028_446, 4792) + // Standard Error: 11 + .saturating_add(Weight::from_parts(3_781, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -773,73 +827,73 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 614_000 picoseconds. 
- Weight::from_parts(4_320_897, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_371, 0).saturating_mul(n.into())) + // Minimum execution time: 653_000 picoseconds. + Weight::from_parts(973_524, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_048, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_062_000 picoseconds. - Weight::from_parts(4_571_371, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(3_572, 0).saturating_mul(n.into())) + // Minimum execution time: 1_118_000 picoseconds. + Weight::from_parts(795_498, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_260, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 609_000 picoseconds. - Weight::from_parts(4_008_056, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_497, 0).saturating_mul(n.into())) + // Minimum execution time: 647_000 picoseconds. + Weight::from_parts(667_024, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_183, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 608_000 picoseconds. - Weight::from_parts(3_839_383, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_504, 0).saturating_mul(n.into())) + // Minimum execution time: 605_000 picoseconds. + Weight::from_parts(675_568, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_181, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_110_000 picoseconds. - Weight::from_parts(31_941_593, 0) - // Standard Error: 12 - .saturating_add(Weight::from_parts(5_233, 0).saturating_mul(n.into())) + // Minimum execution time: 42_743_000 picoseconds. + Weight::from_parts(26_131_984, 0) + // Standard Error: 15 + .saturating_add(Weight::from_parts(4_867, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_798_000 picoseconds. - Weight::from_parts(49_225_000, 0) + // Minimum execution time: 50_838_000 picoseconds. + Weight::from_parts(52_248_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_576_000 picoseconds. - Weight::from_parts(12_731_000, 0) + // Minimum execution time: 12_605_000 picoseconds. + Weight::from_parts(12_796_000, 0) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn seal_set_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `266` - // Estimated: `3731` - // Minimum execution time: 14_306_000 picoseconds. - Weight::from_parts(15_011_000, 3731) + // Measured: `300` + // Estimated: `3765` + // Minimum execution time: 16_377_000 picoseconds. 
+ Weight::from_parts(16_932_000, 3765) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -847,10 +901,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn lock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `303` - // Estimated: `3768` - // Minimum execution time: 10_208_000 picoseconds. - Weight::from_parts(10_514_000, 3768) + // Measured: `338` + // Estimated: `3803` + // Minimum execution time: 11_499_000 picoseconds. + Weight::from_parts(12_104_000, 3803) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -858,10 +912,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `MaxEncodedLen`) fn unlock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `303` + // Measured: `338` // Estimated: `3561` - // Minimum execution time: 9_062_000 picoseconds. - Weight::from_parts(9_414_000, 3561) + // Minimum execution time: 10_308_000 picoseconds. + Weight::from_parts(11_000_000, 3561) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -870,10 +924,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_074_000 picoseconds. - Weight::from_parts(9_646_158, 0) - // Standard Error: 58 - .saturating_add(Weight::from_parts(84_694, 0).saturating_mul(r.into())) + // Minimum execution time: 8_162_000 picoseconds. + Weight::from_parts(9_180_011, 0) + // Standard Error: 63 + .saturating_add(Weight::from_parts(84_822, 0).saturating_mul(r.into())) } } @@ -885,8 +939,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1594` - // Minimum execution time: 2_712_000 picoseconds. - Weight::from_parts(2_882_000, 1594) + // Minimum execution time: 3_053_000 picoseconds. + Weight::from_parts(3_150_000, 1594) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -894,18 +948,20 @@ impl WeightInfo for () { /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `392 + k * (69 ±0)` - // Estimated: `382 + k * (70 ±0)` - // Minimum execution time: 13_394_000 picoseconds. - Weight::from_parts(13_668_000, 382) - // Standard Error: 2_208 - .saturating_add(Weight::from_parts(1_340_842, 0).saturating_mul(k.into())) + // Measured: `425 + k * (69 ±0)` + // Estimated: `415 + k * (70 ±0)` + // Minimum execution time: 15_219_000 picoseconds. 
+ Weight::from_parts(12_576_960, 415) + // Standard Error: 1_429 + .saturating_add(Weight::from_parts(1_341_896, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) .saturating_add(Weight::from_parts(0, 70).saturating_mul(k.into())) } + /// Storage: `Revive::AddressSuffix` (r:2 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:1) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) @@ -917,21 +973,21 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. - fn call_with_code_per_byte(c: u32, ) -> Weight { + fn call_with_code_per_byte(_c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1466` - // Estimated: `4931` - // Minimum execution time: 80_390_000 picoseconds. - Weight::from_parts(83_627_295, 4931) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1, 0).saturating_mul(c.into())) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `1519` + // Estimated: `7459` + // Minimum execution time: 88_906_000 picoseconds. + Weight::from_parts(93_353_224, 7459) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:1) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) @@ -944,19 +1000,21 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. fn instantiate_with_code(_c: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `303` - // Estimated: `6232` - // Minimum execution time: 184_708_000 picoseconds. - Weight::from_parts(177_995_416, 6232) - // Standard Error: 11 - .saturating_add(Weight::from_parts(4_609, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + // Measured: `416` + // Estimated: `6360` + // Minimum execution time: 202_688_000 picoseconds. 
+ Weight::from_parts(197_366_807, 6360) + // Standard Error: 13 + .saturating_add(Weight::from_parts(4_261, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:1 w:0) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:1) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Timestamp::Now` (r:1 w:0) @@ -964,19 +1022,21 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// The range of component `i` is `[0, 262144]`. fn instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1261` - // Estimated: `4706` - // Minimum execution time: 150_137_000 picoseconds. - Weight::from_parts(136_548_469, 4706) + // Measured: `1313` + // Estimated: `4779` + // Minimum execution time: 169_246_000 picoseconds. + Weight::from_parts(149_480_457, 4779) // Standard Error: 16 - .saturating_add(Weight::from_parts(4_531, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(Weight::from_parts(4_041, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } + /// Storage: `Revive::AddressSuffix` (r:2 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:1) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) @@ -989,17 +1049,17 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `1466` - // Estimated: `4931` - // Minimum execution time: 83_178_000 picoseconds. - Weight::from_parts(84_633_000, 4931) - .saturating_add(RocksDbWeight::get().reads(5_u64)) + // Measured: `1519` + // Estimated: `7459` + // Minimum execution time: 91_129_000 picoseconds. 
+ Weight::from_parts(94_220_000, 7459) + .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:0 w:1) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. @@ -1007,23 +1067,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `3574` - // Minimum execution time: 51_526_000 picoseconds. - Weight::from_parts(54_565_973, 3574) + // Minimum execution time: 54_849_000 picoseconds. + Weight::from_parts(57_508_591, 3574) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(337), added: 2812, mode: `Measured`) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) /// Storage: `Revive::PristineCode` (r:0 w:1) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: // Measured: `285` // Estimated: `3750` - // Minimum execution time: 41_885_000 picoseconds. - Weight::from_parts(42_467_000, 3750) + // Minimum execution time: 45_017_000 picoseconds. + Weight::from_parts(46_312_000, 3750) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1033,113 +1093,153 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `492` - // Estimated: `6432` - // Minimum execution time: 24_905_000 picoseconds. - Weight::from_parts(25_483_000, 6432) + // Measured: `529` + // Estimated: `6469` + // Minimum execution time: 26_992_000 picoseconds. + Weight::from_parts(28_781_000, 6469) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } + /// Storage: `Revive::AddressSuffix` (r:1 w:1) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + fn map_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 44_031_000 picoseconds. 
+ Weight::from_parts(45_133_000, 3574) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `Measured`) + /// Storage: `Revive::AddressSuffix` (r:0 w:1) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) + fn unmap_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `56` + // Estimated: `3521` + // Minimum execution time: 35_681_000 picoseconds. + Weight::from_parts(36_331_000, 3521) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `Measured`) + fn dispatch_as_fallback_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 11_550_000 picoseconds. + Weight::from_parts(12_114_000, 3610) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } /// The range of component `r` is `[0, 1600]`. fn noop_host_fn(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_979_000 picoseconds. - Weight::from_parts(8_272_348, 0) - // Standard Error: 137 - .saturating_add(Weight::from_parts(172_489, 0).saturating_mul(r.into())) + // Minimum execution time: 7_063_000 picoseconds. + Weight::from_parts(7_671_454, 0) + // Standard Error: 105 + .saturating_add(Weight::from_parts(175_349, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 251_000 picoseconds. - Weight::from_parts(318_000, 0) + // Minimum execution time: 266_000 picoseconds. + Weight::from_parts(313_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_is_contract() -> Weight { // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `3737` - // Minimum execution time: 6_966_000 picoseconds. - Weight::from_parts(7_240_000, 3737) + // Measured: `306` + // Estimated: `3771` + // Minimum execution time: 7_397_000 picoseconds. + Weight::from_parts(7_967_000, 3771) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `369` - // Estimated: `3834` - // Minimum execution time: 7_589_000 picoseconds. - Weight::from_parts(7_958_000, 3834) + // Measured: `403` + // Estimated: `3868` + // Minimum execution time: 8_395_000 picoseconds. + Weight::from_parts(8_863_000, 3868) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 235_000 picoseconds. - Weight::from_parts(285_000, 0) + // Minimum execution time: 265_000 picoseconds. 
+ Weight::from_parts(292_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 283_000 picoseconds. - Weight::from_parts(326_000, 0) + // Minimum execution time: 298_000 picoseconds. + Weight::from_parts(334_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 266_000 picoseconds. - Weight::from_parts(298_000, 0) + // Minimum execution time: 262_000 picoseconds. + Weight::from_parts(274_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 240_000 picoseconds. - Weight::from_parts(290_000, 0) + // Minimum execution time: 277_000 picoseconds. + Weight::from_parts(297_000, 0) } fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 651_000 picoseconds. - Weight::from_parts(714_000, 0) + // Minimum execution time: 620_000 picoseconds. + Weight::from_parts(706_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `103` + // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_476_000 picoseconds. - Weight::from_parts(4_671_000, 0) + // Minimum execution time: 5_475_000 picoseconds. + Weight::from_parts(5_706_000, 0) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `System::Account` (r:1 w:0) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn seal_balance_of() -> Weight { // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3517` - // Minimum execution time: 3_800_000 picoseconds. - Weight::from_parts(3_968_000, 3517) - .saturating_add(RocksDbWeight::get().reads(1_u64)) + // Measured: `264` + // Estimated: `3729` + // Minimum execution time: 9_141_000 picoseconds. + Weight::from_parts(9_674_000, 3729) + .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) /// Proof: `Revive::ImmutableDataOf` (`max_values`: None, `max_size`: Some(4118), added: 6593, mode: `Measured`) /// The range of component `n` is `[1, 4096]`. fn seal_get_immutable_data(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `205 + n * (1 ±0)` - // Estimated: `3670 + n * (1 ±0)` - // Minimum execution time: 5_845_000 picoseconds. - Weight::from_parts(6_473_478, 3670) - // Standard Error: 4 - .saturating_add(Weight::from_parts(651, 0).saturating_mul(n.into())) + // Measured: `238 + n * (1 ±0)` + // Estimated: `3703 + n * (1 ±0)` + // Minimum execution time: 6_443_000 picoseconds. + Weight::from_parts(7_252_595, 3703) + // Standard Error: 12 + .saturating_add(Weight::from_parts(915, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1150,39 +1250,39 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_980_000 picoseconds. - Weight::from_parts(2_324_567, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(512, 0).saturating_mul(n.into())) + // Minimum execution time: 2_745_000 picoseconds. 
+ Weight::from_parts(3_121_250, 0) + // Standard Error: 4 + .saturating_add(Weight::from_parts(627, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 259_000 picoseconds. - Weight::from_parts(285_000, 0) + // Minimum execution time: 255_000 picoseconds. + Weight::from_parts(274_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 244_000 picoseconds. - Weight::from_parts(291_000, 0) + // Minimum execution time: 235_000 picoseconds. + Weight::from_parts(261_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 252_000 picoseconds. - Weight::from_parts(291_000, 0) + // Minimum execution time: 249_000 picoseconds. + Weight::from_parts(263_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 245_000 picoseconds. - Weight::from_parts(277_000, 0) + // Minimum execution time: 287_000 picoseconds. + Weight::from_parts(300_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -1190,8 +1290,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 5_650_000 picoseconds. - Weight::from_parts(5_783_000, 1552) + // Minimum execution time: 6_147_000 picoseconds. + Weight::from_parts(6_562_000, 1552) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 262140]`. @@ -1199,21 +1299,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 427_000 picoseconds. - Weight::from_parts(351_577, 0) + // Minimum execution time: 453_000 picoseconds. + Weight::from_parts(548_774, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(114, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(147, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262140]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 265_000 picoseconds. - Weight::from_parts(746_316, 0) + // Minimum execution time: 264_000 picoseconds. + Weight::from_parts(490_374, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(202, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(236, 0).saturating_mul(n.into())) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::DeletionQueueCounter` (r:1 w:1) /// Proof: `Revive::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:33 w:33) @@ -1225,13 +1327,13 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 32]`. fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `272 + n * (88 ±0)` - // Estimated: `3737 + n * (2563 ±0)` - // Minimum execution time: 15_988_000 picoseconds. 
- Weight::from_parts(18_796_705, 3737) - // Standard Error: 10_437 - .saturating_add(Weight::from_parts(4_338_085, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(2_u64)) + // Measured: `323 + n * (88 ±0)` + // Estimated: `3788 + n * (2563 ±0)` + // Minimum execution time: 22_833_000 picoseconds. + Weight::from_parts(24_805_620, 3788) + // Standard Error: 9_498 + .saturating_add(Weight::from_parts(4_486_714, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -1243,22 +1345,22 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_237_000 picoseconds. - Weight::from_parts(4_128_112, 0) - // Standard Error: 2_947 - .saturating_add(Weight::from_parts(198_825, 0).saturating_mul(t.into())) - // Standard Error: 26 - .saturating_add(Weight::from_parts(1_007, 0).saturating_mul(n.into())) + // Minimum execution time: 4_969_000 picoseconds. + Weight::from_parts(4_994_916, 0) + // Standard Error: 3_727 + .saturating_add(Weight::from_parts(188_374, 0).saturating_mul(t.into())) + // Standard Error: 33 + .saturating_add(Weight::from_parts(925, 0).saturating_mul(n.into())) } /// The range of component `i` is `[0, 262144]`. fn seal_debug_message(i: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 292_000 picoseconds. - Weight::from_parts(1_297_376, 0) + // Minimum execution time: 328_000 picoseconds. + Weight::from_parts(928_905, 0) // Standard Error: 1 - .saturating_add(Weight::from_parts(724, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(753, 0).saturating_mul(i.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -1266,8 +1368,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `744` // Estimated: `744` - // Minimum execution time: 7_812_000 picoseconds. - Weight::from_parts(8_171_000, 744) + // Minimum execution time: 8_612_000 picoseconds. + Weight::from_parts(9_326_000, 744) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1276,8 +1378,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `10754` // Estimated: `10754` - // Minimum execution time: 44_179_000 picoseconds. - Weight::from_parts(45_068_000, 10754) + // Minimum execution time: 44_542_000 picoseconds. + Weight::from_parts(45_397_000, 10754) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1286,8 +1388,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `744` // Estimated: `744` - // Minimum execution time: 8_964_000 picoseconds. - Weight::from_parts(9_336_000, 744) + // Minimum execution time: 10_343_000 picoseconds. + Weight::from_parts(10_883_000, 744) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1297,8 +1399,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `10754` // Estimated: `10754` - // Minimum execution time: 45_606_000 picoseconds. - Weight::from_parts(47_190_000, 10754) + // Minimum execution time: 46_835_000 picoseconds. 
+ Weight::from_parts(47_446_000, 10754) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1310,12 +1412,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + o * (1 ±0)` // Estimated: `247 + o * (1 ±0)` - // Minimum execution time: 9_077_000 picoseconds. - Weight::from_parts(9_823_489, 247) - // Standard Error: 54 - .saturating_add(Weight::from_parts(392, 0).saturating_mul(n.into())) - // Standard Error: 54 - .saturating_add(Weight::from_parts(408, 0).saturating_mul(o.into())) + // Minimum execution time: 10_604_000 picoseconds. + Weight::from_parts(11_282_849, 247) + // Standard Error: 48 + .saturating_add(Weight::from_parts(496, 0).saturating_mul(n.into())) + // Standard Error: 48 + .saturating_add(Weight::from_parts(764, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -1327,10 +1429,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_812_000 picoseconds. - Weight::from_parts(9_626_925, 247) - // Standard Error: 77 - .saturating_add(Weight::from_parts(269, 0).saturating_mul(n.into())) + // Minimum execution time: 10_081_000 picoseconds. + Weight::from_parts(11_186_557, 247) + // Standard Error: 68 + .saturating_add(Weight::from_parts(782, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1342,10 +1444,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_143_000 picoseconds. - Weight::from_parts(9_229_363, 247) - // Standard Error: 77 - .saturating_add(Weight::from_parts(1_198, 0).saturating_mul(n.into())) + // Minimum execution time: 8_758_000 picoseconds. + Weight::from_parts(9_939_492, 247) + // Standard Error: 69 + .saturating_add(Weight::from_parts(1_703, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1356,10 +1458,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 7_899_000 picoseconds. - Weight::from_parts(8_591_860, 247) - // Standard Error: 56 - .saturating_add(Weight::from_parts(461, 0).saturating_mul(n.into())) + // Minimum execution time: 8_525_000 picoseconds. + Weight::from_parts(9_522_265, 247) + // Standard Error: 66 + .saturating_add(Weight::from_parts(426, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1370,10 +1472,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 9_215_000 picoseconds. - Weight::from_parts(10_198_528, 247) - // Standard Error: 75 - .saturating_add(Weight::from_parts(1_521, 0).saturating_mul(n.into())) + // Minimum execution time: 10_603_000 picoseconds. 
+ Weight::from_parts(11_817_752, 247) + // Standard Error: 82 + .saturating_add(Weight::from_parts(1_279, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1382,36 +1484,36 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_406_000 picoseconds. - Weight::from_parts(1_515_000, 0) + // Minimum execution time: 1_553_000 picoseconds. + Weight::from_parts(1_615_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_782_000 picoseconds. - Weight::from_parts(1_890_000, 0) + // Minimum execution time: 1_932_000 picoseconds. + Weight::from_parts(2_064_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_380_000 picoseconds. - Weight::from_parts(1_422_000, 0) + // Minimum execution time: 1_510_000 picoseconds. + Weight::from_parts(1_545_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_504_000 picoseconds. - Weight::from_parts(1_583_000, 0) + // Minimum execution time: 1_663_000 picoseconds. + Weight::from_parts(1_801_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_045_000 picoseconds. - Weight::from_parts(1_138_000, 0) + // Minimum execution time: 1_026_000 picoseconds. + Weight::from_parts(1_137_000, 0) } /// The range of component `n` is `[0, 512]`. /// The range of component `o` is `[0, 512]`. @@ -1419,60 +1521,63 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_039_000 picoseconds. - Weight::from_parts(2_317_406, 0) - // Standard Error: 13 - .saturating_add(Weight::from_parts(283, 0).saturating_mul(n.into())) - // Standard Error: 13 - .saturating_add(Weight::from_parts(347, 0).saturating_mul(o.into())) + // Minimum execution time: 2_446_000 picoseconds. + Weight::from_parts(2_644_525, 0) + // Standard Error: 17 + .saturating_add(Weight::from_parts(113, 0).saturating_mul(n.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(179, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 512]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_880_000 picoseconds. - Weight::from_parts(2_251_392, 0) - // Standard Error: 17 - .saturating_add(Weight::from_parts(313, 0).saturating_mul(n.into())) + // Minimum execution time: 2_085_000 picoseconds. + Weight::from_parts(2_379_853, 0) + // Standard Error: 19 + .saturating_add(Weight::from_parts(366, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_763_000 picoseconds. - Weight::from_parts(1_951_912, 0) - // Standard Error: 13 - .saturating_add(Weight::from_parts(268, 0).saturating_mul(n.into())) + // Minimum execution time: 1_876_000 picoseconds. 
+ Weight::from_parts(2_073_689, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(376, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_536_000 picoseconds. - Weight::from_parts(1_779_085, 0) - // Standard Error: 14 - .saturating_add(Weight::from_parts(148, 0).saturating_mul(n.into())) + // Minimum execution time: 1_688_000 picoseconds. + Weight::from_parts(1_914_470, 0) + // Standard Error: 15 + .saturating_add(Weight::from_parts(125, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 512]`. - fn seal_take_transient_storage(n: u32, ) -> Weight { + fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_343_000 picoseconds. - Weight::from_parts(2_587_750, 0) - // Standard Error: 22 - .saturating_add(Weight::from_parts(30, 0).saturating_mul(n.into())) + // Minimum execution time: 2_479_000 picoseconds. + Weight::from_parts(2_758_250, 0) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) fn seal_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `0` - // Minimum execution time: 9_250_000 picoseconds. - Weight::from_parts(9_637_000, 0) + // Measured: `352` + // Estimated: `3817` + // Minimum execution time: 15_745_000 picoseconds. + Weight::from_parts(16_300_000, 3817) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } + /// Storage: `Revive::AddressSuffix` (r:1 w:0) + /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) /// Storage: `Revive::CodeInfoOf` (r:1 w:0) @@ -1483,17 +1588,17 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1221 + t * (103 ±0)` - // Estimated: `4686 + t * (103 ±0)` - // Minimum execution time: 33_333_000 picoseconds. - Weight::from_parts(34_378_774, 4686) - // Standard Error: 41_131 - .saturating_add(Weight::from_parts(1_756_626, 0).saturating_mul(t.into())) + // Measured: `1309 + t * (140 ±0)` + // Estimated: `4774 + t * (140 ±0)` + // Minimum execution time: 39_639_000 picoseconds. 
+ Weight::from_parts(40_909_376, 4774) + // Standard Error: 54_479 + .saturating_add(Weight::from_parts(1_526_185, 0).saturating_mul(t.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(3, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(Weight::from_parts(4, 0).saturating_mul(i.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 103).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 140).saturating_mul(t.into())) } /// Storage: `Revive::CodeInfoOf` (r:1 w:0) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) @@ -1501,10 +1606,10 @@ impl WeightInfo for () { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `1048` - // Estimated: `4513` - // Minimum execution time: 27_096_000 picoseconds. - Weight::from_parts(27_934_000, 4513) + // Measured: `1081` + // Estimated: `4546` + // Minimum execution time: 29_651_000 picoseconds. + Weight::from_parts(31_228_000, 4546) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) @@ -1518,12 +1623,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. fn seal_instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1257` - // Estimated: `4715` - // Minimum execution time: 118_412_000 picoseconds. - Weight::from_parts(106_130_041, 4715) - // Standard Error: 12 - .saturating_add(Weight::from_parts(4_235, 0).saturating_mul(i.into())) + // Measured: `1327` + // Estimated: `4792` + // Minimum execution time: 126_995_000 picoseconds. + Weight::from_parts(114_028_446, 4792) + // Standard Error: 11 + .saturating_add(Weight::from_parts(3_781, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1532,73 +1637,73 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 614_000 picoseconds. - Weight::from_parts(4_320_897, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_371, 0).saturating_mul(n.into())) + // Minimum execution time: 653_000 picoseconds. + Weight::from_parts(973_524, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_048, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_062_000 picoseconds. - Weight::from_parts(4_571_371, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(3_572, 0).saturating_mul(n.into())) + // Minimum execution time: 1_118_000 picoseconds. + Weight::from_parts(795_498, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_260, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 609_000 picoseconds. - Weight::from_parts(4_008_056, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_497, 0).saturating_mul(n.into())) + // Minimum execution time: 647_000 picoseconds. 
+ Weight::from_parts(667_024, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_183, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 608_000 picoseconds. - Weight::from_parts(3_839_383, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_504, 0).saturating_mul(n.into())) + // Minimum execution time: 605_000 picoseconds. + Weight::from_parts(675_568, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1_181, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 43_110_000 picoseconds. - Weight::from_parts(31_941_593, 0) - // Standard Error: 12 - .saturating_add(Weight::from_parts(5_233, 0).saturating_mul(n.into())) + // Minimum execution time: 42_743_000 picoseconds. + Weight::from_parts(26_131_984, 0) + // Standard Error: 15 + .saturating_add(Weight::from_parts(4_867, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_798_000 picoseconds. - Weight::from_parts(49_225_000, 0) + // Minimum execution time: 50_838_000 picoseconds. + Weight::from_parts(52_248_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_576_000 picoseconds. - Weight::from_parts(12_731_000, 0) + // Minimum execution time: 12_605_000 picoseconds. + Weight::from_parts(12_796_000, 0) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn seal_set_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `266` - // Estimated: `3731` - // Minimum execution time: 14_306_000 picoseconds. - Weight::from_parts(15_011_000, 3731) + // Measured: `300` + // Estimated: `3765` + // Minimum execution time: 16_377_000 picoseconds. + Weight::from_parts(16_932_000, 3765) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1606,10 +1711,10 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn lock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `303` - // Estimated: `3768` - // Minimum execution time: 10_208_000 picoseconds. - Weight::from_parts(10_514_000, 3768) + // Measured: `338` + // Estimated: `3803` + // Minimum execution time: 11_499_000 picoseconds. + Weight::from_parts(12_104_000, 3803) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1617,10 +1722,10 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `MaxEncodedLen`) fn unlock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `303` + // Measured: `338` // Estimated: `3561` - // Minimum execution time: 9_062_000 picoseconds. - Weight::from_parts(9_414_000, 3561) + // Minimum execution time: 10_308_000 picoseconds. 
+ Weight::from_parts(11_000_000, 3561) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1629,9 +1734,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_074_000 picoseconds. - Weight::from_parts(9_646_158, 0) - // Standard Error: 58 - .saturating_add(Weight::from_parts(84_694, 0).saturating_mul(r.into())) + // Minimum execution time: 8_162_000 picoseconds. + Weight::from_parts(9_180_011, 0) + // Standard Error: 63 + .saturating_add(Weight::from_parts(84_822, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/revive/uapi/Cargo.toml b/substrate/frame/revive/uapi/Cargo.toml index 8705781db002c2a2da19ecd5124f7d8b3d2b124b..9eaa1b68ca8e3c1b4299a0f3740082def8d2a330 100644 --- a/substrate/frame/revive/uapi/Cargo.toml +++ b/substrate/frame/revive/uapi/Cargo.toml @@ -21,7 +21,7 @@ codec = { features = [ ], optional = true, workspace = true } [target.'cfg(target_arch = "riscv32")'.dependencies] -polkavm-derive = { version = "0.12.0" } +polkavm-derive = { version = "0.13.0" } [package.metadata.docs.rs] default-target = ["wasm32-unknown-unknown"] diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index 2106b8fb49b7929975cbebba112038ced6c0bfbe..2663d7c2cf0cc2876ac623d98f9391973cfe2cf3 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -245,10 +245,12 @@ pub trait HostFn: private::Sealed { /// - `addr`: The address of the contract. /// - `output`: A reference to the output data buffer to write the code hash. /// - /// # Errors + /// # Note /// - /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] - fn code_hash(addr: &[u8; 20], output: &mut [u8; 32]) -> Result; + /// If `addr` is not a contract but the account exists then the hash of empty data + /// `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470` is written, + /// otherwise `zero`. + fn code_hash(addr: &[u8; 20], output: &mut [u8; 32]); /// Checks whether there is a value stored under the given key. 
/// diff --git a/substrate/frame/revive/uapi/src/host/riscv32.rs b/substrate/frame/revive/uapi/src/host/riscv32.rs index 866b0ee8dd1762ad8fafcf820ecce0410a3cb038..c2508198c935855d79fd111db5ac320f0b43b175 100644 --- a/substrate/frame/revive/uapi/src/host/riscv32.rs +++ b/substrate/frame/revive/uapi/src/host/riscv32.rs @@ -74,7 +74,7 @@ mod sys { pub fn seal_return(flags: u32, data_ptr: *const u8, data_len: u32); pub fn caller(out_ptr: *mut u8); pub fn is_contract(account_ptr: *const u8) -> ReturnCode; - pub fn code_hash(address_ptr: *const u8, out_ptr: *mut u8) -> ReturnCode; + pub fn code_hash(address_ptr: *const u8, out_ptr: *mut u8); pub fn own_code_hash(out_ptr: *mut u8); pub fn caller_is_origin() -> ReturnCode; pub fn caller_is_root() -> ReturnCode; @@ -528,9 +528,8 @@ impl HostFn for HostFnImpl { ret_val.into() } - fn code_hash(address: &[u8; 20], output: &mut [u8; 32]) -> Result { - let ret_val = unsafe { sys::code_hash(address.as_ptr(), output.as_mut_ptr()) }; - ret_val.into() + fn code_hash(address: &[u8; 20], output: &mut [u8; 32]) { + unsafe { sys::code_hash(address.as_ptr(), output.as_mut_ptr()) } } fn own_code_hash(output: &mut [u8; 32]) { diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index 285758afbe6db8b214fd788a180d760a01aed263..f6c409833e333c09f66c239a069f23a80854589f 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -61,7 +61,7 @@ use frame_support::{ BoundedVec, WeakBoundedVec, }; use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, + offchain::{CreateInherent, SubmitTransaction}, pallet_prelude::BlockNumberFor, }; use sp_consensus_sassafras::{ @@ -131,7 +131,7 @@ pub mod pallet { /// Configuration parameters. #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config + CreateInherent> { /// Amount of slots that each epoch should last. 
#[pallet::constant] type EpochLength: Get; @@ -1020,7 +1020,8 @@ impl Pallet { pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + let xt = T::create_inherent(call.into()); + match SubmitTransaction::>::submit_transaction(xt) { Ok(_) => true, Err(e) => { error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs index d2b329e8a2ba1314e832495a4f1b40cf22ff101f..d7e2fb63dc2f4c285d5ac1960f9310bc357ffd43 100644 --- a/substrate/frame/sassafras/src/mock.rs +++ b/substrate/frame/sassafras/src/mock.rs @@ -34,7 +34,7 @@ use sp_core::{ H256, U256, }; use sp_runtime::{ - testing::{Digest, DigestItem, Header, TestXt}, + testing::{Digest, DigestItem, Header}, BuildStorage, }; @@ -48,12 +48,21 @@ impl frame_system::Config for Test { type Block = frame_system::mocking::MockBlock; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; - type Extrinsic = TestXt; + type RuntimeCall = RuntimeCall; + type Extrinsic = frame_system::mocking::MockUncheckedExtrinsic; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + frame_system::mocking::MockUncheckedExtrinsic::::new_bare(call) + } } impl pallet_sassafras::Config for Test { diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index 3eecf6d6f9e884e5bf671d781d4918ad91619615..468099010bf97fe5b5dfeedd65f38db88f9da2a9 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -1364,7 +1364,7 @@ impl Pallet { Some(&RawOrigin::Signed(_)) => T::WeightInfo::execute_dispatch_signed(), _ => T::WeightInfo::execute_dispatch_unsigned(), }; - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; // We only allow a scheduled call if it cannot push the weight past the limit. let max_weight = base_weight.saturating_add(call_weight); diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index e5fb15cdd07cb2657e599069f803a6cf5a4c50db..ade1095cc504c2f85c6906ee520154a602025af7 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -222,7 +222,12 @@ pub mod runtime { // Types often used in the runtime APIs. pub use sp_core::OpaqueMetadata; + pub use sp_genesis_builder::{ + PresetId, Result as GenesisBuilderResult, DEV_RUNTIME_PRESET, + LOCAL_TESTNET_RUNTIME_PRESET, + }; pub use sp_inherents::{CheckInherentsResult, InherentData}; + pub use sp_keyring::AccountKeyring; pub use sp_runtime::{ApplyExtrinsicResult, ExtrinsicInclusionMode}; } @@ -246,6 +251,7 @@ pub mod runtime { pub use sp_block_builder::*; pub use sp_consensus_aura::*; pub use sp_consensus_grandpa::*; + pub use sp_genesis_builder::*; pub use sp_offchain::*; pub use sp_session::runtime_api::*; pub use sp_transaction_pool::runtime_api::*; @@ -291,8 +297,8 @@ pub mod runtime { /// The block type, which should be fed into [`frame_system::Config`]. /// - /// Should be parameterized with `T: frame_system::Config` and a tuple of `SignedExtension`. - /// When in doubt, use [`SystemSignedExtensionsOf`]. 
+ /// Should be parameterized with `T: frame_system::Config` and a tuple of + /// `TransactionExtension`. When in doubt, use [`SystemTransactionExtensionsOf`]. // Note that this cannot be dependent on `T` for block-number because it would lead to a // circular dependency (self-referential generics). pub type BlockOf = generic::Block>; @@ -306,7 +312,7 @@ pub mod runtime { /// Default set of signed extensions exposed from the `frame_system`. /// /// crucially, this does NOT contain any tx-payment extension. - pub type SystemSignedExtensionsOf = ( + pub type SystemTransactionExtensionsOf = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -396,8 +402,12 @@ pub mod deps { #[cfg(feature = "runtime")] pub use sp_consensus_grandpa; #[cfg(feature = "runtime")] + pub use sp_genesis_builder; + #[cfg(feature = "runtime")] pub use sp_inherents; #[cfg(feature = "runtime")] + pub use sp_keyring; + #[cfg(feature = "runtime")] pub use sp_offchain; #[cfg(feature = "runtime")] pub use sp_storage; diff --git a/substrate/frame/sudo/src/benchmarking.rs b/substrate/frame/sudo/src/benchmarking.rs index dee7d09c9d0c63426fb70fbc45188034d48bae38..ff34cc3a7003b7069ece02d20e0f4b1775041b18 100644 --- a/substrate/frame/sudo/src/benchmarking.rs +++ b/substrate/frame/sudo/src/benchmarking.rs @@ -21,14 +21,25 @@ use super::*; use crate::Pallet; use alloc::{boxed::Box, vec}; use frame_benchmarking::v2::*; +use frame_support::dispatch::{DispatchInfo, GetDispatchInfo}; use frame_system::RawOrigin; +use sp_runtime::traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable, +}; fn assert_last_event(generic_event: crate::Event) { let re: ::RuntimeEvent = generic_event.into(); frame_system::Pallet::::assert_last_event(re.into()); } -#[benchmarks(where ::RuntimeCall: From>)] +#[benchmarks(where + T: Send + Sync, + ::RuntimeCall: From>, + ::RuntimeCall: Dispatchable + GetDispatchInfo, + <::RuntimeCall as Dispatchable>::PostInfo: Default, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone, +)] mod benchmarks { use super::*; @@ -86,5 +97,26 @@ mod benchmarks { assert_last_event::(Event::KeyRemoved {}); } + #[benchmark] + fn check_only_sudo_account() { + let caller: T::AccountId = whitelisted_caller(); + Key::::put(&caller); + + let call: ::RuntimeCall = + frame_system::Call::remark { remark: vec![] }.into(); + let info = call.get_dispatch_info(); + let ext = CheckOnlySudoAccount::::new(); + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok( + Default::default() + )) + .unwrap() + .is_ok()); + } + } + impl_benchmark_test_suite!(Pallet, crate::mock::new_bench_ext(), crate::mock::Test); } diff --git a/substrate/frame/sudo/src/extension.rs b/substrate/frame/sudo/src/extension.rs index fb7eaf7894806bf816be381c430da95378078961..573de45ba32dbe636ab3628d526fa4800581f4e3 100644 --- a/substrate/frame/sudo/src/extension.rs +++ b/substrate/frame/sudo/src/extension.rs @@ -21,10 +21,11 @@ use core::{fmt, marker::PhantomData}; use frame_support::{dispatch::DispatchInfo, ensure}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, SignedExtension}, + impl_tx_ext_default, + traits::{AsSystemOriginSigner, DispatchInfoOf, Dispatchable, TransactionExtension}, transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError, - UnknownTransaction, ValidTransaction, + 
InvalidTransaction, TransactionPriority, TransactionValidityError, UnknownTransaction, + ValidTransaction, }, }; @@ -59,49 +60,61 @@ impl fmt::Debug for CheckOnlySudoAccount { } impl CheckOnlySudoAccount { - /// Creates new `SignedExtension` to check sudo key. + /// Creates new `TransactionExtension` to check sudo key. pub fn new() -> Self { Self::default() } } -impl SignedExtension for CheckOnlySudoAccount +impl TransactionExtension<::RuntimeCall> + for CheckOnlySudoAccount where - ::RuntimeCall: Dispatchable, + ::RuntimeCall: Dispatchable, + <::RuntimeCall as Dispatchable>::RuntimeOrigin: + AsSystemOriginSigner + Clone, { const IDENTIFIER: &'static str = "CheckOnlySudoAccount"; - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); + type Implicit = (); type Pre = (); + type Val = (); - fn additional_signed(&self) -> Result { - Ok(()) + fn weight( + &self, + _: &::RuntimeCall, + ) -> frame_support::weights::Weight { + use crate::weights::WeightInfo; + T::WeightInfo::check_only_sudo_account() } fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, + origin: <::RuntimeCall as Dispatchable>::RuntimeOrigin, + _call: &::RuntimeCall, + info: &DispatchInfoOf<::RuntimeCall>, _len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + ( + ValidTransaction, + Self::Val, + <::RuntimeCall as Dispatchable>::RuntimeOrigin, + ), + TransactionValidityError, + > { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; let sudo_key: T::AccountId = Key::::get().ok_or(UnknownTransaction::CannotLookup)?; ensure!(*who == sudo_key, InvalidTransaction::BadSigner); - Ok(ValidTransaction { - priority: info.weight.ref_time() as TransactionPriority, - ..Default::default() - }) + Ok(( + ValidTransaction { + priority: info.total_weight().ref_time() as TransactionPriority, + ..Default::default() + }, + (), + origin, + )) } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + impl_tx_ext_default!(::RuntimeCall; prepare); } diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index 07296e90b64859975f164d75cfc056ab63b0c860..66616bf801ebf9714bf718d149e5cd28dff3c4f1 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -85,8 +85,8 @@ //! meant to be used by constructing runtime calls from outside the runtime. //! //! -//! This pallet also defines a [`SignedExtension`](sp_runtime::traits::SignedExtension) called -//! [`CheckOnlySudoAccount`] to ensure that only signed transactions by the sudo account are +//! This pallet also defines a [`TransactionExtension`](sp_runtime::traits::TransactionExtension) +//! called [`CheckOnlySudoAccount`] to ensure that only signed transactions by the sudo account are //! accepted by the transaction pool. The intended use of this signed extension is to prevent other //! accounts from spamming the transaction pool for the initial phase of a chain, during which //! developers may only want a sudo account to be able to make transactions. 
@@ -197,7 +197,7 @@ pub mod pallet { #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( - T::WeightInfo::sudo().saturating_add(dispatch_info.weight), + T::WeightInfo::sudo().saturating_add(dispatch_info.call_weight), dispatch_info.class ) })] @@ -262,7 +262,7 @@ pub mod pallet { #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( - T::WeightInfo::sudo_as().saturating_add(dispatch_info.weight), + T::WeightInfo::sudo_as().saturating_add(dispatch_info.call_weight), dispatch_info.class, ) })] diff --git a/substrate/frame/sudo/src/tests.rs b/substrate/frame/sudo/src/tests.rs index 00bb86cc2686f2410d09077d9b388750914eb74c..3ed3bd336f53cdaba274fca3d339ad811a3a71f1 100644 --- a/substrate/frame/sudo/src/tests.rs +++ b/substrate/frame/sudo/src/tests.rs @@ -108,7 +108,7 @@ fn sudo_unchecked_weight_basics() { let sudo_unchecked_weight_call = SudoCall::sudo_unchecked_weight { call, weight: Weight::from_parts(1_000, 0) }; let info = sudo_unchecked_weight_call.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1_000, 0)); + assert_eq!(info.call_weight, Weight::from_parts(1_000, 0)); }); } diff --git a/substrate/frame/sudo/src/weights.rs b/substrate/frame/sudo/src/weights.rs index c166ab442d7320cee6a435d21e1c01e1b6425645..ac5557e68a63610f565e40ec38ffa2544b69a814 100644 --- a/substrate/frame/sudo/src/weights.rs +++ b/substrate/frame/sudo/src/weights.rs @@ -55,6 +55,7 @@ pub trait WeightInfo { fn sudo() -> Weight; fn sudo_as() -> Weight; fn remove_key() -> Weight; + fn check_only_sudo_account() -> Weight; } /// Weights for `pallet_sudo` using the Substrate node and recommended hardware. @@ -102,6 +103,16 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 3_416_000 picoseconds. + Weight::from_parts(3_645_000, 1517) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } } // For backwards compatibility and tests. @@ -148,4 +159,14 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 3_416_000 picoseconds. 
+ Weight::from_parts(3_645_000, 1517) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 9e9741ee1619a4184dc7e21d27594165613b2db2..d7da034b3492aabdee969e211a0d505c92c96c63 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -71,6 +71,7 @@ pretty_assertions = { workspace = true } sp-timestamp = { workspace = true } frame-system = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true, default-features = true } +Inflector = { workspace = true } [features] default = ["std"] @@ -113,9 +114,7 @@ try-runtime = [ "sp-debug-derive/force-debug", "sp-runtime/try-runtime", ] -experimental = [ - "frame-support-procedural/experimental", -] +experimental = ["frame-support-procedural/experimental"] # By default some types have documentation, `no-metadata-docs` allows to reduce the documentation # in the metadata. no-metadata-docs = [ diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index c5fe8440d21bebc68347c0d09dab313628e0ccbf..e34c6ac5016a9f8597fae03f6012f1e7d2f79659 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -66,18 +66,16 @@ pub fn expand_outer_inherent( fn create_extrinsics(&self) -> #scrate::__private::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> { - use #scrate::inherent::ProvideInherent; + use #scrate::{inherent::ProvideInherent, traits::InherentBuilder}; let mut inherents = #scrate::__private::Vec::new(); #( #pallet_attrs if let Some(inherent) = #pallet_names::create_inherent(self) { - let inherent = <#unchecked_extrinsic as #scrate::sp_runtime::traits::Extrinsic>::new( + let inherent = <#unchecked_extrinsic as InherentBuilder>::new_inherent( inherent.into(), - None, - ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return \ - `Some`; qed"); + ); inherents.push(inherent); } @@ -123,7 +121,7 @@ pub fn expand_outer_inherent( for xt in block.extrinsics() { // Inherents are before any other extrinsics. // And signed extrinsics are not inherents. - if #scrate::sp_runtime::traits::Extrinsic::is_signed(xt).unwrap_or(false) { + if !(#scrate::sp_runtime::traits::ExtrinsicLike::is_bare(xt)) { break } @@ -161,10 +159,9 @@ pub fn expand_outer_inherent( match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { - let is_signed = #scrate::sp_runtime::traits::Extrinsic::is_signed(xt) - .unwrap_or(false); + let is_bare = #scrate::sp_runtime::traits::ExtrinsicLike::is_bare(xt); - if !is_signed { + if is_bare { let call = < #unchecked_extrinsic as ExtrinsicCall >::call(xt); @@ -209,8 +206,9 @@ pub fn expand_outer_inherent( use #scrate::inherent::ProvideInherent; use #scrate::traits::{IsSubType, ExtrinsicCall}; - if #scrate::sp_runtime::traits::Extrinsic::is_signed(ext).unwrap_or(false) { - // Signed extrinsics are never inherents. + let is_bare = #scrate::sp_runtime::traits::ExtrinsicLike::is_bare(ext); + if !is_bare { + // Inherents must be bare extrinsics. 
return false } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 54eb290ca6cf3321729a3f9bba6d10a69e9a547a..c12fc20bc8b89e05c4aa8d1f291d81b094a0acbc 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -101,16 +101,16 @@ pub fn expand_runtime_metadata( let ty = #scrate::__private::scale_info::meta_type::<#extrinsic>(); let address_ty = #scrate::__private::scale_info::meta_type::< - <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::SignatureAddress + <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Address >(); let call_ty = #scrate::__private::scale_info::meta_type::< - <#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::Call + <#extrinsic as #scrate::traits::ExtrinsicCall>::Call >(); let signature_ty = #scrate::__private::scale_info::meta_type::< - <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::Signature + <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Signature >(); let extra_ty = #scrate::__private::scale_info::meta_type::< - <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::SignatureExtra + <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Extension >(); #scrate::__private::metadata_ir::MetadataIR { @@ -122,16 +122,20 @@ pub fn expand_runtime_metadata( call_ty, signature_ty, extra_ty, - signed_extensions: < + extensions: < < #extrinsic as #scrate::sp_runtime::traits::ExtrinsicMetadata - >::SignedExtensions as #scrate::sp_runtime::traits::SignedExtension + >::TransactionExtensions + as + #scrate::sp_runtime::traits::TransactionExtension::< + <#runtime as #system_path::Config>::RuntimeCall + > >::metadata() .into_iter() - .map(|meta| #scrate::__private::metadata_ir::SignedExtensionMetadataIR { + .map(|meta| #scrate::__private::metadata_ir::TransactionExtensionMetadataIR { identifier: meta.identifier, ty: meta.ty, - additional_signed: meta.additional_signed, + implicit: meta.implicit, }) .collect(), }, diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index 4a14853c04eec327ddcdf403819301cb58f71d39..1c4ab436ad92aaee824a7f2916c32483fe443f6f 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -153,6 +153,10 @@ pub fn expand_outer_origin( self.filter = #scrate::__private::Rc::new(#scrate::__private::Box::new(filter)); } + fn set_caller(&mut self, caller: OriginCaller) { + self.caller = caller; + } + fn set_caller_from(&mut self, other: impl Into) { self.caller = other.into().caller; } @@ -301,6 +305,22 @@ pub fn expand_outer_origin( } } + impl #scrate::__private::AsSystemOriginSigner<<#runtime as #system_path::Config>::AccountId> for RuntimeOrigin { + fn as_system_origin_signer(&self) -> Option<&<#runtime as #system_path::Config>::AccountId> { + if let OriginCaller::system(#system_path::Origin::<#runtime>::Signed(ref signed)) = &self.caller { + Some(signed) + } else { + None + } + } + } + + impl #scrate::__private::AsTransactionAuthorizedOrigin for RuntimeOrigin { + fn 
is_transaction_authorized(&self) -> bool { + !matches!(&self.caller, OriginCaller::system(#system_path::Origin::<#runtime>::None)) + } + } + #pallet_conversions }) } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs index 6531c0e9e07075f2674856740aab2ec786e3f683..1302f86455f2ceeb7af58e46e30b05afccbc44cf 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs @@ -16,6 +16,7 @@ // limitations under the License use crate::construct_runtime::Pallet; +use core::str::FromStr; use proc_macro2::{Ident, TokenStream as TokenStream2}; use quote::quote; @@ -28,7 +29,8 @@ pub fn expand_outer_task( let mut from_impls = Vec::new(); let mut task_variants = Vec::new(); let mut variant_names = Vec::new(); - let mut task_paths = Vec::new(); + let mut task_types = Vec::new(); + let mut cfg_attrs = Vec::new(); for decl in pallet_decls { if decl.find_part("Task").is_none() { continue @@ -37,18 +39,31 @@ pub fn expand_outer_task( let variant_name = &decl.name; let path = &decl.path; let index = decl.index; + let instance = decl.instance.as_ref().map(|instance| quote!(, #path::#instance)); + let task_type = quote!(#path::Task<#runtime_name #instance>); + + let attr = decl.cfg_pattern.iter().fold(TokenStream2::new(), |acc, pattern| { + let attr = TokenStream2::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); from_impls.push(quote! { - impl From<#path::Task<#runtime_name>> for RuntimeTask { - fn from(hr: #path::Task<#runtime_name>) -> Self { + #attr + impl From<#task_type> for RuntimeTask { + fn from(hr: #task_type) -> Self { RuntimeTask::#variant_name(hr) } } - impl TryInto<#path::Task<#runtime_name>> for RuntimeTask { + #attr + impl TryInto<#task_type> for RuntimeTask { type Error = (); - fn try_into(self) -> Result<#path::Task<#runtime_name>, Self::Error> { + fn try_into(self) -> Result<#task_type, Self::Error> { match self { RuntimeTask::#variant_name(hr) => Ok(hr), _ => Err(()), @@ -58,13 +73,16 @@ pub fn expand_outer_task( }); task_variants.push(quote! 
{ + #attr #[codec(index = #index)] - #variant_name(#path::Task<#runtime_name>), + #variant_name(#task_type), }); variant_names.push(quote!(#variant_name)); - task_paths.push(quote!(#path::Task)); + task_types.push(task_type); + + cfg_attrs.push(attr); } let prelude = quote!(#scrate::traits::tasks::__private); @@ -91,35 +109,50 @@ pub fn expand_outer_task( fn is_valid(&self) -> bool { match self { - #(RuntimeTask::#variant_names(val) => val.is_valid(),)* + #( + #cfg_attrs + RuntimeTask::#variant_names(val) => val.is_valid(), + )* _ => unreachable!(#INCOMPLETE_MATCH_QED), } } fn run(&self) -> Result<(), #scrate::traits::tasks::__private::DispatchError> { match self { - #(RuntimeTask::#variant_names(val) => val.run(),)* + #( + #cfg_attrs + RuntimeTask::#variant_names(val) => val.run(), + )* _ => unreachable!(#INCOMPLETE_MATCH_QED), } } fn weight(&self) -> #scrate::pallet_prelude::Weight { match self { - #(RuntimeTask::#variant_names(val) => val.weight(),)* + #( + #cfg_attrs + RuntimeTask::#variant_names(val) => val.weight(), + )* _ => unreachable!(#INCOMPLETE_MATCH_QED), } } fn task_index(&self) -> u32 { match self { - #(RuntimeTask::#variant_names(val) => val.task_index(),)* + #( + #cfg_attrs + RuntimeTask::#variant_names(val) => val.task_index(), + )* _ => unreachable!(#INCOMPLETE_MATCH_QED), } } fn iter() -> Self::Enumeration { let mut all_tasks = Vec::new(); - #(all_tasks.extend(#task_paths::iter().map(RuntimeTask::from).collect::>());)* + #( + #cfg_attrs + all_tasks.extend(<#task_types>::iter().map(RuntimeTask::from).collect::>()); + )* all_tasks.into_iter() } } diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index 206ffc1159f719f88a59a0df1c1e9abc37e4a33f..87fb4b8967e6e03f95b26675fc2bbb2c0636d05e 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -372,7 +372,8 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { >::pays_fee(&__pallet_base_weight, ( #( #args_name, )* )); #frame_support::dispatch::DispatchInfo { - weight: __pallet_weight, + call_weight: __pallet_weight, + extension_weight: Default::default(), class: __pallet_class, pays_fee: __pallet_pays_fee, } diff --git a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs index c6166ff45b1965dcbe297f3853336b5bd85d8ae2..79bf33a828e252c222bc1a95edba45b80d887d29 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -171,7 +171,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { let whitelisted_storage_idents: Vec = def .storages .iter() - .filter_map(|s| s.whitelisted.then_some(s.ident.clone())) + .filter_map(|s| s.whitelisted.then(|| s.ident.clone())) .collect(); let whitelisted_storage_keys_impl = quote::quote![ diff --git a/substrate/frame/support/procedural/src/pallet/expand/tasks.rs b/substrate/frame/support/procedural/src/pallet/expand/tasks.rs index 7201c352d92cd062a2066fa33b4f8224516ae79b..b6346ca8ff34265844d14ca19c9dd8e6514bae2b 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tasks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tasks.rs @@ -20,21 +20,25 @@ //! 
Home of the expansion code for the Tasks API use crate::pallet::{parse::tasks::*, Def}; -use derive_syn_parse::Parse; use inflector::Inflector; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote, ToTokens}; -use syn::{parse_quote, spanned::Spanned, ItemEnum, ItemImpl}; +use syn::{parse_quote_spanned, spanned::Spanned}; impl TaskEnumDef { /// Since we optionally allow users to manually specify a `#[pallet::task_enum]`, in the /// event they _don't_ specify one (which is actually the most common behavior) we have to /// generate one based on the existing [`TasksDef`]. This method performs that generation. - pub fn generate( - tasks: &TasksDef, - type_decl_bounded_generics: TokenStream2, - type_use_generics: TokenStream2, - ) -> Self { + pub fn generate(tasks: &TasksDef, def: &Def) -> Self { + // We use the span of the attribute to indicate that the error comes from code generated + // for the specific section, otherwise the item impl. + let span = tasks + .tasks_attr + .as_ref() + .map_or_else(|| tasks.item_impl.span(), |attr| attr.span()); + + let type_decl_bounded_generics = def.type_decl_bounded_generics(span); + let variants = if tasks.tasks_attr.is_some() { tasks .tasks @@ -58,7 +62,8 @@ impl TaskEnumDef { } else { Vec::new() }; - let mut task_enum_def: TaskEnumDef = parse_quote! { + + parse_quote_spanned! { span => /// Auto-generated enum that encapsulates all tasks defined by this pallet. /// /// Conceptually similar to the [`Call`] enum, but for tasks. This is only @@ -69,33 +74,32 @@ impl TaskEnumDef { #variants, )* } - }; - task_enum_def.type_use_generics = type_use_generics; - task_enum_def + } } } -impl ToTokens for TaskEnumDef { - fn to_tokens(&self, tokens: &mut TokenStream2) { - let item_enum = &self.item_enum; - let ident = &item_enum.ident; - let vis = &item_enum.vis; - let attrs = &item_enum.attrs; - let generics = &item_enum.generics; - let variants = &item_enum.variants; - let scrate = &self.scrate; - let type_use_generics = &self.type_use_generics; - if self.attr.is_some() { +impl TaskEnumDef { + fn expand_to_tokens(&self, def: &Def) -> TokenStream2 { + if let Some(attr) = &self.attr { + let ident = &self.item_enum.ident; + let vis = &self.item_enum.vis; + let attrs = &self.item_enum.attrs; + let generics = &self.item_enum.generics; + let variants = &self.item_enum.variants; + let frame_support = &def.frame_support; + let type_use_generics = &def.type_use_generics(attr.span()); + let type_impl_generics = &def.type_impl_generics(attr.span()); + // `item_enum` is short-hand / generated enum - tokens.extend(quote! { + quote! 
{ #(#attrs)* #[derive( - #scrate::CloneNoBound, - #scrate::EqNoBound, - #scrate::PartialEqNoBound, - #scrate::pallet_prelude::Encode, - #scrate::pallet_prelude::Decode, - #scrate::pallet_prelude::TypeInfo, + #frame_support::CloneNoBound, + #frame_support::EqNoBound, + #frame_support::PartialEqNoBound, + #frame_support::pallet_prelude::Encode, + #frame_support::pallet_prelude::Decode, + #frame_support::pallet_prelude::TypeInfo, )] #[codec(encode_bound())] #[codec(decode_bound())] @@ -104,32 +108,25 @@ impl ToTokens for TaskEnumDef { #variants #[doc(hidden)] #[codec(skip)] - __Ignore(core::marker::PhantomData, #scrate::Never), + __Ignore(core::marker::PhantomData<(#type_use_generics)>, #frame_support::Never), } - impl core::fmt::Debug for #ident<#type_use_generics> { + impl<#type_impl_generics> core::fmt::Debug for #ident<#type_use_generics> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct(stringify!(#ident)).field("value", self).finish() } } - }); + } } else { // `item_enum` is a manually specified enum (no attribute) - tokens.extend(item_enum.to_token_stream()); + self.item_enum.to_token_stream() } } } -/// Represents an already-expanded [`TasksDef`]. -#[derive(Parse)] -pub struct ExpandedTasksDef { - pub task_item_impl: ItemImpl, - pub task_trait_impl: ItemImpl, -} - -impl ToTokens for TasksDef { - fn to_tokens(&self, tokens: &mut TokenStream2) { - let scrate = &self.scrate; +impl TasksDef { + fn expand_to_tokens(&self, def: &Def) -> TokenStream2 { + let frame_support = &def.frame_support; let enum_ident = syn::Ident::new("Task", self.enum_ident.span()); let enum_arguments = &self.enum_arguments; let enum_use = quote!(#enum_ident #enum_arguments); @@ -160,21 +157,21 @@ impl ToTokens for TasksDef { let task_arg_names = self.tasks.iter().map(|task| &task.arg_names).collect::>(); let impl_generics = &self.item_impl.generics; - tokens.extend(quote! { + quote! { impl #impl_generics #enum_use { #(#task_fn_impls)* } - impl #impl_generics #scrate::traits::Task for #enum_use + impl #impl_generics #frame_support::traits::Task for #enum_use { - type Enumeration = #scrate::__private::IntoIter<#enum_use>; + type Enumeration = #frame_support::__private::IntoIter<#enum_use>; fn iter() -> Self::Enumeration { - let mut all_tasks = #scrate::__private::vec![]; + let mut all_tasks = #frame_support::__private::vec![]; #(all_tasks .extend(#task_iters.map(|(#(#task_arg_names),*)| #enum_ident::#task_fn_idents { #(#task_arg_names: #task_arg_names.clone()),* }) - .collect::<#scrate::__private::Vec<_>>()); + .collect::<#frame_support::__private::Vec<_>>()); )* all_tasks.into_iter() } @@ -193,7 +190,7 @@ impl ToTokens for TasksDef { } } - fn run(&self) -> Result<(), #scrate::pallet_prelude::DispatchError> { + fn run(&self) -> Result<(), #frame_support::pallet_prelude::DispatchError> { match self.clone() { #(#enum_ident::#task_fn_idents { #(#task_arg_names),* } => { <#enum_use>::#task_fn_names(#( #task_arg_names, )* ) @@ -203,64 +200,32 @@ impl ToTokens for TasksDef { } #[allow(unused_variables)] - fn weight(&self) -> #scrate::pallet_prelude::Weight { + fn weight(&self) -> #frame_support::pallet_prelude::Weight { match self.clone() { #(#enum_ident::#task_fn_idents { #(#task_arg_names),* } => #task_weights,)* Task::__Ignore(_, _) => unreachable!(), } } } - }); + } } } -/// Expands the [`TasksDef`] in the enclosing [`Def`], if present, and returns its tokens. -/// -/// This modifies the underlying [`Def`] in addition to returning any tokens that were added. 
-pub fn expand_tasks_impl(def: &mut Def) -> TokenStream2 { - let Some(tasks) = &mut def.tasks else { return quote!() }; - let ExpandedTasksDef { task_item_impl, task_trait_impl } = parse_quote!(#tasks); - quote! { - #task_item_impl - #task_trait_impl - } -} +/// Generate code related to tasks. +pub fn expand_tasks(def: &Def) -> TokenStream2 { + let Some(tasks_def) = &def.tasks else { + return quote!(); + }; -/// Represents a fully-expanded [`TaskEnumDef`]. -#[derive(Parse)] -pub struct ExpandedTaskEnum { - pub item_enum: ItemEnum, - pub debug_impl: ItemImpl, -} + let default_task_enum = TaskEnumDef::generate(&tasks_def, def); -/// Modifies a [`Def`] to expand the underlying [`TaskEnumDef`] if present, and also returns -/// its tokens. A blank [`TokenStream2`] is returned if no [`TaskEnumDef`] has been generated -/// or defined. -pub fn expand_task_enum(def: &mut Def) -> TokenStream2 { - let Some(task_enum) = &mut def.task_enum else { return quote!() }; - let ExpandedTaskEnum { item_enum, debug_impl } = parse_quote!(#task_enum); - quote! { - #item_enum - #debug_impl - } -} + let task_enum = def.task_enum.as_ref().unwrap_or_else(|| &default_task_enum); + + let tasks_expansion = tasks_def.expand_to_tokens(def); + let task_enum_expansion = task_enum.expand_to_tokens(def); -/// Modifies a [`Def`] to expand the underlying [`TasksDef`] and also generate a -/// [`TaskEnumDef`] if applicable. The tokens for these items are returned if they are created. -pub fn expand_tasks(def: &mut Def) -> TokenStream2 { - if let Some(tasks_def) = &def.tasks { - if def.task_enum.is_none() { - def.task_enum = Some(TaskEnumDef::generate( - &tasks_def, - def.type_decl_bounded_generics(tasks_def.item_impl.span()), - def.type_use_generics(tasks_def.item_impl.span()), - )); - } - } - let tasks_extra_output = expand_tasks_impl(def); - let task_enum_extra_output = expand_task_enum(def); quote! 
{ - #tasks_extra_output - #task_enum_extra_output + #tasks_expansion + #task_enum_expansion } } diff --git a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs index 1975f059152c81c88c641c80b6013f5ff69f2613..6d53de3133e8978edad7d80c7aeba130f4a6cd4f 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -33,7 +33,7 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { let call_part = def.call.as_ref().map(|_| quote::quote!(Call,)); - let task_part = def.task_enum.as_ref().map(|_| quote::quote!(Task,)); + let task_part = def.tasks.as_ref().map(|_| quote::quote!(Task,)); let storage_part = (!def.storages.is_empty()).then(|| quote::quote!(Storage,)); @@ -85,7 +85,7 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { let call_part_v2 = def.call.as_ref().map(|_| quote::quote!(+ Call)); - let task_part_v2 = def.task_enum.as_ref().map(|_| quote::quote!(+ Task)); + let task_part_v2 = def.tasks.as_ref().map(|_| quote::quote!(+ Task)); let storage_part_v2 = (!def.storages.is_empty()).then(|| quote::quote!(+ Storage)); diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index 5036f691690f663939359d3596cd2c582571f1b2..c9a150effccbee6ecf7ebb308837556828c1c071 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -126,11 +126,11 @@ impl Def { }, Some(PalletAttr::RuntimeCall(cw, span)) if call.is_none() => call = Some(call::CallDef::try_from(span, index, item, dev_mode, cw)?), - Some(PalletAttr::Tasks(_)) if tasks.is_none() => { + Some(PalletAttr::Tasks(span)) if tasks.is_none() => { let item_tokens = item.to_token_stream(); // `TasksDef::parse` needs to know if attr was provided so we artificially // re-insert it here - tasks = Some(syn::parse2::(quote::quote! { + tasks = Some(syn::parse2::(quote::quote_spanned! 
{ span => #[pallet::tasks_experimental] #item_tokens })?); @@ -404,6 +404,9 @@ impl Def { if let Some(extra_constants) = &self.extra_constants { instances.extend_from_slice(&extra_constants.instances[..]); } + if let Some(task_enum) = &self.task_enum { + instances.push(task_enum.instance_usage.clone()); + } let mut errors = instances.into_iter().filter_map(|instances| { if instances.has_instance == self.config.has_instance { diff --git a/substrate/frame/support/procedural/src/pallet/parse/tasks.rs b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs index ed860849a4db438d0cf13a5e866733f3d1936da4..5bff64643df12bf8c5201ae1eca2ca1d36a4224a 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/tasks.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/tasks.rs @@ -25,8 +25,8 @@ use crate::assert_parse_error_matches; #[cfg(test)] use crate::pallet::parse::tests::simulate_manifest_dir; +use super::helper; use derive_syn_parse::Parse; -use frame_support_procedural_tools::generate_access_from_frame_or_crate; use proc_macro2::TokenStream as TokenStream2; use quote::{quote, ToTokens}; use syn::{ @@ -34,8 +34,8 @@ use syn::{ parse2, spanned::Spanned, token::{Bracket, Paren, PathSep, Pound}, - Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, Path, PathArguments, - Result, TypePath, + Error, Expr, Ident, ImplItem, ImplItemFn, ItemEnum, ItemImpl, LitInt, PathArguments, Result, + TypePath, }; pub mod keywords { @@ -57,8 +57,6 @@ pub struct TasksDef { pub tasks_attr: Option, pub tasks: Vec, pub item_impl: ItemImpl, - /// Path to `frame_support` - pub scrate: Path, pub enum_ident: Ident, pub enum_arguments: PathArguments, } @@ -114,11 +112,7 @@ impl syn::parse::Parse for TasksDef { let enum_ident = last_seg.ident.clone(); let enum_arguments = last_seg.arguments.clone(); - // We do this here because it would be improper to do something fallible like this at - // the expansion phase. Fallible stuff should happen during parsing. - let scrate = generate_access_from_frame_or_crate("frame-support")?; - - Ok(TasksDef { tasks_attr, item_impl, tasks, scrate, enum_ident, enum_arguments }) + Ok(TasksDef { tasks_attr, item_impl, tasks, enum_ident, enum_arguments }) } } @@ -146,12 +140,11 @@ pub type PalletTaskEnumAttr = PalletTaskAttr; /// Parsing for a manually-specified (or auto-generated) task enum, optionally including the /// attached `#[pallet::task_enum]` attribute. -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct TaskEnumDef { pub attr: Option, pub item_enum: ItemEnum, - pub scrate: Path, - pub type_use_generics: TokenStream2, + pub instance_usage: helper::InstanceUsage, } impl syn::parse::Parse for TaskEnumDef { @@ -163,13 +156,10 @@ impl syn::parse::Parse for TaskEnumDef { None => None, }; - // We do this here because it would be improper to do something fallible like this at - // the expansion phase. Fallible stuff should happen during parsing. - let scrate = generate_access_from_frame_or_crate("frame-support")?; - - let type_use_generics = quote!(T); + let instance_usage = + helper::check_type_def_gen(&item_enum.generics, item_enum.ident.span())?; - Ok(TaskEnumDef { attr, item_enum, scrate, type_use_generics }) + Ok(TaskEnumDef { attr, item_enum, instance_usage }) } } @@ -896,7 +886,7 @@ fn test_parse_task_enum_def_non_task_name() { simulate_manifest_dir("../../examples/basic", || { parse2::(quote! 
{ #[pallet::task_enum] - pub enum Something { + pub enum Something { Foo } }) @@ -921,7 +911,7 @@ fn test_parse_task_enum_def_missing_attr_allowed() { fn test_parse_task_enum_def_missing_attr_alternate_name_allowed() { simulate_manifest_dir("../../examples/basic", || { parse2::(quote! { - pub enum Foo { + pub enum Foo { Red, } }) @@ -951,7 +941,7 @@ fn test_parse_task_enum_def_wrong_item() { assert_parse_error_matches!( parse2::(quote! { #[pallet::task_enum] - pub struct Something; + pub struct Something; }), "expected `enum`" ); diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 351ba3a15efcc9db4ce684190aa53268f068813b..3678f958980abc0f43b3bc599324e91e7b1aa90e 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -26,7 +26,9 @@ use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_runtime::{ generic::{CheckedExtrinsic, UncheckedExtrinsic}, - traits::SignedExtension, + traits::{ + Dispatchable, ExtensionPostDispatchWeightHandler, RefundWeight, TransactionExtension, + }, DispatchError, RuntimeDebug, }; use sp_weights::Weight; @@ -236,14 +238,23 @@ impl<'a> OneOrMany for &'a [DispatchClass] { /// A bundle of static information collected from the `#[pallet::weight]` attributes. #[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct DispatchInfo { - /// Weight of this transaction. - pub weight: Weight, + /// Weight of this transaction's call. + pub call_weight: Weight, + /// Weight of this transaction's extension. + pub extension_weight: Weight, /// Class of this transaction. pub class: DispatchClass, /// Does this transaction pay fees. pub pays_fee: Pays, } +impl DispatchInfo { + /// Returns the weight used by this extrinsic's extension and call when applied. + pub fn total_weight(&self) -> Weight { + self.call_weight.saturating_add(self.extension_weight) + } +} + /// A `Dispatchable` function (aka transaction) that can carry some static information along with /// it, using the `#[pallet::weight]` attribute. pub trait GetDispatchInfo { @@ -268,7 +279,8 @@ pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &Dispatc .calc_actual_weight(info) } -/// Extract the actual pays_fee from a dispatch result if any or fall back to the default weight. +/// Extract the actual pays_fee from a dispatch result if any or fall back to the default +/// weight. pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { match result { Ok(post_info) => post_info, @@ -290,15 +302,15 @@ pub struct PostDispatchInfo { impl PostDispatchInfo { /// Calculate how much (if any) weight was not used by the `Dispatchable`. pub fn calc_unspent(&self, info: &DispatchInfo) -> Weight { - info.weight - self.calc_actual_weight(info) + info.total_weight() - self.calc_actual_weight(info) } /// Calculate how much weight was actually spent by the `Dispatchable`. pub fn calc_actual_weight(&self, info: &DispatchInfo) -> Weight { if let Some(actual_weight) = self.actual_weight { - actual_weight.min(info.weight) + actual_weight.min(info.total_weight()) } else { - info.weight + info.total_weight() } } @@ -368,39 +380,28 @@ where } /// Implementation for unchecked extrinsic. 
-impl GetDispatchInfo - for UncheckedExtrinsic +impl> GetDispatchInfo + for UncheckedExtrinsic where - Call: GetDispatchInfo, - Extra: SignedExtension, + Call: GetDispatchInfo + Dispatchable, { fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() + let mut info = self.function.get_dispatch_info(); + info.extension_weight = self.extension_weight(); + info } } /// Implementation for checked extrinsic. -impl GetDispatchInfo for CheckedExtrinsic +impl> GetDispatchInfo + for CheckedExtrinsic where Call: GetDispatchInfo, { fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() - } -} - -/// Implementation for test extrinsic. -#[cfg(feature = "std")] -impl GetDispatchInfo - for sp_runtime::testing::TestXt -{ - fn get_dispatch_info(&self) -> DispatchInfo { - // for testing: weight == size. - DispatchInfo { - weight: Weight::from_parts(self.encode().len() as _, 0), - pays_fee: Pays::Yes, - class: self.call.get_dispatch_info().class, - } + let mut info = self.function.get_dispatch_info(); + info.extension_weight = self.extension_weight(); + info } } @@ -579,6 +580,28 @@ impl ClassifyDispatch for (Weight, DispatchClass, Pays) { } } +impl RefundWeight for PostDispatchInfo { + fn refund(&mut self, weight: Weight) { + if let Some(actual_weight) = self.actual_weight.as_mut() { + actual_weight.saturating_reduce(weight); + } + } +} + +impl ExtensionPostDispatchWeightHandler for PostDispatchInfo { + fn set_extension_weight(&mut self, info: &DispatchInfo) { + let actual_weight = self + .actual_weight + .unwrap_or(info.call_weight) + .saturating_add(info.extension_weight); + self.actual_weight = Some(actual_weight); + } +} + +impl ExtensionPostDispatchWeightHandler<()> for PostDispatchInfo { + fn set_extension_weight(&mut self, _: &()) {} +} + // TODO: Eventually remove these impl ClassifyDispatch for u64 { @@ -752,6 +775,19 @@ mod weight_tests { pub fn f21(_origin: OriginFor) -> DispatchResult { unimplemented!(); } + + #[pallet::weight(1000)] + pub fn f99(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + + #[pallet::weight(1000)] + pub fn f100(_origin: OriginFor) -> DispatchResultWithPostInfo { + Ok(crate::dispatch::PostDispatchInfo { + actual_weight: Some(Weight::from_parts(500, 0)), + pays_fee: Pays::Yes, + }) + } } pub mod pallet_prelude { @@ -801,57 +837,61 @@ mod weight_tests { fn weights_are_correct() { // #[pallet::weight(1000)] let info = Call::::f00 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1000, 0)); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[pallet::weight((1000, DispatchClass::Mandatory))] let info = Call::::f01 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1000, 0)); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Mandatory); assert_eq!(info.pays_fee, Pays::Yes); // #[pallet::weight((1000, Pays::No))] let info = Call::::f02 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1000, 0)); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::No); // #[pallet::weight((1000, DispatchClass::Operational, Pays::No))] let info = Call::::f03 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(1000, 0)); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); assert_eq!(info.class, 
DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::No); // #[pallet::weight(((_a * 10 + _eb * 1) as u64, DispatchClass::Normal, Pays::Yes))] let info = Call::::f11 { a: 13, eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(150, 0)); // 13*10 + 20 + assert_eq!(info.total_weight(), Weight::from_parts(150, 0)); // 13*10 + 20 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[pallet::weight((0, DispatchClass::Operational, Pays::Yes))] let info = Call::::f12 { a: 10, eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, Weight::zero()); + assert_eq!(info.total_weight(), Weight::zero()); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::Yes); // #[pallet::weight(T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + // Weight::from_all(10_000))] let info = Call::::f20 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(12300, 10000)); // 100*3 + 1000*2 + 10_1000 + assert_eq!(info.total_weight(), Weight::from_parts(12300, 10000)); // 100*3 + 1000*2 + 10_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[pallet::weight(T::DbWeight::get().reads_writes(6, 5) + Weight::from_all(40_000))] let info = Call::::f21 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_parts(45600, 40000)); // 100*6 + 1000*5 + 40_1000 + assert_eq!(info.total_weight(), Weight::from_parts(45600, 40000)); // 100*6 + 1000*5 + 40_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); } #[test] fn extract_actual_weight_works() { - let pre = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; + let pre = DispatchInfo { + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), + ..Default::default() + }; assert_eq!( extract_actual_weight(&Ok(from_actual_ref_time(Some(7))), &pre), Weight::from_parts(7, 0) @@ -871,7 +911,11 @@ mod weight_tests { #[test] fn extract_actual_weight_caps_at_pre_weight() { - let pre = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; + let pre = DispatchInfo { + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), + ..Default::default() + }; assert_eq!( extract_actual_weight(&Ok(from_actual_ref_time(Some(1250))), &pre), Weight::from_parts(1000, 0) @@ -887,7 +931,11 @@ mod weight_tests { #[test] fn extract_actual_pays_fee_works() { - let pre = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; + let pre = DispatchInfo { + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), + ..Default::default() + }; assert_eq!(extract_actual_pays_fee(&Ok(from_actual_ref_time(Some(7))), &pre), Pays::Yes); assert_eq!( extract_actual_pays_fee(&Ok(from_actual_ref_time(Some(1000)).into()), &pre), @@ -920,7 +968,8 @@ mod weight_tests { ); let pre = DispatchInfo { - weight: Weight::from_parts(1000, 0), + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), pays_fee: Pays::No, ..Default::default() }; @@ -931,6 +980,26 @@ mod weight_tests { Pays::No ); } + + #[test] + fn weight_accrue_works() { + let mut post_dispatch = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(1100, 25)), + pays_fee: Pays::Yes, + }; + post_dispatch.refund(Weight::from_parts(100, 15)); + assert_eq!( + post_dispatch, + PostDispatchInfo { + actual_weight: Some(Weight::from_parts(1000, 10)), + pays_fee: Pays::Yes + } + ); + + let mut post_dispatch = 
PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes }; + post_dispatch.refund(Weight::from_parts(100, 15)); + assert_eq!(post_dispatch, PostDispatchInfo { actual_weight: None, pays_fee: Pays::Yes }); + } } #[cfg(test)] @@ -1107,3 +1176,407 @@ mod per_dispatch_class_tests { ); } } + +#[cfg(test)] +mod test_extensions { + use codec::{Decode, Encode}; + use scale_info::TypeInfo; + use sp_runtime::{ + impl_tx_ext_default, + traits::{ + DispatchInfoOf, DispatchOriginOf, Dispatchable, PostDispatchInfoOf, + TransactionExtension, + }, + transaction_validity::TransactionValidityError, + }; + use sp_weights::Weight; + + use super::{DispatchResult, PostDispatchInfo}; + + /// Test extension that refunds half its cost if the preset inner flag is set. + #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] + pub struct HalfCostIf(pub bool); + + impl TransactionExtension for HalfCostIf { + const IDENTIFIER: &'static str = "HalfCostIf"; + type Implicit = (); + type Val = (); + type Pre = bool; + + fn weight(&self, _: &RuntimeCall) -> sp_weights::Weight { + Weight::from_parts(100, 0) + } + + fn prepare( + self, + _val: Self::Val, + _origin: &DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(self.0) + } + + fn post_dispatch_details( + pre: Self::Pre, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + if pre { + Ok(Weight::from_parts(50, 0)) + } else { + Ok(Weight::zero()) + } + } + impl_tx_ext_default!(RuntimeCall; validate); + } + + /// Test extension that refunds its cost if the actual post dispatch weight up until this point + /// in the extension pipeline is less than the preset inner `ref_time` amount. + #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] + pub struct FreeIfUnder(pub u64); + + impl TransactionExtension for FreeIfUnder + where + RuntimeCall: Dispatchable, + { + const IDENTIFIER: &'static str = "FreeIfUnder"; + type Implicit = (); + type Val = (); + type Pre = u64; + + fn weight(&self, _: &RuntimeCall) -> sp_weights::Weight { + Weight::from_parts(200, 0) + } + + fn prepare( + self, + _val: Self::Val, + _origin: &DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(self.0) + } + + fn post_dispatch_details( + pre: Self::Pre, + _info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + if let Some(actual) = post_info.actual_weight { + if pre > actual.ref_time() { + return Ok(Weight::from_parts(200, 0)); + } + } + Ok(Weight::zero()) + } + impl_tx_ext_default!(RuntimeCall; validate); + } + + /// Test extension that sets its actual post dispatch `ref_time` weight to the preset inner + /// amount. 
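The test extensions in this module report their unspent weight from `post_dispatch_details`, and the surrounding pipeline applies it through the `ExtensionPostDispatchWeightHandler` and `RefundWeight` impls added earlier in this file: the worst-case extension weight is first added on top of the call's actual weight, then refunded piece by piece. A minimal sketch of that sequence (values illustrative, paths assumed as in the hunks above):

fn extension_weight_refund_sketch() {
    use frame_support::dispatch::{DispatchInfo, Pays, PostDispatchInfo};
    use sp_runtime::traits::{ExtensionPostDispatchWeightHandler, RefundWeight};
    use sp_weights::Weight;

    let info = DispatchInfo {
        call_weight: Weight::from_parts(1_000, 0),
        extension_weight: Weight::from_parts(600, 0),
        ..Default::default()
    };
    let mut post = PostDispatchInfo {
        actual_weight: Some(Weight::from_parts(500, 0)),
        pays_fee: Pays::Yes,
    };
    // Account for the declared worst-case extension weight: 500 + 600.
    post.set_extension_weight(&info);
    assert_eq!(post.actual_weight, Some(Weight::from_parts(1_100, 0)));
    // Each extension may then hand back whatever it did not actually use.
    post.refund(Weight::from_parts(500, 0));
    assert_eq!(post.actual_weight, Some(Weight::from_parts(600, 0)));
}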
+ #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] + pub struct ActualWeightIs(pub u64); + + impl TransactionExtension for ActualWeightIs { + const IDENTIFIER: &'static str = "ActualWeightIs"; + type Implicit = (); + type Val = (); + type Pre = u64; + + fn weight(&self, _: &RuntimeCall) -> sp_weights::Weight { + Weight::from_parts(300, 0) + } + + fn prepare( + self, + _val: Self::Val, + _origin: &DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(self.0) + } + + fn post_dispatch_details( + pre: Self::Pre, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + Ok(Weight::from_parts(300u64.saturating_sub(pre), 0)) + } + impl_tx_ext_default!(RuntimeCall; validate); + } +} + +#[cfg(test)] +// Do not complain about unused `dispatch` and `dispatch_aux`. +#[allow(dead_code)] +mod extension_weight_tests { + use crate::assert_ok; + + use super::*; + use sp_core::parameter_types; + use sp_runtime::{ + generic::{self, ExtrinsicFormat}, + traits::{Applyable, BlakeTwo256, DispatchTransaction, TransactionExtension}, + }; + use sp_weights::RuntimeDbWeight; + use test_extensions::{ActualWeightIs, FreeIfUnder, HalfCostIf}; + + use super::weight_tests::frame_system; + use frame_support::construct_runtime; + + pub type TxExtension = (HalfCostIf, FreeIfUnder, ActualWeightIs); + pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + pub type Header = generic::Header; + pub type Block = generic::Block; + pub type AccountId = u64; + pub type Balance = u32; + pub type BlockNumber = u32; + + construct_runtime!( + pub enum ExtRuntime { + System: frame_system, + } + ); + + impl frame_system::Config for ExtRuntime { + type Block = Block; + type AccountId = AccountId; + type Balance = Balance; + type BaseCallFilter = crate::traits::Everything; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type RuntimeTask = RuntimeTask; + type DbWeight = DbWeight; + type PalletInfo = PalletInfo; + } + + parameter_types! 
{ + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 100, + write: 1000, + }; + } + + pub struct ExtBuilder {} + + impl Default for ExtBuilder { + fn default() -> Self { + Self {} + } + } + + impl ExtBuilder { + pub fn build(self) -> sp_io::TestExternalities { + let mut ext = sp_io::TestExternalities::new(Default::default()); + ext.execute_with(|| {}); + ext + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + }) + } + } + + #[test] + fn no_post_dispatch_with_no_refund() { + ExtBuilder::default().build_and_execute(|| { + let call = RuntimeCall::System(frame_system::Call::::f99 {}); + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(1500), ActualWeightIs(0)); + let uxt = UncheckedExtrinsic::new_signed(call.clone(), 0, (), ext.clone()); + assert_eq!(uxt.extension_weight(), Weight::from_parts(600, 0)); + + let mut info = call.get_dispatch_info(); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); + info.extension_weight = ext.weight(&call); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); + let res = call.dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + assert!(post_info.actual_weight.is_none()); + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + assert!(post_info.actual_weight.is_none()); + }); + } + + #[test] + fn no_post_dispatch_refunds_when_dispatched() { + ExtBuilder::default().build_and_execute(|| { + let call = RuntimeCall::System(frame_system::Call::::f99 {}); + let ext: TxExtension = (HalfCostIf(true), FreeIfUnder(100), ActualWeightIs(0)); + let uxt = UncheckedExtrinsic::new_signed(call.clone(), 0, (), ext.clone()); + assert_eq!(uxt.extension_weight(), Weight::from_parts(600, 0)); + + let mut info = call.get_dispatch_info(); + assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); + info.extension_weight = ext.weight(&call); + let post_info = + ext.dispatch_transaction(Some(0).into(), call, &info, 0).unwrap().unwrap(); + // 1000 call weight + 50 + 200 + 0 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1250, 0))); + }); + } + + #[test] + fn post_dispatch_with_refunds() { + ExtBuilder::default().build_and_execute(|| { + let call = RuntimeCall::System(frame_system::Call::::f100 {}); + // First testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(2000), ActualWeightIs(0)); + let uxt = UncheckedExtrinsic::new_signed(call.clone(), 0, (), ext.clone()); + assert_eq!(uxt.extension_weight(), Weight::from_parts(600, 0)); + + let mut info = call.get_dispatch_info(); + assert_eq!(info.call_weight, Weight::from_parts(1000, 0)); + info.extension_weight = ext.weight(&call); + assert_eq!(info.total_weight(), Weight::from_parts(1600, 0)); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); + let res = call.clone().dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + // 500 actual call weight + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); + // add the 600 worst case extension weight + post_info.set_extension_weight(&info); + // extension weight should be refunded + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + // 500 actual call weight + 100 + 0 + 0 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(600, 0))); + + // Second testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(1100), ActualWeightIs(200)); + let (pre, _) = 
ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); + let res = call.clone().dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + // 500 actual call weight + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); + // add the 600 worst case extension weight + post_info.set_extension_weight(&info); + // extension weight should be refunded + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + // 500 actual call weight + 100 + 200 + 200 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1000, 0))); + + // Third testcase + let ext: TxExtension = (HalfCostIf(true), FreeIfUnder(1060), ActualWeightIs(200)); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); + let res = call.clone().dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + // 500 actual call weight + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); + // add the 600 worst case extension weight + post_info.set_extension_weight(&info); + // extension weight should be refunded + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + // 500 actual call weight + 50 + 0 + 200 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(750, 0))); + + // Fourth testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(100), ActualWeightIs(300)); + let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); + let res = call.clone().dispatch(Some(0).into()); + let mut post_info = res.unwrap(); + // 500 actual call weight + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); + // add the 600 worst case extension weight + post_info.set_extension_weight(&info); + // extension weight should be refunded + assert_ok!(>::post_dispatch( + pre, + &info, + &mut post_info, + 0, + &Ok(()), + )); + // 500 actual call weight + 100 + 200 + 300 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1100, 0))); + }); + } + + #[test] + fn checked_extrinsic_apply() { + ExtBuilder::default().build_and_execute(|| { + let call = RuntimeCall::System(frame_system::Call::::f100 {}); + // First testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(2000), ActualWeightIs(0)); + let xt = CheckedExtrinsic { + format: ExtrinsicFormat::Signed(0, ext.clone()), + function: call.clone(), + }; + assert_eq!(xt.extension_weight(), Weight::from_parts(600, 0)); + let mut info = call.get_dispatch_info(); + assert_eq!(info.call_weight, Weight::from_parts(1000, 0)); + info.extension_weight = ext.weight(&call); + assert_eq!(info.total_weight(), Weight::from_parts(1600, 0)); + let post_info = xt.apply::(&info, 0).unwrap().unwrap(); + // 500 actual call weight + 100 + 0 + 0 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(600, 0))); + + // Second testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(1100), ActualWeightIs(200)); + let xt = CheckedExtrinsic { + format: ExtrinsicFormat::Signed(0, ext), + function: call.clone(), + }; + let post_info = xt.apply::(&info, 0).unwrap().unwrap(); + // 500 actual call weight + 100 + 200 + 200 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1000, 0))); + + // Third testcase + let ext: TxExtension = (HalfCostIf(true), FreeIfUnder(1060), ActualWeightIs(200)); + let xt = CheckedExtrinsic { + format: ExtrinsicFormat::Signed(0, ext), + function: call.clone(), + }; + let post_info = xt.apply::(&info, 0).unwrap().unwrap(); + // 500 actual call weight + 50 + 0 + 200 + 
assert_eq!(post_info.actual_weight, Some(Weight::from_parts(750, 0))); + + // Fourth testcase + let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(100), ActualWeightIs(300)); + let xt = CheckedExtrinsic { + format: ExtrinsicFormat::Signed(0, ext), + function: call.clone(), + }; + let post_info = xt.apply::(&info, 0).unwrap().unwrap(); + // 500 actual call weight + 100 + 200 + 300 + assert_eq!(post_info.actual_weight, Some(Weight::from_parts(1100, 0))); + }); + } +} diff --git a/substrate/frame/support/src/generate_genesis_config.rs b/substrate/frame/support/src/generate_genesis_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..fc21e76c74273b3879ccba8edbaf039197cdb951 --- /dev/null +++ b/substrate/frame/support/src/generate_genesis_config.rs @@ -0,0 +1,951 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Helper macro allowing to construct JSON representation of partially initialized structs. + +use serde_json::Value; +extern crate alloc; +use alloc::{borrow::Cow, format, string::String}; + +/// Represents the initialization method of a field within a struct. +/// +/// This enum provides information about how it was initialized and the field name (as a `String`). +/// +/// Intended to be used in `build_struct_json_patch` macro. +#[derive(Debug)] +pub enum InitializedField<'a> { + /// The field was partially initialized (e.g., specific fields within the struct were set + /// manually). + Partial(Cow<'a, str>), + /// The field was fully initialized (e.g., using `new()` or `default()` like methods). + Full(Cow<'a, str>), +} + +impl<'a> InitializedField<'a> { + /// Returns a name of the field. + pub fn get_name(&'a self) -> &'a str { + match self { + Self::Partial(s) | Self::Full(s) => s, + } + } + + /// Injects a prefix to the field name. + pub fn add_prefix(&mut self, prefix: &str) { + match self { + Self::Partial(s) | Self::Full(s) => *s = format!("{prefix}.{s}").into(), + }; + } + + /// Creates new partial field instiance. + pub fn partial(s: &'a str) -> Self { + Self::Partial(s.into()) + } + + /// Creates new full field instiance. + pub fn full(s: &'a str) -> Self { + Self::Full(s.into()) + } +} + +impl PartialEq for InitializedField<'_> { + fn eq(&self, other: &String) -> bool { + #[inline] + /// We need to respect the `camelCase` naming for field names. This means that + /// `"camelCaseKey"` should be considered equal to `"camel_case_key"`. This + /// function implements this comparison. 
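In short, a recorded snake_case field name is considered equal to its serde `camelCase` rendering; assuming the new public `frame_support::generate_genesis_config` module path, the comparison behaves roughly like this:

fn camel_case_matching_sketch() {
    use frame_support::generate_genesis_config::InitializedField;

    // Underscores are dropped and case is ignored, so the Rust identifier
    // matches the key produced by `#[serde(rename_all = "camelCase")]`.
    assert_eq!(InitializedField::full("a_field"), String::from("aField"));
    assert_eq!(InitializedField::partial("nested_struct"), String::from("nestedStruct"));
}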
+ fn compare_keys(ident_chars: core::str::Chars, camel_chars: core::str::Chars) -> bool { + ident_chars + .filter(|c| *c != '_') + .map(|c| c.to_ascii_uppercase()) + .eq(camel_chars.map(|c| c.to_ascii_uppercase())) + } + match self { + InitializedField::Partial(field_name) | InitializedField::Full(field_name) => + field_name == other || compare_keys(field_name.chars(), other.chars()), + } + } +} + +/// Recursively removes keys from provided `json_value` object, retaining only specified keys. +/// +/// This function modifies the provided `json_value` in-place, keeping only the keys listed in +/// `keys_to_retain`. The keys are matched recursively by combining the current key with +/// the `current_root`, allowing for nested field retention. +/// +/// Keys marked as `Full`, are retained as-is. For keys marked as `Partial`, the +/// function recurses into nested objects to retain matching subfields. +/// +/// Function respects the `camelCase` serde_json attribute for structures. This means that +/// `"camelCaseKey"` key will be retained in JSON blob if `"camel_case_key"` exists in +/// `keys_to_retain`. +/// +/// Intended to be used from `build_struct_json_patch` macro. +pub fn retain_initialized_fields( + json_value: &mut Value, + keys_to_retain: &[InitializedField], + current_root: String, +) { + if let serde_json::Value::Object(ref mut map) = json_value { + map.retain(|key, value| { + let current_key = + if current_root.is_empty() { key.clone() } else { format!("{current_root}.{key}") }; + match keys_to_retain.iter().find(|key| **key == current_key) { + Some(InitializedField::Full(_)) => true, + Some(InitializedField::Partial(_)) => { + retain_initialized_fields(value, keys_to_retain, current_key.clone()); + true + }, + None => false, + } + }) + } +} + +/// Creates a JSON patch for given `struct_type`, supporting recursive field initialization. +/// +/// This macro creates a default `struct_type`, initializing specified fields (which can be nested) +/// with the provided values. Any fields not explicitly given are initialized with their default +/// values. The macro then serializes the fully initialized structure into a JSON blob, retaining +/// only the fields that were explicitly provided, either partially or fully initialized. +/// +/// Using this macro prevents errors from manually creating JSON objects, such as typos or +/// inconsistencies with the `struct_type` structure, by relying on the actual +/// struct definition. This ensures the generated JSON is valid and reflects any future changes +/// to the structure. +/// +/// # Example +/// +/// ```rust +/// use frame_support::build_struct_json_patch; +/// #[derive(Default, serde::Serialize, serde::Deserialize)] +/// #[serde(rename_all = "camelCase")] +/// struct RuntimeGenesisConfig { +/// a_field: u32, +/// b_field: B, +/// c_field: u32, +/// } +/// +/// #[derive(Default, serde::Serialize, serde::Deserialize)] +/// #[serde(rename_all = "camelCase")] +/// struct B { +/// i_field: u32, +/// j_field: u32, +/// } +/// impl B { +/// fn new() -> Self { +/// Self { i_field: 0, j_field: 2 } +/// } +/// } +/// +/// assert_eq!( +/// build_struct_json_patch! ( RuntimeGenesisConfig { +/// a_field: 66, +/// }), +/// serde_json::json!({ +/// "aField": 66, +/// }) +/// ); +/// +/// assert_eq!( +/// build_struct_json_patch! 
( RuntimeGenesisConfig { +/// //"partial" initialization of `b_field` +/// b_field: B { +/// i_field: 2, +/// } +/// }), +/// serde_json::json!({ +/// "bField": {"iField": 2} +/// }) +/// ); +/// +/// assert_eq!( +/// build_struct_json_patch! ( RuntimeGenesisConfig { +/// a_field: 66, +/// //"full" initialization of `b_field` +/// b_field: B::new() +/// }), +/// serde_json::json!({ +/// "aField": 66, +/// "bField": {"iField": 0, "jField": 2} +/// }) +/// ); +/// ``` +/// +/// In this example: +/// ```ignore +/// build_struct_json_patch! ( RuntimeGenesisConfig { +/// b_field: B { +/// i_field: 2, +/// } +/// }), +/// ``` +/// `b_field` is partially initialized, it will be expanded to: +/// ```ignore +/// RuntimeGenesisConfig { +/// b_field { +/// i_field: 2, +/// ..Default::default() +/// }, +/// ..Default::default() +/// } +/// ``` +/// While all other fields are initialized with default values. The macro serializes this, retaining +/// only the provided fields. +#[macro_export] +macro_rules! build_struct_json_patch { + ( + $($struct_type:ident)::+ { $($tail:tt)* } + ) => { + { + let mut keys = $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); + #[allow(clippy::needless_update)] + let struct_instance = $crate::build_struct_json_patch!($($struct_type)::+, keys @ { $($tail)* }); + let mut json_value = + $crate::__private::serde_json::to_value(struct_instance).expect("serialization to json should work. qed"); + $crate::generate_genesis_config::retain_initialized_fields(&mut json_value, &keys, Default::default()); + json_value + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ { $($tail:tt)* }) => { + $($struct_type)::+ { + ..$crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*) + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $($type:ident)::+ { $keyi:ident : $value:tt } ) => { + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::partial(stringify!($key))); + $all_keys.push( + $crate::generate_genesis_config::InitializedField::full(concat!(stringify!($key), ".", stringify!($keyi))) + ); + $($type)::+ { + $keyi:$value, + ..Default::default() + } + }, + ..Default::default() + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $($type:ident)::+ { $($body:tt)* } ) => { + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::partial(stringify!($key))); + let mut inner_keys = $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); + let value = $crate::build_struct_json_patch!($($type)::+, inner_keys @ { $($body)* }); + for i in inner_keys.iter_mut() { + i.add_prefix(stringify!($key)); + }; + $all_keys.extend(inner_keys); + value + }, + ..Default::default() + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $($type:ident)::+ { $($body:tt)* }, $($tail:tt)* ) => { + $($struct_type)::+ { + $key : { + $all_keys.push($crate::generate_genesis_config::InitializedField::partial(stringify!($key))); + let mut inner_keys = $crate::__private::Vec::<$crate::generate_genesis_config::InitializedField>::default(); + let value = $crate::build_struct_json_patch!($($type)::+, inner_keys @ { $($body)* }); + for i in inner_keys.iter_mut() { + i.add_prefix(stringify!($key)); + }; + $all_keys.extend(inner_keys); + value + }, + .. 
$crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*) + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $value:expr, $($tail:tt)* ) => { + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::full(stringify!($key))); + $value + }, + ..$crate::build_struct_json_patch!($($struct_type)::+, $all_keys @ $($tail)*) + } + }; + ($($struct_type:ident)::+, $all_keys:ident @ $key:ident: $value:expr ) => { + $($struct_type)::+ { + $key: { + $all_keys.push($crate::generate_genesis_config::InitializedField::full(stringify!($key))); + $value + }, + ..Default::default() + } + }; + + ($($struct_type:ident)::+, $all_keys:ident @ $(,)?) => { + $($struct_type)::+ { ..Default::default() } + }; +} + +#[cfg(test)] +mod test { + mod nested_mod { + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + pub struct InsideMod { + pub a: u32, + pub b: u32, + } + + pub mod nested_mod2 { + pub mod nested_mod3 { + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + pub struct InsideMod3 { + pub a: u32, + pub b: u32, + pub s: super::super::InsideMod, + } + } + } + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct TestStruct { + a: u32, + b: u32, + s: S, + s3: S3, + t3: S3, + i: Nested1, + e: E, + t: nested_mod::InsideMod, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3, + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct S { + x: u32, + } + + impl S { + fn new(c: u32) -> Self { + Self { x: c } + } + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct E(u8); + + #[derive(Default, Debug, serde::Serialize, serde::Deserialize)] + enum SomeEnum { + #[default] + A, + B(T), + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct S3 { + x: u32, + y: u32, + z: u32, + } + + impl S3 { + fn new(c: u32) -> Self { + Self { x: c, y: c, z: c } + } + + fn new_from_s(s: S) -> Self { + Self { x: s.x, ..Default::default() } + } + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct Nested3 { + a: u32, + b: u32, + s: S, + v: Vec<(u32, u32, u32, SomeEnum)>, + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct Nested2 { + a: u32, + iii: Nested3, + v: Vec, + s3: S3, + } + + impl Nested2 { + fn new(a: u32) -> Self { + Nested2 { + a, + v: vec![a, a, a], + iii: Nested3 { a, b: a, ..Default::default() }, + s3: S3 { x: a, ..Default::default() }, + } + } + } + + #[derive(Debug, Default, serde::Serialize, serde::Deserialize)] + struct Nested1 { + a: u32, + ii: Nested2, + } + + macro_rules! 
test { + ($($struct:ident)::+ { $($v:tt)* }, { $($j:tt)* } ) => {{ + println!("--"); + let expected = serde_json::json!({ $($j)* }); + println!("json: {}", serde_json::to_string_pretty(&expected).unwrap()); + let value = build_struct_json_patch!($($struct)::+ { $($v)* }); + println!("gc: {}", serde_json::to_string_pretty(&value).unwrap()); + assert_eq!(value, expected); + }}; + } + + #[test] + fn test_generate_config_macro() { + let t = 5; + const C: u32 = 5; + test!(TestStruct { b: 5 }, { "b": 5 }); + #[allow(unused_braces)] + { + test!(TestStruct { b: { 4 + 34 } } , { "b": 38 }); + } + test!(TestStruct { s: S { x: 5 } }, { "s": { "x": 5 } }); + test!( + TestStruct { s: S::new(C) }, + { + "s": { "x": 5 } + } + ); + test!( + TestStruct { s: S { x: t } }, + { + "s": { "x": t } + } + ); + test!( + TestStruct { + b: 5, + s: S { x: t } + }, + { + "b": 5, + "s": { "x": 5 } + } + ); + test!( + TestStruct { s: S::new(C), b: 5 }, + { + "s": { "x": 5 }, "b": 5 + } + ); + test!( + TestStruct { s3: S3 { x: t } }, + { + "s3": { "x": 5 } + } + ); + test!( + TestStruct { + s3: S3 { x: t, y: 2 } + }, + { + "s3": { "x": 5, "y": 2 } + } + ); + // // + test!( + TestStruct { + s3: S3 { x: t, y: 2 }, + t3: S3 { x: 2 } + }, + { + "s3": { "x": t, "y": 2 }, + "t3": { "x": 2 } + } + + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { iii: Nested3 { a: 2 } } + } + } + , + { + "i": { + "ii": { "iii": { "a": 2 } } + } + } + + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + iii: Nested3 { a: 2, s: S::new(C) } + } + } + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5} } + } + } + } + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + iii: Nested3 { s: S::new(C), a: 2 } + }, + a: 44 + }, + a: 3, + s3: S3 { x: 5 }, + b: 4 + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5} } + }, + "a": 44 + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2::new(66), + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4 + }, + { + "i": { + "ii": { + "a": 66, + "s3": { "x":66, "y": 0, "z": 0 }, + "iii": { "a": 66,"b":66, "s": { "x": 0 }, "v": Vec::::default() }, + "v": vec![66,66,66] + }, + "a": 44 + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + a: 66, + s3: S3 { x: 66 }, + iii: Nested3 { + a: 66,b:66 + }, + v: vec![66,66,66] + }, + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4 + }, + { + "i": { + "ii": { + "a": 66, + "s3": { "x":66, }, + "iii": { "a": 66,"b":66, }, + "v": vec![66,66,66] + }, + "a": 44 + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + iii: Nested3 { a: 2, s: S::new(C) }, + }, + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4, + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5 } }, + }, + "a" : 44, + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + s3: S3::new(5), + iii: Nested3 { a: 2, s: S::new(C) }, + }, + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4, + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5 } }, + "s3": {"x": 5, "y": 5, "z": 5 } + }, + "a" : 44, + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + test!( + TestStruct { + a: 3, + s3: S3 { x: 5 }, + b: 4, + i: Nested1 { + ii: Nested2 { + iii: Nested3 { a: 2, s: S::new(C) }, + s3: S3::new_from_s(S { x: 4 }) + }, + a: 44, + } + }, + { + "i": { + "ii": { + "iii": { "a": 2, "s": { "x": 5 } }, + "s3": {"x": 4, "y": 0, "z": 0 } + }, + "a" : 44, + }, + "a": 3, + "s3": 
{ "x": 5 }, + "b": 4 + } + ); + let i = [0u32, 1u32, 2u32]; + test!( + TestStruct { + i: Nested1 { + ii: Nested2 { + iii: Nested3 { + a: 2, + s: S::new(C), + v: i.iter() + .map(|x| (*x, 2 * x, 100 + x, SomeEnum::::A)) + .collect::>(), + }, + s3: S3::new_from_s(S { x: 4 }) + }, + a: 44, + }, + a: 3, + s3: S3 { x: 5 }, + b: 4, + }, + + { + "i": { + "ii": { + "iii": { + "a": 2, + "s": { "x": 5 }, + "v": i.iter() + .map(|x| (*x, 2 * x, 100 + x, SomeEnum::::A)) + .collect::>(), + }, + "s3": {"x": 4, "y": 0, "z": 0 } + }, + "a" : 44, + }, + "a": 3, + "s3": { "x": 5 }, + "b": 4 + } + ); + } + + #[test] + fn test_generate_config_macro_with_nested_mods() { + test!( + TestStruct { t: nested_mod::InsideMod { a: 32 } }, + { + "t" : { "a": 32 } + } + ); + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { a: 32 } + }, + { + "t" : { "a": 32 }, + "u" : { "a": 32 } + } + ); + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3 { + a: 32, + s: nested_mod::InsideMod { a: 34 }, + } + }, + { + "t" : { "a": 32 }, + "u" : { "a": 32, "s": { "a": 34 } } + } + ); + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3::default() + }, + { + "t" : { "a": 32 }, + "u" : { "a": 0, "b": 0, "s": { "a": 0, "b": 0} } + } + ); + + let i = [0u32, 1u32, 2u32]; + const C: u32 = 5; + test!( + TestStruct { + t: nested_mod::InsideMod { a: 32 }, + u: nested_mod::nested_mod2::nested_mod3::InsideMod3::default(), + i: Nested1 { + ii: Nested2 { + iii: Nested3 { + a: 2, + s: S::new(C), + v: i.iter() + .map(|x| (*x, 2 * x, 100 + x, SomeEnum::::A)) + .collect::>(), + }, + s3: S3::new_from_s(S { x: 4 }) + }, + a: 44, + }, + }, + { + "t" : { "a": 32 }, + "u" : { "a": 0, "b": 0, "s": { "a": 0, "b": 0} } , + "i": { + "ii": { + "iii": { + "a": 2, + "s": { "x": 5 }, + "v": i.iter() + .map(|x| (*x, 2 * x, 100 + x, SomeEnum::::A)) + .collect::>(), + }, + "s3": {"x": 4, "y": 0, "z": 0 } + }, + "a" : 44, + }, + } + ); + } +} + +#[cfg(test)] +mod retain_keys_test { + use super::*; + use serde_json::json; + + macro_rules! 
check_initialized_field_eq_cc( + ( $s:literal ) => { + let field = InitializedField::full($s); + let cc = inflector::cases::camelcase::to_camel_case($s); + println!("field: {:?}, cc: {}", field, cc); + assert_eq!(field,cc); + } ; + ( &[ $f:literal $(, $r:literal)* ]) => { + let field = InitializedField::full( + concat!( $f $(,".",$r)+ ) + ); + let cc = [ $f $(,$r)+ ].into_iter() + .map(|s| inflector::cases::camelcase::to_camel_case(s)) + .collect::>() + .join("."); + println!("field: {:?}, cc: {}", field, cc); + assert_eq!(field,cc); + } ; + ); + + #[test] + fn test_initialized_field_eq_cc_string() { + check_initialized_field_eq_cc!("a_"); + check_initialized_field_eq_cc!("abc"); + check_initialized_field_eq_cc!("aBc"); + check_initialized_field_eq_cc!("aBC"); + check_initialized_field_eq_cc!("ABC"); + check_initialized_field_eq_cc!("2abs"); + check_initialized_field_eq_cc!("2Abs"); + check_initialized_field_eq_cc!("2ABs"); + check_initialized_field_eq_cc!("2aBs"); + check_initialized_field_eq_cc!("AlreadyCamelCase"); + check_initialized_field_eq_cc!("alreadyCamelCase"); + check_initialized_field_eq_cc!("C"); + check_initialized_field_eq_cc!("1a"); + check_initialized_field_eq_cc!("_1a"); + check_initialized_field_eq_cc!("a_b"); + check_initialized_field_eq_cc!("_a_b"); + check_initialized_field_eq_cc!("a___b"); + check_initialized_field_eq_cc!("__a_b"); + check_initialized_field_eq_cc!("_a___b_C"); + check_initialized_field_eq_cc!("__A___B_C"); + check_initialized_field_eq_cc!(&["a_b", "b_c"]); + check_initialized_field_eq_cc!(&["al_pha", "_a___b_C"]); + check_initialized_field_eq_cc!(&["al_pha_", "_a___b_C"]); + check_initialized_field_eq_cc!(&["first_field", "al_pha_", "_a___b_C"]); + check_initialized_field_eq_cc!(&["al_pha_", "__2nd_field", "_a___b_C"]); + check_initialized_field_eq_cc!(&["al_pha_", "__2nd3and_field", "_a___b_C"]); + check_initialized_field_eq_cc!(&["_a1", "_a2", "_a3_"]); + } + + #[test] + fn test01() { + let mut v = json!({ + "a":1 + }); + let e = v.clone(); + retain_initialized_fields(&mut v, &[InitializedField::full("a")], String::default()); + assert_eq!(e, v); + } + + #[test] + fn test02() { + let mut v = json!({ + "a":1 + }); + retain_initialized_fields(&mut v, &[InitializedField::full("b")], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test03() { + let mut v = json!({}); + retain_initialized_fields(&mut v, &[], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test04() { + let mut v = json!({}); + retain_initialized_fields(&mut v, &[InitializedField::full("b")], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test05() { + let mut v = json!({ + "a":1 + }); + retain_initialized_fields(&mut v, &[], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test06() { + let mut v = json!({ + "a": { + "b":1, + "c":2 + } + }); + retain_initialized_fields(&mut v, &[], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test07() { + let mut v = json!({ + "a": { + "b":1, + "c":2 + } + }); + retain_initialized_fields(&mut v, &[InitializedField::full("a.b")], String::default()); + assert_eq!(Value::Object(Default::default()), v); + } + + #[test] + fn test08() { + let mut v = json!({ + "a": { + "b":1, + "c":2 + } + }); + let e = json!({ + "a": { + "b":1, + } + }); + retain_initialized_fields( + &mut v, + &[InitializedField::partial("a"), 
InitializedField::full("a.b")], + String::default(), + ); + assert_eq!(e, v); + } + + #[test] + fn test09() { + let mut v = json!({ + "a": { + "b":1, + "c":2 + } + }); + let e = json!({ + "a": { + "b":1, + "c":2, + } + }); + retain_initialized_fields(&mut v, &[InitializedField::full("a")], String::default()); + assert_eq!(e, v); + } +} diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 1f2ec71b191f7b965f03ddb55af4127cbb010cee..2e7ea0a07d7d5d497be8926ad4310e6c3e3144a8 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -53,6 +53,7 @@ pub mod __private { pub use paste; pub use scale_info; pub use serde; + pub use serde_json; pub use sp_core::{Get, OpaqueMetadata, Void}; pub use sp_crypto_hashing_proc_macro; pub use sp_inherents; @@ -63,7 +64,8 @@ pub mod __private { #[cfg(feature = "std")] pub use sp_runtime::{bounded_btree_map, bounded_vec}; pub use sp_runtime::{ - traits::Dispatchable, DispatchError, RuntimeDebug, StateVersion, TransactionOutcome, + traits::{AsSystemOriginSigner, AsTransactionAuthorizedOrigin, Dispatchable}, + DispatchError, RuntimeDebug, StateVersion, TransactionOutcome, }; #[cfg(feature = "std")] pub use sp_state_machine::BasicExternalities; @@ -1695,8 +1697,8 @@ pub mod pallet_macros { /// [`ValidateUnsigned`](frame_support::pallet_prelude::ValidateUnsigned) for /// type `Pallet`, and some optional where clause. /// - /// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used - /// to add some specific logic for transaction validation. + /// NOTE: There is also the [`sp_runtime::traits::TransactionExtension`] trait that can be + /// used to add some specific logic for transaction validation. /// /// ## Macro expansion /// @@ -2587,9 +2589,12 @@ sp_core::generate_feature_enabled_macro!(try_runtime_enabled, feature = "try-run sp_core::generate_feature_enabled_macro!(try_runtime_or_std_enabled, any(feature = "try-runtime", feature = "std"), $); sp_core::generate_feature_enabled_macro!(try_runtime_and_std_not_enabled, all(not(feature = "try-runtime"), not(feature = "std")), $); -// Helper for implementing GenesisBuilder runtime API +/// Helper for implementing GenesisBuilder runtime API pub mod genesis_builder_helper; +/// Helper for generating the `RuntimeGenesisConfig` instance for presets. +pub mod generate_genesis_config; + #[cfg(test)] mod test { // use super::*; diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 905d6143e4f1c9ba97c73963fd97b165e51adb06..3fdf8d6edc9585d0027130e90c768da4fea03fdb 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -825,14 +825,14 @@ impl SteppedMigrations for T { fn nth_id(n: u32) -> Option> { n.is_zero() - .then_some(T::id().encode()) + .then(|| T::id().encode()) .defensive_proof("nth_id should only be called with n==0") } fn nth_max_steps(n: u32) -> Option> { // It should be generally fine to call with n>0, but the code should not attempt to. 
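The `then_some` -> `then` change here (and the identical one just below) is about evaluation order: `bool::then_some` computes its argument even when the flag is false, whereas `bool::then` only runs the closure when it is true, so `T::id().encode()` and `T::max_steps()` are no longer evaluated for `n > 0`. In plain std terms:

fn lazy_vs_eager_sketch(n: u32) {
    fn expensive() -> Vec<u8> {
        vec![0u8; 1024]
    }

    // Evaluates `expensive()` unconditionally, even when `n != 0` ...
    let _eager: Option<Vec<u8>> = (n == 0).then_some(expensive());
    // ... while the closure form only runs when the condition holds.
    let _lazy: Option<Vec<u8>> = (n == 0).then(|| expensive());
}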
n.is_zero() - .then_some(T::max_steps()) + .then(|| T::max_steps()) .defensive_proof("nth_max_steps should only be called with n==0") } diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 631225b9a327c6ff467db9815b9539215ad39ca7..635036d488dfb63f4758cc847f4064de3fe25f8b 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -60,10 +60,10 @@ pub use misc::{ AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, DefensiveMax, DefensiveMin, DefensiveSaturating, DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, - ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsInherent, - IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, - SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, - WrapperKeepOpaque, WrapperOpaque, + ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, InherentBuilder, + IsInherent, IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, + PrivilegeCmp, SameOrOther, SignedTransactionBuilder, Time, TryCollect, TryDrop, TypedGet, + UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; diff --git a/substrate/frame/support/src/traits/dispatch.rs b/substrate/frame/support/src/traits/dispatch.rs index 7dc8d3e4f5a6e954c5eb726b257f1d2d73a29dc6..dbdf0885dd24a0c507552761e941d5b85c335129 100644 --- a/substrate/frame/support/src/traits/dispatch.rs +++ b/substrate/frame/support/src/traits/dispatch.rs @@ -482,7 +482,7 @@ pub trait OriginTrait: Sized { type Call; /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin: Into + CallerTrait + MaxEncodedLen; + type PalletsOrigin: Send + Sync + Into + CallerTrait + MaxEncodedLen; /// The AccountId used across the system. type AccountId; @@ -496,6 +496,14 @@ pub trait OriginTrait: Sized { /// Replace the caller with caller from the other origin fn set_caller_from(&mut self, other: impl Into); + /// Replace the caller with caller from the other origin + fn set_caller(&mut self, caller: Self::PalletsOrigin); + + /// Replace the caller with caller from the other origin + fn set_caller_from_signed(&mut self, caller_account: Self::AccountId) { + self.set_caller(Self::PalletsOrigin::from(RawOrigin::Signed(caller_account))) + } + /// Filter the call if caller is not root, if false is returned then the call must be filtered /// out. /// @@ -544,6 +552,17 @@ pub trait OriginTrait: Sized { fn as_system_ref(&self) -> Option<&RawOrigin> { self.caller().as_system_ref() } + + /// Extract a reference to the signer, if that's what the caller is. + fn as_signer(&self) -> Option<&Self::AccountId> { + self.caller().as_system_ref().and_then(|s| { + if let RawOrigin::Signed(ref who) = s { + Some(who) + } else { + None + } + }) + } } #[cfg(test)] diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 4d3b122daf67bf78e775c6ab54e6b6d91be5a3ed..a914b3a914c1dcca032903e384748a9ff02549cf 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -919,32 +919,82 @@ pub trait IsInherent { } /// An extrinsic on which we can get access to call. 
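The two `OriginTrait` additions above, `set_caller_from_signed` and `as_signer`, are thin conveniences over the existing `PalletsOrigin`/`RawOrigin` plumbing. A hedged sketch of how downstream code might use them for a typical runtime origin (the `u64` account id is purely illustrative):

fn origin_helpers_sketch<O>(origin: &mut O)
where
    O: frame_support::traits::OriginTrait<AccountId = u64>,
{
    // Re-point the origin at a signed caller without rebuilding it.
    origin.set_caller_from_signed(42);
    // Read the signer back out, if that is what the caller currently is.
    assert_eq!(origin.as_signer(), Some(&42));
}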
-pub trait ExtrinsicCall: sp_runtime::traits::Extrinsic { +pub trait ExtrinsicCall: sp_runtime::traits::ExtrinsicLike { + type Call; + /// Get the call of the extrinsic. fn call(&self) -> &Self::Call; } -#[cfg(feature = "std")] -impl ExtrinsicCall for sp_runtime::testing::TestXt +impl ExtrinsicCall + for sp_runtime::generic::UncheckedExtrinsic where - Call: codec::Codec + Sync + Send + TypeInfo, + Address: TypeInfo, + Call: TypeInfo, + Signature: TypeInfo, Extra: TypeInfo, { - fn call(&self) -> &Self::Call { - &self.call + type Call = Call; + + fn call(&self) -> &Call { + &self.function } } -impl ExtrinsicCall +/// Interface for types capable of constructing an inherent extrinsic. +pub trait InherentBuilder: ExtrinsicCall { + /// Create a new inherent from a given call. + fn new_inherent(call: Self::Call) -> Self; +} + +impl InherentBuilder for sp_runtime::generic::UncheckedExtrinsic where Address: TypeInfo, Call: TypeInfo, Signature: TypeInfo, - Extra: sp_runtime::traits::SignedExtension + TypeInfo, + Extra: TypeInfo, { - fn call(&self) -> &Self::Call { - &self.function + fn new_inherent(call: Self::Call) -> Self { + Self::new_bare(call) + } +} + +/// Interface for types capable of constructing a signed transaction. +pub trait SignedTransactionBuilder: ExtrinsicCall { + type Address; + type Signature; + type Extension; + + /// Create a new signed transaction from a given call and extension using the provided signature + /// data. + fn new_signed_transaction( + call: Self::Call, + signed: Self::Address, + signature: Self::Signature, + tx_ext: Self::Extension, + ) -> Self; +} + +impl SignedTransactionBuilder + for sp_runtime::generic::UncheckedExtrinsic +where + Address: TypeInfo, + Call: TypeInfo, + Signature: TypeInfo, + Extension: TypeInfo, +{ + type Address = Address; + type Signature = Signature; + type Extension = Extension; + + fn new_signed_transaction( + call: Self::Call, + signed: Address, + signature: Signature, + tx_ext: Extension, + ) -> Self { + Self::new_signed(call, signed, signature, tx_ext) } } diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 5c12c082305f4da612041b4bb92db1f415e11d24..2187ee22b395c39d2becf8070355e54387f6ef82 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -59,10 +59,7 @@ std = [ "sp-version/std", "test-pallet/std", ] -experimental = [ - "frame-support/experimental", - "frame-system/experimental", -] +experimental = ["frame-support/experimental", "frame-system/experimental"] try-runtime = [ "frame-executive/try-runtime", "frame-support/try-runtime", diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index d8dc7bd45bc66bb097373a441a5d1f0a4f9da1d9..9608fa58e3c985ad649afbccfbde8ca6bc45404b 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -32,8 +32,9 @@ error[E0599]: no function or associated item named `create_inherent` found for s | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope - = note: the following trait defines an item `create_inherent`, perhaps you need to implement it: - candidate #1: `ProvideInherent` + = note: the following traits define an item `create_inherent`, perhaps you 
need to implement one of them: + candidate #1: `CreateInherent` + candidate #2: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `is_inherent` found for struct `pallet::Pallet` in the current scope diff --git a/substrate/frame/support/test/tests/enum_deprecation.rs b/substrate/frame/support/test/tests/enum_deprecation.rs index ed9b2b5a735d177d35c88c01d652021e3dcda698..c1167dfe339ce0a013c0d9e7cb742a8161ddab75 100644 --- a/substrate/frame/support/test/tests/enum_deprecation.rs +++ b/substrate/frame/support/test/tests/enum_deprecation.rs @@ -132,8 +132,12 @@ impl pallet::Config for Runtime { pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = - sp_runtime::testing::TestXt>; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + sp_runtime::testing::UintAuthorityId, + frame_system::CheckNonZeroSender, +>; frame_support::construct_runtime!( pub struct Runtime { diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 3d6aa1d83749e447269fd97e54a6215ca9409b95..b0b83f7724997d2f8d0e489d761d7dc523d63f5a 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -27,19 +27,21 @@ use frame_support::{ storage::{unhashed, unhashed::contains_prefixed_key}, traits::{ ConstU32, GetCallIndex, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, - OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, StorageVersion, - UnfilteredDispatchable, + OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, SignedTransactionBuilder, + StorageVersion, UnfilteredDispatchable, }, weights::{RuntimeDbWeight, Weight}, OrdNoBound, PartialOrdNoBound, }; +use frame_system::offchain::{CreateSignedTransaction, CreateTransactionBase, SigningTypes}; use scale_info::{meta_type, TypeInfo}; use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, TestExternalities, }; use sp_runtime::{ - traits::{Dispatchable, Extrinsic as ExtrinsicT, SignaturePayload as SignaturePayloadT}, + testing::UintAuthorityId, + traits::{Block as BlockT, Dispatchable}, DispatchError, ModuleError, }; @@ -751,8 +753,51 @@ impl pallet5::Config for Runtime { pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = - sp_runtime::testing::TestXt>; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< + u64, + RuntimeCall, + UintAuthorityId, + frame_system::CheckNonZeroSender, +>; +pub type UncheckedSignaturePayload = sp_runtime::generic::UncheckedSignaturePayload< + u64, + UintAuthorityId, + frame_system::CheckNonZeroSender, +>; + +impl SigningTypes for Runtime { + type Public = UintAuthorityId; + type Signature = UintAuthorityId; +} + +impl CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; +} + +impl CreateSignedTransaction for Runtime +where + RuntimeCall: From, +{ + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( + call: RuntimeCall, + _public: UintAuthorityId, + account: u64, + nonce: u64, + ) -> Option { + Some(UncheckedExtrinsic::new_signed( + call, + nonce, + account.into(), + frame_system::CheckNonZeroSender::new(), + )) + } +} frame_support::construct_runtime!( pub struct Runtime { @@ 
-814,7 +859,8 @@ fn call_expand() { assert_eq!( call_foo.get_dispatch_info(), DispatchInfo { - weight: frame_support::weights::Weight::from_parts(3, 0), + call_weight: frame_support::weights::Weight::from_parts(3, 0), + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes } @@ -902,10 +948,8 @@ fn inherent_expand() { let inherents = InherentData::new().create_extrinsics(); - let expected = vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }]; + let expected = + vec![UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {}))]; assert_eq!(expected, inherents); let block = Block::new( @@ -917,14 +961,11 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 0, + })), ], ); @@ -939,14 +980,11 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 0, bar: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 0, + bar: 0, + })), ], ); @@ -960,10 +998,9 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }], + vec![UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + }))], ); let mut inherent = InherentData::new(); @@ -978,10 +1015,12 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: Some((1, Default::default())), - }], + vec![UncheckedExtrinsic::new_signed( + RuntimeCall::Example(pallet::Call::foo_no_post_info {}), + 1, + 1.into(), + Default::default(), + )], ); let mut inherent = InherentData::new(); @@ -997,14 +1036,13 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + })), ], ); @@ -1019,18 +1057,14 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + 
UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { + foo: 0, + })), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), ], ); @@ -1045,18 +1079,17 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), - signature: None, - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), - signature: Some((1, Default::default())), - }, - UncheckedExtrinsic { - call: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), - signature: None, - }, + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { + foo: 1, + bar: 1, + })), + UncheckedExtrinsic::new_signed( + RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), + 1, + 1.into(), + Default::default(), + ), + UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), ], ); @@ -1838,18 +1871,22 @@ fn metadata() { } let extrinsic = ExtrinsicMetadata { - version: 4, + version: 5, signed_extensions: vec![SignedExtensionMetadata { identifier: "UnitSignedExtension", ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], - address_ty: meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureAddress>(), - call_ty: meta_type::<::Call>(), + address_ty: meta_type::< + <<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Address + >(), + call_ty: meta_type::<>::RuntimeCall>(), signature_ty: meta_type::< - <::SignaturePayload as SignaturePayloadT>::Signature + <<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Signature + >(), + extra_ty: meta_type::< + <<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Extension >(), - extra_ty: meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), }; let outer_enums = OuterEnums { @@ -1929,21 +1966,28 @@ fn metadata_ir_pallet_runtime_docs() { fn extrinsic_metadata_ir_types() { let ir = Runtime::metadata_ir().extrinsic; - assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureAddress>(), ir.address_ty); + assert_eq!( + meta_type::<<<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Address>(), + ir.address_ty + ); assert_eq!(meta_type::(), ir.address_ty); - assert_eq!(meta_type::<::Call>(), ir.call_ty); + assert_eq!( + meta_type::<>::RuntimeCall>(), + ir.call_ty + ); assert_eq!(meta_type::(), ir.call_ty); assert_eq!( - meta_type::< - <::SignaturePayload as SignaturePayloadT>::Signature, - >(), + meta_type::<<<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Signature>(), ir.signature_ty ); - assert_eq!(meta_type::<()>(), ir.signature_ty); + assert_eq!(meta_type::(), ir.signature_ty); - assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), ir.extra_ty); + assert_eq!( + meta_type::<<<::Block as BlockT>::Extrinsic as SignedTransactionBuilder>::Extension>(), + ir.extra_ty + ); assert_eq!(meta_type::>(), ir.extra_ty); } diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index 09a49617044da4a24b3adc4607a8c0c4b144f6e8..2e4baae1db7cf0dc7876d896e0b71b25959fed76 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -360,7 +360,8 @@ fn call_expand() { assert_eq!( call_foo.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(3, 0), + call_weight: Weight::from_parts(3, 0), + extension_weight: 
Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes } @@ -372,7 +373,8 @@ fn call_expand() { assert_eq!( call_foo.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(3, 0), + call_weight: Weight::from_parts(3, 0), + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes } @@ -940,9 +942,9 @@ fn metadata() { let extrinsic = ExtrinsicMetadata { ty: scale_info::meta_type::(), - version: 4, + version: 5, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: scale_info::meta_type::<()>(), additional_signed: scale_info::meta_type::<()>(), }], diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/task_valid.rs b/substrate/frame/support/test/tests/pallet_ui/pass/task_valid.rs index 234e220f49d8901afd3abc3c24bd947c708f63fb..bc66c09de7e80dddf4e710161b569e81f9b8909a 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/task_valid.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/task_valid.rs @@ -39,5 +39,31 @@ mod pallet { } } +#[frame_support::pallet(dev_mode)] +mod pallet_with_instance { + use frame_support::pallet_prelude::{ValueQuery, StorageValue}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, u32, ValueQuery>; + + #[pallet::tasks_experimental] + impl, I> Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|i, j| i == 0u32 && j == 2u64)] + #[pallet::task_list(vec![(0u32, 2u64), (2u32, 4u64)].iter())] + #[pallet::task_weight(0.into())] + fn foo(_i: u32, _j: u64) -> frame_support::pallet_prelude::DispatchResult { + >::get(); + Ok(()) + } + } +} + fn main() { } diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.rs b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.rs new file mode 100644 index 0000000000000000000000000000000000000000..52ae19dcb02d614cd51ae87c0ba85298b18f809f --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
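The call_expand hunks above are part of a pattern repeated throughout this diff: `DispatchInfo` no longer carries a single `weight` field but separate `call_weight` and `extension_weight` parts. A minimal sketch of how the two parts combine; the `total` helper below is illustrative only (the tests later in the diff rely on a `total_weight()` accessor for the same purpose):

use frame_support::{
    dispatch::{DispatchClass, DispatchInfo, Pays},
    weights::Weight,
};

// Illustrative only: the weight charged for an extrinsic up front is the call's own
// weight plus the declared weight of its transaction extension pipeline.
fn total(info: &DispatchInfo) -> Weight {
    info.call_weight.saturating_add(info.extension_weight)
}

fn weight_split_example() {
    let info = DispatchInfo {
        call_weight: Weight::from_parts(3, 0),
        extension_weight: Weight::from_parts(10, 0),
        class: DispatchClass::Normal,
        pays_fee: Pays::Yes,
    };
    assert_eq!(total(&info), Weight::from_parts(13, 0));
}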
+ +#[frame_support::pallet(dev_mode)] +mod pallet_with_instance { + use frame_support::pallet_prelude::{ValueQuery, StorageValue}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, u32, ValueQuery>; + + #[pallet::task_enum] + pub enum Task {} + + #[pallet::tasks_experimental] + impl frame_support::traits::Task for Task {} +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1dc9e3d4aa11db16db3ea5a6c2709c73c8d55641 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen.stderr @@ -0,0 +1,5 @@ +error: Invalid generic declaration, trait is defined with instance but generic use none + --> tests/pallet_ui/task_invalid_gen.rs:32:11 + | +32 | pub enum Task {} + | ^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.rs b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.rs new file mode 100644 index 0000000000000000000000000000000000000000..56392cbad2dc8c0d4a5fd0e38b9aab29f22b287d --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet_with_instance { + use frame_support::pallet_prelude::{ValueQuery, StorageValue}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, u32, ValueQuery>; + + #[pallet::task_enum] + pub enum Task {} + + #[pallet::tasks_experimental] + impl frame_support::traits::Task for Task {} +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.stderr b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.stderr new file mode 100644 index 0000000000000000000000000000000000000000..448825e601556a74c7837a7ccb38aa0ba3a97e55 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/task_invalid_gen2.stderr @@ -0,0 +1,13 @@ +error: Invalid type def generics: expected `T` or `T: Config` or `T, I = ()` or `T: Config, I: 'static = ()` + --> tests/pallet_ui/task_invalid_gen2.rs:32:11 + | +32 | pub enum Task {} + | ^^^^ + +error: unexpected end of input, expected `T` + --> tests/pallet_ui/task_invalid_gen2.rs:18:1 + | +18 | #[frame_support::pallet(dev_mode)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/runtime.rs b/substrate/frame/support/test/tests/runtime.rs index 06c2b5b7071c1357b3b6bfb1b699662fc10f0c20..5335e08837e4adb2735fd3beecd21f8023be7778 100644 --- a/substrate/frame/support/test/tests/runtime.rs +++ b/substrate/frame/support/test/tests/runtime.rs @@ -25,7 +25,10 @@ use codec::MaxEncodedLen; use frame_support::{ derive_impl, parameter_types, traits::PalletInfo as _, weights::RuntimeDbWeight, }; -use frame_system::limits::{BlockLength, BlockWeights}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + DispatchEventInfo, +}; use scale_info::TypeInfo; use sp_core::sr25519; use sp_runtime::{ @@ -533,8 +536,13 @@ fn origin_codec() { fn event_codec() { use codec::Encode; - let event = - frame_system::Event::::ExtrinsicSuccess { dispatch_info: Default::default() }; + let event = frame_system::Event::::ExtrinsicSuccess { + dispatch_info: DispatchEventInfo { + weight: Default::default(), + class: Default::default(), + pays_fee: Default::default(), + }, + }; assert_eq!(RuntimeEvent::from(event).encode()[0], 30); let event = module1::Event::::A(test_pub()); @@ -624,7 +632,8 @@ fn call_weight_should_attach_to_call_enum() { assert_eq!( module3::Call::::operational {}.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), + extension_weight: Default::default(), class: DispatchClass::Operational, pays_fee: Pays::Yes }, @@ -633,7 +642,8 @@ fn call_weight_should_attach_to_call_enum() { assert_eq!( module3::Call::::aux_4 {}.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(3, 0), + call_weight: Weight::from_parts(3, 0), + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes }, @@ -894,7 +904,7 @@ fn test_metadata() { ty: meta_type::(), version: 4, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs index 
4233db21e2031baf136bed1e6317bf244d3b21b8..7b92073a82b1a789ca2b33296893fcb8d62a3a62 100644 --- a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -25,7 +25,10 @@ use codec::MaxEncodedLen; use frame_support::{ derive_impl, parameter_types, traits::PalletInfo as _, weights::RuntimeDbWeight, }; -use frame_system::limits::{BlockLength, BlockWeights}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + DispatchEventInfo, +}; use scale_info::TypeInfo; use sp_core::sr25519; use sp_runtime::{ @@ -533,8 +536,13 @@ fn origin_codec() { fn event_codec() { use codec::Encode; - let event = - frame_system::Event::::ExtrinsicSuccess { dispatch_info: Default::default() }; + let event = frame_system::Event::::ExtrinsicSuccess { + dispatch_info: DispatchEventInfo { + weight: Default::default(), + class: Default::default(), + pays_fee: Default::default(), + }, + }; assert_eq!(RuntimeEvent::from(event).encode()[0], 30); let event = module1::Event::::A(test_pub()); @@ -624,7 +632,8 @@ fn call_weight_should_attach_to_call_enum() { assert_eq!( module3::Call::::operational {}.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), + extension_weight: Default::default(), class: DispatchClass::Operational, pays_fee: Pays::Yes }, @@ -633,7 +642,8 @@ fn call_weight_should_attach_to_call_enum() { assert_eq!( module3::Call::::aux_4 {}.get_dispatch_info(), DispatchInfo { - weight: Weight::from_parts(3, 0), + call_weight: Weight::from_parts(3, 0), + extension_weight: Default::default(), class: DispatchClass::Normal, pays_fee: Pays::Yes }, @@ -894,7 +904,7 @@ fn test_metadata() { ty: meta_type::(), version: 4, signed_extensions: vec![SignedExtensionMetadata { - identifier: "UnitSignedExtension", + identifier: "UnitTransactionExtension", ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], diff --git a/substrate/frame/support/test/tests/tasks.rs b/substrate/frame/support/test/tests/tasks.rs new file mode 100644 index 0000000000000000000000000000000000000000..97e58388362bb66e12539d922c8c01ce1f6fdb67 --- /dev/null +++ b/substrate/frame/support/test/tests/tasks.rs @@ -0,0 +1,135 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
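The `#[pallet::tasks_experimental]` blocks added in the UI tests above declare units of work through four attributes: an index, a validity condition, an enumeration of candidate arguments, and a weight. A rough conceptual analogue in plain Rust, reusing the same `(0u32, 2u64)`-style values as those tests; this only illustrates the shape and is not the `frame_support::traits::Task` machinery itself:

// Conceptual analogue of a pallet task: enumerable, checkable, runnable work items.
struct FooTask {
    i: u32,
    j: u64,
}

impl FooTask {
    // Mirrors `#[pallet::task_list(...)]`: the candidate argument sets.
    fn iter() -> impl Iterator<Item = FooTask> {
        vec![(0u32, 2u64), (2u32, 4u64)].into_iter().map(|(i, j)| FooTask { i, j })
    }

    // Mirrors `#[pallet::task_condition(|i, j| i == 0u32 && j == 2u64)]`.
    fn is_valid(&self) -> bool {
        self.i == 0 && self.j == 2
    }

    // Mirrors the task body: only valid work items may run; the real pallet
    // writes `(i, j)` into `SomeStorage` at this point.
    fn run(&self) -> Result<(), &'static str> {
        if !self.is_valid() {
            return Err("task condition not met");
        }
        Ok(())
    }
}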
+ +#![cfg(feature = "experimental")] + +#[frame_support::pallet(dev_mode)] +mod my_pallet { + use frame_support::pallet_prelude::{StorageValue, ValueQuery}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, (u32, u64), ValueQuery>; + + #[pallet::tasks_experimental] + impl, I> Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|i, j| i == 0u32 && j == 2u64)] + #[pallet::task_list(vec![(0u32, 2u64), (2u32, 4u64)].iter())] + #[pallet::task_weight(0.into())] + fn foo(i: u32, j: u64) -> frame_support::pallet_prelude::DispatchResult { + >::put((i, j)); + Ok(()) + } + } +} + +// Another pallet for which we won't implement the default instance. +#[frame_support::pallet(dev_mode)] +mod my_pallet_2 { + use frame_support::pallet_prelude::{StorageValue, ValueQuery}; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeStorage = StorageValue<_, (u32, u64), ValueQuery>; + + #[pallet::tasks_experimental] + impl, I> Pallet { + #[pallet::task_index(0)] + #[pallet::task_condition(|i, j| i == 0u32 && j == 2u64)] + #[pallet::task_list(vec![(0u32, 2u64), (2u32, 4u64)].iter())] + #[pallet::task_weight(0.into())] + fn foo(i: u32, j: u64) -> frame_support::pallet_prelude::DispatchResult { + >::put((i, j)); + Ok(()) + } + } +} + +type BlockNumber = u32; +type AccountId = u64; +type Header = sp_runtime::generic::Header; +type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +type Block = sp_runtime::generic::Block; + +frame_support::construct_runtime!( + pub enum Runtime + { + System: frame_system, + MyPallet: my_pallet, + MyPallet2: my_pallet::, + #[cfg(feature = "frame-feature-testing")] + MyPallet3: my_pallet::, + MyPallet4: my_pallet_2::, + } +); + +// NOTE: Needed for derive_impl expansion +use frame_support::derive_impl; +#[frame_support::derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; + type AccountId = AccountId; +} + +impl my_pallet::Config for Runtime {} + +impl my_pallet::Config for Runtime {} + +#[cfg(feature = "frame-feature-testing")] +impl my_pallet::Config for Runtime {} + +impl my_pallet_2::Config for Runtime {} + +fn new_test_ext() -> sp_io::TestExternalities { + use sp_runtime::BuildStorage; + + RuntimeGenesisConfig::default().build_storage().unwrap().into() +} + +#[test] +fn tasks_work() { + new_test_ext().execute_with(|| { + use frame_support::instances::{Instance1, Instance2}; + + let task = RuntimeTask::MyPallet(my_pallet::Task::::Foo { i: 0u32, j: 2u64 }); + + frame_support::assert_ok!(System::do_task(RuntimeOrigin::signed(1), task.clone(),)); + assert_eq!(my_pallet::SomeStorage::::get(), (0, 2)); + + let task = RuntimeTask::MyPallet2(my_pallet::Task::::Foo { i: 0u32, j: 2u64 }); + + frame_support::assert_ok!(System::do_task(RuntimeOrigin::signed(1), task.clone(),)); + assert_eq!(my_pallet::SomeStorage::::get(), (0, 2)); + + let task = + RuntimeTask::MyPallet4(my_pallet_2::Task::::Foo { i: 0u32, j: 2u64 }); + + frame_support::assert_ok!(System::do_task(RuntimeOrigin::signed(1), task.clone(),)); + assert_eq!(my_pallet_2::SomeStorage::::get(), (0, 2)); + }); +} diff --git a/substrate/frame/system/benchmarking/src/extensions.rs b/substrate/frame/system/benchmarking/src/extensions.rs new file mode 100644 index 
0000000000000000000000000000000000000000..3c6626030e227dd8ed41c6f6a63bc49c95121cf2 --- /dev/null +++ b/substrate/frame/system/benchmarking/src/extensions.rs @@ -0,0 +1,248 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Benchmarks for System Extensions + +#![cfg(feature = "runtime-benchmarks")] + +use alloc::vec; +use frame_benchmarking::{account, v2::*, BenchmarkError}; +use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, + weights::Weight, +}; +use frame_system::{ + pallet_prelude::*, CheckGenesis, CheckMortality, CheckNonZeroSender, CheckNonce, + CheckSpecVersion, CheckTxVersion, CheckWeight, Config, ExtensionsWeightInfo, Pallet as System, + RawOrigin, +}; +use sp_runtime::{ + generic::Era, + traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable, Get, + }, +}; + +pub struct Pallet(System); + +#[benchmarks(where + T: Send + Sync, + T::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + AsTransactionAuthorizedOrigin + Clone) +] +mod benchmarks { + use super::*; + + #[benchmark] + fn check_genesis() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckGenesis::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + + Ok(()) + } + + #[benchmark] + fn check_mortality_mortal_transaction() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckMortality::::from(Era::mortal(16, 256)); + let block_number: BlockNumberFor = 17u32.into(); + System::::set_block_number(block_number); + let prev_block: BlockNumberFor = 16u32.into(); + let default_hash: T::Hash = Default::default(); + frame_system::BlockHash::::insert(prev_block, default_hash); + let caller = account("caller", 0, 0); + let info = DispatchInfo { + call_weight: Weight::from_parts(100, 0), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_mortality_immortal_transaction() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckMortality::::from(Era::immortal()); + let block_number: BlockNumberFor = 17u32.into(); + System::::set_block_number(block_number); + let prev_block: BlockNumberFor = 16u32.into(); + let default_hash: T::Hash = Default::default(); + frame_system::BlockHash::::insert(prev_block, default_hash); + let genesis_block: BlockNumberFor = 0u32.into(); + frame_system::BlockHash::::insert(genesis_block, default_hash); + 
let caller = account("caller", 0, 0); + let info = DispatchInfo { + call_weight: Weight::from_parts(100, 0), + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_non_zero_sender() -> Result<(), BenchmarkError> { + let len = 0_usize; + let ext = CheckNonZeroSender::::new(); + let caller = account("caller", 0, 0); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_nonce() -> Result<(), BenchmarkError> { + let caller: T::AccountId = account("caller", 0, 0); + let mut info = frame_system::AccountInfo::default(); + info.nonce = 1u32.into(); + info.providers = 1; + let expected_nonce = info.nonce + 1u32.into(); + frame_system::Account::::insert(caller.clone(), info); + let len = 0_usize; + let ext = CheckNonce::::from(1u32.into()); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, len, |_| { + Ok(().into()) + }) + .unwrap() + .unwrap(); + } + + let updated_info = frame_system::Account::::get(caller.clone()); + assert_eq!(updated_info.nonce, expected_nonce); + Ok(()) + } + + #[benchmark] + fn check_spec_version() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckSpecVersion::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_tx_version() -> Result<(), BenchmarkError> { + let len = 0_usize; + let caller = account("caller", 0, 0); + let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + + #[block] + { + CheckTxVersion::::new() + .test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(().into())) + .unwrap() + .unwrap(); + } + Ok(()) + } + + #[benchmark] + fn check_weight() -> Result<(), BenchmarkError> { + let caller = account("caller", 0, 0); + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + let extension_weight = ::ExtensionsWeightInfo::check_weight(); + let info = DispatchInfo { + call_weight: Weight::from_parts(base_extrinsic.ref_time() * 5, 0), + extension_weight, + class: DispatchClass::Normal, + ..Default::default() + }; + let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(base_extrinsic.ref_time() * 2, 0)), + pays_fee: Default::default(), + }; + let len = 0_usize; + let base_extrinsic = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + + let ext = CheckWeight::::new(); + + let initial_block_weight = 
Weight::from_parts(base_extrinsic.ref_time() * 2, 0); + frame_system::BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::zero(), DispatchClass::Mandatory); + current_weight.set(initial_block_weight, DispatchClass::Normal); + }); + + #[block] + { + ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, |_| Ok(post_info)) + .unwrap() + .unwrap(); + } + + assert_eq!( + System::::block_weight().total(), + initial_block_weight + + base_extrinsic + + post_info.actual_weight.unwrap().saturating_add(extension_weight), + ); + Ok(()) + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test,); +} diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs index f66d20ac8aed928521f6021cded6cd9c93110f49..dc3c7420317d620ab5c3ae61674a32d9d0179b49 100644 --- a/substrate/frame/system/benchmarking/src/lib.rs +++ b/substrate/frame/system/benchmarking/src/lib.rs @@ -20,6 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; +pub mod extensions; #[cfg(feature = "runtime-benchmarks")] pub mod inner; diff --git a/substrate/frame/system/benchmarking/src/mock.rs b/substrate/frame/system/benchmarking/src/mock.rs index 42e4aa0eaf4b70b930f16da4145f62a8137cc883..6b126619ce5bfab57882868e19c936cbdec0eb27 100644 --- a/substrate/frame/system/benchmarking/src/mock.rs +++ b/substrate/frame/system/benchmarking/src/mock.rs @@ -20,7 +20,7 @@ #![cfg(test)] use codec::Encode; -use frame_support::derive_impl; +use frame_support::{derive_impl, weights::Weight}; use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -32,9 +32,45 @@ frame_support::construct_runtime!( } ); +pub struct MockWeights; +impl frame_system::ExtensionsWeightInfo for MockWeights { + fn check_genesis() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_mortality_mortal_transaction() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_mortality_immortal_transaction() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_non_zero_sender() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_nonce() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_spec_version() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_tx_version() -> Weight { + Weight::from_parts(10, 0) + } + + fn check_weight() -> Weight { + Weight::from_parts(10, 0) + } +} + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; + type ExtensionsWeightInfo = MockWeights; } impl crate::Config for Test {} diff --git a/substrate/frame/system/src/extensions/check_genesis.rs b/substrate/frame/system/src/extensions/check_genesis.rs index 000ec56da64f3f96eaf521ed6510b17020ce81fe..881faa2c0eafad75001defeb5a6c73fc71b07f65 100644 --- a/substrate/frame/system/src/extensions/check_genesis.rs +++ b/substrate/frame/system/src/extensions/check_genesis.rs @@ -19,7 +19,8 @@ use crate::{pallet_prelude::BlockNumberFor, Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension, Zero}, + impl_tx_ext_default, + traits::{TransactionExtension, Zero}, transaction_validity::TransactionValidityError, }; @@ -46,30 +47,24 @@ impl core::fmt::Debug for CheckGenesis { } impl CheckGenesis { - /// Creates new `SignedExtension` to check genesis hash. + /// Creates new `TransactionExtension` to check genesis hash. 
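The new `extensions.rs` benchmarks above all follow one pattern: build a `DispatchInfo`, then drive the extension through `test_run` from the `DispatchTransaction` helper trait, which runs `validate` and `prepare` and treats the supplied closure as the dispatch step. A hedged sketch of that pattern for a single extension; the bounds are abbreviated to what this particular case needs and the function itself is not part of the diff:

use frame_support::{
    dispatch::{DispatchInfo, PostDispatchInfo},
    weights::Weight,
};
use frame_system::{CheckSpecVersion, RawOrigin};
use sp_runtime::traits::{DispatchTransaction, Dispatchable};

// Sketch: run `CheckSpecVersion` end to end for some runtime `T`, with a closure
// standing in for the actual call dispatch.
fn run_check_spec_version<T>(caller: T::AccountId, call: T::RuntimeCall)
where
    T: frame_system::Config + Send + Sync,
    T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
{
    let info = DispatchInfo { call_weight: Weight::zero(), ..Default::default() };
    CheckSpecVersion::<T>::new()
        .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(().into()))
        .expect("extension pipeline accepts the transaction")
        .expect("inner dispatch succeeds");
}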
pub fn new() -> Self { Self(core::marker::PhantomData) } } -impl SignedExtension for CheckGenesis { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = T::Hash; - type Pre = (); +impl TransactionExtension for CheckGenesis { const IDENTIFIER: &'static str = "CheckGenesis"; - - fn additional_signed(&self) -> Result { + type Implicit = T::Hash; + fn implicit(&self) -> Result { Ok(>::block_hash(BlockNumberFor::::zero())) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + type Val = (); + type Pre = (); + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + // All transactions will always read the hash of the genesis block, so to avoid + // charging this multiple times in a block we manually set the proof size to 0. + ::check_genesis().set_proof_size(0) } + impl_tx_ext_default!(T::RuntimeCall; validate prepare); } diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs index 6666c4812fbc383e48b6e11b6b8c5c286d126c5d..7da5521f353d7981b057cafd6abccb4da0637d73 100644 --- a/substrate/frame/system/src/extensions/check_mortality.rs +++ b/substrate/frame/system/src/extensions/check_mortality.rs @@ -20,10 +20,9 @@ use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ generic::Era, - traits::{DispatchInfoOf, SaturatedConversion, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, - }, + impl_tx_ext_default, + traits::{DispatchInfoOf, SaturatedConversion, TransactionExtension, ValidateResult}, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; /// Check for transaction mortality. @@ -57,29 +56,11 @@ impl core::fmt::Debug for CheckMortality { } } -impl SignedExtension for CheckMortality { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = T::Hash; - type Pre = (); +impl TransactionExtension for CheckMortality { const IDENTIFIER: &'static str = "CheckMortality"; + type Implicit = T::Hash; - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - let current_u64 = >::block_number().saturated_into::(); - let valid_till = self.0.death(current_u64); - Ok(ValidTransaction { - longevity: valid_till.saturating_sub(current_u64), - ..Default::default() - }) - } - - fn additional_signed(&self) -> Result { + fn implicit(&self) -> Result { let current_u64 = >::block_number().saturated_into::(); let n = self.0.birth(current_u64).saturated_into::>(); if !>::contains_key(n) { @@ -88,16 +69,41 @@ impl SignedExtension for CheckMortality { Ok(>::block_hash(n)) } } + type Pre = (); + type Val = (); + + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + if self.0.is_immortal() { + // All immortal transactions will always read the hash of the genesis block, so to avoid + // charging this multiple times in a block we manually set the proof size to 0. 
+ ::check_mortality_immortal_transaction() + .set_proof_size(0) + } else { + ::check_mortality_mortal_transaction() + } + } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn validate( + &self, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let current_u64 = >::block_number().saturated_into::(); + let valid_till = self.0.death(current_u64); + Ok(( + ValidTransaction { + longevity: valid_till.saturating_sub(current_u64), + ..Default::default() + }, + (), + origin, + )) } + impl_tx_ext_default!(T::RuntimeCall; prepare); } #[cfg(test)] @@ -109,23 +115,21 @@ mod tests { weights::Weight, }; use sp_core::H256; + use sp_runtime::traits::DispatchTransaction; #[test] fn signed_ext_check_era_should_work() { new_test_ext().execute_with(|| { // future assert_eq!( - CheckMortality::::from(Era::mortal(4, 2)) - .additional_signed() - .err() - .unwrap(), + CheckMortality::::from(Era::mortal(4, 2)).implicit().err().unwrap(), InvalidTransaction::AncientBirthBlock.into(), ); // correct System::set_block_number(13); >::insert(12, H256::repeat_byte(1)); - assert!(CheckMortality::::from(Era::mortal(4, 12)).additional_signed().is_ok()); + assert!(CheckMortality::::from(Era::mortal(4, 12)).implicit().is_ok()); }) } @@ -133,7 +137,8 @@ mod tests { fn signed_ext_check_era_should_change_longevity() { new_test_ext().execute_with(|| { let normal = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; @@ -145,7 +150,10 @@ mod tests { System::set_block_number(17); >::insert(16, H256::repeat_byte(1)); - assert_eq!(ext.validate(&1, CALL, &normal, len).unwrap().longevity, 15); + assert_eq!( + ext.validate_only(Some(1).into(), CALL, &normal, len).unwrap().0.longevity, + 15 + ); }) } } diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs index 06dc2bf177ac9500ab492cb585a8cbb7589397d6..ec8c12b790d2d647f4f8fccfb765fba93bf9f018 100644 --- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs +++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs @@ -18,13 +18,12 @@ use crate::Config; use codec::{Decode, Encode}; use core::marker::PhantomData; -use frame_support::{dispatch::DispatchInfo, DefaultNoBound}; +use frame_support::{traits::OriginTrait, DefaultNoBound}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, - }, + impl_tx_ext_default, + traits::{DispatchInfoOf, TransactionExtension}, + transaction_validity::InvalidTransaction, }; /// Check to ensure that the sender is not the zero address. @@ -45,66 +44,80 @@ impl core::fmt::Debug for CheckNonZeroSender { } impl CheckNonZeroSender { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. 
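In the reworked `CheckMortality` above, `validate` now reports the transaction's `longevity` as the distance from the current block to the era's death block, while `implicit()` contributes the birth block's hash. A small illustration of that arithmetic with `sp_runtime::generic::Era` (block numbers chosen arbitrarily):

use sp_runtime::generic::Era;

fn longevity_example() {
    // A transaction signed around block 12 with an era period of 4 blocks.
    let era = Era::mortal(4, 12);
    let current: u64 = 13;
    // `death` is the first block at which the transaction stops being valid; the
    // remaining lifetime is what `CheckMortality::validate` reports as `longevity`.
    let longevity = era.death(current).saturating_sub(current);
    assert!(longevity <= 4);
    // An immortal era never expires, so its death block saturates.
    assert_eq!(Era::immortal().death(current), u64::MAX);
}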
pub fn new() -> Self { Self(core::marker::PhantomData) } } -impl SignedExtension for CheckNonZeroSender -where - T::RuntimeCall: Dispatchable, -{ - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl TransactionExtension for CheckNonZeroSender { const IDENTIFIER: &'static str = "CheckNonZeroSender"; + type Implicit = (); + type Val = (); + type Pre = (); - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + ::check_non_zero_sender() } fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { - if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { - return Err(TransactionValidityError::Invalid(InvalidTransaction::BadSigner)) + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> sp_runtime::traits::ValidateResult { + if let Some(who) = origin.as_signer() { + if who.using_encoded(|d| d.iter().all(|x| *x == 0)) { + return Err(InvalidTransaction::BadSigner.into()) + } } - Ok(ValidTransaction::default()) + Ok((Default::default(), (), origin)) } + impl_tx_ext_default!(T::RuntimeCall; prepare); } #[cfg(test)] mod tests { use super::*; use crate::mock::{new_test_ext, Test, CALL}; - use frame_support::{assert_noop, assert_ok}; + use frame_support::{assert_ok, dispatch::DispatchInfo}; + use sp_runtime::{ + traits::{AsTransactionAuthorizedOrigin, DispatchTransaction}, + transaction_validity::TransactionValidityError, + }; #[test] fn zero_account_ban_works() { new_test_ext().execute_with(|| { let info = DispatchInfo::default(); let len = 0_usize; - assert_noop!( - CheckNonZeroSender::::new().validate(&0, CALL, &info, len), - InvalidTransaction::BadSigner + assert_eq!( + CheckNonZeroSender::::new() + .validate_only(Some(0).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::from(InvalidTransaction::BadSigner) ); - assert_ok!(CheckNonZeroSender::::new().validate(&1, CALL, &info, len)); + assert_ok!(CheckNonZeroSender::::new().validate_only( + Some(1).into(), + CALL, + &info, + len + )); + }) + } + + #[test] + fn unsigned_origin_works() { + new_test_ext().execute_with(|| { + let info = DispatchInfo::default(); + let len = 0_usize; + let (_, _, origin) = CheckNonZeroSender::::new() + .validate(None.into(), CALL, &info, len, (), CALL) + .unwrap(); + assert!(!origin.is_transaction_authorized()); }) } } diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs index 3535870d1b5959186c25e94615a59b41b9e5be87..d96d2c2c0662ad2bb3e3209bf94edb12a67747e8 100644 --- a/substrate/frame/system/src/extensions/check_nonce.rs +++ b/substrate/frame/system/src/extensions/check_nonce.rs @@ -18,23 +18,32 @@ use crate::Config; use alloc::vec; use codec::{Decode, Encode}; -use frame_support::dispatch::DispatchInfo; +use frame_support::{dispatch::DispatchInfo, RuntimeDebugNoBound}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, One, SignedExtension, Zero}, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, + TransactionExtension, ValidateResult, 
Zero, + }, transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionValidity, TransactionValidityError, - ValidTransaction, + InvalidTransaction, TransactionLongevity, TransactionValidityError, ValidTransaction, }, + DispatchResult, Saturating, }; +use sp_weights::Weight; /// Nonce check and increment to give replay protection for transactions. /// /// # Transaction Validity /// /// This extension affects `requires` and `provides` tags of validity, but DOES NOT -/// set the `priority` field. Make sure that AT LEAST one of the signed extension sets +/// set the `priority` field. Make sure that AT LEAST one of the transaction extension sets /// some kind of priority upon validating transactions. +/// +/// The preparation step assumes that the nonce information has not changed since the validation +/// step. This means that other extensions ahead of `CheckNonce` in the pipeline must not alter the +/// nonce during their own preparation step, or else the transaction may be rejected during dispatch +/// or lead to an inconsistent account state. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckNonce(#[codec(compact)] pub T::Nonce); @@ -58,83 +67,122 @@ impl core::fmt::Debug for CheckNonce { } } -impl SignedExtension for CheckNonce +/// Operation to perform from `validate` to `prepare` in [`CheckNonce`] transaction extension. +#[derive(RuntimeDebugNoBound)] +pub enum Val { + /// Account and its nonce to check for. + CheckNonce((T::AccountId, T::Nonce)), + /// Weight to refund. + Refund(Weight), +} + +/// Operation to perform from `prepare` to `post_dispatch_details` in [`CheckNonce`] transaction +/// extension. +#[derive(RuntimeDebugNoBound)] +pub enum Pre { + /// The transaction extension weight should not be refunded. + NonceChecked, + /// The transaction extension weight should be refunded. 
+ Refund(Weight), +} + +impl TransactionExtension for CheckNonce where T::RuntimeCall: Dispatchable, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); const IDENTIFIER: &'static str = "CheckNonce"; + type Implicit = (); + type Val = Val; + type Pre = Pre; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result<(), TransactionValidityError> { - let mut account = crate::Account::::get(who); - if account.providers.is_zero() && account.sufficients.is_zero() { - // Nonce storage not paid for - return Err(InvalidTransaction::Payment.into()) - } - if self.0 != account.nonce { - return Err(if self.0 < account.nonce { - InvalidTransaction::Stale - } else { - InvalidTransaction::Future - } - .into()) - } - account.nonce += T::Nonce::one(); - crate::Account::::insert(who, account); - Ok(()) + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + ::check_nonce() } fn validate( &self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let Some(who) = origin.as_system_origin_signer() else { + return Ok((Default::default(), Val::Refund(self.weight(call)), origin)) + }; let account = crate::Account::::get(who); if account.providers.is_zero() && account.sufficients.is_zero() { // Nonce storage not paid for - return InvalidTransaction::Payment.into() + return Err(InvalidTransaction::Payment.into()) } if self.0 < account.nonce { - return InvalidTransaction::Stale.into() + return Err(InvalidTransaction::Stale.into()) } - let provides = vec![Encode::encode(&(who, self.0))]; + let provides = vec![Encode::encode(&(&who, self.0))]; let requires = if account.nonce < self.0 { - vec![Encode::encode(&(who, self.0 - One::one()))] + vec![Encode::encode(&(&who, self.0.saturating_sub(One::one())))] } else { vec![] }; - Ok(ValidTransaction { + let validity = ValidTransaction { priority: 0, requires, provides, longevity: TransactionLongevity::max_value(), propagate: true, - }) + }; + + Ok((validity, Val::CheckNonce((who.clone(), account.nonce)), origin)) + } + + fn prepare( + self, + val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + let (who, mut nonce) = match val { + Val::CheckNonce((who, nonce)) => (who, nonce), + Val::Refund(weight) => return Ok(Pre::Refund(weight)), + }; + + // `self.0 < nonce` already checked in `validate`. 
+ if self.0 > nonce { + return Err(InvalidTransaction::Future.into()) + } + nonce += T::Nonce::one(); + crate::Account::::mutate(who, |account| account.nonce = nonce); + Ok(Pre::NonceChecked) + } + + fn post_dispatch_details( + pre: Self::Pre, + _info: &DispatchInfo, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + match pre { + Pre::NonceChecked => Ok(Weight::zero()), + Pre::Refund(weight) => Ok(weight), + } } } #[cfg(test)] mod tests { use super::*; - use crate::mock::{new_test_ext, Test, CALL}; - use frame_support::{assert_noop, assert_ok}; + use crate::mock::{new_test_ext, RuntimeCall, Test, CALL}; + use frame_support::{ + assert_ok, assert_storage_noop, dispatch::GetDispatchInfo, traits::OriginTrait, + }; + use sp_runtime::traits::{AsTransactionAuthorizedOrigin, DispatchTransaction}; #[test] fn signed_ext_check_nonce_works() { @@ -152,22 +200,45 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // stale - assert_noop!( - CheckNonce::(0u64.into()).validate(&1, CALL, &info, len), - InvalidTransaction::Stale - ); - assert_noop!( - CheckNonce::(0u64.into()).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Stale - ); + assert_storage_noop!({ + assert_eq!( + CheckNonce::(0u64.into()) + .validate_only(Some(1).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Stale) + ); + assert_eq!( + CheckNonce::(0u64.into()) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Stale) + ); + }); // correct - assert_ok!(CheckNonce::(1u64.into()).validate(&1, CALL, &info, len)); - assert_ok!(CheckNonce::(1u64.into()).pre_dispatch(&1, CALL, &info, len)); + assert_ok!(CheckNonce::(1u64.into()).validate_only( + Some(1).into(), + CALL, + &info, + len + )); + assert_ok!(CheckNonce::(1u64.into()).validate_and_prepare( + Some(1).into(), + CALL, + &info, + len + )); // future - assert_ok!(CheckNonce::(5u64.into()).validate(&1, CALL, &info, len)); - assert_noop!( - CheckNonce::(5u64.into()).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Future + assert_ok!(CheckNonce::(5u64.into()).validate_only( + Some(1).into(), + CALL, + &info, + len + )); + assert_eq!( + CheckNonce::(5u64.into()) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Future) ); }) } @@ -198,20 +269,133 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // Both providers and sufficients zero - assert_noop!( - CheckNonce::(1u64.into()).validate(&1, CALL, &info, len), - InvalidTransaction::Payment - ); - assert_noop!( - CheckNonce::(1u64.into()).pre_dispatch(&1, CALL, &info, len), - InvalidTransaction::Payment - ); + assert_storage_noop!({ + assert_eq!( + CheckNonce::(1u64.into()) + .validate_only(Some(1).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Payment) + ); + assert_eq!( + CheckNonce::(1u64.into()) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Payment) + ); + }); // Non-zero providers - assert_ok!(CheckNonce::(1u64.into()).validate(&2, CALL, &info, len)); - assert_ok!(CheckNonce::(1u64.into()).pre_dispatch(&2, CALL, &info, len)); + assert_ok!(CheckNonce::(1u64.into()).validate_only( + Some(2).into(), + CALL, + &info, + len + )); + assert_ok!(CheckNonce::(1u64.into()).validate_and_prepare( + 
Some(2).into(), + CALL, + &info, + len + )); // Non-zero sufficients - assert_ok!(CheckNonce::(1u64.into()).validate(&3, CALL, &info, len)); - assert_ok!(CheckNonce::(1u64.into()).pre_dispatch(&3, CALL, &info, len)); + assert_ok!(CheckNonce::(1u64.into()).validate_only( + Some(3).into(), + CALL, + &info, + len + )); + assert_ok!(CheckNonce::(1u64.into()).validate_and_prepare( + Some(3).into(), + CALL, + &info, + len + )); + }) + } + + #[test] + fn unsigned_check_nonce_works() { + new_test_ext().execute_with(|| { + let info = DispatchInfo::default(); + let len = 0_usize; + let (_, val, origin) = CheckNonce::(1u64.into()) + .validate(None.into(), CALL, &info, len, (), CALL) + .unwrap(); + assert!(!origin.is_transaction_authorized()); + assert_ok!(CheckNonce::(1u64.into()).prepare(val, &origin, CALL, &info, len)); + }) + } + + #[test] + fn check_nonce_preserves_account_data() { + new_test_ext().execute_with(|| { + crate::Account::::insert( + 1, + crate::AccountInfo { + nonce: 1u64.into(), + consumers: 0, + providers: 1, + sufficients: 0, + data: 0, + }, + ); + let info = DispatchInfo::default(); + let len = 0_usize; + // run the validation step + let (_, val, origin) = CheckNonce::(1u64.into()) + .validate(Some(1).into(), CALL, &info, len, (), CALL) + .unwrap(); + // mutate `AccountData` for the caller + crate::Account::::mutate(1, |info| { + info.data = 42; + }); + // run the preparation step + assert_ok!(CheckNonce::(1u64.into()).prepare(val, &origin, CALL, &info, len)); + // only the nonce should be altered by the preparation step + let expected_info = crate::AccountInfo { + nonce: 2u64.into(), + consumers: 0, + providers: 1, + sufficients: 0, + data: 42, + }; + assert_eq!(crate::Account::::get(1), expected_info); + }) + } + + #[test] + fn check_nonce_skipped_and_refund_for_other_origins() { + new_test_ext().execute_with(|| { + let ext = CheckNonce::(1u64.into()); + + let mut info = CALL.get_dispatch_info(); + info.extension_weight = ext.weight(CALL); + + // Ensure we test the refund. + assert!(info.extension_weight != Weight::zero()); + + let len = CALL.encoded_size(); + + let origin = crate::RawOrigin::Root.into(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len).unwrap(); + + assert!(origin.as_system_ref().unwrap().is_root()); + + let pd_res = Ok(()); + let mut post_info = frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(info.total_weight()), + pays_fee: Default::default(), + }; + + as TransactionExtension>::post_dispatch( + pre, + &info, + &mut post_info, + len, + &pd_res, + ) + .unwrap(); + + assert_eq!(post_info.actual_weight, Some(info.call_weight)); }) } } diff --git a/substrate/frame/system/src/extensions/check_spec_version.rs b/substrate/frame/system/src/extensions/check_spec_version.rs index ee7e6f2efd001a981c56f840bf089718b6d823f3..ff86c6cd469508c3a6c0e5c6ba2ceb3bd3112c7a 100644 --- a/substrate/frame/system/src/extensions/check_spec_version.rs +++ b/substrate/frame/system/src/extensions/check_spec_version.rs @@ -19,7 +19,7 @@ use crate::{Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, traits::TransactionExtension, transaction_validity::TransactionValidityError, }; @@ -46,30 +46,24 @@ impl core::fmt::Debug for CheckSpecVersion { } impl CheckSpecVersion { - /// Create new `SignedExtension` to check runtime version. + /// Create new `TransactionExtension` to check runtime version. 
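`CheckNonce` above introduces a pattern several extensions in this diff share: when the origin is not an ordinary signed account, `validate` returns `Val::Refund(weight)` instead of performing its check, `prepare` forwards that as `Pre::Refund(weight)`, and `post_dispatch_details` reports the weight back so the skipped extension costs the caller nothing. A condensed sketch of just that plumbing; the enums and free functions here are illustrative stand-ins, not the trait implementation itself:

use sp_weights::Weight;

// Stand-ins mirroring the shape of `Val` and `Pre` in `CheckNonce`.
enum Val {
    Checked,        // the origin was a signed account and the nonce check ran
    Refund(Weight), // the origin was something else; remember the weight to hand back
}

enum Pre {
    Checked,
    Refund(Weight),
}

// `prepare` only forwards the decision made during validation.
fn prepare(val: Val) -> Pre {
    match val {
        Val::Checked => Pre::Checked,
        Val::Refund(w) => Pre::Refund(w),
    }
}

// `post_dispatch_details` returns the weight that can be refunded to the caller.
fn post_dispatch_details(pre: Pre) -> Weight {
    match pre {
        Pre::Checked => Weight::zero(),
        Pre::Refund(w) => w,
    }
}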
pub fn new() -> Self { Self(core::marker::PhantomData) } } -impl SignedExtension for CheckSpecVersion { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = u32; - type Pre = (); +impl TransactionExtension<::RuntimeCall> + for CheckSpecVersion +{ const IDENTIFIER: &'static str = "CheckSpecVersion"; - - fn additional_signed(&self) -> Result { + type Implicit = u32; + fn implicit(&self) -> Result { Ok(>::runtime_version().spec_version) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + type Val = (); + type Pre = (); + fn weight(&self, _: &::RuntimeCall) -> sp_weights::Weight { + ::check_spec_version() } + impl_tx_ext_default!(::RuntimeCall; validate prepare); } diff --git a/substrate/frame/system/src/extensions/check_tx_version.rs b/substrate/frame/system/src/extensions/check_tx_version.rs index 15983c2cd088b3b0deeeccf21a679d648da9be42..e3b7dfe7c928c25a2743ca85263a7d6789157c24 100644 --- a/substrate/frame/system/src/extensions/check_tx_version.rs +++ b/substrate/frame/system/src/extensions/check_tx_version.rs @@ -19,7 +19,7 @@ use crate::{Config, Pallet}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, SignedExtension}, + impl_tx_ext_default, traits::TransactionExtension, transaction_validity::TransactionValidityError, }; @@ -46,29 +46,24 @@ impl core::fmt::Debug for CheckTxVersion { } impl CheckTxVersion { - /// Create new `SignedExtension` to check transaction version. + /// Create new `TransactionExtension` to check transaction version. pub fn new() -> Self { Self(core::marker::PhantomData) } } -impl SignedExtension for CheckTxVersion { - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = u32; - type Pre = (); +impl TransactionExtension<::RuntimeCall> + for CheckTxVersion +{ const IDENTIFIER: &'static str = "CheckTxVersion"; - - fn additional_signed(&self) -> Result { + type Implicit = u32; + fn implicit(&self) -> Result { Ok(>::runtime_version().transaction_version) } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + type Val = (); + type Pre = (); + fn weight(&self, _: &::RuntimeCall) -> sp_weights::Weight { + ::check_tx_version() } + impl_tx_ext_default!(::RuntimeCall; validate prepare); } diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 22da2a5b9872545e53f33481a5595e57e91cc8e3..131057f54a781f69fd8fec44b582ed94e8b6159f 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -23,8 +23,10 @@ use frame_support::{ }; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, - transaction_validity::{InvalidTransaction, TransactionValidity, TransactionValidityError}, + traits::{ + DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension, ValidateResult, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, DispatchResult, }; use sp_weights::Weight; @@ -50,11 +52,11 @@ where ) -> Result<(), TransactionValidityError> { let max = T::BlockWeights::get().get(info.class).max_extrinsic; match max { - Some(max) if info.weight.any_gt(max) => { + 
Some(max) if info.total_weight().any_gt(max) => { log::debug!( target: LOG_TARGET, "Extrinsic {} is greater than the max extrinsic {}", - info.weight, + info.total_weight(), max, ); @@ -89,43 +91,73 @@ where } } - /// Creates new `SignedExtension` to check weight of the extrinsic. + /// Creates new `TransactionExtension` to check weight of the extrinsic. pub fn new() -> Self { Self(Default::default()) } + /// Do the validate checks. This can be applied to both signed and unsigned. + /// + /// It only checks that the block weight and length limit will not exceed. + /// + /// Returns the transaction validity and the next block length, to be used in `prepare`. + pub fn do_validate( + info: &DispatchInfoOf, + len: usize, + ) -> Result<(ValidTransaction, u32), TransactionValidityError> { + // If they return `Ok`, then it is below the limit. + let next_len = Self::check_block_length(info, len)?; + // during validation we skip block limit check. Since the `validate_transaction` + // call runs on an empty block anyway, by this we prevent `on_initialize` weight + // consumption from causing false negatives. + Self::check_extrinsic_weight(info)?; + + Ok((Default::default(), next_len)) + } + /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. /// /// It checks and notes the new weight and length. - pub fn do_pre_dispatch( + pub fn do_prepare( info: &DispatchInfoOf, len: usize, + next_len: u32, ) -> Result<(), TransactionValidityError> { - let next_len = Self::check_block_length(info, len)?; - let all_weight = Pallet::::block_weight(); let maximum_weight = T::BlockWeights::get(); let next_weight = calculate_consumed_weight::(&maximum_weight, all_weight, info, len)?; - Self::check_extrinsic_weight(info)?; + // Extrinsic weight already checked in `validate`. crate::AllExtrinsicsLen::::put(next_len); crate::BlockWeight::::put(next_weight); Ok(()) } - /// Do the validate checks. This can be applied to both signed and unsigned. - /// - /// It only checks that the block weight and length limit will not exceed. - pub fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity { - // ignore the next length. If they return `Ok`, then it is below the limit. - let _ = Self::check_block_length(info, len)?; - // during validation we skip block limit check. Since the `validate_transaction` - // call runs on an empty block anyway, by this we prevent `on_initialize` weight - // consumption from causing false negatives. - Self::check_extrinsic_weight(info)?; + pub fn do_post_dispatch( + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + ) -> Result<(), TransactionValidityError> { + let unspent = post_info.calc_unspent(info); + if unspent.any_gt(Weight::zero()) { + crate::BlockWeight::::mutate(|current_weight| { + current_weight.reduce(unspent, info.class); + }) + } - Ok(Default::default()) + log::trace!( + target: LOG_TARGET, + "Used block weight: {:?}", + crate::BlockWeight::::get(), + ); + + log::trace!( + target: LOG_TARGET, + "Used block length: {:?}", + Pallet::::all_extrinsics_len(), + ); + + Ok(()) } } @@ -143,7 +175,7 @@ where { // Also Consider extrinsic length as proof weight. 
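`do_post_dispatch` above hands back whatever the extrinsic did not use: the unspent amount is the weight charged up front (call plus extensions) minus the weight the dispatch actually reported, and the block weight tracker is reduced by it. A small arithmetic sketch using `calc_unspent`, with made-up numbers:

use frame_support::{
    dispatch::{DispatchInfo, Pays, PostDispatchInfo},
    weights::Weight,
};

fn unspent_example() {
    let info = DispatchInfo {
        call_weight: Weight::from_parts(1_000, 0),
        extension_weight: Weight::from_parts(100, 0),
        ..Default::default()
    };
    let post = PostDispatchInfo {
        actual_weight: Some(Weight::from_parts(600, 0)),
        pays_fee: Pays::Yes,
    };
    // 1_100 charged up front, 600 reported as actually used, so 500 can be
    // released from the block's weight accounting.
    assert_eq!(post.calc_unspent(&info), Weight::from_parts(500, 0));
}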
let extrinsic_weight = info - .weight + .total_weight() .saturating_add(maximum_weight.get(info.class).base_extrinsic) .saturating_add(Weight::from_parts(0, len as u64)); let limit_per_class = maximum_weight.get(info.class); @@ -201,83 +233,78 @@ where Ok(all_weight) } -impl SignedExtension for CheckWeight +impl TransactionExtension for CheckWeight where T::RuntimeCall: Dispatchable, { - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); const IDENTIFIER: &'static str = "CheckWeight"; + type Implicit = (); + type Pre = (); + type Val = u32; /* next block length */ - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + fn weight(&self, _: &T::RuntimeCall) -> Weight { + ::check_weight() } - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, + fn validate( + &self, + origin: T::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let (validity, next_len) = Self::do_validate(info, len)?; + Ok((validity, next_len, origin)) } - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, + fn prepare( + self, + val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) + ) -> Result { + Self::do_prepare(info, len, val) } - fn pre_dispatch_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, + fn post_dispatch_details( + _pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result { + Self::do_post_dispatch(info, post_info)?; + Ok(Weight::zero()) + } + + fn bare_validate( + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) + ) -> frame_support::pallet_prelude::TransactionValidity { + Ok(Self::do_validate(info, len)?.0) } - fn validate_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, + fn bare_validate_and_prepare( + _call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) + ) -> Result<(), TransactionValidityError> { + let (_, next_len) = Self::do_validate(info, len)?; + Self::do_prepare(info, len, next_len) } - fn post_dispatch( - _pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn bare_post_dispatch( + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, _len: usize, _result: &DispatchResult, ) -> Result<(), TransactionValidityError> { - let unspent = post_info.calc_unspent(info); - if unspent.any_gt(Weight::zero()) { - crate::BlockWeight::::mutate(|current_weight| { - current_weight.reduce(unspent, info.class); - }) - } - - log::trace!( - target: LOG_TARGET, - "Used block weight: {:?}", - crate::BlockWeight::::get(), - ); - - log::trace!( - target: LOG_TARGET, - "Used block length: {:?}", - Pallet::::all_extrinsics_len(), - ); - - Ok(()) + Self::do_post_dispatch(info, post_info) } } @@ -302,6 +329,7 @@ mod tests { }; use core::marker::PhantomData; use frame_support::{assert_err, assert_ok, dispatch::Pays, weights::Weight}; + use sp_runtime::traits::DispatchTransaction; fn block_weights() -> crate::limits::BlockWeights { 
::BlockWeights::get() @@ -327,7 +355,7 @@ mod tests { fn check(call: impl FnOnce(&DispatchInfo, usize)) { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: Weight::MAX, + call_weight: Weight::MAX, class: DispatchClass::Mandatory, ..Default::default() }; @@ -338,7 +366,8 @@ mod tests { } check(|max, len| { - assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); + let next_len = CheckWeight::::check_block_length(max, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(max, len, next_len)); assert_eq!(System::block_weight().total(), Weight::MAX); assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); }); @@ -351,7 +380,7 @@ mod tests { fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + + call_weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + Weight::from_parts(1, 0), class: DispatchClass::Normal, ..Default::default() @@ -374,11 +403,14 @@ mod tests { .unwrap_or_else(|| weights.max_block); let base_weight = weights.get(DispatchClass::Operational).base_extrinsic; - let weight = operational_limit - base_weight; - let okay = - DispatchInfo { weight, class: DispatchClass::Operational, ..Default::default() }; + let call_weight = operational_limit - base_weight; + let okay = DispatchInfo { + call_weight, + class: DispatchClass::Operational, + ..Default::default() + }; let max = DispatchInfo { - weight: weight + Weight::from_parts(1, 0), + call_weight: call_weight + Weight::from_parts(1, 0), class: DispatchClass::Operational, ..Default::default() }; @@ -410,18 +442,20 @@ mod tests { // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) // And Operational can be 246 to produce a full block (-10 for base) let max_normal = - DispatchInfo { weight: Weight::from_parts(753, 0), ..Default::default() }; + DispatchInfo { call_weight: Weight::from_parts(753, 0), ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_parts(246, 0), + call_weight: Weight::from_parts(246, 0), class: DispatchClass::Operational, ..Default::default() }; let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, len, next_len)); assert_eq!(System::block_weight().total(), Weight::from_parts(768, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + let next_len = CheckWeight::::check_block_length(&rest_operational, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&rest_operational, len, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); // Checking single extrinsic should not take current block weight into account. 
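A quick note on the pervasive `weight` to `call_weight` rename in these tests: `DispatchInfo` now carries the call's own weight and the combined worst-case weight of the attached transaction extensions separately, and the limit checks operate on their sum. An illustration of how I read the new fields (not code from this diff):

```rust
use frame_support::dispatch::DispatchInfo;
use frame_support::weights::Weight;

fn total_weight_example() {
	let info = DispatchInfo {
		call_weight: Weight::from_parts(100, 0),     // weight of the dispatched call itself
		extension_weight: Weight::from_parts(20, 0), // summed `TransactionExtension::weight()`
		..Default::default()
	};
	// `CheckWeight` and the fee logic work on the combined figure.
	assert_eq!(info.total_weight(), Weight::from_parts(120, 0));
}
```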
@@ -434,19 +468,21 @@ mod tests { new_test_ext().execute_with(|| { // We switch the order of `full_block_with_normal_and_operational` let max_normal = - DispatchInfo { weight: Weight::from_parts(753, 0), ..Default::default() }; + DispatchInfo { call_weight: Weight::from_parts(753, 0), ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_parts(246, 0), + call_weight: Weight::from_parts(246, 0), class: DispatchClass::Operational, ..Default::default() }; let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); + let next_len = CheckWeight::::check_block_length(&rest_operational, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&rest_operational, len, next_len)); // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_parts(266, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, len, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); }); @@ -458,27 +494,30 @@ mod tests { // An on_initialize takes up the whole block! (Every time!) System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Mandatory); let dispatch_normal = DispatchInfo { - weight: Weight::from_parts(251, 0), + call_weight: Weight::from_parts(251, 0), class: DispatchClass::Normal, ..Default::default() }; let dispatch_operational = DispatchInfo { - weight: Weight::from_parts(246, 0), + call_weight: Weight::from_parts(246, 0), class: DispatchClass::Operational, ..Default::default() }; let len = 0_usize; + let next_len = CheckWeight::::check_block_length(&dispatch_normal, len).unwrap(); assert_err!( - CheckWeight::::do_pre_dispatch(&dispatch_normal, len), + CheckWeight::::do_prepare(&dispatch_normal, len, next_len), InvalidTransaction::ExhaustsResources ); + let next_len = + CheckWeight::::check_block_length(&dispatch_operational, len).unwrap(); // Thank goodness we can still do an operational transaction to possibly save the // blockchain. - assert_ok!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len)); + assert_ok!(CheckWeight::::do_prepare(&dispatch_operational, len, next_len)); // Not too much though assert_err!( - CheckWeight::::do_pre_dispatch(&dispatch_operational, len), + CheckWeight::::do_prepare(&dispatch_operational, len, next_len), InvalidTransaction::ExhaustsResources ); // Even with full block, validity of single transaction should be correct. @@ -489,9 +528,11 @@ mod tests { #[test] fn signed_ext_check_weight_works_operational_tx() { new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: Weight::from_parts(100, 0), ..Default::default() }; + let normal = + DispatchInfo { call_weight: Weight::from_parts(100, 0), ..Default::default() }; let op = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -503,21 +544,35 @@ mod tests { current_weight.set(normal_limit, DispatchClass::Normal) }); // will not fit. 
- assert_err!( - CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), - InvalidTransaction::ExhaustsResources + assert_eq!( + CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &normal, len) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() ); // will fit. - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &op, + len + )); // likewise for length limit. let len = 100_usize; AllExtrinsicsLen::::put(normal_length_limit()); - assert_err!( - CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), - InvalidTransaction::ExhaustsResources + assert_eq!( + CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &normal, len) + .unwrap_err(), + InvalidTransaction::ExhaustsResources.into() ); - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &op, + len + )); }) } @@ -528,7 +583,12 @@ mod tests { let normal_limit = normal_weight_limit().ref_time() as usize; let reset_check_weight = |tx, s, f| { AllExtrinsicsLen::::put(0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); + let r = CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + tx, + s, + ); if f { assert!(r.is_err()) } else { @@ -542,7 +602,8 @@ mod tests { // Operational ones don't have this limit. let op = DispatchInfo { - weight: Weight::zero(), + call_weight: Weight::zero(), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -557,12 +618,13 @@ mod tests { fn signed_ext_check_weight_works_normal_tx() { new_test_ext().execute_with(|| { let normal_limit = normal_weight_limit(); - let small = DispatchInfo { weight: Weight::from_parts(100, 0), ..Default::default() }; + let small = + DispatchInfo { call_weight: Weight::from_parts(100, 0), ..Default::default() }; let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; let medium = - DispatchInfo { weight: normal_limit - base_extrinsic, ..Default::default() }; + DispatchInfo { call_weight: normal_limit - base_extrinsic, ..Default::default() }; let big = DispatchInfo { - weight: normal_limit - base_extrinsic + Weight::from_parts(1, 0), + call_weight: normal_limit - base_extrinsic + Weight::from_parts(1, 0), ..Default::default() }; let len = 0_usize; @@ -571,7 +633,12 @@ mod tests { BlockWeight::::mutate(|current_weight| { current_weight.set(s, DispatchClass::Normal) }); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); + let r = CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + i, + len, + ); if f { assert!(r.is_err()) } else { @@ -589,7 +656,8 @@ mod tests { fn signed_ext_check_weight_refund_works() { new_test_ext().execute_with(|| { // This is half of the max block weight - let info = DispatchInfo { weight: Weight::from_parts(512, 0), ..Default::default() }; + let info = + DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() }; let post_info = PostDispatchInfo { actual_weight: Some(Weight::from_parts(128, 0)), pays_fee: Default::default(), @@ -604,14 +672,17 @@ mod tests { .set(Weight::from_parts(256, 0) - base_extrinsic, DispatchClass::Normal); }); - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + let pre = CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &info, 
len) + .unwrap() + .0; assert_eq!( BlockWeight::::get().total(), - info.weight + Weight::from_parts(256, 0) + info.total_weight() + Weight::from_parts(256, 0) ); - assert_ok!(CheckWeight::::post_dispatch( - Some(pre), + assert_ok!(CheckWeight::::post_dispatch_details( + pre, &info, &post_info, len, @@ -627,7 +698,8 @@ mod tests { #[test] fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { new_test_ext().execute_with(|| { - let info = DispatchInfo { weight: Weight::from_parts(512, 0), ..Default::default() }; + let info = + DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() }; let post_info = PostDispatchInfo { actual_weight: Some(Weight::from_parts(700, 0)), pays_fee: Default::default(), @@ -639,16 +711,19 @@ mod tests { current_weight.set(Weight::from_parts(128, 0), DispatchClass::Normal); }); - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); + let pre = CheckWeight::(PhantomData) + .validate_and_prepare(Some(1).into(), CALL, &info, len) + .unwrap() + .0; assert_eq!( BlockWeight::::get().total(), - info.weight + + info.total_weight() + Weight::from_parts(128, 0) + block_weights().get(DispatchClass::Normal).base_extrinsic, ); - assert_ok!(CheckWeight::::post_dispatch( - Some(pre), + assert_ok!(CheckWeight::::post_dispatch_details( + pre, &info, &post_info, len, @@ -656,7 +731,7 @@ mod tests { )); assert_eq!( BlockWeight::::get().total(), - info.weight + + info.total_weight() + Weight::from_parts(128, 0) + block_weights().get(DispatchClass::Normal).base_extrinsic, ); @@ -667,12 +742,17 @@ mod tests { fn zero_weight_extrinsic_still_has_base_weight() { new_test_ext().execute_with(|| { let weights = block_weights(); - let free = DispatchInfo { weight: Weight::zero(), ..Default::default() }; + let free = DispatchInfo { call_weight: Weight::zero(), ..Default::default() }; let len = 0_usize; // Initial weight from `weights.base_block` assert_eq!(System::block_weight().total(), weights.base_block); - assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len)); + assert_ok!(CheckWeight::(PhantomData).validate_and_prepare( + Some(1).into(), + CALL, + &free, + len + )); assert_eq!( System::block_weight().total(), weights.get(DispatchClass::Normal).base_extrinsic + weights.base_block @@ -687,18 +767,20 @@ mod tests { // Max normal is 768 (75%) // Max mandatory is unlimited let max_normal = - DispatchInfo { weight: Weight::from_parts(753, 0), ..Default::default() }; + DispatchInfo { call_weight: Weight::from_parts(753, 0), ..Default::default() }; let mandatory = DispatchInfo { - weight: Weight::from_parts(1019, 0), + call_weight: Weight::from_parts(1019, 0), class: DispatchClass::Mandatory, ..Default::default() }; let len = 0_usize; - assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); + let next_len = CheckWeight::::check_block_length(&max_normal, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&max_normal, len, next_len)); assert_eq!(System::block_weight().total(), Weight::from_parts(768, 0)); - assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); + let next_len = CheckWeight::::check_block_length(&mandatory, len).unwrap(); + assert_ok!(CheckWeight::::do_prepare(&mandatory, len, next_len)); assert_eq!(block_weight_limit(), Weight::from_parts(1024, u64::MAX)); assert_eq!(System::block_weight().total(), Weight::from_parts(1024 + 768, 0)); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); @@ -729,13 +811,13 @@ mod tests { // fits into reserved let mandatory1 = 
DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), class: DispatchClass::Mandatory, ..Default::default() }; // does not fit into reserved and the block is full. let mandatory2 = DispatchInfo { - weight: Weight::from_parts(6, 0), + call_weight: Weight::from_parts(6, 0), class: DispatchClass::Mandatory, ..Default::default() }; @@ -778,13 +860,13 @@ mod tests { }); let normal = DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), class: DispatchClass::Normal, ..Default::default() }; let mandatory = DispatchInfo { - weight: Weight::from_parts(5, 0), + call_weight: Weight::from_parts(5, 0), class: DispatchClass::Mandatory, ..Default::default() }; @@ -798,7 +880,7 @@ mod tests { ) .unwrap(); - assert_eq!(consumed.total().saturating_sub(all_weight.total()), normal.weight); + assert_eq!(consumed.total().saturating_sub(all_weight.total()), normal.total_weight()); let consumed = calculate_consumed_weight::<::RuntimeCall>( &maximum_weight, @@ -807,7 +889,7 @@ mod tests { 0, ) .unwrap(); - assert_eq!(consumed.total().saturating_sub(all_weight.total()), mandatory.weight); + assert_eq!(consumed.total().saturating_sub(all_weight.total()), mandatory.total_weight()); // Using non zero length extrinsics. let consumed = calculate_consumed_weight::<::RuntimeCall>( @@ -820,7 +902,7 @@ mod tests { // Must account for the len in the proof size assert_eq!( consumed.total().saturating_sub(all_weight.total()), - normal.weight.add_proof_size(100) + normal.total_weight().add_proof_size(100) ); let consumed = calculate_consumed_weight::<::RuntimeCall>( @@ -833,7 +915,7 @@ mod tests { // Must account for the len in the proof size assert_eq!( consumed.total().saturating_sub(all_weight.total()), - mandatory.weight.add_proof_size(100) + mandatory.total_weight().add_proof_size(100) ); // Using oversized zero length extrinsics. diff --git a/substrate/frame/system/src/extensions/mod.rs b/substrate/frame/system/src/extensions/mod.rs index a88c9fbf96ebdac86d9c45a8e5b07020c3325bb0..d79104d224035450f9ca69ba7d4b502b9ff0289d 100644 --- a/substrate/frame/system/src/extensions/mod.rs +++ b/substrate/frame/system/src/extensions/mod.rs @@ -22,3 +22,6 @@ pub mod check_nonce; pub mod check_spec_version; pub mod check_tx_version; pub mod check_weight; +pub mod weights; + +pub use weights::WeightInfo; diff --git a/substrate/frame/system/src/extensions/weights.rs b/substrate/frame/system/src/extensions/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..1c0136ae78023ad7855823e409dcaec4b9e31b9a --- /dev/null +++ b/substrate/frame/system/src/extensions/weights.rs @@ -0,0 +1,217 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=frame_system_extensions +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/system/src/extensions/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `frame_system_extensions`. +pub trait WeightInfo { + fn check_genesis() -> Weight; + fn check_mortality_mortal_transaction() -> Weight; + fn check_mortality_immortal_transaction() -> Weight; + fn check_non_zero_sender() -> Weight; + fn check_nonce() -> Weight; + fn check_spec_version() -> Weight; + fn check_tx_version() -> Weight; + fn check_weight() -> Weight; +} + +/// Weights for `frame_system_extensions` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_876_000 picoseconds. + Weight::from_parts(4_160_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 449_000 picoseconds. + Weight::from_parts(527_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 5_689_000 picoseconds. 
+ Weight::from_parts(6_000_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 399_000 picoseconds. + Weight::from_parts(461_000, 0) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 390_000 picoseconds. + Weight::from_parts(439_000, 0) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 4_375_000 picoseconds. + Weight::from_parts(4_747_000, 1489) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 3_876_000 picoseconds. + Weight::from_parts(4_160_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 6_296_000 picoseconds. + Weight::from_parts(6_523_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 449_000 picoseconds. + Weight::from_parts(527_000, 0) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 5_689_000 picoseconds. + Weight::from_parts(6_000_000, 3593) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 399_000 picoseconds. + Weight::from_parts(461_000, 0) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 390_000 picoseconds. 
+ Weight::from_parts(439_000, 0) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1489` + // Minimum execution time: 4_375_000 picoseconds. + Weight::from_parts(4_747_000, 1489) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index a5c5f1ed2e47032338ff5424ea483d70d68e64df..02d61921741c02d3ac3f39f2df48e4af6213e39a 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -169,7 +169,7 @@ pub use extensions::{ check_genesis::CheckGenesis, check_mortality::CheckMortality, check_non_zero_sender::CheckNonZeroSender, check_nonce::CheckNonce, check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion, - check_weight::CheckWeight, + check_weight::CheckWeight, WeightInfo as ExtensionsWeightInfo, }; // Backward compatible re-export. pub use extensions::check_mortality::CheckMortality as CheckEra; @@ -261,6 +261,19 @@ where check_version: bool, } +/// Information about the dispatch of a call, to be displayed in the +/// [`ExtrinsicSuccess`](Event::ExtrinsicSuccess) and [`ExtrinsicFailed`](Event::ExtrinsicFailed) +/// events. +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct DispatchEventInfo { + /// Weight of this transaction. + pub weight: Weight, + /// Class of this transaction. + pub class: DispatchClass, + /// Does this transaction pay fees. + pub pays_fee: Pays, +} + #[frame_support::pallet] pub mod pallet { use crate::{self as frame_system, pallet_prelude::*, *}; @@ -303,6 +316,7 @@ pub mod pallet { type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type ExtensionsWeightInfo = (); type SS58Prefix = (); type Version = (); type BlockWeights = (); @@ -375,6 +389,9 @@ pub mod pallet { /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); + /// Weight information for the extensions of this pallet. + type ExtensionsWeightInfo = (); + /// This is used as an identifier of the chain. type SS58Prefix = (); @@ -582,8 +599,12 @@ pub mod pallet { /// All resources should be cleaned up associated with the given account. type OnKilledAccount: OnKilledAccount; + /// Weight information for the extrinsics of this pallet. type SystemWeightInfo: WeightInfo; + /// Weight information for the transaction extensions of this pallet. + type ExtensionsWeightInfo: extensions::WeightInfo; + /// The designated SS58 prefix of this chain. /// /// This replaces the "ss58Format" property declared in the chain spec. Reason is @@ -833,9 +854,9 @@ pub mod pallet { #[pallet::event] pub enum Event { /// An extrinsic completed successfully. - ExtrinsicSuccess { dispatch_info: DispatchInfo }, + ExtrinsicSuccess { dispatch_info: DispatchEventInfo }, /// An extrinsic failed. - ExtrinsicFailed { dispatch_error: DispatchError, dispatch_info: DispatchInfo }, + ExtrinsicFailed { dispatch_error: DispatchError, dispatch_info: DispatchEventInfo }, /// `:code` was updated. CodeUpdated, /// A new account was created. @@ -921,6 +942,7 @@ pub mod pallet { /// Total length (in bytes) for all extrinsics put together, for the current block. 
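With the new `ExtensionsWeightInfo` associated type, a runtime chooses where the extension weights come from, analogous to `SystemWeightInfo`. A hedged sketch of the wiring in a runtime's `frame_system::Config` impl; `Runtime` and `Block` are the usual runtime items, the `derive_impl` default-config shorthand supplies the omitted items, and a production runtime would point at its own generated weights rather than the `SubstrateWeight` defaults above:

```rust
use frame_support::derive_impl;

#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig)]
impl frame_system::Config for Runtime {
	type Block = Block;
	// Existing: weights for the pallet's dispatchables.
	type SystemWeightInfo = frame_system::weights::SubstrateWeight<Runtime>;
	// New: weights for the transaction extensions (CheckGenesis, CheckNonce, CheckWeight, ...).
	type ExtensionsWeightInfo = frame_system::extensions::weights::SubstrateWeight<Runtime>;
}
```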
#[pallet::storage] + #[pallet::whitelist_storage] pub type AllExtrinsicsLen = StorageValue<_, u32>; /// Map of block numbers to block hashes. @@ -2025,13 +2047,15 @@ impl Pallet { /// Emits an `ExtrinsicSuccess` or `ExtrinsicFailed` event depending on the outcome. /// The emitted event contains the post-dispatch corrected weight including /// the base-weight for its dispatch class. - pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { - info.weight = extract_actual_weight(r, &info) + pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, info: DispatchInfo) { + let weight = extract_actual_weight(r, &info) .saturating_add(T::BlockWeights::get().get(info.class).base_extrinsic); - info.pays_fee = extract_actual_pays_fee(r, &info); + let class = info.class; + let pays_fee = extract_actual_pays_fee(r, &info); + let dispatch_event_info = DispatchEventInfo { weight, class, pays_fee }; Self::deposit_event(match r { - Ok(_) => Event::ExtrinsicSuccess { dispatch_info: info }, + Ok(_) => Event::ExtrinsicSuccess { dispatch_info: dispatch_event_info }, Err(err) => { log::trace!( target: LOG_TARGET, @@ -2039,7 +2063,10 @@ impl Pallet { Self::block_number(), err, ); - Event::ExtrinsicFailed { dispatch_error: err.error, dispatch_info: info } + Event::ExtrinsicFailed { + dispatch_error: err.error, + dispatch_info: dispatch_event_info, + } }, }); diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index 1f72ea2d37452c88742b427ea621658a20b43a3a..bedfdded818350007f27f1b52850c54bf7f2e525 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -58,9 +58,10 @@ use alloc::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; use codec::Encode; +use scale_info::TypeInfo; use sp_runtime::{ app_crypto::RuntimeAppPublic, - traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}, + traits::{ExtrinsicLike, IdentifyAccount, One}, RuntimeDebug, }; @@ -75,29 +76,18 @@ pub struct ForAny {} /// For submitting unsigned transactions, `submit_unsigned_transaction` /// utility function can be used. However, this struct is used by `Signer` /// to submit a signed transactions providing the signature along with the call. -pub struct SubmitTransaction, OverarchingCall> { - _phantom: core::marker::PhantomData<(T, OverarchingCall)>, +pub struct SubmitTransaction, RuntimeCall> { + _phantom: core::marker::PhantomData<(T, RuntimeCall)>, } impl SubmitTransaction where - T: SendTransactionTypes, + T: CreateTransactionBase, { - /// Submit transaction onchain by providing the call and an optional signature - pub fn submit_transaction( - call: >::OverarchingCall, - signature: Option<::SignaturePayload>, - ) -> Result<(), ()> { - let xt = T::Extrinsic::new(call, signature).ok_or(())?; + /// A convenience method to submit an extrinsic onchain. + pub fn submit_transaction(xt: T::Extrinsic) -> Result<(), ()> { sp_io::offchain::submit_transaction(xt.encode()) } - - /// A convenience method to submit an unsigned transaction onchain. - pub fn submit_unsigned_transaction( - call: >::OverarchingCall, - ) -> Result<(), ()> { - SubmitTransaction::::submit_transaction(call, None) - } } /// Provides an implementation for signing transaction payloads. 
@@ -284,7 +274,7 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, + T: SigningTypes + CreateInherent, C: AppCrypto, LocalCall, > SendUnsignedTransaction for Signer @@ -310,7 +300,7 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, + T: SigningTypes + CreateInherent, C: AppCrypto, LocalCall, > SendUnsignedTransaction for Signer @@ -457,25 +447,32 @@ pub trait SigningTypes: crate::Config { type Signature: Clone + PartialEq + core::fmt::Debug + codec::Codec + scale_info::TypeInfo; } -/// A definition of types required to submit transactions from within the runtime. -pub trait SendTransactionTypes { - /// The extrinsic type expected by the runtime. - type Extrinsic: ExtrinsicT + codec::Encode; +/// Common interface for the `CreateTransaction` trait family to unify the `Call` type. +pub trait CreateTransactionBase { + /// The extrinsic. + type Extrinsic: ExtrinsicLike + Encode; + /// The runtime's call type. /// /// This has additional bound to be able to be created from pallet-local `Call` types. - type OverarchingCall: From + codec::Encode; + type RuntimeCall: From + Encode; } -/// Create signed transaction. -/// -/// This trait is meant to be implemented by the runtime and is responsible for constructing -/// a payload to be signed and contained within the extrinsic. -/// This will most likely include creation of `SignedExtra` (a set of `SignedExtensions`). -/// Note that the result can be altered by inspecting the `Call` (for instance adjusting -/// fees, or mortality depending on the `pallet` being called). +/// Interface for creating a transaction. +pub trait CreateTransaction: CreateTransactionBase { + /// The extension. + type Extension: TypeInfo; + + /// Create a transaction using the call and the desired transaction extension. + fn create_transaction( + call: >::RuntimeCall, + extension: Self::Extension, + ) -> Self::Extrinsic; +} + +/// Interface for creating an old-school signed transaction. pub trait CreateSignedTransaction: - SendTransactionTypes + SigningTypes + CreateTransactionBase + SigningTypes { /// Attempt to create signed extrinsic data that encodes call from given account. /// @@ -483,12 +480,18 @@ pub trait CreateSignedTransaction: /// in any way it wants. /// Returns `None` if signed extrinsic could not be created (either because signing failed /// or because of any other runtime-specific reason). - fn create_transaction>( - call: Self::OverarchingCall, + fn create_signed_transaction>( + call: >::RuntimeCall, public: Self::Public, account: Self::AccountId, nonce: Self::Nonce, - ) -> Option<(Self::OverarchingCall, ::SignaturePayload)>; + ) -> Option; +} + +/// Interface for creating an inherent. +pub trait CreateInherent: CreateTransactionBase { + /// Create an inherent. + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic; } /// A message signer. @@ -516,7 +519,7 @@ pub trait SignMessage { /// Submit a signed transaction to the transaction pool. pub trait SendSignedTransaction< - T: SigningTypes + CreateSignedTransaction, + T: CreateSignedTransaction, C: AppCrypto, LocalCall, > @@ -547,13 +550,14 @@ pub trait SendSignedTransaction< account.id, account_data.nonce, ); - let (call, signature) = T::create_transaction::( + let transaction = T::create_signed_transaction::( call.into(), account.public.clone(), account.id.clone(), account_data.nonce, )?; - let res = SubmitTransaction::::submit_transaction(call, Some(signature)); + + let res = SubmitTransaction::::submit_transaction(transaction); if res.is_ok() { // increment the nonce. 
This is fine, since the code should always @@ -567,7 +571,7 @@ pub trait SendSignedTransaction< } /// Submit an unsigned transaction onchain with a signed payload -pub trait SendUnsignedTransaction, LocalCall> { +pub trait SendUnsignedTransaction, LocalCall> { /// A submission result. /// /// Should contain the submission result and the account(s) that signed the payload. @@ -590,7 +594,8 @@ pub trait SendUnsignedTransaction Option> { - Some(SubmitTransaction::::submit_unsigned_transaction(call.into())) + let xt = T::create_inherent(call.into()); + Some(SubmitTransaction::::submit_transaction(xt)) } } @@ -630,9 +635,15 @@ mod tests { type Extrinsic = TestXt; - impl SendTransactionTypes for TestRuntime { + impl CreateTransactionBase for TestRuntime { type Extrinsic = Extrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; + } + + impl CreateInherent for TestRuntime { + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } } #[derive(codec::Encode, codec::Decode)] @@ -693,7 +704,7 @@ mod tests { let _tx3 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -724,7 +735,7 @@ mod tests { let tx1 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -758,7 +769,7 @@ mod tests { let _tx2 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } @@ -790,7 +801,7 @@ mod tests { let tx1 = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx1 = Extrinsic::decode(&mut &*tx1).unwrap(); - assert_eq!(tx1.signature, None); + assert!(tx1.is_inherent()); }); } } diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index aa1094e3fe4800cc3268d14611fdb927d4247967..6b903f5b7e79eccbeacc6180e2828ea74897f72d 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -266,7 +266,10 @@ fn deposit_event_should_work() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } + dispatch_info: DispatchEventInfo { + weight: normal_base, + ..Default::default() + } } .into(), topics: vec![] @@ -275,7 +278,10 @@ fn deposit_event_should_work() { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } + dispatch_info: DispatchEventInfo { + weight: normal_base, + ..Default::default() + } } .into(), topics: vec![] @@ -300,7 +306,8 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { let normal_base = ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; - let pre_info = DispatchInfo { weight: Weight::from_parts(1000, 0), ..Default::default() }; + let pre_info = + DispatchInfo { call_weight: Weight::from_parts(1000, 0), ..Default::default() }; System::note_applied_extrinsic(&Ok(from_actual_ref_time(Some(300))), pre_info); 
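The test impls above show the minimal migration for a mock runtime; for a real runtime the same trait family typically looks as sketched below. `Runtime`, `RuntimeCall`, `UncheckedExtrinsic` and `TxExtension` are the conventional runtime items and are assumed here, as is the use of the generic extrinsic's `new_bare`/`new_transaction` constructors:

```rust
use frame_system::offchain::{CreateInherent, CreateTransaction, CreateTransactionBase};

// Shared base: which extrinsic and overarching call type offchain code should build.
impl<LocalCall> CreateTransactionBase<LocalCall> for Runtime
where
	RuntimeCall: From<LocalCall>,
{
	type Extrinsic = UncheckedExtrinsic;
	type RuntimeCall = RuntimeCall;
}

// Bare extrinsics, i.e. what `submit_unsigned_transaction` used to submit as `(call, None)`.
impl<LocalCall> CreateInherent<LocalCall> for Runtime
where
	RuntimeCall: From<LocalCall>,
{
	fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic {
		UncheckedExtrinsic::new_bare(call)
	}
}

// General transactions carrying a `TransactionExtension` pipeline instead of `SignedExtra`.
impl<LocalCall> CreateTransaction<LocalCall> for Runtime
where
	RuntimeCall: From<LocalCall>,
{
	type Extension = TxExtension;
	fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic {
		UncheckedExtrinsic::new_transaction(call, extension)
	}
}
```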
System::note_applied_extrinsic(&Ok(from_actual_ref_time(Some(1000))), pre_info); System::note_applied_extrinsic( @@ -356,7 +363,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { .base_extrinsic; assert!(normal_base != operational_base, "Test pre-condition violated"); let pre_info = DispatchInfo { - weight: Weight::from_parts(1000, 0), + call_weight: Weight::from_parts(1000, 0), class: DispatchClass::Operational, ..Default::default() }; @@ -367,7 +374,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(300, 0).saturating_add(normal_base), ..Default::default() }, @@ -378,7 +385,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), ..Default::default() }, @@ -389,7 +396,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(2), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), ..Default::default() }, @@ -400,10 +407,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(3), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), pays_fee: Pays::Yes, - ..Default::default() + class: Default::default(), }, } .into(), @@ -412,10 +419,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(4), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), pays_fee: Pays::No, - ..Default::default() + class: Default::default(), }, } .into(), @@ -424,10 +431,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(5), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 0).saturating_add(normal_base), pays_fee: Pays::No, - ..Default::default() + class: Default::default(), }, } .into(), @@ -436,10 +443,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(6), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(500, 0).saturating_add(normal_base), pays_fee: Pays::No, - ..Default::default() + class: Default::default(), }, } .into(), @@ -449,7 +456,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(7), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(999, 0).saturating_add(normal_base), ..Default::default() }, @@ -461,10 +468,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(8), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(1000, 
0).saturating_add(normal_base), pays_fee: Pays::Yes, - ..Default::default() + class: Default::default(), }, } .into(), @@ -474,10 +481,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(9), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(800, 0).saturating_add(normal_base), pays_fee: Pays::Yes, - ..Default::default() + class: Default::default(), }, } .into(), @@ -487,10 +494,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(10), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(800, 0).saturating_add(normal_base), pays_fee: Pays::No, - ..Default::default() + class: Default::default(), }, } .into(), @@ -499,10 +506,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(11), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { + dispatch_info: DispatchEventInfo { weight: Weight::from_parts(300, 0).saturating_add(operational_base), class: DispatchClass::Operational, - ..Default::default() + pays_fee: Default::default(), }, } .into(), diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 4161a97f3cde6324ab5ac924f8694c4a015acd46..afa03ceb12eb8942e540119c5f5a898bd6859566 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -21,6 +21,7 @@ codec = { features = [ ], workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } sp-core = { workspace = true } @@ -35,6 +36,7 @@ pallet-balances = { workspace = true, default-features = true } default = ["std"] std = [ "codec/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "pallet-balances/std", @@ -44,6 +46,13 @@ std = [ "sp-io/std", "sp-runtime/std", ] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index e6a60e9c850fa1fdfd9f04b7e7f09f7b95903ff2..7c98d157f6ffd5b7e89645aae4d8a8af90c38c49 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -18,6 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies sp-runtime = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-asset-conversion = { workspace = true } @@ -36,6 +37,7 @@ pallet-balances = { workspace = true, default-features = true } default = ["std"] std = [ "codec/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "pallet-asset-conversion/std", @@ -48,6 +50,16 @@ 
std = [ "sp-runtime/std", "sp-storage/std", ] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-conversion/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md b/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md index eccba773673e690a6d415a4c61d267ba75a1c12e..fcd1527526e98ecf45c7979bb40a2d59aad93452 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/README.md @@ -16,6 +16,6 @@ asset. ### Integration This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means you should include both pallets in your `construct_runtime` macro, but only include this -pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). License: Apache-2.0 diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..97eff03d849d02bb49f7ab63ccacd6294a18a7c0 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/benchmarking.rs @@ -0,0 +1,127 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for Asset Conversion Tx Payment Pallet's transaction extension + +extern crate alloc; + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::*, +}; +use frame_system::RawOrigin; +use sp_runtime::traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable, +}; + +#[benchmarks(where + T::RuntimeOrigin: AsTransactionAuthorizedOrigin, + T::RuntimeCall: Dispatchable, + T::AssetId: Send + Sync, + BalanceOf: Send + + Sync + + From, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_asset_tx_payment_zero() { + let caller: T::AccountId = account("caller", 0, 0); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(0u64.into(), None); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::zero(), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + pays_fee: Pays::No, + }; + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }; + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_native() { + let caller: T::AccountId = account("caller", 0, 0); + let (fun_asset_id, _) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(10u64.into(), None); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::from_parts(10, 0), + extension_weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + // Submit a lower post info weight to trigger the refund path. + let post_info = + PostDispatchInfo { actual_weight: Some(Weight::from_parts(5, 0)), pays_fee: Pays::Yes }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_asset() { + let caller: T::AccountId = account("caller", 0, 0); + let (fun_asset_id, asset_id) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + + let tip = 10u64.into(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(tip, Some(asset_id)); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::from_parts(10, 0), + extension_weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + // Submit a lower post info weight to trigger the refund path. 
+ let post_info = + PostDispatchInfo { actual_weight: Some(Weight::from_parts(5, 0)), pays_fee: Pays::Yes }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs index 825a35e621382d65ba60c804b258b866be6b8f1a..787f6b122e86e873bafd6ab32d5cbe0d85e35697 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs @@ -20,8 +20,8 @@ //! //! ## Overview //! -//! This pallet provides a `SignedExtension` with an optional `AssetId` that specifies the asset -//! to be used for payment (defaulting to the native token on `None`). It expects an +//! This pallet provides a `TransactionExtension` with an optional `AssetId` that specifies the +//! asset to be used for payment (defaulting to the native token on `None`). It expects an //! [`OnChargeAssetTransaction`] implementation analogous to [`pallet-transaction-payment`]. The //! included [`SwapAssetAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the //! fee amount by converting the fee calculated by [`pallet-transaction-payment`] in the native @@ -31,7 +31,7 @@ //! //! This pallet does not have any dispatchable calls or storage. It wraps FRAME's Transaction //! Payment pallet and functions as a replacement. This means you should include both pallets in -//! your `construct_runtime` macro, but only include this pallet's [`SignedExtension`] +//! your `construct_runtime` macro, but only include this pallet's [`TransactionExtension`] //! ([`ChargeAssetTxPayment`]). //! //! ## Terminology @@ -50,21 +50,29 @@ use frame_support::{ traits::IsType, DefaultNoBound, }; -use pallet_transaction_payment::OnChargeTransaction; +use pallet_transaction_payment::{ChargeTransactionPayment, OnChargeTransaction}; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{TransactionValidity, TransactionValidityError, ValidTransaction}, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, RefundWeight, + TransactionExtension, ValidateResult, Zero, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +pub mod weights; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; mod payment; -use frame_support::traits::tokens::AssetId; +use frame_support::{pallet_prelude::Weight, traits::tokens::AssetId}; pub use payment::*; +pub use weights::WeightInfo; /// Balance type alias for balances of the chain's native asset. pub(crate) type BalanceOf = as OnChargeTransaction>::Balance; @@ -112,11 +120,30 @@ pub mod pallet { Balance = BalanceOf, AssetId = Self::AssetId, >; + /// The weight information of this pallet. 
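To make the integration note in these docs concrete: both `pallet_transaction_payment` and this pallet stay in `construct_runtime`, but only `ChargeAssetTxPayment` goes into the runtime's extension tuple, taking the slot `ChargeTransactionPayment` would otherwise occupy. A hedged sketch; the `TxExtension` alias and the other tuple members are the conventional ones, not mandated by this diff:

```rust
/// The transaction extension pipeline attached to every general transaction of the runtime.
pub type TxExtension = (
	frame_system::CheckNonZeroSender<Runtime>,
	frame_system::CheckSpecVersion<Runtime>,
	frame_system::CheckTxVersion<Runtime>,
	frame_system::CheckGenesis<Runtime>,
	frame_system::CheckEra<Runtime>,
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	// Fee payment in the native asset or, optionally, a pool asset; this takes the place of
	// `pallet_transaction_payment::ChargeTransactionPayment<Runtime>`.
	pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
);
```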
+ type WeightInfo: WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + /// Benchmark helper + type BenchmarkHelper: BenchmarkHelperTrait< + Self::AccountId, + Self::AssetId, + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId, + >; } #[pallet::pallet] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `ChargeAssetTxPayment` transaction extension. + pub trait BenchmarkHelperTrait { + /// Returns the `AssetId` to be used in the liquidity pool by the benchmarking code. + fn create_asset_id_parameter(id: u32) -> (FunAssetIdParameter, AssetIdParameter); + /// Create a liquidity pool for a given asset and sufficiently endow accounts to benchmark + /// the extension. + fn setup_balances_and_pool(asset_id: FunAssetIdParameter, account: AccountId); + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -168,9 +195,8 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { - let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); if fee.is_zero() { Ok((fee, InitialPayment::Nothing)) @@ -189,6 +215,28 @@ where .map(|payment| (fee, InitialPayment::Native(payment))) } } + + /// Fee withdrawal logic dry-run that dispatches to either `OnChargeAssetTransaction` or + /// `OnChargeTransaction`. + fn can_withdraw_fee( + &self, + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + fee: BalanceOf, + ) -> Result<(), TransactionValidityError> { + debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); + if fee.is_zero() { + Ok(()) + } else if let Some(asset_id) = &self.asset_id { + T::OnChargeAssetTransaction::can_withdraw_fee(who, asset_id.clone(), fee.into()) + } else { + as OnChargeTransaction>::can_withdraw_fee( + who, call, info, fee, self.tip, + ) + .map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() }) + } + } } impl core::fmt::Debug for ChargeAssetTxPayment { @@ -202,108 +250,179 @@ impl core::fmt::Debug for ChargeAssetTxPayment { } } -impl SignedExtension for ChargeAssetTxPayment +/// The info passed between the validate and prepare steps for the `ChargeAssetTxPayment` extension. +pub enum Val { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // transaction fee + fee: BalanceOf, + }, + NoCharge, +} + +/// The info passed between the prepare and post-dispatch steps for the `ChargeAssetTxPayment` +/// extension. 
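On the runtime side this adds two items to the pallet's `Config`: the extension weights and, only under `runtime-benchmarks`, a helper that creates the asset and liquidity pool the benchmarks above need. A hedged sketch; `Runtime`, `RuntimeEvent`, `NativeAndAssetsSwap` (some existing `OnChargeAssetTransaction` implementation) and `AssetConversionTxHelper` are assumed names, and the generated `SubstrateWeight` path follows the usual weight-template convention:

```rust
impl pallet_asset_conversion_tx_payment::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type AssetId = u32;
	type OnChargeAssetTransaction = NativeAndAssetsSwap;
	// New: benchmarked weights for the `ChargeAssetTxPayment` extension.
	type WeightInfo = pallet_asset_conversion_tx_payment::weights::SubstrateWeight<Runtime>;
	// New: only compiled for benchmarking; creates an asset id and funds a pool for it.
	#[cfg(feature = "runtime-benchmarks")]
	type BenchmarkHelper = AssetConversionTxHelper;
}
```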
+pub enum Pre { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // imbalance resulting from withdrawing the fee + initial_payment: InitialPayment, + // weight used by the extension + weight: Weight, + }, + NoCharge { + // weight initially estimated by the extension, to be refunded + refund: Weight, + }, +} + +impl TransactionExtension for ChargeAssetTxPayment where T::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync, + BalanceOf: Send + Sync + From, T::AssetId: Send + Sync, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = ( - // tip - BalanceOf, - // who paid the fee - Self::AccountId, - // imbalance resulting from withdrawing the fee - InitialPayment, - ); + type Implicit = (); + type Val = Val; + type Pre = Pre; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + fn weight(&self, _: &T::RuntimeCall) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - use pallet_transaction_payment::ChargeTransactionPayment; - let (fee, _) = self.withdraw_fee(who, call, info, len)?; + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let Some(who) = origin.as_system_origin_signer() else { + return Ok((ValidTransaction::default(), Val::NoCharge, origin)) + }; + // Non-mutating call of `compute_fee` to calculate the fee used in the transaction priority. + let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); + self.can_withdraw_fee(&who, call, info, fee)?; let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); - Ok(ValidTransaction { priority, ..Default::default() }) + let validity = ValidTransaction { priority, ..Default::default() }; + let val = Val::Charge { tip: self.tip, who: who.clone(), fee }; + Ok((validity, val, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, ) -> Result { - let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment)) + match val { + Val::Charge { tip, who, fee } => { + // Mutating call of `withdraw_fee` to actually charge for the transaction. 
+ let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; + Ok(Pre::Charge { tip, who, initial_payment, weight: self.weight(call) }) + }, + Val::NoCharge => Ok(Pre::NoCharge { refund: self.weight(call) }), + } } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, _result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment)) = pre { - match initial_payment { - InitialPayment::Native(already_withdrawn) => { - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - T::OnChargeTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee, - tip, - already_withdrawn, - )?; - pallet_transaction_payment::Pallet::::deposit_fee_paid_event( - who, actual_fee, tip, - ); - }, - InitialPayment::Asset((asset_id, already_withdrawn)) => { - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - let converted_fee = T::OnChargeAssetTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee, - tip, - asset_id.clone(), - already_withdrawn, - )?; - Pallet::::deposit_event(Event::::AssetTxFeePaid { - who, - actual_fee: converted_fee, - tip, - asset_id, - }); - }, - InitialPayment::Nothing => { - // `actual_fee` should be zero here for any signed extrinsic. It would be - // non-zero here in case of unsigned extrinsics as they don't pay fees but - // `compute_actual_fee` is not aware of them. In both cases it's fine to just - // move ahead without adjusting the fee, though, so we do nothing. - debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - }, - } - } + ) -> Result { + let (tip, who, initial_payment, extension_weight) = match pre { + Pre::Charge { tip, who, initial_payment, weight } => + (tip, who, initial_payment, weight), + Pre::NoCharge { refund } => { + // No-op: Refund everything + return Ok(refund) + }, + }; - Ok(()) + match initial_payment { + InitialPayment::Native(already_withdrawn) => { + // Take into account the weight used by this extension before calculating the + // refund. + let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_native(); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); + let mut actual_post_info = *post_info; + actual_post_info.refund(unspent_weight); + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, + info, + &actual_post_info, + tip, + ); + T::OnChargeTransaction::correct_and_deposit_fee( + &who, + info, + &actual_post_info, + actual_fee, + tip, + already_withdrawn, + )?; + pallet_transaction_payment::Pallet::::deposit_fee_paid_event( + who, actual_fee, tip, + ); + Ok(unspent_weight) + }, + InitialPayment::Asset((asset_id, already_withdrawn)) => { + // Take into account the weight used by this extension before calculating the + // refund. 
+ let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_asset(); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); + let mut actual_post_info = *post_info; + actual_post_info.refund(unspent_weight); + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, + info, + &actual_post_info, + tip, + ); + let converted_fee = T::OnChargeAssetTransaction::correct_and_deposit_fee( + &who, + info, + &actual_post_info, + actual_fee, + tip, + asset_id.clone(), + already_withdrawn, + )?; + + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee: converted_fee, + tip, + asset_id, + }); + + Ok(unspent_weight) + }, + InitialPayment::Nothing => { + // `actual_fee` should be zero here for any signed extrinsic. It would be + // non-zero here in case of unsigned extrinsics as they don't pay fees but + // `compute_actual_fee` is not aware of them. In both cases it's fine to just + // move ahead without adjusting the fee, though, so we do nothing. + debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); + Ok(extension_weight + .saturating_sub(::WeightInfo::charge_asset_tx_payment_zero())) + }, + } } } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index acfd43d0a7cb9520ac14974f439eca7530e7b3a8..a86b86c223ef3289e3d41d69d87a83b15658dfe1 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -145,6 +145,14 @@ impl OnUnbalanced::AccountId, } } +pub struct MockTxPaymentWeights; + +impl pallet_transaction_payment::WeightInfo for MockTxPaymentWeights { + fn charge_transaction_payment() -> Weight { + Weight::from_parts(10, 0) + } +} + pub struct DealWithFungiblesFees; impl OnUnbalanced> for DealWithFungiblesFees { fn on_unbalanceds( @@ -167,8 +175,8 @@ impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = FungibleAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; - type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = MockTxPaymentWeights; } type AssetId = u32; @@ -266,9 +274,95 @@ impl pallet_asset_conversion::Config for Runtime { } } +/// Weights used in testing. 
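To make the refund bookkeeping in `post_dispatch_details` above concrete: the extension compares the weight it advertised up front with the weight the executed path actually needed, hands the difference back through the post-dispatch info's `refund`, and also returns it so the caller can release it from the block. A small standalone sketch of that arithmetic using the mock figures defined just below (15 for the native path, 20 for the asset path, 0 for the zero path); the numbers are illustrative only:

```rust
use frame_support::weights::Weight;

fn main() {
    // Weight the extension advertised before dispatch (asset path in the mock).
    let advertised = Weight::from_parts(20, 0);

    // Case 1: the asset path actually ran, so there is nothing to give back.
    let actual_asset_path = Weight::from_parts(20, 0);
    assert_eq!(advertised.saturating_sub(actual_asset_path), Weight::zero());

    // Case 2: the fee turned out to be zero (`InitialPayment::Nothing`), so only
    // the `charge_asset_tx_payment_zero` work (0 in the mock) was done and the
    // whole advertised weight is refunded.
    let actual_zero_path = Weight::from_parts(0, 0);
    assert_eq!(advertised.saturating_sub(actual_zero_path), Weight::from_parts(20, 0));
}
```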
+pub struct MockWeights; + +impl WeightInfo for MockWeights { + fn charge_asset_tx_payment_zero() -> Weight { + Weight::from_parts(0, 0) + } + + fn charge_asset_tx_payment_native() -> Weight { + Weight::from_parts(15, 0) + } + + fn charge_asset_tx_payment_asset() -> Weight { + Weight::from_parts(20, 0) + } +} + impl Config for Runtime { type RuntimeEvent = RuntimeEvent; type AssetId = NativeOrWithId; type OnChargeAssetTransaction = SwapAssetAdapter; + type WeightInfo = MockWeights; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = Helper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + let base_weight = 5; + let balance_factor = 100; + crate::tests::ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct Helper; + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelperTrait, NativeOrWithId> for Helper { + fn create_asset_id_parameter(id: u32) -> (NativeOrWithId, NativeOrWithId) { + (NativeOrWithId::WithId(id), NativeOrWithId::WithId(id)) + } + + fn setup_balances_and_pool(asset_id: NativeOrWithId, account: u64) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + use sp_runtime::traits::StaticLookup; + let NativeOrWithId::WithId(asset_idx) = asset_id.clone() else { unimplemented!() }; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_idx.into(), + 42, /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = 12; + assert_ok!(Balances::force_set_balance(RuntimeOrigin::root(), lp_provider, u64::MAX / 2)); + let lp_provider_account = ::Lookup::unlookup(lp_provider); + assert_ok!(Assets::mint_into(asset_idx, &lp_provider_account, u64::MAX / 2)); + + let token_1 = Box::new(NativeOrWithId::Native); + let token_2 = Box::new(asset_id); + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider), + token_1.clone(), + token_2.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider), + token_1, + token_2, + (u32::MAX / 8).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider_account, + )); + + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&account, u32::MAX.into()); + + let beneficiary = ::Lookup::unlookup(account); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_idx, &beneficiary, balance)); + assert_eq!(Assets::balance(asset_idx, account), balance); + } } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs index dc7faecd56084d924c6e0a57fa0c6052acb4bd38..05182c3c9ee65d6102b39f2b704a2337a8ab8116 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/payment.rs @@ -23,7 +23,7 @@ use frame_support::{ defensive, ensure, traits::{ fungibles, - tokens::{Balance, Fortitude, Precision, Preservation}, + tokens::{Balance, Fortitude, Precision, Preservation, WithdrawConsequence}, Defensive, OnUnbalanced, SameOrOther, }, unsigned::TransactionValidityError, @@ -56,6 +56,15 @@ pub trait OnChargeAssetTransaction { tip: Self::Balance, ) -> Result; + /// Ensure payment of the transaction fees can be withdrawn. + /// + /// Note: The `fee` already includes the tip. 
+ fn can_withdraw_fee( + who: &T::AccountId, + asset_id: Self::AssetId, + fee: Self::Balance, + ) -> Result<(), TransactionValidityError>; + /// Refund any overpaid fees and deposit the corrected amount. /// The actual fee gets calculated once the transaction is executed. /// @@ -162,6 +171,51 @@ where Ok((fee_credit, asset_fee)) } + /// Dry run of swap & withdraw the predicted fee from the transaction origin. + /// + /// Note: The `fee` already includes the tip. + /// + /// Returns an error if the total amount in native currency can't be exchanged for `asset_id`. + fn can_withdraw_fee( + who: &T::AccountId, + asset_id: Self::AssetId, + fee: BalanceOf, + ) -> Result<(), TransactionValidityError> { + if asset_id == A::get() { + // The `asset_id` is the target asset, we do not need to swap. + match F::can_withdraw(asset_id.clone(), who, fee) { + WithdrawConsequence::BalanceLow | + WithdrawConsequence::UnknownAsset | + WithdrawConsequence::Underflow | + WithdrawConsequence::Overflow | + WithdrawConsequence::Frozen => + return Err(TransactionValidityError::from(InvalidTransaction::Payment)), + WithdrawConsequence::Success | + WithdrawConsequence::ReducedToZero(_) | + WithdrawConsequence::WouldDie => return Ok(()), + } + } + + let asset_fee = + S::quote_price_tokens_for_exact_tokens(asset_id.clone(), A::get(), fee, true) + .ok_or(InvalidTransaction::Payment)?; + + // Ensure we can withdraw enough `asset_id` for the swap. + match F::can_withdraw(asset_id.clone(), who, asset_fee) { + WithdrawConsequence::BalanceLow | + WithdrawConsequence::UnknownAsset | + WithdrawConsequence::Underflow | + WithdrawConsequence::Overflow | + WithdrawConsequence::Frozen => + return Err(TransactionValidityError::from(InvalidTransaction::Payment)), + WithdrawConsequence::Success | + WithdrawConsequence::ReducedToZero(_) | + WithdrawConsequence::WouldDie => {}, + }; + + Ok(()) + } + fn correct_and_deposit_fee( who: &T::AccountId, _dispatch_info: &DispatchInfoOf<::RuntimeCall>, diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs index aab657199533ec2714ee2b14813044e02e83d205..4312aa9a452f809a01782a10c290d594e1fbbf24 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs @@ -17,19 +17,23 @@ use super::*; use frame_support::{ assert_ok, - dispatch::{DispatchInfo, PostDispatchInfo}, + dispatch::{DispatchInfo, GetDispatchInfo, PostDispatchInfo}, pallet_prelude::*, traits::{ fungible::{Inspect, NativeOrWithId}, fungibles::{Inspect as FungiblesInspect, Mutate}, tokens::{Fortitude, Precision, Preservation}, + OriginTrait, }, weights::Weight, }; use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; use pallet_balances::Call as BalancesCall; -use sp_runtime::{traits::StaticLookup, BuildStorage}; +use sp_runtime::{ + traits::{DispatchTransaction, StaticLookup}, + BuildStorage, +}; const CALL: &::RuntimeCall = &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); @@ -92,7 +96,7 @@ impl ExtBuilder { /// create a transaction info struct from weight. Handy to avoid building the whole struct. 
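The `WithdrawConsequence` matching introduced in `can_withdraw_fee` above boils down to one rule: any outcome other than a successful withdrawal (including one that empties or reaps the account) is reported as a payment error. A condensed, free-standing restatement of that rule, using only the `frame_support` and `sp_runtime` types the patch already imports:

```rust
use frame_support::traits::tokens::WithdrawConsequence;
use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError};

/// Map a dry-run withdrawal outcome to the extension's validity result.
fn check_consequence<Balance>(
    consequence: WithdrawConsequence<Balance>,
) -> Result<(), TransactionValidityError> {
    match consequence {
        // The withdrawal would succeed, possibly reducing the account to zero.
        WithdrawConsequence::Success |
        WithdrawConsequence::ReducedToZero(_) |
        WithdrawConsequence::WouldDie => Ok(()),
        // BalanceLow, UnknownAsset, Frozen, Underflow, Overflow, ...
        _ => Err(InvalidTransaction::Payment.into()),
    }
}
```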
pub fn info_from_weight(w: Weight) -> DispatchInfo { // pays_fee: Pays::Yes -- class: DispatchClass::Normal - DispatchInfo { weight: w, ..Default::default() } + DispatchInfo { call_weight: w, ..Default::default() } } fn post_info_from_weight(w: Weight) -> PostDispatchInfo { @@ -161,35 +165,45 @@ fn transaction_payment_in_native_possible() { .build() .execute_with(|| { let len = 10; - let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(WEIGHT_5), len) - .unwrap(); + let mut info = info_from_weight(WEIGHT_5); + let ext = ChargeAssetTxPayment::::from(0, None); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len).unwrap(); let initial_balance = 10 * balance_factor; - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_5), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, &default_post_info(), len, - &Ok(()) + &Ok(()), )); - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); - let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(WEIGHT_100), len) - .unwrap(); + let mut info = info_from_weight(WEIGHT_100); + let ext = ChargeAssetTxPayment::::from(5 /* tipped */, None); + let extension_weight = ext.weight(CALL); + info.extension_weight = extension_weight; + let (pre, _) = ext.validate_and_prepare(Some(2).into(), CALL, &info, len).unwrap(); let initial_balance_for_2 = 20 * balance_factor; - assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_100), - &post_info_from_weight(WEIGHT_50), + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 15 - 5); + let call_actual_weight = WEIGHT_50; + let post_info = post_info_from_weight( + info.call_weight + .saturating_sub(call_actual_weight) + .saturating_add(extension_weight), + ); + // The extension weight refund should be taken into account in `post_dispatch`. 
+ assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, + &post_info, len, - &Ok(()) + &Ok(()), )); - assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5); + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 15 - 5); }); } @@ -240,8 +254,8 @@ fn transaction_payment_in_asset_possible() { let fee_in_asset = input_quote.unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -255,12 +269,12 @@ fn transaction_payment_in_asset_possible() { amount: fee_in_asset, })); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(WEIGHT_5), // estimated tx weight &default_post_info(), // weight actually used == estimated len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); @@ -298,12 +312,8 @@ fn transaction_payment_in_asset_fails_if_no_pool_for_that_asset() { assert_eq!(Assets::balance(asset_id, caller), balance); let len = 10; - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())).pre_dispatch( - &caller, - CALL, - &info_from_weight(WEIGHT_5), - len, - ); + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len); // As there is no pool in the dex set up for this asset, conversion should fail. 
assert!(pre.is_err()); @@ -353,8 +363,8 @@ fn transaction_payment_without_fee() { assert_eq!(input_quote, Some(201)); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // assert that native balance is not used @@ -371,12 +381,12 @@ fn transaction_payment_without_fee() { .unwrap(); assert_eq!(refund, 199); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(WEIGHT_5), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), )); // caller should get refunded @@ -419,24 +429,29 @@ fn asset_transaction_payment_with_tip_and_refund() { let weight = 100; let tip = 5; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + let ext_weight = ext.weight(CALL); let len = 10; - let fee_in_native = base_weight + weight + len as u64 + tip; + let fee_in_native = base_weight + weight + ext_weight.ref_time() + len as u64 + tip; let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( NativeOrWithId::WithId(asset_id), NativeOrWithId::Native, fee_in_native, true, ); - assert_eq!(input_quote, Some(1206)); + assert_eq!(input_quote, Some(1407)); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_100), len) - .unwrap(); + let mut info = info_from_weight(WEIGHT_100); + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(caller).into(), CALL, &info, len).unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); let final_weight = 50; - let expected_fee = fee_in_native - final_weight - tip; + let weight_refund = weight - final_weight; + let ext_weight_refund = ext_weight - MockWeights::charge_asset_tx_payment_asset(); + let expected_fee = fee_in_native - weight_refund - ext_weight_refund.ref_time() - tip; let expected_token_refund = AssetConversion::quote_price_exact_tokens_for_tokens( NativeOrWithId::Native, NativeOrWithId::WithId(asset_id), @@ -451,12 +466,13 @@ fn asset_transaction_payment_with_tip_and_refund() { amount: fee_in_asset, })); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_100), - &post_info_from_weight(WEIGHT_50), + let post_info = post_info_from_weight(WEIGHT_50.saturating_add(ext_weight)); + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, + &post_info, len, - &Ok(()) + &Ok(()), )); assert_eq!(TipUnbalancedAmount::get(), tip); @@ -522,18 +538,18 @@ fn payment_from_account_with_only_assets() { .unwrap(); assert_eq!(fee_in_asset, 201); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_5), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_weight(WEIGHT_5), len) .unwrap(); // check that fee was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, 
&info_from_weight(WEIGHT_5), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); assert_eq!(Balances::free_balance(caller), 0); @@ -578,18 +594,18 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { // there will be no conversion when the fee is zero { - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies there are no fees assert_eq!(Assets::balance(asset_id, caller), balance); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance); } @@ -604,17 +620,22 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { ) .unwrap(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); }); @@ -654,14 +675,16 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // calculated fee is greater than 0 assert!(fee > 0); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id.into())) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); - let (_tip, _who, initial_payment) = ⪯ + let Pre::Charge { initial_payment, .. 
} = &pre else { + panic!("Expected Charge"); + }; let not_paying = match initial_payment { &InitialPayment::Nothing => true, _ => false, @@ -670,63 +693,12 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::Yes), len, - &Ok(()) - )); - assert_eq!(Assets::balance(asset_id, caller), balance); - }); -} - -#[test] -fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { - let base_weight = 1; - ExtBuilder::default() - .balance_factor(100) - .base_weight(Weight::from_parts(base_weight, 0)) - .build() - .execute_with(|| { - // create the asset - let asset_id = 1; - let min_balance = 100; - assert_ok!(Assets::force_create( - RuntimeOrigin::root(), - asset_id.into(), - 42, /* owner */ - true, /* is_sufficient */ - min_balance - )); - - // mint into the caller account - let caller = 333; - let beneficiary = ::Lookup::unlookup(caller); - let balance = 1000; - - assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); - assert_eq!(Assets::balance(asset_id, caller), balance); - - let weight = 1; - let len = 1; - ChargeAssetTxPayment::::pre_dispatch_unsigned( - CALL, - &info_from_weight(Weight::from_parts(weight, 0)), - len, - ) - .unwrap(); - - assert_eq!(Assets::balance(asset_id, caller), balance); - - // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the - // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - None, - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_pays(Pays::Yes), - len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance); }); @@ -749,25 +721,34 @@ fn fee_with_native_asset_passed_with_id() { assert_eq!(Balances::free_balance(caller), caller_balance); let tip = 10; - let weight = 100; + let call_weight = 100; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + let extension_weight = ext.weight(CALL); let len = 5; - let initial_fee = base_weight + weight + len as u64 + tip; + let initial_fee = + base_weight + call_weight + extension_weight.ref_time() + len as u64 + tip; - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_100), len) - .unwrap(); + let mut info = info_from_weight(WEIGHT_100); + info.extension_weight = extension_weight; + let (pre, _) = ext.validate_and_prepare(Some(caller).into(), CALL, &info, len).unwrap(); assert_eq!(Balances::free_balance(caller), caller_balance - initial_fee); let final_weight = 50; + // No refunds from the extension weight itself. 
let expected_fee = initial_fee - final_weight; - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_100), - &post_info_from_weight(WEIGHT_50), - len, - &Ok(()) - )); + let post_info = post_info_from_weight(WEIGHT_50.saturating_add(extension_weight)); + assert_eq!( + ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info_from_weight(WEIGHT_100), + &post_info, + len, + &Ok(()), + ) + .unwrap(), + Weight::zero() + ); assert_eq!(Balances::free_balance(caller), caller_balance - expected_fee); @@ -809,10 +790,13 @@ fn transfer_add_and_remove_account() { assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); assert_eq!(Assets::balance(asset_id, caller), balance); - let weight = 100; + let call_weight = 100; let tip = 5; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + let extension_weight = ext.weight(CALL); let len = 10; - let fee_in_native = base_weight + weight + len as u64 + tip; + let fee_in_native = + base_weight + call_weight + extension_weight.ref_time() + len as u64 + tip; let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( NativeOrWithId::WithId(asset_id), NativeOrWithId::Native, @@ -822,8 +806,10 @@ fn transfer_add_and_remove_account() { assert!(!input_quote.unwrap().is_zero()); let fee_in_asset = input_quote.unwrap(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) - .pre_dispatch(&caller, CALL, &info_from_weight(WEIGHT_100), len) + let mut info = info_from_weight(WEIGHT_100); + info.extension_weight = extension_weight; + let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) + .validate_and_prepare(Some(caller).into(), CALL, &info, len) .unwrap(); assert_eq!(Assets::balance(asset_id, &caller), balance - fee_in_asset); @@ -838,7 +824,8 @@ fn transfer_add_and_remove_account() { Fortitude::Force )); - let final_weight = 50; + // Actual call weight + actual extension weight. + let final_weight = 50 + 20; let final_fee_in_native = fee_in_native - final_weight - tip; let token_refund = AssetConversion::quote_price_exact_tokens_for_tokens( NativeOrWithId::Native, @@ -851,12 +838,12 @@ fn transfer_add_and_remove_account() { // make sure the refund amount is enough to create the account. assert!(token_refund >= min_balance); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(WEIGHT_100), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, &post_info_from_weight(WEIGHT_50), len, - &Ok(()) + &Ok(()), )); // fee paid with no refund. @@ -867,3 +854,40 @@ fn transfer_add_and_remove_account() { assert_eq!(Assets::balance(asset_id, caller), 0); }); } + +#[test] +fn no_fee_and_no_weight_for_other_origins() { + ExtBuilder::default().build().execute_with(|| { + let ext = ChargeAssetTxPayment::::from(0, None); + + let mut info = CALL.get_dispatch_info(); + info.extension_weight = ext.weight(CALL); + + // Ensure we test the refund. 
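A note on the expected balances in these tests: with the mock's one-to-one weight-to-fee and length-to-fee mappings, the charged fee is simply `base_weight + call_weight + extension_weight + len + tip`, and the post-dispatch correction only refunds whatever part of the call weight (and any over-estimated extension weight) went unused. A tiny worked example with made-up numbers in the same spirit, not copied from a particular test:

```rust
fn main() {
    let base_weight: u64 = 5;       // extrinsic base weight
    let call_weight: u64 = 100;     // `DispatchInfo::call_weight`
    let extension_weight: u64 = 20; // `ext.weight(call)`, asset path in the mock
    let len: u64 = 10;              // encoded transaction length
    let tip: u64 = 5;

    let initial_fee = base_weight + call_weight + extension_weight + len + tip;
    assert_eq!(initial_fee, 140);

    // The call used only 50 of its declared 100 weight; the extension used all 20.
    let actual_call_weight: u64 = 50;
    let refund = (call_weight - actual_call_weight) + (extension_weight - 20);
    let actual_fee = initial_fee - refund;
    assert_eq!(actual_fee, 90); // the tip is never refunded
}
```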
+ assert!(info.extension_weight != Weight::zero()); + + let len = CALL.encoded_size(); + + let origin = frame_system::RawOrigin::Root.into(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len).unwrap(); + + assert!(origin.as_system_ref().unwrap().is_root()); + + let pd_res = Ok(()); + let mut post_info = frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(info.total_weight()), + pays_fee: Default::default(), + }; + + as TransactionExtension>::post_dispatch( + pre, + &info, + &mut post_info, + len, + &pd_res, + ) + .unwrap(); + + assert_eq!(post_info.actual_weight, Some(info.call_weight)); + }) +} diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..f95e49f80730318d4957538ded3812c661009be0 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs @@ -0,0 +1,150 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_conversion_tx_payment`. +pub trait WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight; + fn charge_asset_tx_payment_native() -> Weight; + fn charge_asset_tx_payment_asset() -> Weight; +} + +/// Weights for `pallet_asset_conversion_tx_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 628_000 picoseconds. 
+ Weight::from_parts(694_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 34_410_000 picoseconds. + Weight::from_parts(35_263_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `888` + // Estimated: `6208` + // Minimum execution time: 112_432_000 picoseconds. + Weight::from_parts(113_992_000, 6208) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(694_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 34_410_000 picoseconds. 
+ Weight::from_parts(35_263_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `888` + // Estimated: `6208` + // Minimum execution time: 112_432_000 picoseconds. + Weight::from_parts(113_992_000, 6208) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } +} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 8d39dea8c62bce344d04c6178ab1d861a6999ec7..89fe5bfe7a42fd32cfd6c46fa485636de7c63c01 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -64,6 +64,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] try-runtime = [ diff --git a/substrate/frame/transaction-payment/asset-tx-payment/README.md b/substrate/frame/transaction-payment/asset-tx-payment/README.md index fc860347d85fa37fc98890b5d4b2f56722040a8e..933ce13b0ee6f7a23110772a82b51ab15fc7dd2a 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/README.md +++ b/substrate/frame/transaction-payment/asset-tx-payment/README.md @@ -16,6 +16,6 @@ asset. ### Integration This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means you should include both pallets in your `construct_runtime` macro, but only include this -pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). License: Apache-2.0 diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..25902bf452b2c6ad03004215411c04f849af0d4f --- /dev/null +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/benchmarking.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
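As a reading aid for the generated functions in `weights.rs` above: each returned weight is the benchmarked ref-time/proof-size pair plus the configured database access costs. A standalone sketch reproducing the shape of the native-path figure with the stock `RocksDbWeight` constants:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

fn main() {
    // Same shape as `charge_asset_tx_payment_native` in the `()` impl:
    // benchmarked execution weight plus three storage reads.
    let base = Weight::from_parts(35_263_000, 1733);
    let with_db = base.saturating_add(RocksDbWeight::get().reads(3));

    // The read costs only add ref-time; proof size is unchanged here.
    assert!(with_db.ref_time() > base.ref_time());
    assert_eq!(with_db.proof_size(), base.proof_size());
}
```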
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for Asset Tx Payment Pallet's transaction extension + +extern crate alloc; + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + pallet_prelude::*, +}; +use frame_system::RawOrigin; +use sp_runtime::traits::{ + AsSystemOriginSigner, AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable, +}; + +#[benchmarks(where + T::RuntimeOrigin: AsTransactionAuthorizedOrigin, + T::RuntimeCall: Dispatchable, + AssetBalanceOf: Send + Sync, + BalanceOf: Send + Sync + From + IsType>, + ChargeAssetIdOf: Send + Sync, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, + Credit: IsType>, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_asset_tx_payment_zero() { + let caller: T::AccountId = account("caller", 0, 0); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(0u32.into(), None); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::zero(), + extension_weight: Weight::zero(), + class: DispatchClass::Normal, + pays_fee: Pays::No, + }; + let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }; + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_native() { + let caller: T::AccountId = account("caller", 0, 0); + let (fun_asset_id, _) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool(fun_asset_id, caller.clone()); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(10u32.into(), None); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::from_parts(10, 0), + extension_weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller).into(), &call, &info, 0, |_| Ok(post_info)) + .unwrap() + .is_ok()); + } + } + + #[benchmark] + fn charge_asset_tx_payment_asset() { + let caller: T::AccountId = account("caller", 0, 0); + let (fun_asset_id, asset_id) = ::BenchmarkHelper::create_asset_id_parameter(1); + ::BenchmarkHelper::setup_balances_and_pool( + fun_asset_id.clone(), + caller.clone(), + ); + let tip = 10u32.into(); + let ext: ChargeAssetTxPayment = ChargeAssetTxPayment::from(tip, Some(asset_id)); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let info = DispatchInfo { + call_weight: Weight::from_parts(10, 0), + extension_weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + 
.test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 0, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs index 97f1116993fc166dd6036ae88a790eaf3cb8e34f..25aa272ba01b5aa58ba2ed6dcca4c5fe9cad30a7 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -31,13 +31,14 @@ //! This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means //! you should include both pallets in your `construct_runtime` macro, but only include this -//! pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). +//! pallet's [`TransactionExtension`] ([`ChargeAssetTxPayment`]). #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, + pallet_prelude::Weight, traits::{ tokens::{ fungibles::{Balanced, Credit, Inspect}, @@ -50,10 +51,11 @@ use frame_support::{ use pallet_transaction_payment::OnChargeTransaction; use scale_info::TypeInfo; use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + traits::{ + AsSystemOriginSigner, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, RefundWeight, + TransactionExtension, Zero, }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, }; #[cfg(test)] @@ -61,8 +63,14 @@ mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + mod payment; +pub mod weights; + pub use payment::*; +pub use weights::WeightInfo; /// Type aliases used for interaction with `OnChargeTransaction`. pub(crate) type OnChargeTransactionOf = @@ -118,11 +126,30 @@ pub mod pallet { type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. type OnChargeAssetTransaction: OnChargeAssetTransaction; + /// The weight information of this pallet. + type WeightInfo: WeightInfo; + /// Benchmark helper + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper: BenchmarkHelperTrait< + Self::AccountId, + <::Fungibles as Inspect>::AssetId, + <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId, + >; } #[pallet::pallet] pub struct Pallet(_); + #[cfg(feature = "runtime-benchmarks")] + /// Helper trait to benchmark the `ChargeAssetTxPayment` transaction extension. + pub trait BenchmarkHelperTrait { + /// Returns the `AssetId` to be used in the liquidity pool by the benchmarking code. + fn create_asset_id_parameter(id: u32) -> (FunAssetIdParameter, AssetIdParameter); + /// Create a liquidity pool for a given asset and sufficiently endow accounts to benchmark + /// the extension. 
+ fn setup_balances_and_pool(asset_id: FunAssetIdParameter, account: AccountId); + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -170,9 +197,8 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { - let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); if fee.is_zero() { Ok((fee, InitialPayment::Nothing)) @@ -194,6 +220,35 @@ where .map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() }) } } + + /// Fee withdrawal logic dry-run that dispatches to either `OnChargeAssetTransaction` or + /// `OnChargeTransaction`. + fn can_withdraw_fee( + &self, + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + fee: BalanceOf, + ) -> Result<(), TransactionValidityError> { + debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); + if fee.is_zero() { + Ok(()) + } else if let Some(asset_id) = self.asset_id { + T::OnChargeAssetTransaction::can_withdraw_fee( + who, + call, + info, + asset_id, + fee.into(), + self.tip.into(), + ) + } else { + as OnChargeTransaction>::can_withdraw_fee( + who, call, info, fee, self.tip, + ) + .map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() }) + } + } } impl core::fmt::Debug for ChargeAssetTxPayment { @@ -207,106 +262,184 @@ impl core::fmt::Debug for ChargeAssetTxPayment { } } -impl SignedExtension for ChargeAssetTxPayment +/// The info passed between the validate and prepare steps for the `ChargeAssetTxPayment` extension. +pub enum Val { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // transaction fee + fee: BalanceOf, + }, + NoCharge, +} + +/// The info passed between the prepare and post-dispatch steps for the `ChargeAssetTxPayment` +/// extension. 
+pub enum Pre { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // imbalance resulting from withdrawing the fee + initial_payment: InitialPayment, + // asset_id for the transaction payment + asset_id: Option>, + // weight used by the extension + weight: Weight, + }, + NoCharge { + // weight initially estimated by the extension, to be refunded + refund: Weight, + }, +} + +impl TransactionExtension for ChargeAssetTxPayment where T::RuntimeCall: Dispatchable, AssetBalanceOf: Send + Sync, BalanceOf: Send + Sync + From + IsType>, ChargeAssetIdOf: Send + Sync, Credit: IsType>, + ::RuntimeOrigin: AsSystemOriginSigner + Clone, { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = ( - // tip - BalanceOf, - // who paid the fee - Self::AccountId, - // imbalance resulting from withdrawing the fee - InitialPayment, - // asset_id for the transaction payment - Option>, - ); + type Implicit = (); + type Val = Val; + type Pre = Pre; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + fn weight(&self, _: &T::RuntimeCall) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { use pallet_transaction_payment::ChargeTransactionPayment; - let (fee, _) = self.withdraw_fee(who, call, info, len)?; + let Some(who) = origin.as_system_origin_signer() else { + return Ok((ValidTransaction::default(), Val::NoCharge, origin)) + }; + // Non-mutating call of `compute_fee` to calculate the fee used in the transaction priority. + let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); + self.can_withdraw_fee(&who, call, info, fee)?; let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); - Ok(ValidTransaction { priority, ..Default::default() }) + let val = Val::Charge { tip: self.tip, who: who.clone(), fee }; + let validity = ValidTransaction { priority, ..Default::default() }; + Ok((validity, val, origin)) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, ) -> Result { - let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment, self.asset_id)) + match val { + Val::Charge { tip, who, fee } => { + // Mutating call of `withdraw_fee` to actually charge for the transaction. 
+ let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; + Ok(Pre::Charge { + tip, + who, + initial_payment, + asset_id: self.asset_id, + weight: self.weight(call), + }) + }, + Val::NoCharge => Ok(Pre::NoCharge { refund: self.weight(call) }), + } } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, initial_payment, asset_id)) = pre { - match initial_payment { - InitialPayment::Native(already_withdrawn) => { - pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( - Some((tip, who, already_withdrawn)), + ) -> Result { + let (tip, who, initial_payment, asset_id, extension_weight) = match pre { + Pre::Charge { tip, who, initial_payment, asset_id, weight } => + (tip, who, initial_payment, asset_id, weight), + Pre::NoCharge { refund } => { + // No-op: Refund everything + return Ok(refund) + }, + }; + + match initial_payment { + InitialPayment::Native(already_withdrawn) => { + // Take into account the weight used by this extension before calculating the + // refund. + let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_native(); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); + let mut actual_post_info = *post_info; + actual_post_info.refund(unspent_weight); + pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch_details( + pallet_transaction_payment::Pre::Charge { + tip, + who, + imbalance: already_withdrawn, + }, + info, + &actual_post_info, + len, + result, + )?; + Ok(unspent_weight) + }, + InitialPayment::Asset(already_withdrawn) => { + let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_asset(); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); + let mut actual_post_info = *post_info; + actual_post_info.refund(unspent_weight); + let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( + len as u32, + info, + &actual_post_info, + tip, + ); + + let (converted_fee, converted_tip) = + T::OnChargeAssetTransaction::correct_and_deposit_fee( + &who, info, - post_info, - len, - result, + &actual_post_info, + actual_fee.into(), + tip.into(), + already_withdrawn.into(), )?; - }, - InitialPayment::Asset(already_withdrawn) => { - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - - let (converted_fee, converted_tip) = - T::OnChargeAssetTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee.into(), - tip.into(), - already_withdrawn.into(), - )?; - Pallet::::deposit_event(Event::::AssetTxFeePaid { - who, - actual_fee: converted_fee, - tip: converted_tip, - asset_id, - }); - }, - InitialPayment::Nothing => { - // `actual_fee` should be zero here for any signed extrinsic. It would be - // non-zero here in case of unsigned extrinsics as they don't pay fees but - // `compute_actual_fee` is not aware of them. In both cases it's fine to just - // move ahead without adjusting the fee, though, so we do nothing. 
- debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - }, - } + Pallet::::deposit_event(Event::::AssetTxFeePaid { + who, + actual_fee: converted_fee, + tip: converted_tip, + asset_id, + }); + Ok(unspent_weight) + }, + InitialPayment::Nothing => { + // `actual_fee` should be zero here for any signed extrinsic. It would be + // non-zero here in case of unsigned extrinsics as they don't pay fees but + // `compute_actual_fee` is not aware of them. In both cases it's fine to just + // move ahead without adjusting the fee, though, so we do nothing. + debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); + Ok(extension_weight + .saturating_sub(::WeightInfo::charge_asset_tx_payment_zero())) + }, } - - Ok(()) } } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index e84df1e4eb91b67a1cdb1477a6f5eac9b1b5180f..fce029bb4bfc7e7d3f3541c247b47331fc8f3e49 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -105,14 +105,22 @@ impl WeightToFeeT for TransactionByteFee { } } +pub struct MockTxPaymentWeights; + +impl pallet_transaction_payment::WeightInfo for MockTxPaymentWeights { + fn charge_transaction_payment() -> Weight { + Weight::from_parts(10, 0) + } +} + #[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)] impl pallet_transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = FungibleAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; - type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = MockTxPaymentWeights; } type AssetId = u32; @@ -168,6 +176,23 @@ impl HandleCredit for CreditToBlockAuthor { } } +/// Weights used in testing. 
+pub struct MockWeights; + +impl WeightInfo for MockWeights { + fn charge_asset_tx_payment_zero() -> Weight { + Weight::from_parts(0, 0) + } + + fn charge_asset_tx_payment_native() -> Weight { + Weight::from_parts(15, 0) + } + + fn charge_asset_tx_payment_asset() -> Weight { + Weight::from_parts(20, 0) + } +} + impl Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; @@ -175,4 +200,56 @@ impl Config for Runtime { pallet_assets::BalanceToAssetBalance, CreditToBlockAuthor, >; + type WeightInfo = MockWeights; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = Helper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + let base_weight = 5; + let balance_factor = 100; + crate::tests::ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(Weight::from_parts(base_weight, 0)) + .build() +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct Helper; + +#[cfg(feature = "runtime-benchmarks")] +impl BenchmarkHelperTrait for Helper { + fn create_asset_id_parameter(id: u32) -> (u32, u32) { + (id.into(), id.into()) + } + + fn setup_balances_and_pool(asset_id: u32, account: u64) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + use sp_runtime::traits::StaticLookup; + let min_balance = 1; + assert_ok!(Assets::force_create( + RuntimeOrigin::root(), + asset_id.into(), + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 2; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&account, u32::MAX.into()); + + let beneficiary = ::Lookup::unlookup(account); + let balance = 1000; + + assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, account), balance); + } } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs index 2486474bad45b7ad9a293491631acfbc3ef4e47f..2074b1476f45ae9187778a3c846e0b21a1097176 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs @@ -56,6 +56,18 @@ pub trait OnChargeAssetTransaction { tip: Self::Balance, ) -> Result; + /// Ensure payment of the transaction fees can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. + fn can_withdraw_fee( + who: &T::AccountId, + call: &T::RuntimeCall, + dispatch_info: &DispatchInfoOf, + asset_id: Self::AssetId, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError>; + /// After the transaction was executed the actual fee can be calculated. /// This function should refund any overpaid fees and optionally deposit /// the corrected amount. @@ -140,6 +152,32 @@ where .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment)) } + /// Ensure payment of the transaction fees can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. + fn can_withdraw_fee( + who: &T::AccountId, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + asset_id: Self::AssetId, + fee: Self::Balance, + _tip: Self::Balance, + ) -> Result<(), TransactionValidityError> { + // We don't know the precision of the underlying asset. 
Because the converted fee could be + // less than one (e.g. 0.5) but gets rounded down by integer division we introduce a minimum + // fee. + let min_converted_fee = if fee.is_zero() { Zero::zero() } else { One::one() }; + let converted_fee = CON::to_asset_balance(fee, asset_id) + .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))? + .max(min_converted_fee); + let can_withdraw = + >::can_withdraw(asset_id, who, converted_fee); + if can_withdraw != WithdrawConsequence::Success { + return Err(InvalidTransaction::Payment.into()) + } + Ok(()) + } + /// Hand the fee and the tip over to the `[HandleCredit]` implementation. /// Since the predicted fee might have been too high, parts of the fee may be refunded. /// diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs index 098ecf11dd92fd41cf1ad3bcddaa679da1d6c0fb..cd694c3e81a7c5c1db3b2748da584684f4af786d 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -17,15 +17,18 @@ use super::*; use frame_support::{ assert_ok, - dispatch::{DispatchInfo, PostDispatchInfo}, + dispatch::{DispatchInfo, GetDispatchInfo, PostDispatchInfo}, pallet_prelude::*, - traits::fungibles::Mutate, + traits::{fungibles::Mutate, OriginTrait}, weights::Weight, }; use frame_system as system; use mock::{ExtrinsicBaseWeight, *}; use pallet_balances::Call as BalancesCall; -use sp_runtime::{traits::StaticLookup, BuildStorage}; +use sp_runtime::{ + traits::{DispatchTransaction, StaticLookup}, + BuildStorage, +}; const CALL: &::RuntimeCall = &RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); @@ -88,7 +91,7 @@ impl ExtBuilder { /// create a transaction info struct from weight. Handy to avoid building the whole struct. 
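A toy version of the rounding guard implemented above, using a made-up 1:20 conversion rate (the real rate comes from the configured balance converter):

// A 10-unit native fee truncates to 0 under integer division, so the one-unit minimum is
// charged instead; a genuinely zero fee stays zero.
fn converted_fee(fee: u64) -> u64 {
    let min_converted_fee = if fee == 0 { 0 } else { 1 };
    (fee / 20).max(min_converted_fee)
}
assert_eq!(converted_fee(10), 1);
assert_eq!(converted_fee(0), 0);
assert_eq!(converted_fee(100), 5);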
pub fn info_from_weight(w: Weight) -> DispatchInfo { // pays_fee: Pays::Yes -- class: DispatchClass::Normal - DispatchInfo { weight: w, ..Default::default() } + DispatchInfo { call_weight: w, ..Default::default() } } fn post_info_from_weight(w: Weight) -> PostDispatchInfo { @@ -116,35 +119,49 @@ fn transaction_payment_in_native_possible() { .build() .execute_with(|| { let len = 10; - let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(5, 0)), len) - .unwrap(); + let mut info = info_from_weight(Weight::from_parts(5, 0)); + let ext = ChargeAssetTxPayment::::from(0, None); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len).unwrap(); let initial_balance = 10 * balance_factor; - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(5, 0)), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, &default_post_info(), len, - &Ok(()) + &Ok(()), )); - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); - let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) - .unwrap(); + let mut info = info_from_weight(Weight::from_parts(100, 0)); + let ext = ChargeAssetTxPayment::::from(5 /* tipped */, None); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(2).into(), CALL, &info, len).unwrap(); let initial_balance_for_2 = 20 * balance_factor; - assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 15 - 5); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), + let call_actual_weight = Weight::from_parts(50, 0); + // The extension weight refund should be taken into account in `post_dispatch`. 
+ let post_info = post_info_from_weight(call_actual_weight.saturating_add( + ChargeAssetTxPayment::::from(5 /* tipped */, None).weight(CALL), + )); + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, + &post_info, len, - &Ok(()) + &Ok(()), )); - assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5); + assert_eq!( + post_info.actual_weight, + Some( + call_actual_weight + .saturating_add(MockWeights::charge_asset_tx_payment_native()) + ) + ); + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 15 - 5); }); } @@ -181,8 +198,13 @@ fn transaction_payment_in_asset_possible() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -196,12 +218,12 @@ fn transaction_payment_in_asset_possible() { amount: fee, })); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee); // check that the block author gets rewarded @@ -246,8 +268,13 @@ fn transaction_payment_without_fee() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -255,12 +282,12 @@ fn transaction_payment_without_fee() { assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), )); // caller should be refunded assert_eq!(Assets::balance(asset_id, caller), balance); @@ -298,14 +325,16 @@ fn asset_transaction_payment_with_tip_and_refund() { assert_eq!(Assets::balance(asset_id, caller), balance); let weight = 100; let tip = 5; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id)); + let ext_weight = ext.weight(CALL); let len = 10; // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit - let fee_with_tip = - (base_weight + weight + len as u64 + tip) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .pre_dispatch(&caller, CALL, 
&info_from_weight(Weight::from_parts(weight, 0)), len) - .unwrap(); + let fee_with_tip = (base_weight + weight + ext_weight.ref_time() + len as u64 + tip) * + min_balance / ExistentialDeposit::get(); + let mut info = info_from_weight(Weight::from_parts(weight, 0)); + info.extension_weight = ext_weight; + let (pre, _) = ext.validate_and_prepare(Some(caller).into(), CALL, &info, len).unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { @@ -315,15 +344,22 @@ fn asset_transaction_payment_with_tip_and_refund() { })); let final_weight = 50; - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_weight(Weight::from_parts(final_weight, 0)), + let mut post_info = post_info_from_weight(Weight::from_parts(final_weight, 0)); + post_info + .actual_weight + .as_mut() + .map(|w| w.saturating_accrue(MockWeights::charge_asset_tx_payment_asset())); + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, + &info, + &post_info, len, - &Ok(()) + &Ok(()), )); - let final_fee = - fee_with_tip - (weight - final_weight) * min_balance / ExistentialDeposit::get(); + let final_fee = fee_with_tip - + (weight - final_weight + ext_weight.ref_time() - + MockWeights::charge_asset_tx_payment_asset().ref_time()) * + min_balance / ExistentialDeposit::get(); assert_eq!(Assets::balance(asset_id, caller), balance - (final_fee)); assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), final_fee); @@ -367,19 +403,24 @@ fn payment_from_account_with_only_assets() { // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); assert_eq!(Balances::free_balance(caller), 0); // check that fee was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - fee); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - fee); assert_eq!(Balances::free_balance(caller), 0); @@ -400,7 +441,12 @@ fn payment_only_with_existing_sufficient_asset() { let len = 10; // pre_dispatch fails for non-existent asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len + ) .is_err()); // create the non-sufficient asset @@ -414,7 +460,12 @@ fn payment_only_with_existing_sufficient_asset() { )); // pre_dispatch fails for non-sufficient asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len + ) .is_err()); }); } @@ -452,33 +503,38 @@ fn 
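Plugging made-up numbers into the two fee formulas of `asset_transaction_payment_with_tip_and_refund` (the test itself uses the mock's own constants):

let (base, call_weight, ext_weight, len, tip) = (10u64, 100u64, 20u64, 10u64, 5u64);
let (min_balance, existential_deposit) = (100u64, 100u64);
let fee_with_tip =
    (base + call_weight + ext_weight + len + tip) * min_balance / existential_deposit;
// Only 50 of the 100 call-weight units were used, while the asset path consumed its full
// benchmarked extension weight, so only the call portion is refunded.
let (final_call_weight, actual_ext_weight) = (50u64, 20u64);
let final_fee = fee_with_tip -
    (call_weight - final_call_weight + ext_weight - actual_ext_weight) * min_balance /
        existential_deposit;
assert_eq!((fee_with_tip, final_fee), (145, 95));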
converted_fee_is_never_zero_if_input_fee_is_not() { // naive fee calculation would round down to zero assert_eq!(fee, 0); { - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` still implies no fees assert_eq!(Assets::balance(asset_id, caller), balance); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::No), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance); } - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_parts(weight, 0)), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare( + Some(caller).into(), + CALL, + &info_from_weight(Weight::from_parts(weight, 0)), + len, + ) .unwrap(); // check that at least one coin was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - 1); - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_weight(Weight::from_parts(weight, 0)), &default_post_info(), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance - 1); }); @@ -516,12 +572,14 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); // calculated fee is greater than 0 assert!(fee > 0); - let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + let (pre, _) = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .validate_and_prepare(Some(caller).into(), CALL, &info_from_pays(Pays::No), len) .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); - let (_tip, _who, initial_payment, _asset_id) = ⪯ + let Pre::Charge { initial_payment, .. 
} = &pre else { + panic!("Expected Charge"); + }; let not_paying = match initial_payment { &InitialPayment::Nothing => true, _ => false, @@ -530,62 +588,50 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - Some(pre), + assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( + pre, &info_from_pays(Pays::No), &post_info_from_pays(Pays::Yes), len, - &Ok(()) + &Ok(()), )); assert_eq!(Assets::balance(asset_id, caller), balance); }); } #[test] -fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { - let base_weight = 1; - ExtBuilder::default() - .balance_factor(100) - .base_weight(Weight::from_parts(base_weight, 0)) - .build() - .execute_with(|| { - // create the asset - let asset_id = 1; - let min_balance = 100; - assert_ok!(Assets::force_create( - RuntimeOrigin::root(), - asset_id.into(), - 42, /* owner */ - true, /* is_sufficient */ - min_balance - )); +fn no_fee_and_no_weight_for_other_origins() { + ExtBuilder::default().build().execute_with(|| { + let ext = ChargeAssetTxPayment::::from(0, None); - // mint into the caller account - let caller = 333; - let beneficiary = ::Lookup::unlookup(caller); - let balance = 100; - assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); - assert_eq!(Assets::balance(asset_id, caller), balance); - let weight = 1; - let len = 1; - ChargeAssetTxPayment::::pre_dispatch_unsigned( - CALL, - &info_from_weight(Weight::from_parts(weight, 0)), - len, - ) - .unwrap(); + let mut info = CALL.get_dispatch_info(); + info.extension_weight = ext.weight(CALL); - assert_eq!(Assets::balance(asset_id, caller), balance); + // Ensure we test the refund. + assert!(info.extension_weight != Weight::zero()); - // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the - // initial fee) - assert_ok!(ChargeAssetTxPayment::::post_dispatch( - None, - &info_from_weight(Weight::from_parts(weight, 0)), - &post_info_from_pays(Pays::Yes), - len, - &Ok(()) - )); - assert_eq!(Assets::balance(asset_id, caller), balance); - }); + let len = CALL.encoded_size(); + + let origin = frame_system::RawOrigin::Root.into(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len).unwrap(); + + assert!(origin.as_system_ref().unwrap().is_root()); + + let pd_res = Ok(()); + let mut post_info = frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(info.total_weight()), + pays_fee: Default::default(), + }; + + as TransactionExtension>::post_dispatch( + pre, + &info, + &mut post_info, + len, + &pd_res, + ) + .unwrap(); + + assert_eq!(post_info.actual_weight, Some(info.call_weight)); + }) } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..1af1c94177d26b3316d506d1419a3d6cb052d4c2 --- /dev/null +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs @@ -0,0 +1,146 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_asset_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/asset-tx-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_asset_tx_payment`. +pub trait WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight; + fn charge_asset_tx_payment_native() -> Weight; + fn charge_asset_tx_payment_asset() -> Weight; +} + +/// Weights for `pallet_asset_tx_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(597_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 33_162_000 picoseconds. 
+ Weight::from_parts(34_716_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `747` + // Estimated: `3675` + // Minimum execution time: 44_230_000 picoseconds. + Weight::from_parts(45_297_000, 3675) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 542_000 picoseconds. + Weight::from_parts(597_000, 0) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 33_162_000 picoseconds. + Weight::from_parts(34_716_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `747` + // Estimated: `3675` + // Minimum execution time: 44_230_000 picoseconds. 
+ Weight::from_parts(45_297_000, 3675) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } +} diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 3ab38743bafdd8b4e8f86bb1f5f0ad902f83ffd4..d6ac648cefd4ca983117685e6182a080148accd7 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -21,7 +21,7 @@ //! //! ## Overview //! -//! It does this by wrapping an existing [`SignedExtension`] implementation (e.g. +//! It does this by wrapping an existing [`TransactionExtension`] implementation (e.g. //! [`pallet-transaction-payment`]) and checking if the dispatchable is feeless before applying the //! wrapped extension. If the dispatchable is indeed feeless, the extension is skipped and a custom //! event is emitted instead. Otherwise, the extension is applied as usual. @@ -31,7 +31,7 @@ //! //! This pallet wraps an existing transaction payment pallet. This means you should both pallets //! in your [`construct_runtime`](frame_support::construct_runtime) macro and -//! include this pallet's [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the +//! include this pallet's [`TransactionExtension`] ([`SkipCheckIfFeeless`]) that would accept the //! existing one as an argument. #![cfg_attr(not(feature = "std"), no_std)] @@ -40,11 +40,14 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::{CheckIfFeeless, DispatchResult}, traits::{IsType, OriginTrait}, + weights::Weight, }; use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ - traits::{DispatchInfoOf, PostDispatchInfoOf, SignedExtension}, - transaction_validity::{TransactionValidity, TransactionValidityError, ValidTransaction}, + traits::{ + DispatchInfoOf, DispatchOriginOf, PostDispatchInfoOf, TransactionExtension, ValidateResult, + }, + transaction_validity::TransactionValidityError, }; #[cfg(test)] @@ -71,11 +74,11 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A transaction fee was skipped. - FeeSkipped { who: T::AccountId }, + FeeSkipped { origin: ::PalletsOrigin }, } } -/// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. +/// A [`TransactionExtension`] that skips the wrapped extension if the dispatchable is feeless. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct SkipCheckIfFeeless(pub S, core::marker::PhantomData); @@ -104,67 +107,83 @@ impl From for SkipCheckIfFeeless { } } -impl> SignedExtension - for SkipCheckIfFeeless +pub enum Intermediate { + /// The wrapped extension should be applied. + Apply(T), + /// The wrapped extension should be skipped. + Skip(O), +} +use Intermediate::*; + +impl> + TransactionExtension for SkipCheckIfFeeless where - S::Call: CheckIfFeeless>, + T::RuntimeCall: CheckIfFeeless>, { - type AccountId = T::AccountId; - type Call = S::Call; - type AdditionalSigned = S::AdditionalSigned; - type Pre = (Self::AccountId, Option<::Pre>); // From the outside this extension should be "invisible", because it just extends the wrapped // extension with an extra check in `pre_dispatch` and `post_dispatch`. Thus, we should forward // the identifier of the wrapped extension to let wallets see this extension as it would only be // the wrapped extension itself. 
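Illustrative wiring only (the runtime and tuple names are hypothetical): the wrapper keeps being used by nesting it around the payment extension in a runtime's extension tuple, now as a `TransactionExtension` rather than a `SignedExtension`:

pub type TxExtension = (
    pallet_skip_feeless_payment::SkipCheckIfFeeless<
        Runtime,
        pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
    >,
);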
const IDENTIFIER: &'static str = S::IDENTIFIER; + type Implicit = S::Implicit; + + fn implicit(&self) -> Result { + self.0.implicit() + } + type Val = + Intermediate as OriginTrait>::PalletsOrigin>; + type Pre = + Intermediate as OriginTrait>::PalletsOrigin>; - fn additional_signed(&self) -> Result { - self.0.additional_signed() + fn weight(&self, call: &T::RuntimeCall) -> frame_support::weights::Weight { + self.0.weight(call) } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: DispatchOriginOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - if call.is_feeless(&::RuntimeOrigin::signed(who.clone())) { - Ok(ValidTransaction::default()) + self_implicit: S::Implicit, + inherited_implication: &impl Encode, + ) -> ValidateResult { + if call.is_feeless(&origin) { + Ok((Default::default(), Skip(origin.caller().clone()), origin)) } else { - self.0.validate(who, call, info, len) + let (x, y, z) = + self.0.validate(origin, call, info, len, self_implicit, inherited_implication)?; + Ok((x, Apply(y), z)) } } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + val: Self::Val, + origin: &DispatchOriginOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, ) -> Result { - if call.is_feeless(&::RuntimeOrigin::signed(who.clone())) { - Ok((who.clone(), None)) - } else { - Ok((who.clone(), Some(self.0.pre_dispatch(who, call, info, len)?))) + match val { + Apply(val) => self.0.prepare(val, origin, call, info, len).map(Apply), + Skip(origin) => Ok(Skip(origin)), } } - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - if let Some(pre) = pre { - if let Some(pre) = pre.1 { - S::post_dispatch(Some(pre), info, post_info, len, result)?; - } else { - Pallet::::deposit_event(Event::::FeeSkipped { who: pre.0 }); - } + ) -> Result { + match pre { + Apply(pre) => S::post_dispatch_details(pre, info, post_info, len, result), + Skip(origin) => { + Pallet::::deposit_event(Event::::FeeSkipped { origin }); + Ok(Weight::zero()) + }, } - Ok(()) } } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs index d6d600f24e77cf840db1fae796fc8edad2133f91..83f7b7dfe2b526c0b4221f0eae799fd56d993ff7 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs @@ -18,9 +18,12 @@ use crate as pallet_skip_feeless_payment; use frame_support::{derive_impl, parameter_types}; use frame_system as system; +use sp_runtime::{ + traits::{DispatchOriginOf, TransactionExtension}, + transaction_validity::ValidTransaction, +}; type Block = frame_system::mocking::MockBlock; -type AccountId = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { @@ -32,42 +35,45 @@ impl Config for Runtime { } parameter_types! 
{ - pub static PreDispatchCount: u32 = 0; + pub static PrepareCount: u32 = 0; pub static ValidateCount: u32 = 0; } #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] pub struct DummyExtension; -impl SignedExtension for DummyExtension { - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl TransactionExtension for DummyExtension { const IDENTIFIER: &'static str = "DummyExtension"; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + type Implicit = (); + type Val = (); + type Pre = (); + + fn weight(&self, _: &RuntimeCall) -> Weight { + Weight::zero() } fn validate( &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + origin: DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { ValidateCount::mutate(|c| *c += 1); - Ok(Default::default()) + Ok((ValidTransaction::default(), (), origin)) } - fn pre_dispatch( + fn prepare( self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, + _val: Self::Val, + _origin: &DispatchOriginOf, + _call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, ) -> Result { - PreDispatchCount::mutate(|c| *c += 1); + PrepareCount::mutate(|c| *c += 1); Ok(()) } } diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs index adee52d6b3cea63055b8cb6627cbd239477cc2d6..666844c883bd6e017103d16b17e81224e80da6d3 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs @@ -15,23 +15,24 @@ use super::*; use crate::mock::{ - pallet_dummy::Call, DummyExtension, PreDispatchCount, Runtime, RuntimeCall, ValidateCount, + pallet_dummy::Call, DummyExtension, PrepareCount, Runtime, RuntimeCall, ValidateCount, }; use frame_support::dispatch::DispatchInfo; +use sp_runtime::traits::DispatchTransaction; #[test] fn skip_feeless_payment_works() { let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); - assert_eq!(PreDispatchCount::get(), 1); + assert_eq!(PrepareCount::get(), 1); let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); SkipCheckIfFeeless::::from(DummyExtension) - .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); - assert_eq!(PreDispatchCount::get(), 1); + assert_eq!(PrepareCount::get(), 1); } #[test] @@ -40,13 +41,42 @@ fn validate_works() { let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate(&0, &call, &DispatchInfo::default(), 0) + .validate_only(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(ValidateCount::get(), 1); + assert_eq!(PrepareCount::get(), 0); let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); SkipCheckIfFeeless::::from(DummyExtension) - .validate(&0, &call, &DispatchInfo::default(), 0) + .validate_only(Some(0).into(), &call, &DispatchInfo::default(), 0) .unwrap(); assert_eq!(ValidateCount::get(), 1); + assert_eq!(PrepareCount::get(), 0); +} + +#[test] +fn 
validate_prepare_works() { + assert_eq!(ValidateCount::get(), 0); + + let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); + SkipCheckIfFeeless::::from(DummyExtension) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(ValidateCount::get(), 1); + assert_eq!(PrepareCount::get(), 1); + + let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); + SkipCheckIfFeeless::::from(DummyExtension) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(ValidateCount::get(), 1); + assert_eq!(PrepareCount::get(), 1); + + // Changes from previous prepare calls persist. + let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); + SkipCheckIfFeeless::::from(DummyExtension) + .validate_and_prepare(Some(0).into(), &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(ValidateCount::get(), 2); + assert_eq!(PrepareCount::get(), 2); } diff --git a/substrate/frame/transaction-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..c5f87fb8c12cb13b608e4a969ca4bd93c2aaae61 --- /dev/null +++ b/substrate/frame/transaction-payment/src/benchmarking.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for Transaction Payment Pallet's transaction extension + +extern crate alloc; + +use super::*; +use crate::Pallet; +use frame_benchmarking::v2::*; +use frame_support::dispatch::{DispatchInfo, PostDispatchInfo}; +use frame_system::{EventRecord, RawOrigin}; +use sp_runtime::traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, Dispatchable}; + +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + +#[benchmarks(where + T: Config, + T::RuntimeOrigin: AsTransactionAuthorizedOrigin, + T::RuntimeCall: Dispatchable, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn charge_transaction_payment() { + let caller: T::AccountId = account("caller", 0, 0); + >::endow_account( + &caller, + >::minimum_balance() * 1000u32.into(), + ); + let tip = >::minimum_balance(); + let ext: ChargeTransactionPayment = ChargeTransactionPayment::from(tip); + let inner = frame_system::Call::remark { remark: alloc::vec![] }; + let call = T::RuntimeCall::from(inner); + let extension_weight = ext.weight(&call); + let info = DispatchInfo { + call_weight: Weight::from_parts(100, 0), + extension_weight, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let mut post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_parts(10, 0)), + pays_fee: Pays::Yes, + }; + + #[block] + { + assert!(ext + .test_run(RawOrigin::Signed(caller.clone()).into(), &call, &info, 10, |_| Ok( + post_info + )) + .unwrap() + .is_ok()); + } + + post_info.actual_weight.as_mut().map(|w| w.saturating_accrue(extension_weight)); + let actual_fee = Pallet::::compute_actual_fee(10, &info, &post_info, tip); + assert_last_event::( + Event::::TransactionFeePaid { who: caller, actual_fee, tip }.into(), + ); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index c17ab393b5d3b2a05cc3a52745a83183e164205f..711189be8d071e34584285ce067053f82a4ff8e8 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -56,28 +56,32 @@ use frame_support::{ }, traits::{Defensive, EstimateCallFee, Get}, weights::{Weight, WeightToFee}, + RuntimeDebugNoBound, }; pub use pallet::*; pub use payment::*; use sp_runtime::{ traits::{ Convert, DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, SaturatedConversion, - Saturating, SignedExtension, Zero, - }, - transaction_validity::{ - TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransaction, + Saturating, TransactionExtension, Zero, }, + transaction_validity::{TransactionPriority, TransactionValidityError, ValidTransaction}, FixedPointNumber, FixedU128, Perbill, Perquintill, RuntimeDebug, }; pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; +pub use weights::WeightInfo; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + mod payment; mod types; +pub mod weights; /// Fee multiplier. pub type Multiplier = FixedU128; @@ -334,6 +338,7 @@ pub mod pallet { type RuntimeEvent = (); type FeeMultiplierUpdate = (); type OperationalFeeMultiplier = (); + type WeightInfo = (); } } @@ -386,6 +391,9 @@ pub mod pallet { /// transactions. #[pallet::constant] type OperationalFeeMultiplier: Get; + + /// The weight information of this pallet. + type WeightInfo: WeightInfo; } #[pallet::type_value] @@ -496,7 +504,7 @@ impl Pallet { /// /// All dispatchables must be annotated with weight and will have some fee info. This function /// always returns. - pub fn query_info( + pub fn query_info( unchecked_extrinsic: Extrinsic, len: u32, ) -> RuntimeDispatchInfo> @@ -510,20 +518,20 @@ impl Pallet { // a very very little potential gain in the future. 
let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); - let partial_fee = if unchecked_extrinsic.is_signed().unwrap_or(false) { - Self::compute_fee(len, &dispatch_info, 0u32.into()) - } else { - // Unsigned extrinsics have no partial fee. + let partial_fee = if unchecked_extrinsic.is_bare() { + // Bare extrinsics have no partial fee. 0u32.into() + } else { + Self::compute_fee(len, &dispatch_info, 0u32.into()) }; - let DispatchInfo { weight, class, .. } = dispatch_info; + let DispatchInfo { class, .. } = dispatch_info; - RuntimeDispatchInfo { weight, class, partial_fee } + RuntimeDispatchInfo { weight: dispatch_info.total_weight(), class, partial_fee } } /// Query the detailed fee of a given `call`. - pub fn query_fee_details( + pub fn query_fee_details( unchecked_extrinsic: Extrinsic, len: u32, ) -> FeeDetails> @@ -534,11 +542,11 @@ impl Pallet { let tip = 0u32.into(); - if unchecked_extrinsic.is_signed().unwrap_or(false) { - Self::compute_fee_details(len, &dispatch_info, tip) - } else { - // Unsigned extrinsics have no inclusion fee. + if unchecked_extrinsic.is_bare() { + // Bare extrinsics have no inclusion fee. FeeDetails { inclusion_fee: None, tip } + } else { + Self::compute_fee_details(len, &dispatch_info, tip) } } @@ -548,10 +556,10 @@ impl Pallet { T::RuntimeCall: Dispatchable + GetDispatchInfo, { let dispatch_info = ::get_dispatch_info(&call); - let DispatchInfo { weight, class, .. } = dispatch_info; + let DispatchInfo { class, .. } = dispatch_info; RuntimeDispatchInfo { - weight, + weight: dispatch_info.total_weight(), class, partial_fee: Self::compute_fee(len, &dispatch_info, 0u32.into()), } @@ -589,7 +597,7 @@ impl Pallet { where T::RuntimeCall: Dispatchable, { - Self::compute_fee_raw(len, info.weight, tip, info.pays_fee, info.class) + Self::compute_fee_raw(len, info.total_weight(), tip, info.pays_fee, info.class) } /// Compute the actual post dispatch fee for a particular transaction. @@ -722,7 +730,7 @@ where who: &T::AccountId, call: &T::RuntimeCall, info: &DispatchInfoOf, - len: usize, + fee: BalanceOf, ) -> Result< ( BalanceOf, @@ -731,7 +739,6 @@ where TransactionValidityError, > { let tip = self.0; - let fee = Pallet::::compute_fee(len as u32, info, tip); <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee( who, call, info, fee, tip, @@ -739,6 +746,22 @@ where .map(|i| (fee, i)) } + fn can_withdraw_fee( + &self, + who: &T::AccountId, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + len: usize, + ) -> Result, TransactionValidityError> { + let tip = self.0; + let fee = Pallet::::compute_fee(len as u32, info, tip); + + <::OnChargeTransaction as OnChargeTransaction>::can_withdraw_fee( + who, call, info, fee, tip, + )?; + Ok(fee) + } + /// Get an appropriate priority for a transaction with the given `DispatchInfo`, encoded length /// and user-included tip. /// @@ -764,7 +787,8 @@ where let max_block_length = *T::BlockLength::get().max.get(info.class) as u64; // bounded_weight is used as a divisor later so we keep it non-zero. - let bounded_weight = info.weight.max(Weight::from_parts(1, 1)).min(max_block_weight); + let bounded_weight = + info.total_weight().max(Weight::from_parts(1, 1)).min(max_block_weight); let bounded_length = (len as u64).clamp(1, max_block_length); // returns the scarce resource, i.e. the one that is limiting the number of transactions. 
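A small sketch of the `DispatchInfo` shape these fee and priority calculations now rely on (field values are arbitrary; `total_weight()` is the sum used by `compute_fee` and `get_priority`):

use frame_support::dispatch::{DispatchClass, DispatchInfo, Pays};
use frame_support::weights::Weight;

let info = DispatchInfo {
    call_weight: Weight::from_parts(100, 0),
    extension_weight: Weight::from_parts(10, 0),
    class: DispatchClass::Normal,
    pays_fee: Pays::Yes,
};
assert_eq!(info.total_weight(), Weight::from_parts(110, 0));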
@@ -825,68 +849,130 @@ impl core::fmt::Debug for ChargeTransactionPayment { } } -impl SignedExtension for ChargeTransactionPayment +/// The info passed between the validate and prepare steps for the `ChargeAssetTxPayment` extension. +#[derive(RuntimeDebugNoBound)] +pub enum Val { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // transaction fee + fee: BalanceOf, + }, + NoCharge, +} + +/// The info passed between the prepare and post-dispatch steps for the `ChargeAssetTxPayment` +/// extension. +pub enum Pre { + Charge { + tip: BalanceOf, + // who paid the fee + who: T::AccountId, + // imbalance resulting from withdrawing the fee + imbalance: <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, + }, + NoCharge { + // weight initially estimated by the extension, to be refunded + refund: Weight, + }, +} + +impl core::fmt::Debug for Pre { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + match self { + Pre::Charge { tip, who, imbalance: _ } => { + write!(f, "Charge {{ tip: {:?}, who: {:?}, imbalance: }}", tip, who) + }, + Pre::NoCharge { refund } => write!(f, "NoCharge {{ refund: {:?} }}", refund), + } + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + f.write_str("") + } +} + +impl TransactionExtension for ChargeTransactionPayment where - BalanceOf: Send + Sync + From, T::RuntimeCall: Dispatchable, { const IDENTIFIER: &'static str = "ChargeTransactionPayment"; - type AccountId = T::AccountId; - type Call = T::RuntimeCall; - type AdditionalSigned = (); - type Pre = ( - // tip - BalanceOf, - // who paid the fee - this is an option to allow for a Default impl. - Self::AccountId, - // imbalance resulting from withdrawing the fee - <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, - ); - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) + type Implicit = (); + type Val = Val; + type Pre = Pre; + + fn weight(&self, _: &T::RuntimeCall) -> Weight { + T::WeightInfo::charge_transaction_payment() } fn validate( &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, - ) -> TransactionValidity { - let (final_fee, _) = self.withdraw_fee(who, call, info, len)?; + _: (), + _implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + let Ok(who) = frame_system::ensure_signed(origin.clone()) else { + return Ok((ValidTransaction::default(), Val::NoCharge, origin)); + }; + let final_fee = self.can_withdraw_fee(&who, call, info, len)?; let tip = self.0; - Ok(ValidTransaction { - priority: Self::get_priority(info, len, tip, final_fee), - ..Default::default() - }) + Ok(( + ValidTransaction { + priority: Self::get_priority(info, len, tip, final_fee), + ..Default::default() + }, + Val::Charge { tip: self.0, who, fee: final_fee }, + origin, + )) } - fn pre_dispatch( + fn prepare( self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + val: Self::Val, + _origin: &::RuntimeOrigin, + call: &T::RuntimeCall, + info: &DispatchInfoOf, + _len: usize, ) -> Result { - let (_fee, imbalance) = self.withdraw_fee(who, call, info, len)?; - Ok((self.0, who.clone(), imbalance)) + match val { + Val::Charge { tip, who, fee } => { + // Mutating call to `withdraw_fee` to actually charge for the transaction. 
+ let (_final_fee, imbalance) = self.withdraw_fee(&who, call, info, fee)?; + Ok(Pre::Charge { tip, who, imbalance }) + }, + Val::NoCharge => Ok(Pre::NoCharge { refund: self.weight(call) }), + } } - fn post_dispatch( - maybe_pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, _result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - if let Some((tip, who, imbalance)) = maybe_pre { - let actual_fee = Pallet::::compute_actual_fee(len as u32, info, post_info, tip); - T::OnChargeTransaction::correct_and_deposit_fee( - &who, info, post_info, actual_fee, tip, imbalance, - )?; - Pallet::::deposit_event(Event::::TransactionFeePaid { who, actual_fee, tip }); - } - Ok(()) + ) -> Result { + let (tip, who, imbalance) = match pre { + Pre::Charge { tip, who, imbalance } => (tip, who, imbalance), + Pre::NoCharge { refund } => { + // No-op: Refund everything + return Ok(refund) + }, + }; + let actual_fee = Pallet::::compute_actual_fee(len as u32, info, &post_info, tip); + T::OnChargeTransaction::correct_and_deposit_fee( + &who, info, &post_info, actual_fee, tip, imbalance, + )?; + Pallet::::deposit_event(Event::::TransactionFeePaid { who, actual_fee, tip }); + Ok(Weight::zero()) } } diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 8767024ee235308b6515ed041b76fe419b7628f2..3995c41e8b197afb97adc9540c421d4e5a4d6082 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -119,6 +119,15 @@ impl OnUnbalanced::AccountId, } } +/// Weights used in testing. +pub struct MockWeights; + +impl WeightInfo for MockWeights { + fn charge_transaction_payment() -> Weight { + Weight::from_parts(10, 0) + } +} + impl Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = FungibleAdapter; @@ -126,4 +135,14 @@ impl Config for Runtime { type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; type FeeMultiplierUpdate = (); + type WeightInfo = MockWeights; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + crate::tests::ExtBuilder::default() + .base_weight(Weight::from_parts(100, 0)) + .byte_fee(10) + .balance_factor(0) + .build() } diff --git a/substrate/frame/transaction-payment/src/payment.rs b/substrate/frame/transaction-payment/src/payment.rs index 0fe616782903c822eb1f1af19323af75f442adb2..4b39cd3fe53bbc6a68017ae7ba2badf9f9b1a8d6 100644 --- a/substrate/frame/transaction-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/src/payment.rs @@ -20,14 +20,14 @@ use crate::Config; use core::marker::PhantomData; use sp_runtime::{ - traits::{DispatchInfoOf, PostDispatchInfoOf, Saturating, Zero}, + traits::{CheckedSub, DispatchInfoOf, PostDispatchInfoOf, Saturating, Zero}, transaction_validity::InvalidTransaction, }; use frame_support::{ traits::{ fungible::{Balanced, Credit, Debt, Inspect}, - tokens::Precision, + tokens::{Precision, WithdrawConsequence}, Currency, ExistenceRequirement, Imbalance, OnUnbalanced, WithdrawReasons, }, unsigned::TransactionValidityError, @@ -55,6 +55,17 @@ pub trait OnChargeTransaction { tip: Self::Balance, ) -> Result; + /// Check if the predicted fee from the transaction origin can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. 
+ fn can_withdraw_fee( + who: &T::AccountId, + call: &T::RuntimeCall, + dispatch_info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError>; + /// After the transaction was executed the actual fee can be calculated. /// This function should refund any overpaid fees and optionally deposit /// the corrected amount. @@ -68,6 +79,12 @@ pub trait OnChargeTransaction { tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, ) -> Result<(), TransactionValidityError>; + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance); + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance; } /// Implements transaction payment for a pallet implementing the [`frame_support::traits::fungible`] @@ -110,6 +127,23 @@ where } } + fn can_withdraw_fee( + who: &T::AccountId, + _call: &T::RuntimeCall, + _dispatch_info: &DispatchInfoOf, + fee: Self::Balance, + _tip: Self::Balance, + ) -> Result<(), TransactionValidityError> { + if fee.is_zero() { + return Ok(()) + } + + match F::can_withdraw(who, fee) { + WithdrawConsequence::Success => Ok(()), + _ => Err(InvalidTransaction::Payment.into()), + } + } + fn correct_and_deposit_fee( who: &::AccountId, _dispatch_info: &DispatchInfoOf<::RuntimeCall>, @@ -141,6 +175,16 @@ where Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance) { + let _ = F::deposit(who, amount, Precision::BestEffort); + } + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance { + F::minimum_balance() + } } /// Implements the transaction payment for a pallet implementing the [`Currency`] @@ -202,6 +246,33 @@ where } } + /// Check if the predicted fee from the transaction origin can be withdrawn. + /// + /// Note: The `fee` already includes the `tip`. + fn can_withdraw_fee( + who: &T::AccountId, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + fee: Self::Balance, + tip: Self::Balance, + ) -> Result<(), TransactionValidityError> { + if fee.is_zero() { + return Ok(()) + } + + let withdraw_reason = if tip.is_zero() { + WithdrawReasons::TRANSACTION_PAYMENT + } else { + WithdrawReasons::TRANSACTION_PAYMENT | WithdrawReasons::TIP + }; + + let new_balance = + C::free_balance(who).checked_sub(&fee).ok_or(InvalidTransaction::Payment)?; + C::ensure_can_withdraw(who, fee, withdraw_reason, new_balance) + .map(|_| ()) + .map_err(|_| InvalidTransaction::Payment.into()) + } + /// Hand the fee and the tip over to the `[OnUnbalanced]` implementation. /// Since the predicted fee might have been too high, parts of the fee may /// be refunded. 
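How the dry-run and the real withdrawal are exercised end to end, sketched in the style of the transaction-payment unit tests further below (mock items such as `Runtime`, `CALL` and `info_from_weight` come from that test module):

use sp_runtime::traits::DispatchTransaction;

let info = info_from_weight(Weight::from_parts(5, 0));
// `validate_only` drives `can_withdraw_fee`: the balance is checked but never touched.
assert_ok!(ChargeTransactionPayment::<Runtime>::from(0).validate_only(
    Some(1).into(),
    CALL,
    &info,
    10
));
// `validate_and_prepare` additionally runs `prepare`, which calls `withdraw_fee` and moves
// the funds for real.
assert_ok!(ChargeTransactionPayment::<Runtime>::from(0).validate_and_prepare(
    Some(1).into(),
    CALL,
    &info,
    10
));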
@@ -234,4 +305,14 @@ where } Ok(()) } + + #[cfg(feature = "runtime-benchmarks")] + fn endow_account(who: &T::AccountId, amount: Self::Balance) { + let _ = C::deposit_creating(who, amount); + } + + #[cfg(feature = "runtime-benchmarks")] + fn minimum_balance() -> Self::Balance { + C::minimum_balance() + } } diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index bac89967d6af89040d6ddf763825fa525009276c..e8f5ab99529f9d06b3b24d74eb215bee9e988bbb 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -21,13 +21,16 @@ use crate as pallet_transaction_payment; use codec::Encode; use sp_runtime::{ - testing::TestXt, traits::One, transaction_validity::InvalidTransaction, BuildStorage, + generic::UncheckedExtrinsic, + traits::{DispatchTransaction, One}, + transaction_validity::InvalidTransaction, + BuildStorage, }; use frame_support::{ - assert_noop, assert_ok, + assert_ok, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, - traits::Currency, + traits::{Currency, OriginTrait}, weights::Weight, }; use frame_system as system; @@ -113,7 +116,7 @@ impl ExtBuilder { /// create a transaction info struct from weight. Handy to avoid building the whole struct. pub fn info_from_weight(w: Weight) -> DispatchInfo { // pays_fee: Pays::Yes -- class: DispatchClass::Normal - DispatchInfo { weight: w, ..Default::default() } + DispatchInfo { call_weight: w, ..Default::default() } } fn post_info_from_weight(w: Weight) -> PostDispatchInfo { @@ -128,88 +131,82 @@ fn default_post_info() -> PostDispatchInfo { PostDispatchInfo { actual_weight: None, pays_fee: Default::default() } } +type Ext = ChargeTransactionPayment; + #[test] -fn signed_extension_transaction_payment_work() { +fn transaction_extension_transaction_payment_work() { ExtBuilder::default() .balance_factor(10) .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(5, 0)), len) - .unwrap(); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(5, 0)), - &default_post_info(), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - assert_eq!(FeeUnbalancedAmount::get(), 5 + 5 + 10); + let mut info = info_from_weight(Weight::from_parts(5, 0)); + let ext = Ext::from(0); + let ext_weight = ext.weight(CALL); + info.extension_weight = ext_weight; + ext.test_run(Some(1).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10 - 10); + Ok(default_post_info()) + }) + .unwrap() + .unwrap(); + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10 - 10); + assert_eq!(FeeUnbalancedAmount::get(), 5 + 5 + 10 + 10); assert_eq!(TipUnbalancedAmount::get(), 0); FeeUnbalancedAmount::mutate(|a| *a = 0); - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let mut info = info_from_weight(Weight::from_parts(100, 0)); + info.extension_weight = ext_weight; + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 10 - 5); + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 
- 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); - assert_eq!(FeeUnbalancedAmount::get(), 5 + 10 + 50); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 10 - 5); + assert_eq!(FeeUnbalancedAmount::get(), 5 + 10 + 50 + 10); assert_eq!(TipUnbalancedAmount::get(), 5); }); } #[test] -fn signed_extension_transaction_payment_multiplied_refund_works() { +fn transaction_extension_transaction_payment_multiplied_refund_works() { ExtBuilder::default() .balance_factor(10) .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) - .unwrap(); - // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); - // 75 (3/2 of the returned 50 units of weight) is refunded - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); + let len = 10; + let origin = Some(2).into(); + let mut info = info_from_weight(Weight::from_parts(100, 0)); + let ext = Ext::from(5 /* tipped */); + let ext_weight = ext.weight(CALL); + info.extension_weight = ext_weight; + ext.test_run(origin, CALL, &info, len, |_| { + // 5 base fee, 10 byte fee, 3/2 * (100 call weight fee + 10 ext weight fee), 5 + // tip + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 165 - 5); + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() + .unwrap(); + + // 75 (3/2 of the returned 50 units of call weight, 0 returned of ext weight) is + // refunded + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - (165 - 75) - 5); }); } #[test] -fn signed_extension_transaction_payment_is_bounded() { +fn transaction_extension_transaction_payment_is_bounded() { ExtBuilder::default().balance_factor(1000).byte_fee(0).build().execute_with(|| { // maximum weight possible - assert_ok!(ChargeTransactionPayment::::from(0).pre_dispatch( - &1, - CALL, - &info_from_weight(Weight::MAX), - 10 - )); + let info = info_from_weight(Weight::MAX); + assert_ok!(Ext::from(0).validate_and_prepare(Some(1).into(), CALL, &info, 10)); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), @@ -220,7 +217,7 @@ fn signed_extension_transaction_payment_is_bounded() { } #[test] -fn signed_extension_allows_free_transactions() { +fn transaction_extension_allows_free_transactions() { ExtBuilder::default() .base_weight(Weight::from_parts(100, 0)) .balance_factor(0) @@ -232,38 +229,30 @@ fn signed_extension_allows_free_transactions() { let len = 100; // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. 
- let operational_transaction = DispatchInfo { - weight: Weight::from_parts(0, 0), + let op_tx = DispatchInfo { + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::No, }; - assert_ok!(ChargeTransactionPayment::::from(0).validate( - &1, - CALL, - &operational_transaction, - len - )); + assert_ok!(Ext::from(0).validate_only(Some(1).into(), CALL, &op_tx, len)); // like a InsecureFreeNormal - let free_transaction = DispatchInfo { - weight: Weight::from_parts(0, 0), + let free_tx = DispatchInfo { + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - assert_noop!( - ChargeTransactionPayment::::from(0).validate( - &1, - CALL, - &free_transaction, - len - ), + assert_eq!( + Ext::from(0).validate_only(Some(1).into(), CALL, &free_tx, len).unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Payment), ); }); } #[test] -fn signed_ext_length_fee_is_also_updated_per_congestion() { +fn transaction_ext_length_fee_is_also_updated_per_congestion() { ExtBuilder::default() .base_weight(Weight::from_parts(5, 0)) .balance_factor(10) @@ -272,18 +261,15 @@ fn signed_ext_length_fee_is_also_updated_per_congestion() { // all fees should be x1.5 NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; - - assert_ok!( - ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_parts(3, 0)), len) - ); + let info = info_from_weight(Weight::from_parts(3, 0)); + assert_ok!(Ext::from(10).validate_and_prepare(Some(1).into(), CALL, &info, len)); assert_eq!( Balances::free_balance(1), 100 // original - - 10 // tip - - 5 // base - - 10 // len - - (3 * 3 / 2) // adjusted weight + - 10 // tip + - 5 // base + - 10 // len + - (3 * 3 / 2) // adjusted weight ); }) } @@ -293,62 +279,62 @@ fn query_info_and_fee_details_works() { let call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 2, value: 69 }); let origin = 111111; let extra = (); - let xt = TestXt::new(call.clone(), Some((origin, extra))); + let xt = UncheckedExtrinsic::new_signed(call.clone(), origin, (), extra); let info = xt.get_dispatch_info(); let ext = xt.encode(); let len = ext.len() as u32; - let unsigned_xt = TestXt::<_, ()>::new(call, None); + let unsigned_xt = UncheckedExtrinsic::::new_bare(call); let unsigned_xt_info = unsigned_xt.get_dispatch_info(); ExtBuilder::default() - .base_weight(Weight::from_parts(5, 0)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); - - assert_eq!( - TransactionPayment::query_info(xt.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ - + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); - - assert_eq!( - TransactionPayment::query_info(unsigned_xt.clone(), len), - RuntimeDispatchInfo { - weight: unsigned_xt_info.weight, - class: unsigned_xt_info.class, - partial_fee: 0, - }, - ); - - assert_eq!( - TransactionPayment::query_fee_details(xt, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, - len_fee: len as u64, - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 - }), - tip: 0, - }, - ); - - assert_eq!( - 
TransactionPayment::query_fee_details(unsigned_xt, len), - FeeDetails { inclusion_fee: None, tip: 0 }, - ); - }); + .base_weight(Weight::from_parts(5, 0)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_info(xt.clone(), len), + RuntimeDispatchInfo { + weight: info.total_weight(), + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.total_weight().min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_info(unsigned_xt.clone(), len), + RuntimeDispatchInfo { + weight: unsigned_xt_info.call_weight, + class: unsigned_xt_info.class, + partial_fee: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(xt, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, + len_fee: len as u64, + adjusted_weight_fee: info + .total_weight() + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 + }), + tip: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(unsigned_xt, len), + FeeDetails { inclusion_fee: None, tip: 0 }, + ); + }); } #[test] @@ -359,39 +345,39 @@ fn query_call_info_and_fee_details_works() { let len = encoded_call.len() as u32; ExtBuilder::default() - .base_weight(Weight::from_parts(5, 0)) - .weight_fee(2) - .build() - .execute_with(|| { - // all fees should be x1.5 - NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); - - assert_eq!( - TransactionPayment::query_call_info(call.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ - + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ - }, - ); - - assert_eq!( - TransactionPayment::query_call_fee_details(call, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, /* base * weight_fee */ - len_fee: len as u64, /* len * 1 */ - adjusted_weight_fee: info - .weight - .min(BlockWeights::get().max_block) - .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multiplier */ - }), - tip: 0, - }, - ); - }); + .base_weight(Weight::from_parts(5, 0)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_call_info(call.clone(), len), + RuntimeDispatchInfo { + weight: info.total_weight(), + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.total_weight().min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_call_fee_details(call, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, /* base * weight_fee */ + len_fee: len as u64, /* len * 1 */ + adjusted_weight_fee: info + .total_weight() + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multiplier */ + }), + tip: 0, + }, + ); + }); } #[test] @@ -407,14 +393,16 @@ fn compute_fee_works_without_multiplier() { // Tip only, no fees works let dispatch_info = DispatchInfo { - weight: Weight::from_parts(0, 0), + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::No, }; 
assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 10), 10); // No tip, only base fee works let dispatch_info = DispatchInfo { - weight: Weight::from_parts(0, 0), + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -425,7 +413,8 @@ fn compute_fee_works_without_multiplier() { assert_eq!(Pallet::::compute_fee(42, &dispatch_info, 0), 520); // Weight fee + base fee works let dispatch_info = DispatchInfo { - weight: Weight::from_parts(1000, 0), + call_weight: Weight::from_parts(1000, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -445,7 +434,8 @@ fn compute_fee_works_with_multiplier() { NextFeeMultiplier::::put(Multiplier::saturating_from_rational(3, 2)); // Base fee is unaffected by multiplier let dispatch_info = DispatchInfo { - weight: Weight::from_parts(0, 0), + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -453,7 +443,8 @@ fn compute_fee_works_with_multiplier() { // Everything works together :) let dispatch_info = DispatchInfo { - weight: Weight::from_parts(123, 0), + call_weight: Weight::from_parts(123, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -478,7 +469,8 @@ fn compute_fee_works_with_negative_multiplier() { // Base fee is unaffected by multiplier. let dispatch_info = DispatchInfo { - weight: Weight::from_parts(0, 0), + call_weight: Weight::from_parts(0, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -486,7 +478,8 @@ fn compute_fee_works_with_negative_multiplier() { // Everything works together. let dispatch_info = DispatchInfo { - weight: Weight::from_parts(123, 0), + call_weight: Weight::from_parts(123, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -508,7 +501,8 @@ fn compute_fee_does_not_overflow() { .execute_with(|| { // Overflow is handled let dispatch_info = DispatchInfo { - weight: Weight::MAX, + call_weight: Weight::MAX, + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -528,27 +522,23 @@ fn refund_does_not_recreate_account() { .execute_with(|| { // So events are emitted System::set_block_number(10); - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, |origin| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + // kill the account between pre and post dispatch + assert_ok!(Balances::transfer_allow_death( + origin, + 3, + Balances::free_balance(2) + )); + assert_eq!(Balances::free_balance(2), 0); + + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - // kill the account between pre and post dispatch - assert_ok!(Balances::transfer_allow_death( - Some(2).into(), - 3, - Balances::free_balance(2) - )); - assert_eq!(Balances::free_balance(2), 0); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(50, 0)), - len, - &Ok(()) - )); assert_eq!(Balances::free_balance(2), 0); // Transfer 
Event System::assert_has_event(RuntimeEvent::Balances(pallet_balances::Event::Transfer { @@ -570,20 +560,15 @@ fn actual_weight_higher_than_max_refunds_nothing() { .base_weight(Weight::from_parts(5, 0)) .build() .execute_with(|| { - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_parts(100, 0)), len) + let info = info_from_weight(Weight::from_parts(100, 0)); + Ext::from(5 /* tipped */) + .test_run(Some(2).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + Ok(post_info_from_weight(Weight::from_parts(101, 0))) + }) + .unwrap() .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info_from_weight(Weight::from_parts(100, 0)), - &post_info_from_weight(Weight::from_parts(101, 0)), - len, - &Ok(()) - )); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); }); } @@ -596,25 +581,21 @@ fn zero_transfer_on_free_transaction() { .execute_with(|| { // So events are emitted System::set_block_number(10); - let len = 10; - let dispatch_info = DispatchInfo { - weight: Weight::from_parts(100, 0), + let info = DispatchInfo { + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), pays_fee: Pays::No, class: DispatchClass::Normal, }; let user = 69; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&user, CALL, &dispatch_info, len) + Ext::from(0) + .test_run(Some(user).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::total_balance(&user), 0); + Ok(default_post_info()) + }) + .unwrap() .unwrap(); assert_eq!(Balances::total_balance(&user), 0); - assert_ok!(ChargeTransactionPayment::::post_dispatch( - Some(pre), - &dispatch_info, - &default_post_info(), - len, - &Ok(()) - )); - assert_eq!(Balances::total_balance(&user), 0); // TransactionFeePaid Event System::assert_has_event(RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { @@ -633,33 +614,33 @@ fn refund_consistent_with_actual_weight() { .base_weight(Weight::from_parts(7, 0)) .build() .execute_with(|| { - let info = info_from_weight(Weight::from_parts(100, 0)); - let post_info = post_info_from_weight(Weight::from_parts(33, 0)); + let mut info = info_from_weight(Weight::from_parts(100, 0)); + let tip = 5; + let ext = Ext::from(tip); + let ext_weight = ext.weight(CALL); + info.extension_weight = ext_weight; + let mut post_info = post_info_from_weight(Weight::from_parts(33, 0)); let prev_balance = Balances::free_balance(2); let len = 10; - let tip = 5; NextFeeMultiplier::::put(Multiplier::saturating_from_rational(5, 4)); - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + let actual_post_info = ext + .test_run(Some(2).into(), CALL, &info, len, |_| Ok(post_info)) + .unwrap() .unwrap(); - - ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info, - &post_info, - len, - &Ok(()), - ) - .unwrap(); + post_info + .actual_weight + .as_mut() + .map(|w| w.saturating_accrue(Ext::from(tip).weight(CALL))); + assert_eq!(post_info, actual_post_info); let refund_based_fee = prev_balance - Balances::free_balance(2); let actual_fee = - Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); + Pallet::::compute_actual_fee(len as u32, &info, &actual_post_info, tip); - // 33 weight, 10 length, 7 base, 5 tip - assert_eq!(actual_fee, 7 + 10 + (33 * 5 / 4) + 5); + // 33 call weight, 10 ext weight, 10 length, 7 base, 5 
tip + assert_eq!(actual_fee, 7 + 10 + ((33 + 10) * 5 / 4) + 5); assert_eq!(refund_based_fee, actual_fee); }); } @@ -671,41 +652,35 @@ fn should_alter_operational_priority() { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let normal = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 60); - let priority = ChargeTransactionPayment::(2 * tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; - + let ext = Ext::from(2 * tip); + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 110); }); ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 5810); - let priority = ChargeTransactionPayment::(2 * tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + let ext = Ext::from(2 * tip); + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 6110); }); } @@ -717,28 +692,25 @@ fn no_tip_has_some_priority() { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let normal = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; - + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &normal, len).unwrap().0.priority; assert_eq!(priority, 10); }); ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - let priority = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + let priority = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; assert_eq!(priority, 5510); }); } @@ -746,34 +718,32 @@ fn no_tip_has_some_priority() { #[test] fn higher_tip_have_higher_priority() { let get_priorities = |tip: u64| { - let mut priority1 = 0; - let mut priority2 = 0; + let mut pri1 = 0; + let mut pri2 = 0; let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { let normal = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - priority1 = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &normal, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + pri1 = ext.validate_only(Some(2).into(), CALL, 
&normal, len).unwrap().0.priority; }); ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: Weight::from_parts(100, 0), + call_weight: Weight::from_parts(100, 0), + extension_weight: Weight::zero(), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; - priority2 = ChargeTransactionPayment::(tip) - .validate(&2, CALL, &op, len) - .unwrap() - .priority; + let ext = Ext::from(tip); + pri2 = ext.validate_only(Some(2).into(), CALL, &op, len).unwrap().0.priority; }); - (priority1, priority2) + (pri1, pri2) }; let mut prev_priorities = get_priorities(0); @@ -801,19 +771,11 @@ fn post_info_can_change_pays_fee() { NextFeeMultiplier::::put(Multiplier::saturating_from_rational(5, 4)); - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + let post_info = ChargeTransactionPayment::::from(tip) + .test_run(Some(2).into(), CALL, &info, len, |_| Ok(post_info)) + .unwrap() .unwrap(); - ChargeTransactionPayment::::post_dispatch( - Some(pre), - &info, - &post_info, - len, - &Ok(()), - ) - .unwrap(); - let refund_based_fee = prev_balance - Balances::free_balance(2); let actual_fee = Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); @@ -843,3 +805,40 @@ fn genesis_default_works() { assert_eq!(NextFeeMultiplier::::get(), Multiplier::saturating_from_integer(1)); }); } + +#[test] +fn no_fee_and_no_weight_for_other_origins() { + ExtBuilder::default().build().execute_with(|| { + let ext = Ext::from(0); + + let mut info = CALL.get_dispatch_info(); + info.extension_weight = ext.weight(CALL); + + // Ensure we test the refund. + assert!(info.extension_weight != Weight::zero()); + + let len = CALL.encoded_size(); + + let origin = frame_system::RawOrigin::Root.into(); + let (pre, origin) = ext.validate_and_prepare(origin, CALL, &info, len).unwrap(); + + assert!(origin.as_system_ref().unwrap().is_root()); + + let pd_res = Ok(()); + let mut post_info = frame_support::dispatch::PostDispatchInfo { + actual_weight: Some(info.total_weight()), + pays_fee: Default::default(), + }; + + >::post_dispatch( + pre, + &info, + &mut post_info, + len, + &pd_res, + ) + .unwrap(); + + assert_eq!(post_info.actual_weight, Some(info.call_weight)); + }) +} diff --git a/substrate/frame/transaction-payment/src/types.rs b/substrate/frame/transaction-payment/src/types.rs index 67c7311d0cab5bf8e8e4f83ac874a49279e42d14..d6b4a6557447d4022c95787fadd59f90cf2a2f82 100644 --- a/substrate/frame/transaction-payment/src/types.rs +++ b/substrate/frame/transaction-payment/src/types.rs @@ -111,7 +111,7 @@ pub struct RuntimeDispatchInfo /// The inclusion fee of this dispatch. /// /// This does not include a tip or anything else that - /// depends on the signature (i.e. depends on a `SignedExtension`). + /// depends on the signature (i.e. depends on a `TransactionExtension`). #[cfg_attr(feature = "std", serde(with = "serde_balance"))] pub partial_fee: Balance, } diff --git a/substrate/frame/transaction-payment/src/weights.rs b/substrate/frame/transaction-payment/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..bcffb2eb331acf77f694f74510f61d8653db0f95 --- /dev/null +++ b/substrate/frame/transaction-payment/src/weights.rs @@ -0,0 +1,92 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/production/substrate-node +// benchmark +// pallet +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./substrate/frame/transaction-payment/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_transaction_payment`. +pub trait WeightInfo { + fn charge_transaction_payment() -> Weight; +} + +/// Weights for `pallet_transaction_payment` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 40_506_000 picoseconds. + Weight::from_parts(41_647_000, 1733) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Authorship::Author` (r:1 w:0) + /// Proof: `Authorship::Author` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:0) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `248` + // Estimated: `1733` + // Minimum execution time: 40_506_000 picoseconds. 
+ Weight::from_parts(41_647_000, 1733) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } +} diff --git a/substrate/frame/uniques/src/lib.rs b/substrate/frame/uniques/src/lib.rs index dc27c335623406e21e811c0af8ed2e626908637c..84f122c08bb7b4c10ee177d78dcfb5e69501e201 100644 --- a/substrate/frame/uniques/src/lib.rs +++ b/substrate/frame/uniques/src/lib.rs @@ -223,7 +223,7 @@ pub mod pallet { #[pallet::storage] #[pallet::storage_prefix = "ClassMetadataOf"] /// Metadata of a collection. - pub(super) type CollectionMetadataOf, I: 'static = ()> = StorageMap< + pub type CollectionMetadataOf, I: 'static = ()> = StorageMap< _, Blake2_128Concat, T::CollectionId, @@ -234,7 +234,7 @@ pub mod pallet { #[pallet::storage] #[pallet::storage_prefix = "InstanceMetadataOf"] /// Metadata of an item. - pub(super) type ItemMetadataOf, I: 'static = ()> = StorageDoubleMap< + pub type ItemMetadataOf, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, T::CollectionId, diff --git a/substrate/frame/uniques/src/types.rs b/substrate/frame/uniques/src/types.rs index a2e804f245f77ea4ca120af2318ebdfd7f6d89ef..e2e170c72f21ae65912147546f789db74b60550b 100644 --- a/substrate/frame/uniques/src/types.rs +++ b/substrate/frame/uniques/src/types.rs @@ -40,26 +40,26 @@ pub(super) type ItemPrice = #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct CollectionDetails { /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. - pub(super) owner: AccountId, + pub owner: AccountId, /// Can mint tokens. - pub(super) issuer: AccountId, + pub issuer: AccountId, /// Can thaw tokens, force transfers and burn tokens from any account. - pub(super) admin: AccountId, + pub admin: AccountId, /// Can freeze tokens. - pub(super) freezer: AccountId, + pub freezer: AccountId, /// The total balance deposited for the all storage associated with this collection. /// Used by `destroy`. - pub(super) total_deposit: DepositBalance, + pub total_deposit: DepositBalance, /// If `true`, then no deposit is needed to hold items of this collection. - pub(super) free_holding: bool, + pub free_holding: bool, /// The total number of outstanding items of this collection. - pub(super) items: u32, + pub items: u32, /// The total number of outstanding item metadata of this collection. - pub(super) item_metadatas: u32, + pub item_metadatas: u32, /// The total number of attributes for this collection. - pub(super) attributes: u32, + pub attributes: u32, /// Whether the collection is frozen for non-admin transfers. - pub(super) is_frozen: bool, + pub is_frozen: bool, } /// Witness data for the destroy transactions. @@ -90,14 +90,14 @@ impl CollectionDetails { #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] pub struct ItemDetails { /// The owner of this item. - pub(super) owner: AccountId, + pub owner: AccountId, /// The approved transferrer of this item, if one is set. - pub(super) approved: Option, + pub approved: Option, /// Whether the item can be transferred or not. - pub(super) is_frozen: bool, + pub is_frozen: bool, /// The amount held in the pallet's default account for this item. Free-hold items will have /// this as zero. - pub(super) deposit: DepositBalance, + pub deposit: DepositBalance, } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] @@ -107,13 +107,13 @@ pub struct CollectionMetadata> { /// The balance deposited for this metadata. /// /// This pays for the data stored in this struct. 
- pub(super) deposit: DepositBalance, + pub deposit: DepositBalance, /// General information concerning this collection. Limited in length by `StringLimit`. This /// will generally be either a JSON dump or the hash of some JSON which can be found on a /// hash-addressable global publication system such as IPFS. - pub(super) data: BoundedVec, + pub data: BoundedVec, /// Whether the collection's metadata may be changed by a non Force origin. - pub(super) is_frozen: bool, + pub is_frozen: bool, } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default, TypeInfo, MaxEncodedLen)] @@ -123,11 +123,11 @@ pub struct ItemMetadata> { /// The balance deposited for this metadata. /// /// This pays for the data stored in this struct. - pub(super) deposit: DepositBalance, + pub deposit: DepositBalance, /// General information concerning this item. Limited in length by `StringLimit`. This will /// generally be either a JSON dump or the hash of some JSON which can be found on a /// hash-addressable global publication system such as IPFS. - pub(super) data: BoundedVec, + pub data: BoundedVec, /// Whether the item metadata may be changed by a non Force origin. - pub(super) is_frozen: bool, + pub is_frozen: bool, } diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs index a4f66298f3fa24bba5a33d6d48cff74a7e2fb20a..26c38d1f0459d77ae7fe61b8907cb3b19ffaa008 100644 --- a/substrate/frame/utility/src/lib.rs +++ b/substrate/frame/utility/src/lib.rs @@ -249,7 +249,7 @@ pub mod pallet { T::WeightInfo::as_derivative() // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) - .saturating_add(dispatch_info.weight), + .saturating_add(dispatch_info.call_weight), dispatch_info.class, ) })] @@ -354,7 +354,7 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::dispatch_as() - .saturating_add(dispatch_info.weight), + .saturating_add(dispatch_info.call_weight), dispatch_info.class, ) })] @@ -466,7 +466,7 @@ pub mod pallet { (Weight::zero(), DispatchClass::Operational), |(total_weight, dispatch_class): (Weight, DispatchClass), di| { ( - total_weight.saturating_add(di.weight), + total_weight.saturating_add(di.call_weight), // If not all are `Operational`, we want to use `DispatchClass::Normal`. 
if di.class == DispatchClass::Normal { di.class } else { dispatch_class }, ) diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs index 9755efaea41a2f7eee8ee230729e5c2d9d1534fa..274a90d77cf06c90fba30fe1b74c97e2f4530ee5 100644 --- a/substrate/frame/utility/src/tests.rs +++ b/substrate/frame/utility/src/tests.rs @@ -296,7 +296,7 @@ fn as_derivative_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); @@ -308,7 +308,7 @@ fn as_derivative_handles_weight_refund() { let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); // Diff is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff); // Full weight when err let inner_call = call_foobar(true, start_weight, None); @@ -323,7 +323,7 @@ fn as_derivative_handles_weight_refund() { DispatchErrorWithPostInfo { post_info: PostDispatchInfo { // No weight is refunded - actual_weight: Some(info.weight), + actual_weight: Some(info.call_weight), pays_fee: Pays::Yes, }, error: DispatchError::Other("The cake is a lie."), @@ -343,7 +343,7 @@ fn as_derivative_handles_weight_refund() { DispatchErrorWithPostInfo { post_info: PostDispatchInfo { // Diff is refunded - actual_weight: Some(info.weight - diff), + actual_weight: Some(info.call_weight - diff), pays_fee: Pays::Yes, }, error: DispatchError::Other("The cake is a lie."), @@ -456,14 +456,14 @@ fn batch_weight_calculation_doesnt_overflow() { let big_call = RuntimeCall::RootTesting(RootTestingCall::fill_block { ratio: Perbill::from_percent(50), }); - assert_eq!(big_call.get_dispatch_info().weight, Weight::MAX / 2); + assert_eq!(big_call.get_dispatch_info().call_weight, Weight::MAX / 2); // 3 * 50% saturates to 100% let batch_call = RuntimeCall::Utility(crate::Call::batch { calls: vec![big_call.clone(), big_call.clone(), big_call.clone()], }); - assert_eq!(batch_call.get_dispatch_info().weight, Weight::MAX); + assert_eq!(batch_call.get_dispatch_info().call_weight, Weight::MAX); }); } @@ -482,7 +482,7 @@ fn batch_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); @@ -492,7 +492,7 @@ fn batch_handles_weight_refund() { let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); // Diff is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff * batch_len); // Full weight when err let good_call = call_foobar(false, start_weight, None); @@ -506,7 +506,7 @@ fn batch_handles_weight_refund() { utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), ); // No weight is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when err let good_call = call_foobar(false, start_weight, 
Some(end_weight)); @@ -520,7 +520,7 @@ fn batch_handles_weight_refund() { System::assert_last_event( utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), ); - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff * batch_len); // Partial batch completion let good_call = call_foobar(false, start_weight, Some(end_weight)); @@ -571,7 +571,7 @@ fn batch_all_revert() { DispatchErrorWithPostInfo { post_info: PostDispatchInfo { actual_weight: Some( - ::WeightInfo::batch_all(2) + info.weight * 2 + ::WeightInfo::batch_all(2) + info.call_weight * 2 ), pays_fee: Pays::Yes }, @@ -598,7 +598,7 @@ fn batch_all_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); @@ -608,7 +608,7 @@ fn batch_all_handles_weight_refund() { let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); // Diff is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff * batch_len); // Full weight when err let good_call = call_foobar(false, start_weight, None); @@ -619,7 +619,7 @@ fn batch_all_handles_weight_refund() { let result = call.dispatch(RuntimeOrigin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); // No weight is refunded - assert_eq!(extract_actual_weight(&result, &info), info.weight); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight); // Refund weight when err let good_call = call_foobar(false, start_weight, Some(end_weight)); @@ -630,7 +630,7 @@ fn batch_all_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(RuntimeOrigin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); - assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); + assert_eq!(extract_actual_weight(&result, &info), info.call_weight - diff * batch_len); // Partial batch completion let good_call = call_foobar(false, start_weight, Some(end_weight)); @@ -664,7 +664,9 @@ fn batch_all_does_not_nest() { Utility::batch_all(RuntimeOrigin::signed(1), vec![batch_all.clone()]), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { - actual_weight: Some(::WeightInfo::batch_all(1) + info.weight), + actual_weight: Some( + ::WeightInfo::batch_all(1) + info.call_weight + ), pays_fee: Pays::Yes }, error: frame_system::Error::::CallFiltered.into(), @@ -789,7 +791,7 @@ fn batch_all_doesnt_work_with_inherents() { batch_all.dispatch(RuntimeOrigin::signed(1)), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { - actual_weight: Some(info.weight), + actual_weight: Some(info.call_weight), pays_fee: Pays::Yes }, error: frame_system::Error::::CallFiltered.into(), @@ -805,7 +807,7 @@ fn batch_works_with_council_origin() { calls: vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})], }); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); 
assert_ok!(Council::propose( @@ -842,7 +844,7 @@ fn force_batch_works_with_council_origin() { calls: vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})], }); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let proposal_weight = proposal.get_dispatch_info().weight; + let proposal_weight = proposal.get_dispatch_info().call_weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Council::propose( @@ -892,7 +894,7 @@ fn with_weight_works() { })); // Weight before is max. assert_eq!( - upgrade_code_call.get_dispatch_info().weight, + upgrade_code_call.get_dispatch_info().call_weight, ::SystemWeightInfo::set_code() ); assert_eq!( @@ -905,7 +907,7 @@ fn with_weight_works() { weight: Weight::from_parts(123, 456), }; // Weight after is set by Root. - assert_eq!(with_weight_call.get_dispatch_info().weight, Weight::from_parts(123, 456)); + assert_eq!(with_weight_call.get_dispatch_info().call_weight, Weight::from_parts(123, 456)); assert_eq!( with_weight_call.get_dispatch_info().class, frame_support::dispatch::DispatchClass::Operational diff --git a/substrate/frame/verify-signature/Cargo.toml b/substrate/frame/verify-signature/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..3c5fd5e651575fdc7fba0ebdced070be25f17589 --- /dev/null +++ b/substrate/frame/verify-signature/Cargo.toml @@ -0,0 +1,70 @@ +[package] +name = "pallet-verify-signature" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "FRAME verify signature pallet" +readme = "README.md" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +frame-benchmarking = { optional = true, workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { features = ["serde"], workspace = true } + +[dev-dependencies] +pallet-balances = { workspace = true, default-features = true } +pallet-root-testing = { workspace = true, default-features = true } +pallet-collective = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "pallet-collective/std", + "pallet-root-testing/std", + "pallet-timestamp/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-weights/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collective/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "pallet-collective/try-runtime", + "pallet-root-testing/try-runtime", + "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/verify-signature/README.md b/substrate/frame/verify-signature/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..7748315c61cc9cd53944acbe7da84b93be9aff76 --- /dev/null +++ b/substrate/frame/verify-signature/README.md @@ -0,0 +1,19 @@ +# Verify Signature Module +A module that provides a `TransactionExtension` that validates a signature against a payload and +authorizes the origin. + +## Overview + +This module serves two purposes: +- `VerifySignature`: A `TransactionExtension` that checks the provided signature against a payload + constructed through hashing the inherited implication with `blake2b_256`. If the signature is + valid, then the extension authorizes the origin as signed. The extension can also be disabled, + acting as a passthrough, which allows other extensions to authorize origins other than the + traditionally signed origin. +- Benchmarking: The extension is bound within a pallet to leverage the benchmarking functionality in + FRAME. The `Signature` and `Signer` types are specified in the pallet configuration and a + benchmark helper trait is used to create a signature which is then validated in the benchmark. + +[`Config`]: ./trait.Config.html + +License: Apache-2.0 diff --git a/substrate/frame/verify-signature/src/benchmarking.rs b/substrate/frame/verify-signature/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..2b592a4023ec93d55d4f8de3b62c4aa5a80d1010 --- /dev/null +++ b/substrate/frame/verify-signature/src/benchmarking.rs @@ -0,0 +1,65 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarks for Verify Signature Pallet + +#![cfg(feature = "runtime-benchmarks")] + +extern crate alloc; + +use super::*; + +#[allow(unused)] +use crate::{extension::VerifySignature, Config, Pallet as VerifySignaturePallet}; +use alloc::vec; +use frame_benchmarking::{v2::*, BenchmarkError}; +use frame_support::dispatch::{DispatchInfo, GetDispatchInfo}; +use frame_system::{Call as SystemCall, RawOrigin}; +use sp_io::hashing::blake2_256; +use sp_runtime::traits::{AsTransactionAuthorizedOrigin, Dispatchable, TransactionExtension}; + +pub trait BenchmarkHelper { + fn create_signature(entropy: &[u8], msg: &[u8]) -> (Signature, Signer); +} + +#[benchmarks(where + T: Config + Send + Sync, + T::RuntimeCall: Dispatchable + GetDispatchInfo, + T::RuntimeOrigin: AsTransactionAuthorizedOrigin, +)] +mod benchmarks { + use super::*; + + #[benchmark] + fn verify_signature() -> Result<(), BenchmarkError> { + let entropy = [42u8; 256]; + let call: T::RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let info = call.get_dispatch_info(); + let msg = call.using_encoded(blake2_256).to_vec(); + let (signature, signer) = T::BenchmarkHelper::create_signature(&entropy, &msg[..]); + let ext = VerifySignature::::new_with_signature(signature, signer); + + #[block] + { + assert!(ext.validate(RawOrigin::None.into(), &call, &info, 0, (), &call).is_ok()); + } + + Ok(()) + } + + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/substrate/frame/verify-signature/src/extension.rs b/substrate/frame/verify-signature/src/extension.rs new file mode 100644 index 0000000000000000000000000000000000000000..4490a0a600bb2d3bdf377f82c559f410628c5a7f --- /dev/null +++ b/substrate/frame/verify-signature/src/extension.rs @@ -0,0 +1,157 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Transaction extension which validates a signature against a payload constructed from a call and +//! the rest of the transaction extension pipeline. + +use crate::{Config, WeightInfo}; +use codec::{Decode, Encode}; +use frame_support::traits::OriginTrait; +use scale_info::TypeInfo; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + impl_tx_ext_default, + traits::{ + transaction_extension::TransactionExtension, AsTransactionAuthorizedOrigin, DispatchInfoOf, + Dispatchable, Verify, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError, ValidTransaction}, +}; +use sp_weights::Weight; + +/// Extension that, if enabled, validates a signature type against the payload constructed from the +/// call and the rest of the transaction extension pipeline. This extension provides the +/// functionality that traditionally signed transactions had with the implicit signature checking +/// implemented in [`Checkable`](sp_runtime::traits::Checkable). 
It is meant to be placed ahead of +/// any other extensions that do authorization work in the [`TransactionExtension`] pipeline. +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub enum VerifySignature +where + T: Config + Send + Sync, +{ + /// The extension will verify the signature and, if successful, authorize a traditionally + /// signed transaction. + Signed { + /// The signature provided by the transaction submitter. + signature: T::Signature, + /// The account that signed the payload. + account: T::AccountId, + }, + /// The extension is disabled and will be passthrough. + Disabled, +} + +impl core::fmt::Debug for VerifySignature +where + T: Config + Send + Sync, +{ + #[cfg(feature = "std")] + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "VerifySignature") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut core::fmt::Formatter) -> core::fmt::Result { + Ok(()) + } +} + +impl VerifySignature +where + T: Config + Send + Sync, +{ + /// Create a new extension instance that will validate the provided signature. + pub fn new_with_signature(signature: T::Signature, account: T::AccountId) -> Self { + Self::Signed { signature, account } + } + + /// Create a new passthrough extension instance. + pub fn new_disabled() -> Self { + Self::Disabled + } +} + +impl TransactionExtension for VerifySignature +where + T: Config + Send + Sync, + ::RuntimeOrigin: AsTransactionAuthorizedOrigin, +{ + const IDENTIFIER: &'static str = "VerifyMultiSignature"; + type Implicit = (); + type Val = (); + type Pre = (); + + fn weight(&self, _call: &T::RuntimeCall) -> Weight { + match &self { + // The benchmarked weight of the payload construction and signature checking. + Self::Signed { .. } => T::WeightInfo::verify_signature(), + // When the extension is passthrough, it consumes no weight. + Self::Disabled => Weight::zero(), + } + } + + fn validate( + &self, + mut origin: ::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, + _len: usize, + _: (), + inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { + // If the extension is disabled, return early. + let (signature, account) = match &self { + Self::Signed { signature, account } => (signature, account), + Self::Disabled => return Ok((Default::default(), (), origin)), + }; + + // This extension must receive an unauthorized origin as it is meant to headline the + // authorization extension pipeline. Any extensions that precede this one must not authorize + // any origin and serve some other functional purpose. + if origin.is_transaction_authorized() { + return Err(InvalidTransaction::BadSigner.into()); + } + + // Construct the payload that the signature will be validated against. The inherited + // implication contains the encoded bytes of the call and all of the extension data of the + // extensions that follow in the `TransactionExtension` pipeline. + // + // In other words: + // - extensions that precede this extension are ignored in terms of signature validation; + // - extensions that follow this extension are included in the payload to be signed (as if + // they were the entire `SignedExtension` pipeline in the traditional signed transaction + // model). + // + // The encoded bytes of the payload are then hashed using `blake2_256`. + let msg = inherited_implication.using_encoded(blake2_256); + + // The extension was enabled, so the signature must match. 
+ if !signature.verify(&msg[..], account) { + Err(InvalidTransaction::BadProof)? + } + + // Return the signer as the transaction origin. + origin.set_caller_from_signed(account.clone()); + Ok((ValidTransaction::default(), (), origin)) + } + + impl_tx_ext_default!(T::RuntimeCall; prepare); +} diff --git a/substrate/frame/verify-signature/src/lib.rs b/substrate/frame/verify-signature/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..96d83dbef9f734a6665c842415f04e803ba4bcb0 --- /dev/null +++ b/substrate/frame/verify-signature/src/lib.rs @@ -0,0 +1,68 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Transaction extension which validates a signature against a payload constructed from a call and +//! the rest of the transaction extension pipeline. + +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +pub mod extension; +#[cfg(test)] +mod tests; +pub mod weights; + +extern crate alloc; + +#[cfg(feature = "runtime-benchmarks")] +pub use benchmarking::BenchmarkHelper; +use codec::{Decode, Encode}; +pub use extension::VerifySignature; +use frame_support::Parameter; +pub use weights::WeightInfo; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use sp_runtime::traits::{IdentifyAccount, Verify}; + + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// Signature type that the extension of this pallet can verify. + type Signature: Verify + + Parameter + + Encode + + Decode + + Send + + Sync; + /// The account identifier used by this pallet's signature type. + type AccountIdentifier: IdentifyAccount; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + /// Helper to create a signature to be benchmarked. + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper: BenchmarkHelper; + } +} diff --git a/substrate/frame/verify-signature/src/tests.rs b/substrate/frame/verify-signature/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..3e4c8db12fe2e3f78fb01dd9bc5bf5920579c58a --- /dev/null +++ b/substrate/frame/verify-signature/src/tests.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Tests for Utility Pallet + +#![cfg(test)] + +use super::*; + +use extension::VerifySignature; +use frame_support::{ + derive_impl, + dispatch::GetDispatchInfo, + pallet_prelude::{InvalidTransaction, TransactionValidityError}, + traits::OriginTrait, +}; +use frame_system::Call as SystemCall; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + testing::{TestSignature, UintAuthorityId}, + traits::DispatchTransaction, +}; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + VerifySignaturePallet: crate, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct BenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl crate::BenchmarkHelper for BenchmarkHelper { + fn create_signature(_entropy: &[u8], msg: &[u8]) -> (TestSignature, u64) { + (TestSignature(0, msg.to_vec()), 0) + } +} + +impl crate::Config for Test { + type Signature = TestSignature; + type AccountIdentifier = UintAuthorityId; + type WeightInfo = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = BenchmarkHelper; +} + +#[cfg(feature = "runtime-benchmarks")] +pub fn new_test_ext() -> sp_io::TestExternalities { + use sp_runtime::BuildStorage; + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +#[test] +fn verification_works() { + let who = 0; + let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let sig = TestSignature(0, call.using_encoded(blake2_256).to_vec()); + let info = call.get_dispatch_info(); + + let (_, _, origin) = VerifySignature::::new_with_signature(sig, who) + .validate_only(None.into(), &call, &info, 0) + .unwrap(); + assert_eq!(origin.as_signer().unwrap(), &who) +} + +#[test] +fn bad_signature() { + let who = 0; + let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let sig = TestSignature(0, b"bogus message".to_vec()); + let info = call.get_dispatch_info(); + + assert_eq!( + VerifySignature::::new_with_signature(sig, who) + .validate_only(None.into(), &call, &info, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::BadProof) + ); +} + +#[test] +fn bad_starting_origin() { + let who = 0; + let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let sig = TestSignature(0, b"bogus message".to_vec()); + let info = call.get_dispatch_info(); + + assert_eq!( + VerifySignature::::new_with_signature(sig, who) + .validate_only(Some(42).into(), &call, &info, 0) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::BadSigner) + ); +} + +#[test] +fn disabled_extension_works() { + let who = 42; + let call: RuntimeCall = SystemCall::remark { remark: vec![] }.into(); + let info = call.get_dispatch_info(); + + let (_, _, origin) = VerifySignature::::new_disabled() + .validate_only(Some(who).into(), &call, &info, 0) + .unwrap(); + assert_eq!(origin.as_signer().unwrap(), &who) +} diff --git a/substrate/frame/verify-signature/src/weights.rs b/substrate/frame/verify-signature/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..2c1f0f7954229807e91b0a15b05bb4b1dc17f03e --- /dev/null +++ 
b/substrate/frame/verify-signature/src/weights.rs @@ -0,0 +1,75 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_verify_signature` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-09-24, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/debug/substrate-node +// benchmark +// pallet +// --steps=2 +// --repeat=2 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --pallet=pallet-verify-signature +// --chain=dev +// --output=./substrate/frame/verify-signature/src/weights.rs +// --header=./substrate/HEADER-APACHE2 +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_verify_signature`. +pub trait WeightInfo { + fn verify_signature() -> Weight; +} + +/// Weights for `pallet_verify_signature` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn verify_signature() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 48_953_000 picoseconds. + Weight::from_parts(49_254_000, 0) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn verify_signature() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 48_953_000 picoseconds. 
+ Weight::from_parts(49_254_000, 0) + } +} diff --git a/substrate/frame/whitelist/src/benchmarking.rs b/substrate/frame/whitelist/src/benchmarking.rs index cbe6ee4becd0acb5d51cc09b7794973d8f79f9af..0d7605d9752bf625fd0aa093153382e4f0911e00 100644 --- a/substrate/frame/whitelist/src/benchmarking.rs +++ b/substrate/frame/whitelist/src/benchmarking.rs @@ -75,7 +75,7 @@ mod benchmarks { .map_err(|_| BenchmarkError::Weightless)?; let remark = alloc::vec![1u8; n as usize]; let call: ::RuntimeCall = frame_system::Call::remark { remark }.into(); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let encoded_call = call.encode(); let call_encoded_len = encoded_call.len() as u32; let call_hash = T::Hashing::hash_of(&call); diff --git a/substrate/frame/whitelist/src/lib.rs b/substrate/frame/whitelist/src/lib.rs index de16c2c2da883e20a62ffd807ef7739df9b46439..28887e0ca4aca3b7b2bf134b786ca8f03fb79623 100644 --- a/substrate/frame/whitelist/src/lib.rs +++ b/substrate/frame/whitelist/src/lib.rs @@ -178,7 +178,7 @@ pub mod pallet { .map_err(|_| Error::::UndecodableCall)?; ensure!( - call.get_dispatch_info().weight.all_lte(call_weight_witness), + call.get_dispatch_info().call_weight.all_lte(call_weight_witness), Error::::InvalidCallWeightWitness ); @@ -191,7 +191,7 @@ pub mod pallet { #[pallet::call_index(3)] #[pallet::weight({ - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let call_len = call.encoded_size() as u32; T::WeightInfo::dispatch_whitelisted_call_with_preimage(call_len) diff --git a/substrate/frame/whitelist/src/tests.rs b/substrate/frame/whitelist/src/tests.rs index 3a60adbcfbedc04dd79edacfb2cb16a70427975b..b53cc93b1953aaa76294cc0677983212705cc36c 100644 --- a/substrate/frame/whitelist/src/tests.rs +++ b/substrate/frame/whitelist/src/tests.rs @@ -73,7 +73,7 @@ fn test_whitelist_call_and_remove() { fn test_whitelist_call_and_execute() { new_test_ext().execute_with(|| { let call = RuntimeCall::System(frame_system::Call::remark_with_event { remark: vec![1] }); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let encoded_call = call.encode(); let call_encoded_len = encoded_call.len() as u32; let call_hash = ::Hashing::hash(&encoded_call[..]); @@ -153,7 +153,7 @@ fn test_whitelist_call_and_execute_failing_call() { call_encoded_len: Default::default(), call_weight_witness: Weight::zero(), }); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let encoded_call = call.encode(); let call_encoded_len = encoded_call.len() as u32; let call_hash = ::Hashing::hash(&encoded_call[..]); @@ -200,7 +200,7 @@ fn test_whitelist_call_and_execute_without_note_preimage() { fn test_whitelist_call_and_execute_decode_consumes_all() { new_test_ext().execute_with(|| { let call = RuntimeCall::System(frame_system::Call::remark_with_event { remark: vec![1] }); - let call_weight = call.get_dispatch_info().weight; + let call_weight = call.get_dispatch_info().call_weight; let mut call = call.encode(); // Appending something does not make the encoded call invalid. // This tests that the decode function consumes all data. 
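The whitelist changes above all swap `get_dispatch_info().weight` for `get_dispatch_info().call_weight`. The sketch below only illustrates the split this refactor introduces (it is not the real `frame_support::dispatch::DispatchInfo` definition): the dispatch info now distinguishes the weight of the call itself from the weight contributed by the transaction-extension pipeline, and the whitelist checks only the former.

```rust
// Minimal, self-contained illustration of the call/extension weight split.
use sp_weights::Weight;

struct SplitDispatchInfo {
    call_weight: Weight,      // weight of the call logic alone (what the whitelist compares)
    extension_weight: Weight, // weight added by the `TransactionExtension` pipeline
}

impl SplitDispatchInfo {
    fn total_weight(&self) -> Weight {
        // The full cost of the extrinsic is the sum of both parts.
        self.call_weight.saturating_add(self.extension_weight)
    }
}
```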
diff --git a/substrate/primitives/consensus/pow/Cargo.toml b/substrate/primitives/consensus/pow/Cargo.toml index 8731015f7da2fcb28506a0be52197392b3fdce23..171137a1a04e07c0a2ebfb7456c30ea20abfb6ff 100644 --- a/substrate/primitives/consensus/pow/Cargo.toml +++ b/substrate/primitives/consensus/pow/Cargo.toml @@ -23,9 +23,4 @@ sp-runtime = { workspace = true } [features] default = ["std"] -std = [ - "codec/std", - "sp-api/std", - "sp-core/std", - "sp-runtime/std", -] +std = ["codec/std", "sp-api/std", "sp-core/std", "sp-runtime/std"]
diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index 43f8c5514f7fb0d897fc0cb1f81a5b11d85cab71..2f993d3167a15770835337402d88decad9abe889 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -23,12 +23,7 @@ sp-timestamp = { workspace = true } [features] default = ["std"] -std = [ - "codec/std", - "scale-info/std", - "serde/std", - "sp-timestamp/std", -] +std = ["codec/std", "scale-info/std", "serde/std", "sp-timestamp/std"] # Serde support without relying on std features. serde = ["dep:serde", "scale-info/serde"]
diff --git a/substrate/primitives/genesis-builder/src/lib.rs b/substrate/primitives/genesis-builder/src/lib.rs index 07317bc4cb5253b23e069ae9b99fe5f4ab5dff74..4763aa06341e84145087082e15af5aa56c958ef5 100644 --- a/substrate/primitives/genesis-builder/src/lib.rs +++ b/substrate/primitives/genesis-builder/src/lib.rs @@ -17,17 +17,33 @@ #![cfg_attr(not(feature = "std"), no_std)] -//! Substrate genesis config builder +//! # Substrate genesis config builder. //! -//! For FRAME based runtimes, this runtime interface provides means to interact with -//! `RuntimeGenesisConfig`. Runtime provides a default `RuntimeGenesisConfig` structure in a form of -//! the JSON blob. +//! This crate contains [`GenesisBuilder`], a runtime API to be implemented by runtimes in order to +//! express their genesis state. //! -//! For non-FRAME runtimes this interface is intended to build genesis state of the runtime basing -//! on some input arbitrary bytes array. This documentation uses term `RuntimeGenesisConfig`, which -//! for non-FRAME runtimes may be understood as the runtime-side entity representing initial runtime -//! configuration. The representation of the preset is an arbitrary `Vec<u8>` and does not -//! necessarily have to represent a JSON blob. +//! The overall flow of the methods in [`GenesisBuilder`] is as follows: +//! +//! 1. [`GenesisBuilder::preset_names`]: A runtime exposes a number of different +//! `RuntimeGenesisConfig` variations, each of which is called a `preset`, and is identified by a +//! [`PresetId`]. All runtimes are encouraged to expose at least [`DEV_RUNTIME_PRESET`] and +//! [`LOCAL_TESTNET_RUNTIME_PRESET`] presets for consistency. +//! 2. [`GenesisBuilder::get_preset`]: Given a `PresetId`, the runtime returns the JSON blob +//! representation of the `RuntimeGenesisConfig` for that preset. This JSON blob is often mixed +//! into the broader `chain_spec`. If `None` is given, [`GenesisBuilder::get_preset`] provides a +//! JSON representation of the default `RuntimeGenesisConfig` (by simply serializing the +//! `RuntimeGenesisConfig::default()` value into JSON format). This is used as a base for +//! applying patches / presets. +//! +//! 3. [`GenesisBuilder::build_state`]: Given a JSON blob, this method should deserialize it and +//! enact it (using `frame_support::traits::BuildGenesisConfig` for FRAME-based runtimes), +//! essentially writing it to the state.
+//! +//! The first two flows are often done between a runtime and the `chain_spec_builder` binary. +//! The third flow is used when a new blockchain is launched to enact and store the genesis state. See +//! the documentation of `chain_spec_builder` for more info. +//! +//! ## Patching //! //! The runtime may provide a number of partial predefined `RuntimeGenesisConfig` configurations in //! the form of patches which shall be applied on top of the default `RuntimeGenesisConfig`. The @@ -35,19 +51,22 @@ //! customized in the default runtime genesis config. These predefined configurations are referred //! to as presets. //! -//! This allows the runtime to provide a number of predefined configs (e.g. for different -//! testnets or development) without neccessity to leak the runtime types outside the itself (e.g. -//! node or chain-spec related tools). +//! This allows the runtime to provide a number of predefined configs (e.g. for different testnets +//! or development) without needing to leak the runtime types outside itself (e.g. node or +//! chain-spec related tools). +//! +//! ## FRAME vs. non-FRAME +//! +//! For FRAME-based runtimes, [`GenesisBuilder`] provides the means to interact with +//! `RuntimeGenesisConfig`. +//! +//! For non-FRAME runtimes this interface is intended to build the genesis state of the runtime +//! based on some arbitrary input byte array. This documentation uses the term `RuntimeGenesisConfig`, +//! which for non-FRAME runtimes may be understood as the "runtime-side entity representing the +//! initial runtime genesis configuration". The representation of the preset is an arbitrary +//! `Vec<u8>` and does not necessarily have to represent a JSON blob. //! -//! This Runtime API allows to interact with `RuntimeGenesisConfig`, in particular: -//! - provide the list of available preset names, -//! - provide a number of named presets of `RuntimeGenesisConfig`, -//! - provide a JSON represention of the default `RuntimeGenesisConfig` (by simply serializing the -//! default `RuntimeGenesisConfig` struct into JSON format), -//! - deserialize the full `RuntimeGenesisConfig` from given JSON blob and put the resulting -//! `RuntimeGenesisConfig` structure into the state storage creating the initial runtime's state. -//! Allows to build customized genesis. This operation internally calls `GenesisBuild::build` -//! function for all runtime pallets. +//! ## Genesis Block State //! //! Providing externalities with an empty storage and putting `RuntimeGenesisConfig` into storage //! (by calling `build_state`) allows to construct the raw storage of `RuntimeGenesisConfig` @@ -75,14 +94,15 @@ pub const DEV_RUNTIME_PRESET: &'static str = "development"; pub const LOCAL_TESTNET_RUNTIME_PRESET: &'static str = "local_testnet"; sp_api::decl_runtime_apis! { - /// API to interact with RuntimeGenesisConfig for the runtime + /// API to interact with `RuntimeGenesisConfig` for the runtime pub trait GenesisBuilder { /// Build `RuntimeGenesisConfig` from a JSON blob not using any defaults and store it in the /// storage. /// - /// In the case of a FRAME-based runtime, this function deserializes the full `RuntimeGenesisConfig` from the given JSON blob and - /// puts it into the storage. If the provided JSON blob is incorrect or incomplete or the - /// deserialization fails, an error is returned. + /// In the case of a FRAME-based runtime, this function deserializes the full + /// `RuntimeGenesisConfig` from the given JSON blob and puts it into the storage.
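As a complement to the flow described in the module docs above: a FRAME runtime usually implements this API inside its `impl_runtime_apis!` block by delegating to helper functions. The sketch below is an assumption based on common runtime templates; the `frame_support::genesis_builder_helper` helpers and the `Runtime`/`Block`/`RuntimeGenesisConfig` names are not part of this diff.

```rust
// Hedged sketch: typical FRAME-side implementation of `GenesisBuilder`.
impl_runtime_apis! {
    impl sp_genesis_builder::GenesisBuilder<Block> for Runtime {
        fn build_state(config: Vec<u8>) -> sp_genesis_builder::Result {
            // Deserialize the JSON blob and enact it via `BuildGenesisConfig`.
            frame_support::genesis_builder_helper::build_state::<RuntimeGenesisConfig>(config)
        }

        fn get_preset(id: &Option<sp_genesis_builder::PresetId>) -> Option<Vec<u8>> {
            // No named presets in this sketch; `None` yields the serialized default config.
            frame_support::genesis_builder_helper::get_preset::<RuntimeGenesisConfig>(id, |_| None)
        }

        fn preset_names() -> Vec<sp_genesis_builder::PresetId> {
            Vec::new()
        }
    }
}
```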
If the + /// provided JSON blob is incorrect or incomplete or the deserialization fails, an error + /// is returned. /// /// Please note that provided JSON blob must contain all `RuntimeGenesisConfig` fields, no /// defaults will be used. @@ -91,7 +111,7 @@ sp_api::decl_runtime_apis! { /// Returns a JSON blob representation of the built-in `RuntimeGenesisConfig` identified by /// `id`. /// - /// If `id` is `None` the function returns JSON blob representation of the default + /// If `id` is `None` the function should return JSON blob representation of the default /// `RuntimeGenesisConfig` struct of the runtime. Implementation must provide default /// `RuntimeGenesisConfig`. /// diff --git a/substrate/primitives/inherents/src/lib.rs b/substrate/primitives/inherents/src/lib.rs index 80787669856ffc46af28207d2cec787e5ed78d5c..0ddc12dde061449a0923ab2f86c834b6781a6b1c 100644 --- a/substrate/primitives/inherents/src/lib.rs +++ b/substrate/primitives/inherents/src/lib.rs @@ -98,10 +98,10 @@ //! and production. //! //! ``` -//! # use sp_runtime::testing::ExtrinsicWrapper; +//! # use sp_runtime::testing::{MockCallU64, TestXt}; //! # use sp_inherents::{InherentIdentifier, InherentData}; //! # use futures::FutureExt; -//! # type Block = sp_runtime::testing::Block>; +//! # type Block = sp_runtime::testing::Block>; //! # const INHERENT_IDENTIFIER: InherentIdentifier = *b"testinh0"; //! # struct InherentDataProvider; //! # #[async_trait::async_trait] diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index 18b20f2ccaac31a92175a420d0f569ef7dd1677b..4bd13b935afda483e40fe9898ecc5d84e18f95e6 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -86,7 +86,7 @@ mod test { call_ty: meta_type::<()>(), signature_ty: meta_type::<()>(), extra_ty: meta_type::<()>(), - signed_extensions: vec![], + extensions: vec![], }, ty: meta_type::<()>(), apis: vec![], diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index da4f5d7f3711cdf6b85773daa0b90469129b0d49..199b692fbd8c4e01df23da24823e55acb16176e6 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -178,10 +178,11 @@ pub struct ExtrinsicMetadataIR { pub call_ty: T::Type, /// The type of the extrinsic's signature. pub signature_ty: T::Type, - /// The type of the outermost Extra enum. + /// The type of the outermost Extra/Extensions enum. + // TODO: metadata-v16: remove this, the `implicit` type can be found in `extensions::implicit`. pub extra_ty: T::Type, - /// The signed extensions in the order they appear in the extrinsic. - pub signed_extensions: Vec>, + /// The transaction extensions in the order they appear in the extrinsic. + pub extensions: Vec>, } impl IntoPortable for ExtrinsicMetadataIR { @@ -195,7 +196,7 @@ impl IntoPortable for ExtrinsicMetadataIR { call_ty: registry.register_type(&self.call_ty), signature_ty: registry.register_type(&self.signature_ty), extra_ty: registry.register_type(&self.extra_ty), - signed_extensions: registry.map_into_portable(self.signed_extensions), + extensions: registry.map_into_portable(self.extensions), } } } @@ -225,23 +226,23 @@ impl IntoPortable for PalletAssociatedTypeMetadataIR { /// Metadata of an extrinsic's signed extension. 
#[derive(Clone, PartialEq, Eq, Encode, Debug)] -pub struct SignedExtensionMetadataIR { +pub struct TransactionExtensionMetadataIR { /// The unique signed extension identifier, which may be different from the type name. pub identifier: T::String, /// The type of the signed extension, with the data to be included in the extrinsic. pub ty: T::Type, - /// The type of the additional signed data, with the data to be included in the signed payload - pub additional_signed: T::Type, + /// The type of the implicit data, with the data to be included in the signed payload. + pub implicit: T::Type, } -impl IntoPortable for SignedExtensionMetadataIR { - type Output = SignedExtensionMetadataIR; +impl IntoPortable for TransactionExtensionMetadataIR { + type Output = TransactionExtensionMetadataIR; fn into_portable(self, registry: &mut Registry) -> Self::Output { - SignedExtensionMetadataIR { + TransactionExtensionMetadataIR { identifier: self.identifier.into_portable(registry), ty: registry.register_type(&self.ty), - additional_signed: registry.register_type(&self.additional_signed), + implicit: registry.register_type(&self.implicit), } } } diff --git a/substrate/primitives/metadata-ir/src/v14.rs b/substrate/primitives/metadata-ir/src/v14.rs index e1b7a24f76577c09f7f8c8040ae06bf433f9126d..70e84532add9d2e490638d7630e5c9498848c876 100644 --- a/substrate/primitives/metadata-ir/src/v14.rs +++ b/substrate/primitives/metadata-ir/src/v14.rs @@ -20,8 +20,8 @@ use super::types::{ ExtrinsicMetadataIR, MetadataIR, PalletCallMetadataIR, PalletConstantMetadataIR, PalletErrorMetadataIR, PalletEventMetadataIR, PalletMetadataIR, PalletStorageMetadataIR, - SignedExtensionMetadataIR, StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, - StorageHasherIR, + StorageEntryMetadataIR, StorageEntryModifierIR, StorageEntryTypeIR, StorageHasherIR, + TransactionExtensionMetadataIR, }; use frame_metadata::v14::{ @@ -137,12 +137,12 @@ impl From for PalletErrorMetadata { } } -impl From for SignedExtensionMetadata { - fn from(ir: SignedExtensionMetadataIR) -> Self { +impl From for SignedExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { SignedExtensionMetadata { identifier: ir.identifier, ty: ir.ty, - additional_signed: ir.additional_signed, + additional_signed: ir.implicit, } } } @@ -152,7 +152,7 @@ impl From for ExtrinsicMetadata { ExtrinsicMetadata { ty: ir.ty, version: ir.version, - signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } } diff --git a/substrate/primitives/metadata-ir/src/v15.rs b/substrate/primitives/metadata-ir/src/v15.rs index a942eb73223b2c80e2d71ef1cb070c45a2ca588f..4b3b6106d27f94422d6122df255956434a33f955 100644 --- a/substrate/primitives/metadata-ir/src/v15.rs +++ b/substrate/primitives/metadata-ir/src/v15.rs @@ -21,7 +21,7 @@ use crate::OuterEnumsIR; use super::types::{ ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, - RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, SignedExtensionMetadataIR, + RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR, }; use frame_metadata::v15::{ @@ -87,12 +87,12 @@ impl From for PalletMetadata { } } -impl From for SignedExtensionMetadata { - fn from(ir: SignedExtensionMetadataIR) -> Self { +impl From for SignedExtensionMetadata { + fn from(ir: TransactionExtensionMetadataIR) -> Self { SignedExtensionMetadata { identifier: ir.identifier, ty: ir.ty, - 
additional_signed: ir.additional_signed, + additional_signed: ir.implicit, } } } @@ -105,7 +105,7 @@ impl From for ExtrinsicMetadata { call_ty: ir.call_ty, signature_ty: ir.signature_ty, extra_ty: ir.extra_ty, - signed_extensions: ir.signed_extensions.into_iter().map(Into::into).collect(), + signed_extensions: ir.extensions.into_iter().map(Into::into).collect(), } } } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index c3413775b1cc25b5c0f6bcf4a57cb32b234b415c..8a812c3a5772a4929048ec590440cd0ec27a06db 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -39,6 +39,7 @@ tracing = { workspace = true, features = ["log"], default-features = false } binary-merkle-tree = { workspace = true } simple-mermaid = { version = "0.1.1", optional = true } +tuplex = { version = "0.1.2", default-features = false } [dev-dependencies] rand = { workspace = true, default-features = true } @@ -75,6 +76,7 @@ std = [ "sp-trie/std", "sp-weights/std", "tracing/std", + "tuplex/std", ] # Serde support without relying on std features. diff --git a/substrate/primitives/runtime/src/generic/block.rs b/substrate/primitives/runtime/src/generic/block.rs index 8ed79c7c8dcf728094bc47cb7a58fc3eee93f585..a084a3703f9e6a8809b68f910c35c95372a517d9 100644 --- a/substrate/primitives/runtime/src/generic/block.rs +++ b/substrate/primitives/runtime/src/generic/block.rs @@ -99,7 +99,7 @@ where impl traits::Block for Block where Header: HeaderT + MaybeSerializeDeserialize, - Extrinsic: Member + Codec + traits::Extrinsic, + Extrinsic: Member + Codec + traits::ExtrinsicLike, { type Extrinsic = Extrinsic; type Header = Header; diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs index 44325920beee0c1026c4ec7dc68b6943652dfc92..e2ecd5ed6da7ad5f22be4066bde667b9c579bd84 100644 --- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs @@ -18,81 +18,117 @@ //! Generic implementation of an extrinsic that has passed the verification //! stage. +use codec::Encode; +use sp_weights::Weight; + use crate::{ traits::{ - self, DispatchInfoOf, Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, - SignedExtension, ValidateUnsigned, + self, transaction_extension::TransactionExtension, AsTransactionAuthorizedOrigin, + DispatchInfoOf, DispatchTransaction, Dispatchable, MaybeDisplay, Member, + PostDispatchInfoOf, ValidateUnsigned, }, transaction_validity::{TransactionSource, TransactionValidity}, }; +/// The kind of extrinsic this is, including any fields required of that kind. This is basically +/// the full extrinsic except the `Call`. +#[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] +pub enum ExtrinsicFormat { + /// Extrinsic is bare; it must pass either the bare forms of `TransactionExtension` or + /// `ValidateUnsigned`, both deprecated, or alternatively a `ProvideInherent`. + Bare, + /// Extrinsic has a default `Origin` of `Signed(AccountId)` and must pass all + /// `TransactionExtension`s regular checks and includes all extension data. + Signed(AccountId, Extension), + /// Extrinsic has a default `Origin` of `None` and must pass all `TransactionExtension`s. + /// regular checks and includes all extension data. 
+ General(Extension), +} + /// Definition of something that the external world might want to say; its existence implies that it /// has been checked and is good, particularly with regards to the signature. /// /// This is typically passed into [`traits::Applyable::apply`], which should execute /// [`CheckedExtrinsic::function`], alongside all other bits and bobs. #[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] -pub struct CheckedExtrinsic { +pub struct CheckedExtrinsic { /// Who this purports to be from and the number of extrinsics have come before /// from the same signer, if anyone (note this is not a signature). - pub signed: Option<(AccountId, Extra)>, + pub format: ExtrinsicFormat, /// The function that should be called. pub function: Call, } -impl traits::Applyable - for CheckedExtrinsic +impl traits::Applyable + for CheckedExtrinsic where AccountId: Member + MaybeDisplay, - Call: Member + Dispatchable, - Extra: SignedExtension, - RuntimeOrigin: From>, + Call: Member + Dispatchable + Encode, + Extension: TransactionExtension, + RuntimeOrigin: From> + AsTransactionAuthorizedOrigin, { type Call = Call; - fn validate>( + fn validate>( &self, - // TODO [#5006;ToDr] should source be passed to `SignedExtension`s? - // Perhaps a change for 2.0 to avoid breaking too much APIs? source: TransactionSource, info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { - if let Some((ref id, ref extra)) = self.signed { - Extra::validate(extra, id, &self.function, info, len) - } else { - let valid = Extra::validate_unsigned(&self.function, info, len)?; - let unsigned_validation = U::validate_unsigned(source, &self.function)?; - Ok(valid.combine_with(unsigned_validation)) + match self.format { + ExtrinsicFormat::Bare => { + let inherent_validation = I::validate_unsigned(source, &self.function)?; + #[allow(deprecated)] + let legacy_validation = Extension::bare_validate(&self.function, info, len)?; + Ok(legacy_validation.combine_with(inherent_validation)) + }, + ExtrinsicFormat::Signed(ref signer, ref extension) => { + let origin = Some(signer.clone()).into(); + extension.validate_only(origin, &self.function, info, len).map(|x| x.0) + }, + ExtrinsicFormat::General(ref extension) => + extension.validate_only(None.into(), &self.function, info, len).map(|x| x.0), } } - fn apply>( + fn apply>( self, info: &DispatchInfoOf, len: usize, ) -> crate::ApplyExtrinsicResultWithInfo> { - let (maybe_who, maybe_pre) = if let Some((id, extra)) = self.signed { - let pre = Extra::pre_dispatch(extra, &id, &self.function, info, len)?; - (Some(id), Some(pre)) - } else { - Extra::pre_dispatch_unsigned(&self.function, info, len)?; - U::pre_dispatch(&self.function)?; - (None, None) - }; - let res = self.function.dispatch(RuntimeOrigin::from(maybe_who)); - let post_info = match res { - Ok(info) => info, - Err(err) => err.post_info, - }; - Extra::post_dispatch( - maybe_pre, - info, - &post_info, - len, - &res.map(|_| ()).map_err(|e| e.error), - )?; - Ok(res) + match self.format { + ExtrinsicFormat::Bare => { + I::pre_dispatch(&self.function)?; + // TODO: Separate logic from `TransactionExtension` into a new `InherentExtension` + // interface. + Extension::bare_validate_and_prepare(&self.function, info, len)?; + let res = self.function.dispatch(None.into()); + let mut post_info = res.unwrap_or_else(|err| err.post_info); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + // TODO: Separate logic from `TransactionExtension` into a new `InherentExtension` + // interface. 
+ Extension::bare_post_dispatch(info, &mut post_info, len, &pd_res)?; + Ok(res) + }, + ExtrinsicFormat::Signed(signer, extension) => + extension.dispatch_transaction(Some(signer).into(), self.function, info, len), + ExtrinsicFormat::General(extension) => + extension.dispatch_transaction(None.into(), self.function, info, len), + } + } +} + +impl> + CheckedExtrinsic +{ + /// Returns the weight of the extension of this transaction, if present. If the transaction + /// doesn't use any extension, the weight returned is equal to zero. + pub fn extension_weight(&self) -> Weight { + match &self.format { + ExtrinsicFormat::Bare => Weight::zero(), + ExtrinsicFormat::Signed(_, ext) | ExtrinsicFormat::General(ext) => + ext.weight(&self.function), + } } } diff --git a/substrate/primitives/runtime/src/generic/mod.rs b/substrate/primitives/runtime/src/generic/mod.rs index 3687f7cdb3b2b24c087ac47c470b4d39721e2df5..007dee2684b03e5de04209e8feea0b07b2b6802a 100644 --- a/substrate/primitives/runtime/src/generic/mod.rs +++ b/substrate/primitives/runtime/src/generic/mod.rs @@ -16,7 +16,7 @@ // limitations under the License. //! Generic implementations of [`crate::traits::Header`], [`crate::traits::Block`] and -//! [`crate::traits::Extrinsic`]. +//! [`crate::traits::ExtrinsicLike`]. mod block; mod checked_extrinsic; @@ -29,9 +29,10 @@ mod unchecked_extrinsic; pub use self::{ block::{Block, BlockId, SignedBlock}, - checked_extrinsic::CheckedExtrinsic, + checked_extrinsic::{CheckedExtrinsic, ExtrinsicFormat}, digest::{Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, era::{Era, Phase}, header::Header, - unchecked_extrinsic::{SignedPayload, UncheckedExtrinsic}, + unchecked_extrinsic::{Preamble, SignedPayload, UncheckedExtrinsic, EXTRINSIC_FORMAT_VERSION}, }; +pub use unchecked_extrinsic::UncheckedSignaturePayload; diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 499b7c5f5836d21d67f8eb58c0ee41644a119180..8c44e147f90b1214ca9a84eb0c49504329cb133c 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -18,10 +18,10 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. use crate::{ - generic::CheckedExtrinsic, + generic::{CheckedExtrinsic, ExtrinsicFormat}, traits::{ - self, Checkable, Extrinsic, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, - SignaturePayload, SignedExtension, + self, transaction_extension::TransactionExtension, Checkable, Dispatchable, ExtrinsicLike, + ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, SignaturePayload, }, transaction_validity::{InvalidTransaction, TransactionValidityError}, OpaqueExtrinsic, @@ -33,16 +33,170 @@ use codec::{Compact, Decode, Encode, EncodeLike, Error, Input}; use core::fmt; use scale_info::{build::Fields, meta_type, Path, StaticTypeInfo, Type, TypeInfo, TypeParameter}; use sp_io::hashing::blake2_256; +use sp_weights::Weight; + +/// Type to represent the version of the [Extension](TransactionExtension) used in this extrinsic. +pub type ExtensionVersion = u8; +/// Type to represent the extrinsic format version which defines an [UncheckedExtrinsic]. +pub type ExtrinsicVersion = u8; /// Current version of the [`UncheckedExtrinsic`] encoded format. /// /// This version needs to be bumped if the encoded representation changes. /// It ensures that if the representation is changed and the format is not known, /// the decoding fails. 
-const EXTRINSIC_FORMAT_VERSION: u8 = 4; +pub const EXTRINSIC_FORMAT_VERSION: ExtrinsicVersion = 5; +/// Legacy version of the [`UncheckedExtrinsic`] encoded format. +/// +/// This version was used in the signed/unsigned transaction model and is still supported for +/// compatibility reasons. It will be deprecated in favor of v5 extrinsics and an inherent/general +/// transaction model. +pub const LEGACY_EXTRINSIC_FORMAT_VERSION: ExtrinsicVersion = 4; +/// Current version of the [Extension](TransactionExtension) used in this +/// [extrinsic](UncheckedExtrinsic). +/// +/// This version needs to be bumped if there are breaking changes to the extension used in the +/// [UncheckedExtrinsic] implementation. +const EXTENSION_VERSION: ExtensionVersion = 0; /// The `SignaturePayload` of `UncheckedExtrinsic`. -type UncheckedSignaturePayload = (Address, Signature, Extra); +pub type UncheckedSignaturePayload = (Address, Signature, Extension); + +impl SignaturePayload + for UncheckedSignaturePayload +{ + type SignatureAddress = Address; + type Signature = Signature; + type SignatureExtra = Extension; +} + +/// A "header" for extrinsics leading up to the call itself. Determines the type of extrinsic and +/// holds any necessary specialized data. +#[derive(Eq, PartialEq, Clone)] +pub enum Preamble { + /// An extrinsic without a signature or any extension. This means it's either an inherent or + /// an old-school "Unsigned" (we don't use that terminology any more since it's confusable with + /// the general transaction which is without a signature but does have an extension). + /// + /// NOTE: In the future, once we remove `ValidateUnsigned`, this will only serve Inherent + /// extrinsics and thus can be renamed to `Inherent`. + Bare(ExtrinsicVersion), + /// An old-school transaction extrinsic which includes a signature of some hard-coded crypto. + /// Available only on extrinsic version 4. + Signed(Address, Signature, ExtensionVersion, Extension), + /// A new-school transaction extrinsic which does not include a signature by default. The + /// origin authorization, through signatures or other means, is performed by the transaction + /// extension in this extrinsic. Available starting with extrinsic version 5. 
+ General(ExtensionVersion, Extension), +} + +const VERSION_MASK: u8 = 0b0011_1111; +const TYPE_MASK: u8 = 0b1100_0000; +const BARE_EXTRINSIC: u8 = 0b0000_0000; +const SIGNED_EXTRINSIC: u8 = 0b1000_0000; +const GENERAL_EXTRINSIC: u8 = 0b0100_0000; + +impl Decode for Preamble +where + Address: Decode, + Signature: Decode, + Extension: Decode, +{ + fn decode(input: &mut I) -> Result { + let version_and_type = input.read_byte()?; + + let version = version_and_type & VERSION_MASK; + let xt_type = version_and_type & TYPE_MASK; + + let preamble = match (version, xt_type) { + ( + extrinsic_version @ LEGACY_EXTRINSIC_FORMAT_VERSION..=EXTRINSIC_FORMAT_VERSION, + BARE_EXTRINSIC, + ) => Self::Bare(extrinsic_version), + (LEGACY_EXTRINSIC_FORMAT_VERSION, SIGNED_EXTRINSIC) => { + let address = Address::decode(input)?; + let signature = Signature::decode(input)?; + let ext = Extension::decode(input)?; + Self::Signed(address, signature, 0, ext) + }, + (EXTRINSIC_FORMAT_VERSION, GENERAL_EXTRINSIC) => { + let ext_version = ExtensionVersion::decode(input)?; + let ext = Extension::decode(input)?; + Self::General(ext_version, ext) + }, + (_, _) => return Err("Invalid transaction version".into()), + }; + + Ok(preamble) + } +} + +impl Encode for Preamble +where + Address: Encode, + Signature: Encode, + Extension: Encode, +{ + fn size_hint(&self) -> usize { + match &self { + Preamble::Bare(_) => EXTRINSIC_FORMAT_VERSION.size_hint(), + Preamble::Signed(address, signature, _, ext) => LEGACY_EXTRINSIC_FORMAT_VERSION + .size_hint() + .saturating_add(address.size_hint()) + .saturating_add(signature.size_hint()) + .saturating_add(ext.size_hint()), + Preamble::General(ext_version, ext) => EXTRINSIC_FORMAT_VERSION + .size_hint() + .saturating_add(ext_version.size_hint()) + .saturating_add(ext.size_hint()), + } + } + + fn encode_to(&self, dest: &mut T) { + match &self { + Preamble::Bare(extrinsic_version) => { + (extrinsic_version | BARE_EXTRINSIC).encode_to(dest); + }, + Preamble::Signed(address, signature, _, ext) => { + (LEGACY_EXTRINSIC_FORMAT_VERSION | SIGNED_EXTRINSIC).encode_to(dest); + address.encode_to(dest); + signature.encode_to(dest); + ext.encode_to(dest); + }, + Preamble::General(ext_version, ext) => { + (EXTRINSIC_FORMAT_VERSION | GENERAL_EXTRINSIC).encode_to(dest); + ext_version.encode_to(dest); + ext.encode_to(dest); + }, + } + } +} + +impl Preamble { + /// Returns `Some` if this is a signed extrinsic, together with the relevant inner fields. + pub fn to_signed(self) -> Option<(Address, Signature, Extension)> { + match self { + Self::Signed(a, s, _, e) => Some((a, s, e)), + _ => None, + } + } +} + +impl fmt::Debug for Preamble +where + Address: fmt::Debug, + Extension: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Bare(_) => write!(f, "Bare"), + Self::Signed(address, _, ext_version, tx_ext) => + write!(f, "Signed({:?}, {:?}, {:?})", address, ext_version, tx_ext), + Self::General(ext_version, tx_ext) => + write!(f, "General({:?}, {:?})", ext_version, tx_ext), + } + } +} /// An extrinsic right from the external world. This is unchecked and so can contain a signature. /// @@ -66,41 +220,28 @@ type UncheckedSignaturePayload = (Address, Signature, /// This can be checked using [`Checkable`], yielding a [`CheckedExtrinsic`], which is the /// counterpart of this type after its signature (and other non-negotiable validity checks) have /// passed. 
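To make the `Preamble` encoding above concrete, here is a small, self-contained worked example of the leading byte. The constants are copied from this diff; the example values follow directly from the bit masks (lower six bits carry the format version, the top two bits the extrinsic type).

```rust
const VERSION_MASK: u8 = 0b0011_1111;
const TYPE_MASK: u8 = 0b1100_0000;
const BARE_EXTRINSIC: u8 = 0b0000_0000;
const SIGNED_EXTRINSIC: u8 = 0b1000_0000;
const GENERAL_EXTRINSIC: u8 = 0b0100_0000;

fn main() {
    // A v4 signed transaction still starts with 0x84, exactly as before this change.
    assert_eq!(4u8 | SIGNED_EXTRINSIC, 0x84);
    // A v5 general transaction starts with 0x45.
    assert_eq!(5u8 | GENERAL_EXTRINSIC, 0x45);
    // A v5 bare extrinsic (inherent) starts with 0x05.
    assert_eq!(5u8 | BARE_EXTRINSIC, 0x05);
    // Decoding splits the byte back into (version, type).
    let byte = 0x45u8;
    assert_eq!((byte & VERSION_MASK, byte & TYPE_MASK), (5, GENERAL_EXTRINSIC));
}
```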
-#[derive(PartialEq, Eq, Clone)] -pub struct UncheckedExtrinsic -where - Extra: SignedExtension, -{ - /// The signature, address, number of extrinsics have come before from the same signer and an - /// era describing the longevity of this transaction, if this is a signed extrinsic. - /// - /// `None` if it is unsigned or an inherent. - pub signature: Option>, +#[derive(PartialEq, Eq, Clone, Debug)] +pub struct UncheckedExtrinsic { + /// Information regarding the type of extrinsic this is (inherent or transaction) as well as + /// associated extension (`Extension`) data if it's a transaction and a possible signature. + pub preamble: Preamble, /// The function that should be called. pub function: Call, } -impl SignaturePayload - for UncheckedSignaturePayload -{ - type SignatureAddress = Address; - type Signature = Signature; - type SignatureExtra = Extra; -} - /// Manual [`TypeInfo`] implementation because of custom encoding. The data is a valid encoded /// `Vec`, but requires some logic to extract the signature and payload. /// /// See [`UncheckedExtrinsic::encode`] and [`UncheckedExtrinsic::decode`]. -impl TypeInfo - for UncheckedExtrinsic +impl TypeInfo + for UncheckedExtrinsic where Address: StaticTypeInfo, Call: StaticTypeInfo, Signature: StaticTypeInfo, - Extra: SignedExtension + StaticTypeInfo, + Extension: StaticTypeInfo, { - type Identity = UncheckedExtrinsic; + type Identity = UncheckedExtrinsic; fn type_info() -> Type { Type::builder() @@ -112,7 +253,7 @@ where TypeParameter::new("Address", Some(meta_type::
())), TypeParameter::new("Call", Some(meta_type::())), TypeParameter::new("Signature", Some(meta_type::())), - TypeParameter::new("Extra", Some(meta_type::())), + TypeParameter::new("Extra", Some(meta_type::())), ]) .docs(&["UncheckedExtrinsic raw bytes, requires custom decoding routine"]) // Because of the custom encoding, we can only accurately describe the encoding as an @@ -122,66 +263,104 @@ where } } -impl - UncheckedExtrinsic -{ - /// New instance of a signed extrinsic aka "transaction". - pub fn new_signed(function: Call, signed: Address, signature: Signature, extra: Extra) -> Self { - Self { signature: Some((signed, signature, extra)), function } +impl UncheckedExtrinsic { + /// New instance of a bare (ne unsigned) extrinsic. This could be used for an inherent or an + /// old-school "unsigned transaction" (which are new being deprecated in favour of general + /// transactions). + #[deprecated = "Use new_bare instead"] + pub fn new_unsigned(function: Call) -> Self { + Self::new_bare(function) } - /// New instance of an unsigned extrinsic aka "inherent". - pub fn new_unsigned(function: Call) -> Self { - Self { signature: None, function } + /// Returns `true` if this extrinsic instance is an inherent, `false`` otherwise. + pub fn is_inherent(&self) -> bool { + matches!(self.preamble, Preamble::Bare(_)) } -} -impl - Extrinsic for UncheckedExtrinsic -{ - type Call = Call; + /// Returns `true` if this extrinsic instance is an old-school signed transaction, `false` + /// otherwise. + pub fn is_signed(&self) -> bool { + matches!(self.preamble, Preamble::Signed(..)) + } - type SignaturePayload = UncheckedSignaturePayload; + /// Create an `UncheckedExtrinsic` from a `Preamble` and the actual `Call`. + pub fn from_parts(function: Call, preamble: Preamble) -> Self { + Self { preamble, function } + } - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) + /// New instance of a bare (ne unsigned) extrinsic. + pub fn new_bare(function: Call) -> Self { + Self { preamble: Preamble::Bare(EXTRINSIC_FORMAT_VERSION), function } } - fn new(function: Call, signed_data: Option) -> Option { - Some(if let Some((address, signature, extra)) = signed_data { - Self::new_signed(function, address, signature, extra) - } else { - Self::new_unsigned(function) - }) + /// New instance of a bare (ne unsigned) extrinsic on extrinsic format version 4. + pub fn new_bare_legacy(function: Call) -> Self { + Self { preamble: Preamble::Bare(LEGACY_EXTRINSIC_FORMAT_VERSION), function } + } + + /// New instance of an old-school signed transaction on extrinsic format version 4. + pub fn new_signed( + function: Call, + signed: Address, + signature: Signature, + tx_ext: Extension, + ) -> Self { + Self { preamble: Preamble::Signed(signed, signature, 0, tx_ext), function } + } + + /// New instance of an new-school unsigned transaction. + pub fn new_transaction(function: Call, tx_ext: Extension) -> Self { + Self { preamble: Preamble::General(EXTENSION_VERSION, tx_ext), function } + } +} + +impl ExtrinsicLike + for UncheckedExtrinsic +{ + fn is_bare(&self) -> bool { + matches!(self.preamble, Preamble::Bare(_)) + } + + fn is_signed(&self) -> Option { + Some(matches!(self.preamble, Preamble::Signed(..))) } } -impl Checkable - for UncheckedExtrinsic +// TODO: Migrate existing extension pipelines to support current `Signed` transactions as `General` +// transactions by adding an extension to validate signatures, as they are currently validated in +// the `Checkable` implementation for `Signed` transactions. 
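The constructors above map one-to-one onto `Preamble` variants (which are re-exported from `sp_runtime::generic` per the `mod.rs` change earlier in this diff). A small, self-contained check, using unit types and `u64` purely as stand-ins for the real `Address`/`Call`/`Signature`/`Extension` parameters, which carry no bounds for construction:

```rust
use sp_runtime::generic::{Preamble, UncheckedExtrinsic};

// Stand-in type parameters for illustration only; a real runtime supplies its own
// address, call, signature and transaction-extension tuple here.
type Xt = UncheckedExtrinsic<u64, (), (), ()>;

fn main() {
    // `new_bare` produces a `Bare` preamble carrying the current format version (5).
    assert!(matches!(Xt::new_bare(()).preamble, Preamble::Bare(5)));
    // `new_transaction` produces a v5 `General` preamble with extension version 0.
    assert!(matches!(Xt::new_transaction((), ()).preamble, Preamble::General(0, ())));
    // `new_signed` produces the legacy v4 `Signed` preamble.
    assert!(matches!(Xt::new_signed((), 7, (), ()).preamble, Preamble::Signed(7, (), 0, ())));
}
```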
+ +impl Checkable + for UncheckedExtrinsic where LookupSource: Member + MaybeDisplay, - Call: Encode + Member, + Call: Encode + Member + Dispatchable, Signature: Member + traits::Verify, ::Signer: IdentifyAccount, - Extra: SignedExtension, + Extension: Encode + TransactionExtension, AccountId: Member + MaybeDisplay, Lookup: traits::Lookup, { - type Checked = CheckedExtrinsic; + type Checked = CheckedExtrinsic; fn check(self, lookup: &Lookup) -> Result { - Ok(match self.signature { - Some((signed, signature, extra)) => { + Ok(match self.preamble { + Preamble::Signed(signed, signature, _, tx_ext) => { let signed = lookup.lookup(signed)?; - let raw_payload = SignedPayload::new(self.function, extra)?; + // The `Implicit` is "implicitly" included in the payload. + let raw_payload = SignedPayload::new(self.function, tx_ext)?; if !raw_payload.using_encoded(|payload| signature.verify(payload, &signed)) { return Err(InvalidTransaction::BadProof.into()) } - - let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { signed: Some((signed, extra)), function } + let (function, tx_ext, _) = raw_payload.deconstruct(); + CheckedExtrinsic { format: ExtrinsicFormat::Signed(signed, tx_ext), function } + }, + Preamble::General(_, tx_ext) => CheckedExtrinsic { + format: ExtrinsicFormat::General(tx_ext), + function: self.function, }, - None => CheckedExtrinsic { signed: None, function: self.function }, + Preamble::Bare(_) => + CheckedExtrinsic { format: ExtrinsicFormat::Bare, function: self.function }, }) } @@ -190,91 +369,53 @@ where self, lookup: &Lookup, ) -> Result { - Ok(match self.signature { - Some((signed, _, extra)) => { + Ok(match self.preamble { + Preamble::Signed(signed, _, _, extra) => { let signed = lookup.lookup(signed)?; - let raw_payload = SignedPayload::new(self.function, extra)?; - let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { signed: Some((signed, extra)), function } + CheckedExtrinsic { + format: ExtrinsicFormat::Signed(signed, extra), + function: self.function, + } + }, + Preamble::General(_, extra) => CheckedExtrinsic { + format: ExtrinsicFormat::General(extra), + function: self.function, }, - None => CheckedExtrinsic { signed: None, function: self.function }, + Preamble::Bare(_) => + CheckedExtrinsic { format: ExtrinsicFormat::Bare, function: self.function }, }) } } -impl ExtrinsicMetadata - for UncheckedExtrinsic -where - Extra: SignedExtension, +impl> + ExtrinsicMetadata for UncheckedExtrinsic { - const VERSION: u8 = EXTRINSIC_FORMAT_VERSION; - type SignedExtensions = Extra; + // TODO: Expose both version 4 and version 5 in metadata v16. + const VERSION: u8 = LEGACY_EXTRINSIC_FORMAT_VERSION; + type TransactionExtensions = Extension; } -/// A payload that has been signed for an unchecked extrinsics. -/// -/// Note that the payload that we sign to produce unchecked extrinsic signature -/// is going to be different than the `SignaturePayload` - so the thing the extrinsic -/// actually contains. -pub struct SignedPayload((Call, Extra, Extra::AdditionalSigned)); - -impl SignedPayload -where - Call: Encode, - Extra: SignedExtension, +impl> + UncheckedExtrinsic { - /// Create new `SignedPayload`. - /// - /// This function may fail if `additional_signed` of `Extra` is not available. - pub fn new(call: Call, extra: Extra) -> Result { - let additional_signed = extra.additional_signed()?; - let raw_payload = (call, extra, additional_signed); - Ok(Self(raw_payload)) - } - - /// Create new `SignedPayload` from raw components. 
- pub fn from_raw(call: Call, extra: Extra, additional_signed: Extra::AdditionalSigned) -> Self { - Self((call, extra, additional_signed)) - } - - /// Deconstruct the payload into it's components. - pub fn deconstruct(self) -> (Call, Extra, Extra::AdditionalSigned) { - self.0 - } -} - -impl Encode for SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ - /// Get an encoded version of this payload. - /// - /// Payloads longer than 256 bytes are going to be `blake2_256`-hashed. - fn using_encoded R>(&self, f: F) -> R { - self.0.using_encoded(|payload| { - if payload.len() > 256 { - f(&blake2_256(payload)[..]) - } else { - f(payload) - } - }) + /// Returns the weight of the extension of this transaction, if present. If the transaction + /// doesn't use any extension, the weight returned is equal to zero. + pub fn extension_weight(&self) -> Weight { + match &self.preamble { + Preamble::Bare(_) => Weight::zero(), + Preamble::Signed(_, _, _, ext) | Preamble::General(_, ext) => + ext.weight(&self.function), + } } } -impl EncodeLike for SignedPayload -where - Call: Encode, - Extra: SignedExtension, -{ -} - -impl Decode for UncheckedExtrinsic +impl Decode + for UncheckedExtrinsic where Address: Decode, Signature: Decode, Call: Decode, - Extra: SignedExtension, + Extension: Decode, { fn decode(input: &mut I) -> Result { // This is a little more complicated than usual since the binary format must be compatible @@ -283,15 +424,7 @@ where let expected_length: Compact = Decode::decode(input)?; let before_length = input.remaining_len()?; - let version = input.read_byte()?; - - let is_signed = version & 0b1000_0000 != 0; - let version = version & 0b0111_1111; - if version != EXTRINSIC_FORMAT_VERSION { - return Err("Invalid transaction version".into()) - } - - let signature = is_signed.then(|| Decode::decode(input)).transpose()?; + let preamble = Decode::decode(input)?; let function = Decode::decode(input)?; if let Some((before_length, after_length)) = @@ -304,31 +437,20 @@ where } } - Ok(Self { signature, function }) + Ok(Self { preamble, function }) } } #[docify::export(unchecked_extrinsic_encode_impl)] -impl Encode for UncheckedExtrinsic +impl Encode + for UncheckedExtrinsic where - Address: Encode, - Signature: Encode, + Preamble: Encode, Call: Encode, - Extra: SignedExtension, + Extension: Encode, { fn encode(&self) -> Vec { - let mut tmp = Vec::with_capacity(core::mem::size_of::()); - - // 1 byte version id. 
- match self.signature.as_ref() { - Some(s) => { - tmp.push(EXTRINSIC_FORMAT_VERSION | 0b1000_0000); - s.encode_to(&mut tmp); - }, - None => { - tmp.push(EXTRINSIC_FORMAT_VERSION & 0b0111_1111); - }, - } + let mut tmp = self.preamble.encode(); self.function.encode_to(&mut tmp); let compact_len = codec::Compact::(tmp.len() as u32); @@ -343,19 +465,19 @@ where } } -impl EncodeLike - for UncheckedExtrinsic +impl EncodeLike + for UncheckedExtrinsic where Address: Encode, Signature: Encode, - Call: Encode, - Extra: SignedExtension, + Call: Encode + Dispatchable, + Extension: TransactionExtension, { } #[cfg(feature = "serde")] -impl serde::Serialize - for UncheckedExtrinsic +impl serde::Serialize + for UncheckedExtrinsic { fn serialize(&self, seq: S) -> Result where @@ -366,45 +488,86 @@ impl s } #[cfg(feature = "serde")] -impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> - serde::Deserialize<'a> for UncheckedExtrinsic +impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extension: Decode> serde::Deserialize<'a> + for UncheckedExtrinsic { fn deserialize(de: D) -> Result where D: serde::Deserializer<'a>, { let r = sp_core::bytes::deserialize(de)?; - Decode::decode(&mut &r[..]) + Self::decode(&mut &r[..]) .map_err(|e| serde::de::Error::custom(format!("Decode error: {}", e))) } } -impl fmt::Debug - for UncheckedExtrinsic +/// A payload that has been signed for an unchecked extrinsics. +/// +/// Note that the payload that we sign to produce unchecked extrinsic signature +/// is going to be different than the `SignaturePayload` - so the thing the extrinsic +/// actually contains. +pub struct SignedPayload>( + (Call, Extension, Extension::Implicit), +); + +impl SignedPayload where - Address: fmt::Debug, - Call: fmt::Debug, - Extra: SignedExtension, + Call: Encode + Dispatchable, + Extension: TransactionExtension, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "UncheckedExtrinsic({:?}, {:?})", - self.signature.as_ref().map(|x| (&x.0, &x.2)), - self.function, - ) + /// Create new `SignedPayload` for extrinsic format version 4. + /// + /// This function may fail if `implicit` of `Extension` is not available. + pub fn new(call: Call, tx_ext: Extension) -> Result { + let implicit = Extension::implicit(&tx_ext)?; + let raw_payload = (call, tx_ext, implicit); + Ok(Self(raw_payload)) + } + + /// Create new `SignedPayload` from raw components. + pub fn from_raw(call: Call, tx_ext: Extension, implicit: Extension::Implicit) -> Self { + Self((call, tx_ext, implicit)) + } + + /// Deconstruct the payload into it's components. + pub fn deconstruct(self) -> (Call, Extension, Extension::Implicit) { + self.0 + } +} + +impl Encode for SignedPayload +where + Call: Encode + Dispatchable, + Extension: TransactionExtension, +{ + /// Get an encoded version of this `blake2_256`-hashed payload. 
+ fn using_encoded R>(&self, f: F) -> R { + self.0.using_encoded(|payload| { + if payload.len() > 256 { + f(&blake2_256(payload)[..]) + } else { + f(payload) + } + }) } } -impl From> - for OpaqueExtrinsic +impl EncodeLike for SignedPayload +where + Call: Encode + Dispatchable, + Extension: TransactionExtension, +{ +} + +impl + From> for OpaqueExtrinsic where Address: Encode, Signature: Encode, Call: Encode, - Extra: SignedExtension, + Extension: Encode, { - fn from(extrinsic: UncheckedExtrinsic) -> Self { + fn from(extrinsic: UncheckedExtrinsic) -> Self { Self::from_bytes(extrinsic.encode().as_slice()).expect( "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ raw Vec encoding; qed", @@ -412,60 +575,196 @@ where } } +#[cfg(test)] +mod legacy { + use codec::{Compact, Decode, Encode, EncodeLike, Error, Input}; + use scale_info::{ + build::Fields, meta_type, Path, StaticTypeInfo, Type, TypeInfo, TypeParameter, + }; + + pub type UncheckedSignaturePayloadV4 = (Address, Signature, Extra); + + #[derive(PartialEq, Eq, Clone, Debug)] + pub struct UncheckedExtrinsicV4 { + pub signature: Option>, + pub function: Call, + } + + impl TypeInfo + for UncheckedExtrinsicV4 + where + Address: StaticTypeInfo, + Call: StaticTypeInfo, + Signature: StaticTypeInfo, + Extra: StaticTypeInfo, + { + type Identity = UncheckedExtrinsicV4; + + fn type_info() -> Type { + Type::builder() + .path(Path::new("UncheckedExtrinsic", module_path!())) + // Include the type parameter types, even though they are not used directly in any + // of the described fields. These type definitions can be used by downstream + // consumers to help construct the custom decoding from the opaque bytes (see + // below). + .type_params(vec![ + TypeParameter::new("Address", Some(meta_type::
())), + TypeParameter::new("Call", Some(meta_type::())), + TypeParameter::new("Signature", Some(meta_type::())), + TypeParameter::new("Extra", Some(meta_type::())), + ]) + .docs(&["OldUncheckedExtrinsic raw bytes, requires custom decoding routine"]) + // Because of the custom encoding, we can only accurately describe the encoding as + // an opaque `Vec`. Downstream consumers will need to manually implement the + // codec to encode/decode the `signature` and `function` fields. + .composite(Fields::unnamed().field(|f| f.ty::>())) + } + } + + impl UncheckedExtrinsicV4 { + pub fn new_signed( + function: Call, + signed: Address, + signature: Signature, + extra: Extra, + ) -> Self { + Self { signature: Some((signed, signature, extra)), function } + } + + pub fn new_unsigned(function: Call) -> Self { + Self { signature: None, function } + } + } + + impl Decode + for UncheckedExtrinsicV4 + where + Address: Decode, + Signature: Decode, + Call: Decode, + Extra: Decode, + { + fn decode(input: &mut I) -> Result { + // This is a little more complicated than usual since the binary format must be + // compatible with SCALE's generic `Vec` type. Basically this just means accepting + // that there will be a prefix of vector length. + let expected_length: Compact = Decode::decode(input)?; + let before_length = input.remaining_len()?; + + let version = input.read_byte()?; + + let is_signed = version & 0b1000_0000 != 0; + let version = version & 0b0111_1111; + if version != 4u8 { + return Err("Invalid transaction version".into()) + } + + let signature = is_signed.then(|| Decode::decode(input)).transpose()?; + let function = Decode::decode(input)?; + + if let Some((before_length, after_length)) = + input.remaining_len()?.and_then(|a| before_length.map(|b| (b, a))) + { + let length = before_length.saturating_sub(after_length); + + if length != expected_length.0 as usize { + return Err("Invalid length prefix".into()) + } + } + + Ok(Self { signature, function }) + } + } + + #[docify::export(unchecked_extrinsic_encode_impl)] + impl Encode + for UncheckedExtrinsicV4 + where + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: Encode, + { + fn encode(&self) -> Vec { + let mut tmp = Vec::with_capacity(sp_std::mem::size_of::()); + + // 1 byte version id. + match self.signature.as_ref() { + Some(s) => { + tmp.push(4u8 | 0b1000_0000); + s.encode_to(&mut tmp); + }, + None => { + tmp.push(4u8 & 0b0111_1111); + }, + } + self.function.encode_to(&mut tmp); + + let compact_len = codec::Compact::(tmp.len() as u32); + + // Allocate the output buffer with the correct length + let mut output = Vec::with_capacity(compact_len.size_hint() + tmp.len()); + + compact_len.encode_to(&mut output); + output.extend(tmp); + + output + } + } + + impl EncodeLike + for UncheckedExtrinsicV4 + where + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: Encode, + { + } +} + #[cfg(test)] mod tests { - use super::*; + use super::{legacy::UncheckedExtrinsicV4, *}; use crate::{ codec::{Decode, Encode}, + impl_tx_ext_default, testing::TestSignature as TestSig, - traits::{DispatchInfoOf, IdentityLookup, SignedExtension}, + traits::{FakeDispatchable, IdentityLookup, TransactionExtension}, }; use sp_io::hashing::blake2_256; type TestContext = IdentityLookup; type TestAccountId = u64; - type TestCall = Vec; + type TestCall = FakeDispatchable>; const TEST_ACCOUNT: TestAccountId = 0; // NOTE: this is demonstration. One can simply use `()` for testing. 
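For context on the `TestSig(TEST_ACCOUNT, (call, DummyExtension).encode())` payloads used in the tests below: per the `SignedPayload` rules earlier in this file, the signed bytes are the SCALE encoding of `(call, extension, implicit)`, hashed with `blake2_256` only when longer than 256 bytes. With `Implicit = ()` the third element encodes to nothing, which is why the tests can use a plain two-element tuple. A hedged helper (not part of this diff) expressing that rule:

```rust
use codec::Encode;
use sp_io::hashing::blake2_256;

// Mirrors `SignedPayload::using_encoded`: sign the raw bytes, or their hash if large.
fn signed_payload_bytes<Call: Encode, Ext: Encode, Implicit: Encode>(
    call: &Call,
    ext: &Ext,
    implicit: &Implicit,
) -> Vec<u8> {
    (call, ext, implicit).using_encoded(|payload| {
        if payload.len() > 256 {
            blake2_256(payload).to_vec() // large payloads are signed by hash
        } else {
            payload.to_vec() // small payloads are signed directly
        }
    })
}
```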
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Ord, PartialOrd, TypeInfo)] - struct TestExtra; - impl SignedExtension for TestExtra { - const IDENTIFIER: &'static str = "TestExtra"; - type AccountId = u64; - type Call = (); - type AdditionalSigned = (); + struct DummyExtension; + impl TransactionExtension for DummyExtension { + const IDENTIFIER: &'static str = "DummyExtension"; + type Implicit = (); + type Val = (); type Pre = (); - - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) - } + impl_tx_ext_default!(TestCall; weight validate prepare); } - type Ex = UncheckedExtrinsic; - type CEx = CheckedExtrinsic; + type Ex = UncheckedExtrinsic; + type CEx = CheckedExtrinsic; #[test] fn unsigned_codec_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let call: TestCall = vec![0u8; 0].into(); + let ux = Ex::new_bare(call); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); } #[test] fn invalid_length_prefix_is_detected() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let ux = Ex::new_bare(vec![0u8; 0].into()); let mut encoded = ux.encode(); let length = Compact::::decode(&mut &encoded[..]).unwrap(); @@ -474,13 +773,20 @@ mod tests { assert_eq!(Ex::decode(&mut &encoded[..]), Err("Invalid length prefix".into())); } + #[test] + fn transaction_codec_should_work() { + let ux = Ex::new_transaction(vec![0u8; 0].into(), DummyExtension); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); + } + #[test] fn signed_codec_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra, + TestSig(TEST_ACCOUNT, (vec![0u8; 0], DummyExtension).encode()), + DummyExtension, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -489,13 +795,13 @@ mod tests { #[test] fn large_signed_codec_should_work() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, TestSig( TEST_ACCOUNT, - (vec![0u8; 257], TestExtra).using_encoded(blake2_256)[..].to_owned(), + (vec![0u8; 257], DummyExtension).using_encoded(blake2_256)[..].to_owned(), ), - TestExtra, + DummyExtension, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -503,44 +809,68 @@ mod tests { #[test] fn unsigned_check_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); - assert!(!ux.is_signed().unwrap_or(false)); - assert!(>::check(ux, &Default::default()).is_ok()); + let ux = Ex::new_bare(vec![0u8; 0].into()); + assert!(ux.is_inherent()); + assert_eq!( + >::check(ux, &Default::default()), + Ok(CEx { format: ExtrinsicFormat::Bare, function: vec![0u8; 0].into() }), + ); } #[test] fn badly_signed_check_should_fail() { let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, vec![0u8; 0]), - TestExtra, + TestSig(TEST_ACCOUNT, vec![0u8; 0].into()), + DummyExtension, ); - assert!(ux.is_signed().unwrap_or(false)); + assert!(!ux.is_inherent()); assert_eq!( >::check(ux, &Default::default()), Err(InvalidTransaction::BadProof.into()), ); } + #[test] + fn transaction_check_should_work() { + let ux = Ex::new_transaction(vec![0u8; 0].into(), DummyExtension); + assert!(!ux.is_inherent()); + assert_eq!( + >::check(ux, &Default::default()), + Ok(CEx { 
+ format: ExtrinsicFormat::General(DummyExtension), + function: vec![0u8; 0].into() + }), + ); + } + #[test] fn signed_check_should_work() { + let sig_payload = SignedPayload::from_raw( + FakeDispatchable::from(vec![0u8; 0]), + DummyExtension, + DummyExtension.implicit().unwrap(), + ); let ux = Ex::new_signed( - vec![0u8; 0], + vec![0u8; 0].into(), TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra, + TestSig(TEST_ACCOUNT, sig_payload.encode()), + DummyExtension, ); - assert!(ux.is_signed().unwrap_or(false)); + assert!(!ux.is_inherent()); assert_eq!( >::check(ux, &Default::default()), - Ok(CEx { signed: Some((TEST_ACCOUNT, TestExtra)), function: vec![0u8; 0] }), + Ok(CEx { + format: ExtrinsicFormat::Signed(TEST_ACCOUNT, DummyExtension), + function: vec![0u8; 0].into() + }), ); } #[test] fn encoding_matches_vec() { - let ex = Ex::new_unsigned(vec![0u8; 0]); + let ex = Ex::new_bare(vec![0u8; 0].into()); let encoded = ex.encode(); let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); assert_eq!(decoded, ex); @@ -550,7 +880,7 @@ mod tests { #[test] fn conversion_to_opaque() { - let ux = Ex::new_unsigned(vec![0u8; 0]); + let ux = Ex::new_bare(vec![0u8; 0].into()); let encoded = ux.encode(); let opaque: OpaqueExtrinsic = ux.into(); let opaque_encoded = opaque.encode(); @@ -559,10 +889,106 @@ mod tests { #[test] fn large_bad_prefix_should_work() { - let encoded = Compact::::from(u32::MAX).encode(); + let encoded = (Compact::::from(u32::MAX), Preamble::<(), (), ()>::Bare(0)).encode(); + assert!(Ex::decode(&mut &encoded[..]).is_err()); + } + + #[test] + fn legacy_short_signed_encode_decode() { + let call: TestCall = vec![0u8; 4].into(); + let signed = TEST_ACCOUNT; + let extension = DummyExtension; + let implicit = extension.implicit().unwrap(); + let legacy_signature = TestSig(TEST_ACCOUNT, (&call, &extension, &implicit).encode()); + + let old_ux = + UncheckedExtrinsicV4::::new_signed( + call.clone(), + signed, + legacy_signature.clone(), + extension.clone(), + ); + + let encoded_old_ux = old_ux.encode(); + let decoded_old_ux = Ex::decode(&mut &encoded_old_ux[..]).unwrap(); + + assert_eq!(decoded_old_ux.function, call); assert_eq!( - Ex::decode(&mut &encoded[..]), - Err(Error::from("Not enough data to fill buffer")) + decoded_old_ux.preamble, + Preamble::Signed(signed, legacy_signature.clone(), 0, extension.clone()) ); + + let new_ux = + Ex::new_signed(call.clone(), signed, legacy_signature.clone(), extension.clone()); + + let new_checked = new_ux.check(&IdentityLookup::::default()).unwrap(); + let old_checked = + decoded_old_ux.check(&IdentityLookup::::default()).unwrap(); + assert_eq!(new_checked, old_checked); + } + + #[test] + fn legacy_long_signed_encode_decode() { + let call: TestCall = vec![0u8; 257].into(); + let signed = TEST_ACCOUNT; + let extension = DummyExtension; + let implicit = extension.implicit().unwrap(); + let signature = TestSig( + TEST_ACCOUNT, + blake2_256(&(&call, DummyExtension, &implicit).encode()[..]).to_vec(), + ); + + let old_ux = + UncheckedExtrinsicV4::::new_signed( + call.clone(), + signed, + signature.clone(), + extension.clone(), + ); + + let encoded_old_ux = old_ux.encode(); + let decoded_old_ux = Ex::decode(&mut &encoded_old_ux[..]).unwrap(); + + assert_eq!(decoded_old_ux.function, call); + assert_eq!( + decoded_old_ux.preamble, + Preamble::Signed(signed, signature.clone(), 0, extension.clone()) + ); + + let new_ux = Ex::new_signed(call.clone(), signed, signature.clone(), extension.clone()); + + let new_checked = 
new_ux.check(&IdentityLookup::::default()).unwrap(); + let old_checked = + decoded_old_ux.check(&IdentityLookup::::default()).unwrap(); + assert_eq!(new_checked, old_checked); + } + + #[test] + fn legacy_unsigned_encode_decode() { + let call: TestCall = vec![0u8; 0].into(); + + let old_ux = + UncheckedExtrinsicV4::::new_unsigned( + call.clone(), + ); + + let encoded_old_ux = old_ux.encode(); + let decoded_old_ux = Ex::decode(&mut &encoded_old_ux[..]).unwrap(); + + assert_eq!(decoded_old_ux.function, call); + assert_eq!(decoded_old_ux.preamble, Preamble::Bare(LEGACY_EXTRINSIC_FORMAT_VERSION)); + + let new_legacy_ux = Ex::new_bare_legacy(call.clone()); + assert_eq!(encoded_old_ux, new_legacy_ux.encode()); + + let new_ux = Ex::new_bare(call.clone()); + let encoded_new_ux = new_ux.encode(); + let decoded_new_ux = Ex::decode(&mut &encoded_new_ux[..]).unwrap(); + assert_eq!(new_ux, decoded_new_ux); + + let new_checked = new_ux.check(&IdentityLookup::::default()).unwrap(); + let old_checked = + decoded_old_ux.check(&IdentityLookup::::default()).unwrap(); + assert_eq!(new_checked, old_checked); } } diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index 260c9a91855a4ccc95557842ef837ee7365d536a..6eed57656a6da6d9f20df6a9b7833712914c00f4 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -26,7 +26,7 @@ //! communication between the client and the runtime. This includes: //! //! - A set of traits to declare what any block/header/extrinsic type should provide. -//! - [`traits::Block`], [`traits::Header`], [`traits::Extrinsic`] +//! - [`traits::Block`], [`traits::Header`], [`traits::ExtrinsicLike`] //! - A set of types that implement these traits, whilst still providing a high degree of //! configurability via generics. //! - [`generic::Block`], [`generic::Header`], [`generic::UncheckedExtrinsic`] and @@ -131,6 +131,8 @@ pub use sp_arithmetic::{ FixedPointOperand, FixedU128, FixedU64, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, Rational128, Rounding, UpperOf, }; +/// Re-export this since it's part of the API of this crate. +pub use sp_weights::Weight; pub use either::Either; @@ -955,9 +957,10 @@ impl<'a> ::serde::Deserialize<'a> for OpaqueExtrinsic { } } -impl traits::Extrinsic for OpaqueExtrinsic { - type Call = (); - type SignaturePayload = (); +impl traits::ExtrinsicLike for OpaqueExtrinsic { + fn is_bare(&self) -> bool { + false + } } /// Print something that implements `Printable` from the runtime. 
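The `lib.rs` hunk above replaces the `Extrinsic` impl for `OpaqueExtrinsic` with the new `ExtrinsicLike` trait, which only needs `is_bare`. A minimal sketch of how a custom opaque wrapper could adopt the same trait (the type name is hypothetical, not part of this PR):

```rust
use sp_runtime::traits::ExtrinsicLike;

/// A hypothetical opaque extrinsic blob.
struct MyOpaqueXt(Vec<u8>);

impl ExtrinsicLike for MyOpaqueXt {
    fn is_bare(&self) -> bool {
        // Like `OpaqueExtrinsic` above: treat every opaque blob as a transaction,
        // never as a bare (inherent) extrinsic. The deprecated `is_signed` keeps
        // its default of `None`.
        false
    }
}
```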
diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index a4ce4b5fc1a05b740d641a2d220ed78c60a07ee3..1fc78cce6707c1c16f7cab2ef0e73265d8777ef1 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -19,23 +19,15 @@ use crate::{ codec::{Codec, Decode, Encode, MaxEncodedLen}, - generic, + generic::{self, UncheckedExtrinsic}, scale_info::TypeInfo, - traits::{ - self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, OpaqueKeys, - PostDispatchInfoOf, SignaturePayload, SignedExtension, ValidateUnsigned, - }, - transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, - ApplyExtrinsicResultWithInfo, KeyTypeId, + traits::{self, BlakeTwo256, Dispatchable, OpaqueKeys}, + DispatchResultWithInfo, KeyTypeId, }; -use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize}; use sp_core::crypto::{key_types, ByteArray, CryptoType, Dummy}; pub use sp_core::{sr25519, H256}; -use std::{ - cell::RefCell, - fmt::{self, Debug}, - ops::Deref, -}; +use std::{cell::RefCell, fmt::Debug}; /// A dummy type which can be used instead of regular cryptographic primitives. /// @@ -80,6 +72,11 @@ impl UintAuthorityId { bytes[0..8].copy_from_slice(&self.0.to_le_bytes()); T::from_slice(&bytes).unwrap() } + + /// Set the list of keys returned by the runtime call for all keys of that type. + pub fn set_all_keys>(keys: impl IntoIterator) { + ALL_KEYS.with(|l| *l.borrow_mut() = keys.into_iter().map(Into::into).collect()) + } } impl CryptoType for UintAuthorityId { @@ -104,13 +101,6 @@ thread_local! { static ALL_KEYS: RefCell> = RefCell::new(vec![]); } -impl UintAuthorityId { - /// Set the list of keys returned by the runtime call for all keys of that type. - pub fn set_all_keys>(keys: impl IntoIterator) { - ALL_KEYS.with(|l| *l.borrow_mut() = keys.into_iter().map(Into::into).collect()) - } -} - impl sp_application_crypto::RuntimeAppPublic for UintAuthorityId { const ID: KeyTypeId = key_types::DUMMY; @@ -162,6 +152,18 @@ impl traits::IdentifyAccount for UintAuthorityId { } } +impl traits::Verify for UintAuthorityId { + type Signer = Self; + + fn verify>( + &self, + _msg: L, + signer: &::AccountId, + ) -> bool { + self.0 == *signer + } +} + /// A dummy signature type, to match `UintAuthorityId`. #[derive(Eq, PartialEq, Clone, Debug, Hash, Serialize, Deserialize, Encode, Decode, TypeInfo)] pub struct TestSignature(pub u64, pub Vec); @@ -196,42 +198,6 @@ impl Header { } } -/// An opaque extrinsic wrapper type. 
-#[derive(PartialEq, Eq, Clone, Debug, Encode, Decode)] -pub struct ExtrinsicWrapper(Xt); - -impl traits::Extrinsic for ExtrinsicWrapper { - type Call = (); - type SignaturePayload = (); - - fn is_signed(&self) -> Option { - None - } -} - -impl serde::Serialize for ExtrinsicWrapper { - fn serialize(&self, seq: S) -> Result - where - S: ::serde::Serializer, - { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } -} - -impl From for ExtrinsicWrapper { - fn from(xt: Xt) -> Self { - ExtrinsicWrapper(xt) - } -} - -impl Deref for ExtrinsicWrapper { - type Target = Xt; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - /// Testing block #[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, TypeInfo)] pub struct Block { @@ -246,7 +212,16 @@ impl traits::HeaderProvider for Block { } impl< - Xt: 'static + Codec + Sized + Send + Sync + Serialize + Clone + Eq + Debug + traits::Extrinsic, + Xt: 'static + + Codec + + Sized + + Send + + Sync + + Serialize + + Clone + + Eq + + Debug + + traits::ExtrinsicLike, > traits::Block for Block { type Extrinsic = Xt; @@ -281,139 +256,25 @@ where } } -/// The signature payload of a `TestXt`. -type TxSignaturePayload = (u64, Extra); - -impl SignaturePayload for TxSignaturePayload { - type SignatureAddress = u64; - type Signature = (); - type SignatureExtra = Extra; -} - -/// Test transaction, tuple of (sender, call, signed_extra) -/// with index only used if sender is some. -/// -/// If sender is some then the transaction is signed otherwise it is unsigned. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] -pub struct TestXt { - /// Signature of the extrinsic. - pub signature: Option>, - /// Call of the extrinsic. - pub call: Call, -} - -impl TestXt { - /// Create a new `TextXt`. - pub fn new(call: Call, signature: Option<(u64, Extra)>) -> Self { - Self { call, signature } - } -} - -impl Serialize for TestXt -where - TestXt: Encode, -{ - fn serialize(&self, seq: S) -> Result - where - S: Serializer, - { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } -} - -impl Debug for TestXt { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TestXt({:?}, ...)", self.signature.as_ref().map(|x| &x.0)) - } -} - -impl Checkable for TestXt { - type Checked = Self; - fn check(self, _: &Context) -> Result { - Ok(self) - } - - #[cfg(feature = "try-runtime")] - fn unchecked_into_checked_i_know_what_i_am_doing( - self, - _: &Context, - ) -> Result { - unreachable!() - } -} - -impl traits::Extrinsic - for TestXt -{ - type Call = Call; - type SignaturePayload = TxSignaturePayload; +/// Extrinsic type with `u64` accounts and mocked signatures, used in testing. +pub type TestXt = UncheckedExtrinsic; - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) - } +/// Wrapper over a `u64` that can be used as a `RuntimeCall`. 
+#[derive(PartialEq, Eq, Debug, Clone, Encode, Decode, TypeInfo)] +pub struct MockCallU64(pub u64); - fn new(c: Call, sig: Option) -> Option { - Some(TestXt { signature: sig, call: c }) +impl Dispatchable for MockCallU64 { + type RuntimeOrigin = u64; + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithInfo { + Ok(()) } } -impl traits::ExtrinsicMetadata for TestXt -where - Call: Codec + Sync + Send, - Extra: SignedExtension, -{ - type SignedExtensions = Extra; - const VERSION: u8 = 0u8; -} - -impl Applyable for TestXt -where - Call: 'static - + Sized - + Send - + Sync - + Clone - + Eq - + Codec - + Debug - + Dispatchable, - Extra: SignedExtension, - Origin: From>, -{ - type Call = Call; - - /// Checks to see if this is a valid *transaction*. It returns information on it if so. - fn validate>( - &self, - source: TransactionSource, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - if let Some((ref id, ref extra)) = self.signature { - Extra::validate(extra, id, &self.call, info, len) - } else { - let valid = Extra::validate_unsigned(&self.call, info, len)?; - let unsigned_validation = U::validate_unsigned(source, &self.call)?; - Ok(valid.combine_with(unsigned_validation)) - } - } - - /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, - /// index and sender. - fn apply>( - self, - info: &DispatchInfoOf, - len: usize, - ) -> ApplyExtrinsicResultWithInfo> { - let maybe_who = if let Some((who, extra)) = self.signature { - Extra::pre_dispatch(extra, &who, &self.call, info, len)?; - Some(who) - } else { - Extra::pre_dispatch_unsigned(&self.call, info, len)?; - U::pre_dispatch(&self.call)?; - None - }; - - Ok(self.call.dispatch(maybe_who.into())) +impl From for MockCallU64 { + fn from(value: u64) -> Self { + Self(value) } } diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits/mod.rs similarity index 91% rename from substrate/primitives/runtime/src/traits.rs rename to substrate/primitives/runtime/src/traits/mod.rs index fc63bc76decb78a53b9878edb8d18d55ddcd6f79..e6906cdb387774ce3e45ecb5bcd02829acb0f45b 100644 --- a/substrate/primitives/runtime/src/traits.rs +++ b/substrate/primitives/runtime/src/traits/mod.rs @@ -19,7 +19,7 @@ use crate::{ generic::Digest, - scale_info::{MetaType, StaticTypeInfo, TypeInfo}, + scale_info::{StaticTypeInfo, TypeInfo}, transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, ValidTransaction, @@ -52,6 +52,11 @@ use std::fmt::Display; #[cfg(feature = "std")] use std::str::FromStr; +pub mod transaction_extension; +pub use transaction_extension::{ + DispatchTransaction, TransactionExtension, TransactionExtensionMetadata, ValidateResult, +}; + /// A lazy value. pub trait Lazy { /// Get a reference to the underlying value. @@ -226,8 +231,14 @@ pub trait StaticLookup { } /// A lookup implementation returning the input value. -#[derive(Default, Clone, Copy, PartialEq, Eq)] +#[derive(Clone, Copy, PartialEq, Eq)] pub struct IdentityLookup(PhantomData); +impl Default for IdentityLookup { + fn default() -> Self { + Self(PhantomData::::default()) + } +} + impl StaticLookup for IdentityLookup { type Source = T; type Target = T; @@ -1253,7 +1264,7 @@ pub trait Header: // that is then used to define `UncheckedExtrinsic`. 
// ```ignore // pub type UncheckedExtrinsic = -// generic::UncheckedExtrinsic; +// generic::UncheckedExtrinsic; // ``` // This `UncheckedExtrinsic` is supplied to the `Block`. // ```ignore @@ -1286,7 +1297,7 @@ pub trait Block: + 'static { /// Type for extrinsics. - type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize; + type Extrinsic: Member + Codec + ExtrinsicLike + MaybeSerialize; /// Header type. type Header: Header + MaybeSerializeDeserialize; /// Block hash type. @@ -1310,6 +1321,7 @@ pub trait Block: } /// Something that acts like an `Extrinsic`. +#[deprecated = "Use `ExtrinsicLike` along with the `CreateTransaction` trait family instead"] pub trait Extrinsic: Sized { /// The function call. type Call: TypeInfo; @@ -1327,17 +1339,49 @@ pub trait Extrinsic: Sized { None } - /// Create new instance of the extrinsic. - /// - /// Extrinsics can be split into: - /// 1. Inherents (no signature; created by validators during block production) - /// 2. Unsigned Transactions (no signature; represent "system calls" or other special kinds of - /// calls) 3. Signed Transactions (with signature; a regular transactions with known origin) + /// Returns `true` if this `Extrinsic` is bare. + fn is_bare(&self) -> bool { + !self.is_signed().unwrap_or(true) + } + + /// Create a new old-school extrinsic, either a bare extrinsic if `_signed_data` is `None` or + /// a signed transaction is it is `Some`. fn new(_call: Self::Call, _signed_data: Option) -> Option { None } } +/// Something that acts like an `Extrinsic`. +pub trait ExtrinsicLike: Sized { + /// Is this `Extrinsic` signed? + /// If no information are available about signed/unsigned, `None` should be returned. + #[deprecated = "Use and implement `!is_bare()` instead"] + fn is_signed(&self) -> Option { + None + } + + /// Returns `true` if this `Extrinsic` is bare. + fn is_bare(&self) -> bool { + #[allow(deprecated)] + !self.is_signed().unwrap_or(true) + } +} + +#[allow(deprecated)] +impl ExtrinsicLike for T +where + T: Extrinsic, +{ + fn is_signed(&self) -> Option { + #[allow(deprecated)] + ::is_signed(&self) + } + + fn is_bare(&self) -> bool { + ::is_bare(&self) + } +} + /// Something that acts like a [`SignaturePayload`](Extrinsic::SignaturePayload) of an /// [`Extrinsic`]. pub trait SignaturePayload { @@ -1370,8 +1414,8 @@ pub trait ExtrinsicMetadata { /// By format is meant the encoded representation of the `Extrinsic`. const VERSION: u8; - /// Signed extensions attached to this `Extrinsic`. - type SignedExtensions: SignedExtension; + /// Transaction extensions attached to this `Extrinsic`. + type TransactionExtensions; } /// Extract the hashing type for a block. @@ -1435,6 +1479,27 @@ impl Checkable for T { } } +/// A type that can handle weight refunds. +pub trait RefundWeight { + /// Refund some unspent weight. + fn refund(&mut self, weight: sp_weights::Weight); +} + +/// A type that can handle weight refunds and incorporate extension weights into the call weight +/// after dispatch. +pub trait ExtensionPostDispatchWeightHandler: RefundWeight { + /// Accrue some weight pertaining to the extension. + fn set_extension_weight(&mut self, info: &DispatchInfo); +} + +impl RefundWeight for () { + fn refund(&mut self, _weight: sp_weights::Weight) {} +} + +impl ExtensionPostDispatchWeightHandler<()> for () { + fn set_extension_weight(&mut self, _info: &()) {} +} + /// A lazy call (module function and argument values) that can be executed via its `dispatch` /// method. 
pub trait Dispatchable { @@ -1450,12 +1515,21 @@ pub trait Dispatchable { type Info; /// Additional information that is returned by `dispatch`. Can be used to supply the caller /// with information about a `Dispatchable` that is only known post dispatch. - type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; + type PostInfo: Eq + + PartialEq + + Clone + + Copy + + Encode + + Decode + + Printable + + ExtensionPostDispatchWeightHandler; /// Actually dispatch this call and return the result of it. fn dispatch(self, origin: Self::RuntimeOrigin) -> crate::DispatchResultWithInfo; } +/// Shortcut to reference the `RuntimeOrigin` type of a `Dispatchable`. +pub type DispatchOriginOf = ::RuntimeOrigin; /// Shortcut to reference the `Info` type of a `Dispatchable`. pub type DispatchInfoOf = ::Info; /// Shortcut to reference the `PostInfo` type of a `Dispatchable`. @@ -1474,8 +1548,75 @@ impl Dispatchable for () { } } +/// Dispatchable impl containing an arbitrary value which panics if it actually is dispatched. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct FakeDispatchable(pub Inner); +impl From for FakeDispatchable { + fn from(inner: Inner) -> Self { + Self(inner) + } +} +impl FakeDispatchable { + /// Take `self` and return the underlying inner value. + pub fn deconstruct(self) -> Inner { + self.0 + } +} +impl AsRef for FakeDispatchable { + fn as_ref(&self) -> &Inner { + &self.0 + } +} + +impl Dispatchable for FakeDispatchable { + type RuntimeOrigin = (); + type Config = (); + type Info = (); + type PostInfo = (); + fn dispatch( + self, + _origin: Self::RuntimeOrigin, + ) -> crate::DispatchResultWithInfo { + panic!("This implementation should not be used for actual dispatch."); + } +} + +/// Runtime Origin which includes a System Origin variant whose `AccountId` is the parameter. +pub trait AsSystemOriginSigner { + /// Extract a reference of the inner value of the System `Origin::Signed` variant, if self has + /// that variant. + fn as_system_origin_signer(&self) -> Option<&AccountId>; +} + +/// Interface to differentiate between Runtime Origins authorized to include a transaction into the +/// block and dispatch it, and those who aren't. +/// +/// This trait targets transactions, by which we mean extrinsics which are validated through a +/// [`TransactionExtension`]. This excludes bare extrinsics (i.e. inherents), which have their call, +/// not their origin, validated and authorized. +/// +/// Typically, upon validation or application of a transaction, the origin resulting from the +/// transaction extension (see [`TransactionExtension`]) is checked for authorization. The +/// transaction is then rejected or applied. +/// +/// In FRAME, an authorized origin is either an `Origin::Signed` System origin or a custom origin +/// authorized in a [`TransactionExtension`]. +pub trait AsTransactionAuthorizedOrigin { + /// Whether the origin is authorized to include a transaction in a block. + /// + /// In typical FRAME chains, this function returns `false` if the origin is a System + /// `Origin::None` variant, `true` otherwise, meaning only signed or custom origin resulting + /// from the transaction extension pipeline are authorized. + /// + /// NOTE: This function should not be used in the context of bare extrinsics (i.e. inherents), + /// as bare extrinsics do not authorize the origin but rather the call itself, and are not + /// validated through the [`TransactionExtension`] pipeline. 
+ fn is_transaction_authorized(&self) -> bool; +} + /// Means by which a transaction may be extended. This type embodies both the data and the logic /// that should be additionally associated with the transaction. It should be plain old data. +#[deprecated = "Use `TransactionExtension` instead."] pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo { @@ -1493,7 +1634,7 @@ pub trait SignedExtension: /// Any additional data that will go into the signed payload. This may be created dynamically /// from the transaction using the `additional_signed` function. - type AdditionalSigned: Encode + TypeInfo; + type AdditionalSigned: Codec + TypeInfo; /// The type that encodes information that can be passed from pre_dispatch to post-dispatch. type Pre; @@ -1532,38 +1673,6 @@ pub trait SignedExtension: len: usize, ) -> Result; - /// Validate an unsigned transaction for the transaction queue. - /// - /// This function can be called frequently by the transaction queue - /// to obtain transaction validity against current state. - /// It should perform all checks that determine a valid unsigned transaction, - /// and quickly eliminate ones that are stale or incorrect. - /// - /// Make sure to perform the same checks in `pre_dispatch_unsigned` function. - fn validate_unsigned( - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - Ok(ValidTransaction::default()) - } - - /// Do any pre-flight stuff for a unsigned transaction. - /// - /// Note this function by default delegates to `validate_unsigned`, so that - /// all checks performed for the transaction queue are also performed during - /// the dispatch phase (applying the extrinsic). - /// - /// If you ever override this function, you need to make sure to always - /// perform the same validation as in `validate_unsigned`. - fn pre_dispatch_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - Self::validate_unsigned(call, info, len).map(|_| ()).map_err(Into::into) - } - /// Do any post-flight stuff for an extrinsic. /// /// If the transaction is signed, then `_pre` will contain the output of `pre_dispatch`, @@ -1594,125 +1703,46 @@ pub trait SignedExtension: /// /// As a [`SignedExtension`] can be a tuple of [`SignedExtension`]s we need to return a `Vec` /// that holds the metadata of each one. Each individual `SignedExtension` must return - /// *exactly* one [`SignedExtensionMetadata`]. + /// *exactly* one [`TransactionExtensionMetadata`]. /// /// This method provides a default implementation that returns a vec containing a single - /// [`SignedExtensionMetadata`]. - fn metadata() -> Vec { - alloc::vec![SignedExtensionMetadata { + /// [`TransactionExtensionMetadata`]. + fn metadata() -> Vec { + sp_std::vec![TransactionExtensionMetadata { identifier: Self::IDENTIFIER, ty: scale_info::meta_type::(), - additional_signed: scale_info::meta_type::() + implicit: scale_info::meta_type::() }] } -} - -/// Information about a [`SignedExtension`] for the runtime metadata. -pub struct SignedExtensionMetadata { - /// The unique identifier of the [`SignedExtension`]. - pub identifier: &'static str, - /// The type of the [`SignedExtension`]. - pub ty: MetaType, - /// The type of the [`SignedExtension`] additional signed data for the payload. 
- pub additional_signed: MetaType, -} - -#[impl_for_tuples(1, 12)] -impl SignedExtension for Tuple { - for_tuples!( where #( Tuple: SignedExtension )* ); - type AccountId = AccountId; - type Call = Call; - const IDENTIFIER: &'static str = "You should call `identifier()`!"; - for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); - for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); - - fn additional_signed(&self) -> Result { - Ok(for_tuples!( ( #( Tuple.additional_signed()? ),* ) )) - } - - fn validate( - &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple.validate(who, call, info, len)?); )* ); - Ok(valid) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info, len)? ),* ) )) - } + /// Validate an unsigned transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue + /// to obtain transaction validity against current state. + /// It should perform all checks that determine a valid unsigned transaction, + /// and quickly eliminate ones that are stale or incorrect. fn validate_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple::validate_unsigned(call, info, len)?); )* ); - Ok(valid) + Ok(ValidTransaction::default()) } + /// Do any pre-flight stuff for an unsigned transaction. + /// + /// Note this function by default delegates to `validate_unsigned`, so that + /// all checks performed for the transaction queue are also performed during + /// the dispatch phase (applying the extrinsic). + /// + /// If you ever override this function, you need not perform the same validation as in + /// `validate_unsigned`. 
fn pre_dispatch_unsigned( call: &Self::Call, info: &DispatchInfoOf, len: usize, ) -> Result<(), TransactionValidityError> { - for_tuples!( #( Tuple::pre_dispatch_unsigned(call, info, len)?; )* ); - Ok(()) - } - - fn post_dispatch( - pre: Option, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - match pre { - Some(x) => { - for_tuples!( #( Tuple::post_dispatch(Some(x.Tuple), info, post_info, len, result)?; )* ); - }, - None => { - for_tuples!( #( Tuple::post_dispatch(None, info, post_info, len, result)?; )* ); - }, - } - Ok(()) - } - - fn metadata() -> Vec { - let mut ids = Vec::new(); - for_tuples!( #( ids.extend(Tuple::metadata()); )* ); - ids - } -} - -impl SignedExtension for () { - type AccountId = u64; - type AdditionalSigned = (); - type Call = (); - type Pre = (); - const IDENTIFIER: &'static str = "UnitSignedExtension"; - fn additional_signed(&self) -> core::result::Result<(), TransactionValidityError> { - Ok(()) - } - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(|_| ()) + Self::validate_unsigned(call, info, len).map(|_| ()).map_err(Into::into) } } @@ -1722,11 +1752,23 @@ impl SignedExtension for () { /// /// Also provides information on to whom this information is attributable and an index that allows /// each piece of attributable information to be disambiguated. +/// +/// IMPORTANT: After validation, in both [validate](Applyable::validate) and +/// [apply](Applyable::apply), all transactions should have *some* authorized origin, except for +/// inherents. This is necessary in order to protect the chain against spam. If no extension in the +/// transaction extension pipeline authorized the transaction with an origin, either a system signed +/// origin or a custom origin, then the transaction must be rejected, as the extensions provided in +/// substrate which protect the chain, such as `CheckNonce`, `ChargeTransactionPayment` etc., rely +/// on the assumption that the system handles system signed transactions, and the pallets handle the +/// custom origin that they authorized. pub trait Applyable: Sized + Send + Sync { /// Type by which we can dispatch. Restricts the `UnsignedValidator` type. type Call: Dispatchable; /// Checks to see if this is a valid *transaction*. It returns information on it if so. + /// + /// IMPORTANT: Ensure that *some* origin has been authorized after validating the transaction. + /// If no origin was authorized, the transaction must be rejected. fn validate>( &self, source: TransactionSource, @@ -1736,6 +1778,9 @@ pub trait Applyable: Sized + Send + Sync { /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. + /// + /// IMPORTANT: Ensure that *some* origin has been authorized after validating the + /// transaction. If no origin was authorized, the transaction must be rejected. 
fn apply>( self, info: &DispatchInfoOf, diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs new file mode 100644 index 0000000000000000000000000000000000000000..a5179748673fff62f38ea82970059025bc162bd4 --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The [AsTransactionExtension] adapter struct for adapting [SignedExtension]s to +//! [TransactionExtension]s. + +#![allow(deprecated)] + +use scale_info::TypeInfo; +use sp_core::RuntimeDebug; + +use crate::{ + traits::{AsSystemOriginSigner, SignedExtension, ValidateResult}, + transaction_validity::InvalidTransaction, +}; + +use super::*; + +/// Adapter to use a `SignedExtension` in the place of a `TransactionExtension`. +#[derive(TypeInfo, Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +#[deprecated = "Convert your SignedExtension to a TransactionExtension."] +pub struct AsTransactionExtension(pub SE); + +impl Default for AsTransactionExtension { + fn default() -> Self { + Self(SE::default()) + } +} + +impl From for AsTransactionExtension { + fn from(value: SE) -> Self { + Self(value) + } +} + +impl TransactionExtension for AsTransactionExtension +where + ::RuntimeOrigin: AsSystemOriginSigner + Clone, +{ + const IDENTIFIER: &'static str = SE::IDENTIFIER; + type Implicit = SE::AdditionalSigned; + + fn implicit(&self) -> Result { + self.0.additional_signed() + } + fn metadata() -> Vec { + SE::metadata() + } + fn weight(&self, _call: &SE::Call) -> Weight { + Weight::zero() + } + type Val = (); + type Pre = SE::Pre; + + fn validate( + &self, + origin: ::RuntimeOrigin, + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> ValidateResult { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + let r = self.0.validate(who, call, info, len)?; + Ok((r, (), origin)) + } + + fn prepare( + self, + _: (), + origin: &::RuntimeOrigin, + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; + self.0.pre_dispatch(who, call, info, len) + } + + fn post_dispatch_details( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result { + SE::post_dispatch(Some(pre), info, post_info, len, result)?; + Ok(Weight::zero()) + } + + fn bare_validate( + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + SE::validate_unsigned(call, info, len) + } + + fn bare_validate_and_prepare( + call: &SE::Call, + info: &DispatchInfoOf, + len: usize, + ) -> 
Result<(), TransactionValidityError> { + SE::pre_dispatch_unsigned(call, info, len) + } + + fn bare_post_dispatch( + info: &DispatchInfoOf, + post_info: &mut PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + SE::post_dispatch(None, info, post_info, len, result) + } +} diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs new file mode 100644 index 0000000000000000000000000000000000000000..e2fb556bf9d341a564c9dd6924ba1f31f0ccede1 --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs @@ -0,0 +1,154 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The [DispatchTransaction] trait. + +use crate::{traits::AsTransactionAuthorizedOrigin, transaction_validity::InvalidTransaction}; + +use super::*; + +/// Single-function utility trait with a blanket impl over [`TransactionExtension`] in order to +/// provide transaction dispatching functionality. We avoid implementing this directly on the trait +/// since we never want it to be overriden by the trait implementation. +pub trait DispatchTransaction { + /// The origin type of the transaction. + type Origin; + /// The info type. + type Info; + /// The resultant type. + type Result; + /// The `Val` of the extension. + type Val; + /// The `Pre` of the extension. + type Pre; + /// Just validate a transaction. + /// + /// The is basically the same as [validate](TransactionExtension::validate), except that there + /// is no need to supply the bond data. + fn validate_only( + &self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + ) -> Result<(ValidTransaction, Self::Val, Self::Origin), TransactionValidityError>; + /// Validate and prepare a transaction, ready for dispatch. + fn validate_and_prepare( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + ) -> Result<(Self::Pre, Self::Origin), TransactionValidityError>; + /// Dispatch a transaction with the given base origin and call. + fn dispatch_transaction( + self, + origin: Self::Origin, + call: Call, + info: &Self::Info, + len: usize, + ) -> Self::Result; + /// Do everything which would be done in a [dispatch_transaction](Self::dispatch_transaction), + /// but instead of executing the call, execute `substitute` instead. Since this doesn't actually + /// dispatch the call, it doesn't need to consume it and so `call` can be passed as a reference. 
+ fn test_run( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + substitute: impl FnOnce( + Self::Origin, + ) -> crate::DispatchResultWithInfo<::PostInfo>, + ) -> Self::Result; +} + +impl, Call: Dispatchable + Encode> DispatchTransaction for T +where + ::RuntimeOrigin: AsTransactionAuthorizedOrigin, +{ + type Origin = ::RuntimeOrigin; + type Info = DispatchInfoOf; + type Result = crate::ApplyExtrinsicResultWithInfo>; + type Val = T::Val; + type Pre = T::Pre; + + fn validate_only( + &self, + origin: Self::Origin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(ValidTransaction, T::Val, Self::Origin), TransactionValidityError> { + match self.validate(origin, call, info, len, self.implicit()?, call) { + // After validation, some origin must have been authorized. + Ok((_, _, origin)) if !origin.is_transaction_authorized() => + Err(InvalidTransaction::UnknownOrigin.into()), + res => res, + } + } + fn validate_and_prepare( + self, + origin: Self::Origin, + call: &Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(T::Pre, Self::Origin), TransactionValidityError> { + let (_, val, origin) = self.validate_only(origin, call, info, len)?; + let pre = self.prepare(val, &origin, &call, info, len)?; + Ok((pre, origin)) + } + fn dispatch_transaction( + self, + origin: ::RuntimeOrigin, + call: Call, + info: &DispatchInfoOf, + len: usize, + ) -> Self::Result { + let (pre, origin) = self.validate_and_prepare(origin, &call, info, len)?; + let mut res = call.dispatch(origin); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + let post_info = match &mut res { + Ok(info) => info, + Err(err) => &mut err.post_info, + }; + post_info.set_extension_weight(info); + T::post_dispatch(pre, info, post_info, len, &pd_res)?; + Ok(res) + } + fn test_run( + self, + origin: Self::Origin, + call: &Call, + info: &Self::Info, + len: usize, + substitute: impl FnOnce( + Self::Origin, + ) -> crate::DispatchResultWithInfo<::PostInfo>, + ) -> Self::Result { + let (pre, origin) = self.validate_and_prepare(origin, &call, info, len)?; + let mut res = substitute(origin); + let pd_res = res.map(|_| ()).map_err(|e| e.error); + let post_info = match &mut res { + Ok(info) => info, + Err(err) => &mut err.post_info, + }; + post_info.set_extension_weight(info); + T::post_dispatch(pre, info, post_info, len, &pd_res)?; + Ok(res) + } +} diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..58cd0974661a9853811c7ed28dc9bb6deaa6bd72 --- /dev/null +++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs @@ -0,0 +1,635 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The transaction extension trait. 
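The `DispatchTransaction` helper introduced above, together with its blanket impl over any `TransactionExtension`, gives callers a single entry point that chains `validate`, `prepare`, the call's `dispatch` and `post_dispatch`. A small generic sketch of how a caller might use it (the helper name is illustrative, not part of this PR):

```rust
use sp_runtime::traits::DispatchTransaction;

/// Applies `call` under the extension pipeline `ext`, driving the whole
/// validate -> prepare -> dispatch -> post_dispatch sequence in one go.
fn apply_with_extension<Call, Ext: DispatchTransaction<Call>>(
    ext: Ext,
    origin: Ext::Origin,
    call: Call,
    info: &Ext::Info,
    len: usize,
) -> Ext::Result {
    ext.dispatch_transaction(origin, call, info, len)
}
```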
+ +use crate::{ + scale_info::{MetaType, StaticTypeInfo}, + transaction_validity::{TransactionValidity, TransactionValidityError, ValidTransaction}, + DispatchResult, +}; +use codec::{Codec, Decode, Encode}; +use impl_trait_for_tuples::impl_for_tuples; +#[doc(hidden)] +pub use sp_std::marker::PhantomData; +use sp_std::{self, fmt::Debug, prelude::*}; +use sp_weights::Weight; +use tuplex::{PopFront, PushBack}; + +use super::{ + DispatchInfoOf, DispatchOriginOf, Dispatchable, ExtensionPostDispatchWeightHandler, + PostDispatchInfoOf, RefundWeight, +}; + +mod as_transaction_extension; +mod dispatch_transaction; +#[allow(deprecated)] +pub use as_transaction_extension::AsTransactionExtension; +pub use dispatch_transaction::DispatchTransaction; + +/// Shortcut for the result value of the `validate` function. +pub type ValidateResult = + Result<(ValidTransaction, Val, DispatchOriginOf), TransactionValidityError>; + +/// Means by which a transaction may be extended. This type embodies both the data and the logic +/// that should be additionally associated with the transaction. It should be plain old data. +/// +/// The simplest transaction extension would be the Unit type (and empty pipeline) `()`. This +/// executes no additional logic and implies a dispatch of the transaction's call using the +/// inherited origin (either `None` or `Signed`, depending on whether this is a signed or general +/// transaction). +/// +/// Transaction extensions are capable of altering certain associated semantics: +/// +/// - They may define the origin with which the transaction's call should be dispatched. +/// - They may define various parameters used by the transaction queue to determine under what +/// conditions the transaction should be retained and introduced on-chain. +/// - They may define whether this transaction is acceptable for introduction on-chain at all. +/// +/// Each of these semantics are defined by the `validate` function. +/// +/// **NOTE: Transaction extensions cannot under any circumstances alter the call itself.** +/// +/// Transaction extensions are capable of defining logic which is executed additionally to the +/// dispatch of the call: +/// +/// - They may define logic which must be executed prior to the dispatch of the call. +/// - They may also define logic which must be executed after the dispatch of the call. +/// +/// Each of these semantics are defined by the `prepare` and `post_dispatch_details` functions +/// respectively. +/// +/// Finally, transaction extensions may define additional data to help define the implications of +/// the logic they introduce. This additional data may be explicitly defined by the transaction +/// author (in which case it is included as part of the transaction body), or it may be implicitly +/// defined by the transaction extension based around the on-chain state (which the transaction +/// author is assumed to know). This data may be utilized by the above logic to alter how a node's +/// transaction queue treats this transaction. +/// +/// ## Default implementations +/// +/// Of the 6 functions in this trait along with `TransactionExtension`, 2 of them must return a +/// value of an associated type on success, with only `implicit` having a default implementation. +/// This means that default implementations cannot be provided for `validate` and `prepare`. +/// However, a macro is provided [impl_tx_ext_default](crate::impl_tx_ext_default) which is capable +/// of generating default implementations for both of these functions. 
If you do not wish to +/// introduce additional logic into the transaction pipeline, then it is recommended that you use +/// this macro to implement these functions. Additionally, [weight](TransactionExtension::weight) +/// can return a default value, which would mean the extension is weightless, but it is not +/// implemented by default. Instead, implementers can explicitly choose to implement this default +/// behavior through the same [impl_tx_ext_default](crate::impl_tx_ext_default) macro. +/// +/// If your extension does any post-flight logic, then the functionality must be implemented in +/// [post_dispatch_details](TransactionExtension::post_dispatch_details). This function can return +/// the actual weight used by the extension during an entire dispatch cycle by wrapping said weight +/// value in a `Some`. This is useful in computing fee refunds, similar to how post dispatch +/// information is used to refund fees for calls. Alternatively, a `None` can be returned, which +/// means that the worst case scenario weight, namely the value returned by +/// [weight](TransactionExtension::weight), is the actual weight. This particular piece of logic +/// is embedded in the default implementation of +/// [post_dispatch](TransactionExtension::post_dispatch) so that the weight is assumed to be worst +/// case scenario, but implementers of this trait can correct it with extra effort. Therefore, all +/// users of an extension should use [post_dispatch](TransactionExtension::post_dispatch), with +/// [post_dispatch_details](TransactionExtension::post_dispatch_details) considered an internal +/// function. +/// +/// ## Pipelines, Inherited Implications, and Authorized Origins +/// +/// Requiring a single transaction extension to define all of the above semantics would be +/// cumbersome and would lead to a lot of boilerplate. Instead, transaction extensions are +/// aggregated into pipelines, which are tuples of transaction extensions. Each extension in the +/// pipeline is executed in order, and the output of each extension is aggregated and/or relayed as +/// the input to the next extension in the pipeline. +/// +/// This ordered composition happens with all data types ([Val](TransactionExtension::Val), +/// [Pre](TransactionExtension::Pre) and [Implicit](TransactionExtension::Implicit)) as well as +/// all functions. There are important consequences stemming from how the composition affects the +/// meaning of the `origin` and `implication` parameters as well as the results. Whereas the +/// [prepare](TransactionExtension::prepare) and +/// [post_dispatch](TransactionExtension::post_dispatch) functions are clear in their meaning, the +/// [validate](TransactionExtension::validate) function is fairly sophisticated and warrants further +/// explanation. +/// +/// Firstly, the `origin` parameter. The `origin` passed into the first item in a pipeline is simply +/// that passed into the tuple itself. It represents an authority who has authorized the implication +/// of the transaction, as of the extension it has been passed into *and any further extensions it +/// may pass though, all the way to, and including, the transaction's dispatch call itself. Each +/// following item in the pipeline is passed the origin which the previous item returned. The origin +/// returned from the final item in the pipeline is the origin which is returned by the tuple +/// itself. 
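As an aside, a concrete sketch of such a pipeline constituent may help: it receives the inherited implication already encoded, does nothing with it, and hands the incoming origin on unchanged (the extension name is hypothetical; paths follow this diff):

```rust
use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{
    impl_tx_ext_default,
    traits::{
        DispatchInfoOf, DispatchOriginOf, Dispatchable, TransactionExtension, ValidateResult,
    },
    transaction_validity::ValidTransaction,
};

#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)]
pub struct PassThrough;

impl<Call: Dispatchable> TransactionExtension<Call> for PassThrough {
    const IDENTIFIER: &'static str = "PassThrough";
    type Implicit = ();
    type Val = ();
    type Pre = ();

    fn validate(
        &self,
        origin: DispatchOriginOf<Call>,
        _call: &Call,
        _info: &DispatchInfoOf<Call>,
        _len: usize,
        _self_implicit: Self::Implicit,
        inherited_implication: &impl Encode,
    ) -> ValidateResult<Self::Val, Call> {
        // A signature-checking extension would verify a signature over exactly
        // these bytes: the call plus everything that follows in the pipeline.
        let _implication_bytes = inherited_implication.encode();
        // Return the origin untouched; later extensions and the dispatch see it as-is.
        Ok((ValidTransaction::default(), (), origin))
    }

    // Zero weight and a no-op `prepare`.
    impl_tx_ext_default!(Call; weight prepare);
}
```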
+/// +/// This means that if a constituent extension returns a different origin to the one it was called +/// with, then (assuming no other extension changes it further) *this new origin will be used for +/// all extensions following it in the pipeline, and will be returned from the pipeline to be used +/// as the origin for the call's dispatch*. The call itself as well as all these extensions +/// following may each imply consequence for this origin. We call this the *inherited implication*. +/// +/// The *inherited implication* is the cumulated on-chain effects born by whatever origin is +/// returned. It is expressed to the [validate](TransactionExtension::validate) function only as the +/// `implication` argument which implements the [Encode] trait. A transaction extension may define +/// its own implications through its own fields and the +/// [implicit](TransactionExtension::implicit) function. This is only utilized by extensions +/// which precede it in a pipeline or, if the transaction is an old-school signed transaction, the +/// underlying transaction verification logic. +/// +/// **The inherited implication passed as the `implication` parameter to +/// [validate](TransactionExtension::validate) does not include the extension's inner data itself +/// nor does it include the result of the extension's `implicit` function.** If you both provide an +/// implication and rely on the implication, then you need to manually aggregate your extensions +/// implication with the aggregated implication passed in. +/// +/// In the post dispatch pipeline, the actual weight of each extension is accrued in the +/// [PostDispatchInfo](PostDispatchInfoOf) of that transaction sequentially with each +/// [post_dispatch](TransactionExtension::post_dispatch) call. This means that an extension handling +/// transaction payment and refunds should be at the end of the pipeline in order to capture the +/// correct amount of weight used during the call. This is because one cannot know the actual weight +/// of an extension after post dispatch without running the post dispatch ahead of time. +pub trait TransactionExtension: + Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo +{ + /// Unique identifier of this signed extension. + /// + /// This will be exposed in the metadata to identify the signed extension used in an extrinsic. + const IDENTIFIER: &'static str; + + /// Any additional data which was known at the time of transaction construction and can be + /// useful in authenticating the transaction. This is determined dynamically in part from the + /// on-chain environment using the `implicit` function and not directly contained in the + /// transaction itself and therefore is considered "implicit". + type Implicit: Codec + StaticTypeInfo; + + /// Determine any additional data which was known at the time of transaction construction and + /// can be useful in authenticating the transaction. The expected usage of this is to include in + /// any data which is signed and verified as part of transaction validation. Also perform any + /// pre-signature-verification checks and return an error if needed. + fn implicit(&self) -> Result { + use crate::transaction_validity::InvalidTransaction::IndeterminateImplicit; + Ok(Self::Implicit::decode(&mut &[][..]).map_err(|_| IndeterminateImplicit)?) + } + + /// Returns the metadata for this extension. + /// + /// As a [`TransactionExtension`] can be a tuple of [`TransactionExtension`]s we need to return + /// a `Vec` that holds the metadata of each one. 
Each individual `TransactionExtension` must + /// return *exactly* one [`TransactionExtensionMetadata`]. + /// + /// This method provides a default implementation that returns a vec containing a single + /// [`TransactionExtensionMetadata`]. + fn metadata() -> Vec { + sp_std::vec![TransactionExtensionMetadata { + identifier: Self::IDENTIFIER, + ty: scale_info::meta_type::(), + implicit: scale_info::meta_type::() + }] + } + + /// The type that encodes information that can be passed from `validate` to `prepare`. + type Val; + + /// The type that encodes information that can be passed from `prepare` to `post_dispatch`. + type Pre; + + /// The weight consumed by executing this extension instance fully during transaction dispatch. + fn weight(&self, call: &Call) -> Weight; + + /// Validate a transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue to obtain transaction + /// validity against current state. It should perform all checks that determine a valid + /// transaction, that can pay for its execution and quickly eliminate ones that are stale or + /// incorrect. + /// + /// Parameters: + /// - `origin`: The origin of the transaction which this extension inherited; coming from an + /// "old-school" *signed transaction*, this will be a system `RawOrigin::Signed` value. If the + /// transaction is a "new-school" *General Transaction*, then this will be a system + /// `RawOrigin::None` value. If this extension is an item in a composite, then it could be + /// anything which was previously returned as an `origin` value in the result of a `validate` + /// call. + /// - `call`: The `Call` wrapped by this extension. + /// - `info`: Information concerning, and inherent to, the transaction's call. + /// - `len`: The total length of the encoded transaction. + /// - `inherited_implication`: The *implication* which this extension inherits. This is a tuple + /// of the transaction's call and some additional opaque-but-encodable data. Coming directly + /// from a transaction, the latter is [()]. However, if this extension is expressed as part of + /// a composite type, then the latter component is equal to any further implications to which + /// the returned `origin` could potentially apply. See Pipelines, Inherited Implications, and + /// Authorized Origins for more information. + /// + /// Returns a [ValidateResult], which is a [Result] whose success type is a tuple of + /// [ValidTransaction] (defining useful metadata for the transaction queue), the [Self::Val] + /// token of this transaction, which gets passed into [prepare](TransactionExtension::prepare), + /// and the origin of the transaction, which gets passed into + /// [prepare](TransactionExtension::prepare) and is ultimately used for dispatch. + fn validate( + &self, + origin: DispatchOriginOf, + call: &Call, + info: &DispatchInfoOf, + len: usize, + self_implicit: Self::Implicit, + inherited_implication: &impl Encode, + ) -> ValidateResult; + + /// Do any pre-flight stuff for a transaction after validation. + /// + /// This is for actions which do not happen in the transaction queue but only immediately prior + /// to the point of dispatch on-chain. This should not return an error, since errors should + /// already have been identified during the [validate](TransactionExtension::validate) call. 
+	/// If an error is returned, the transaction will be considered invalid but no state changes
+	/// will happen and therefore work done in [validate](TransactionExtension::validate) will not
+	/// be paid for.
+	///
+	/// Unlike `validate`, this function may consume `self`.
+	///
+	/// Parameters:
+	/// - `val`: `Self::Val` returned by the result of the `validate` call.
+	/// - `origin`: The origin returned by the result of the `validate` call.
+	/// - `call`: The `Call` wrapped by this extension.
+	/// - `info`: Information concerning, and inherent to, the transaction's call.
+	/// - `len`: The total length of the encoded transaction.
+	///
+	/// Returns a [Self::Pre] value on success, which gets passed into
+	/// [post_dispatch](TransactionExtension::post_dispatch) after the call is dispatched.
+	///
+	/// IMPORTANT: **Checks made in validation need not be repeated here.**
+	fn prepare(
+		self,
+		val: Self::Val,
+		origin: &DispatchOriginOf<Call>,
+		call: &Call,
+		info: &DispatchInfoOf<Call>,
+		len: usize,
+	) -> Result<Self::Pre, TransactionValidityError>;
+
+	/// Do any post-flight stuff for an extrinsic.
+	///
+	/// `_pre` contains the output of `prepare`.
+	///
+	/// This gets given the `DispatchResult` `_result` from the extrinsic and can, if desired,
+	/// introduce a `TransactionValidityError`, causing the block to become invalid for including
+	/// it.
+	///
+	/// On success, the implementation must return the amount of unspent weight left over by this
+	/// extension after dispatch. By default, this function returns no unspent weight, which means
+	/// the entire weight computed for the worst case scenario is consumed.
+	///
+	/// WARNING: This function does not automatically keep track of accumulated "actual" weight.
+	/// Unless this weight is handled at the call site, use
+	/// [post_dispatch](TransactionExtension::post_dispatch) instead.
+	///
+	/// Parameters:
+	/// - `pre`: `Self::Pre` returned by the result of the `prepare` call prior to dispatch.
+	/// - `info`: Information concerning, and inherent to, the transaction's call.
+	/// - `post_info`: Information concerning the dispatch of the transaction's call.
+	/// - `len`: The total length of the encoded transaction.
+	/// - `result`: The result of the dispatch.
+	///
+	/// WARNING: It is dangerous to return an error here. To do so will fundamentally invalidate
+	/// the transaction and any block that it is included in, causing the block author to not be
+	/// compensated for their work in validating the transaction or producing the block so far. It
+	/// can only be used safely when you *know* that the transaction is one that would only be
+	/// introduced by the current block author.
+	fn post_dispatch_details(
+		_pre: Self::Pre,
+		_info: &DispatchInfoOf<Call>,
+		_post_info: &PostDispatchInfoOf<Call>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<Weight, TransactionValidityError> {
+		Ok(Weight::zero())
+	}
+
+	/// A wrapper for [`post_dispatch_details`](TransactionExtension::post_dispatch_details) that
+	/// refunds the weight left unspent by this extension into the post dispatch information.
+	///
+	/// If `post_dispatch_details` returns a non-zero unspent weight, which, by definition, must be
+	/// less than the worst case weight provided by [weight](TransactionExtension::weight), that
+	/// is the value refunded in `post_info`.
+	///
+	/// If no unspent weight is reported by `post_dispatch_details`, this function assumes the
+	/// worst case weight and does not refund anything.
+	///
+	/// For more information, look into
+	/// [post_dispatch_details](TransactionExtension::post_dispatch_details).
+	fn post_dispatch(
+		pre: Self::Pre,
+		info: &DispatchInfoOf<Call>,
+		post_info: &mut PostDispatchInfoOf<Call>,
+		len: usize,
+		result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		let unspent_weight = Self::post_dispatch_details(pre, info, &post_info, len, result)?;
+		post_info.refund(unspent_weight);
+
+		Ok(())
+	}
+
+	/// Validation logic for bare extrinsics.
+	///
+	/// NOTE: This function will be migrated to a separate `InherentExtension` interface.
+	fn bare_validate(
+		_call: &Call,
+		_info: &DispatchInfoOf<Call>,
+		_len: usize,
+	) -> TransactionValidity {
+		Ok(ValidTransaction::default())
+	}
+
+	/// All pre-flight logic run before dispatching bare extrinsics.
+	///
+	/// NOTE: This function will be migrated to a separate `InherentExtension` interface.
+	fn bare_validate_and_prepare(
+		_call: &Call,
+		_info: &DispatchInfoOf<Call>,
+		_len: usize,
+	) -> Result<(), TransactionValidityError> {
+		Ok(())
+	}
+
+	/// Post dispatch logic run after dispatching bare extrinsics.
+	///
+	/// NOTE: This function will be migrated to a separate `InherentExtension` interface.
+	fn bare_post_dispatch(
+		_info: &DispatchInfoOf<Call>,
+		_post_info: &mut PostDispatchInfoOf<Call>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		Ok(())
+	}
+}
+
+/// Helper macro to be used in an `impl TransactionExtension` block to add default implementations
+/// of `weight`, `validate`, `prepare` or any combination of them.
+///
+/// The macro is to be used with 2 parameters, separated by ";":
+/// - the `Call` type;
+/// - the functions for which a default implementation should be generated, separated by " ";
+///   available options are `weight`, `validate` and `prepare`.
+///
+/// Example usage:
+/// ```nocompile
+/// impl TransactionExtension<FirstCall> for EmptyExtension {
+/// 	type Val = ();
+/// 	type Pre = ();
+///
+/// 	impl_tx_ext_default!(FirstCall; weight validate prepare);
+/// }
+///
+/// impl TransactionExtension<SecondCall> for SimpleExtension {
+/// 	type Val = u32;
+/// 	type Pre = ();
+///
+/// 	fn weight(&self, _: &SecondCall) -> Weight {
+/// 		Weight::zero()
+/// 	}
+///
+/// 	fn validate(
+/// 		&self,
+/// 		origin: <SecondCall as Dispatchable>::RuntimeOrigin,
+/// 		_call: &SecondCall,
+/// 		_info: &DispatchInfoOf<SecondCall>,
+/// 		_len: usize,
+/// 		_self_implicit: Self::Implicit,
+/// 		_inherited_implication: &impl Encode,
+/// 	) -> ValidateResult<Self::Val, SecondCall> {
+/// 		Ok((Default::default(), 42u32, origin))
+/// 	}
+///
+/// 	impl_tx_ext_default!(SecondCall; prepare);
+/// }
+/// ```
+#[macro_export]
+macro_rules! impl_tx_ext_default {
+	($call:ty ; , $( $rest:tt )*) => {
+		impl_tx_ext_default!{$call ; $( $rest )*}
+	};
+	($call:ty ; validate $( $rest:tt )*) => {
+		fn validate(
+			&self,
+			origin: $crate::traits::DispatchOriginOf<$call>,
+			_call: &$call,
+			_info: &$crate::traits::DispatchInfoOf<$call>,
+			_len: usize,
+			_self_implicit: Self::Implicit,
+			_inherited_implication: &impl $crate::codec::Encode,
+		) -> $crate::traits::ValidateResult<Self::Val, $call> {
+			Ok((Default::default(), Default::default(), origin))
+		}
+		impl_tx_ext_default!{$call ; $( $rest )*}
+	};
+	($call:ty ; prepare $( $rest:tt )*) => {
+		fn prepare(
+			self,
+			_val: Self::Val,
+			_origin: &$crate::traits::DispatchOriginOf<$call>,
+			_call: &$call,
+			_info: &$crate::traits::DispatchInfoOf<$call>,
+			_len: usize,
+		) -> Result<Self::Pre, $crate::transaction_validity::TransactionValidityError> {
+			Ok(Default::default())
+		}
+		impl_tx_ext_default!{$call ; $( $rest )*}
+	};
+	($call:ty ; weight $( $rest:tt )*) => {
+		fn weight(&self, _call: &$call) -> $crate::Weight {
+			$crate::Weight::zero()
+		}
+		impl_tx_ext_default!{$call ; $( $rest )*}
+	};
+	($call:ty ;) => {};
+}
+
+/// Information about a [`TransactionExtension`] for the runtime metadata.
+pub struct TransactionExtensionMetadata {
+	/// The unique identifier of the [`TransactionExtension`].
+	pub identifier: &'static str,
+	/// The type of the [`TransactionExtension`].
+	pub ty: MetaType,
+	/// The type of the [`TransactionExtension`] additional signed data for the payload.
+	pub implicit: MetaType,
+}
+
+#[impl_for_tuples(1, 12)]
+impl<Call: Dispatchable> TransactionExtension<Call> for Tuple {
+	const IDENTIFIER: &'static str = "Use `metadata()`!";
+	for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); );
+	fn implicit(&self) -> Result<Self::Implicit, TransactionValidityError> {
+		Ok(for_tuples!( ( #( Tuple.implicit()? ),* ) ))
+	}
+	fn metadata() -> Vec<TransactionExtensionMetadata> {
+		let mut ids = Vec::new();
+		for_tuples!( #( ids.extend(Tuple::metadata()); )* );
+		ids
+	}
+
+	for_tuples!( type Val = ( #( Tuple::Val ),* ); );
+	for_tuples!( type Pre = ( #( Tuple::Pre ),* ); );
+
+	fn weight(&self, call: &Call) -> Weight {
+		let mut weight = Weight::zero();
+		for_tuples!( #( weight = weight.saturating_add(Tuple.weight(call)); )* );
+		weight
+	}
+
+	fn validate(
+		&self,
+		origin: <Call as Dispatchable>::RuntimeOrigin,
+		call: &Call,
+		info: &DispatchInfoOf<Call>,
+		len: usize,
+		self_implicit: Self::Implicit,
+		inherited_implication: &impl Encode,
+	) -> Result<
+		(ValidTransaction, Self::Val, <Call as Dispatchable>::RuntimeOrigin),
+		TransactionValidityError,
+	> {
+		let valid = ValidTransaction::default();
+		let val = ();
+		let following_explicit_implications = for_tuples!( ( #( &self.Tuple ),* ) );
+		let following_implicit_implications = self_implicit;
+
+		for_tuples!(#(
+			// Implication of this pipeline element not relevant for later items, so we pop it.
+			let (_item, following_explicit_implications) = following_explicit_implications.pop_front();
+			let (item_implicit, following_implicit_implications) = following_implicit_implications.pop_front();
+			let (item_valid, item_val, origin) = {
+				let implications = (
+					// The first is the implications born of the fact we return the mutated
+					// origin.
+					inherited_implication,
+					// This is the explicitly made implication born of the fact the new origin is
+					// passed into the next items in this pipeline-tuple.
+					&following_explicit_implications,
+					// This is the implicitly made implication born of the fact the new origin is
+					// passed into the next items in this pipeline-tuple.
+					&following_implicit_implications,
+				);
+				Tuple.validate(origin, call, info, len, item_implicit, &implications)?
+			};
+			let valid = valid.combine_with(item_valid);
+			let val = val.push_back(item_val);
+		)* );
+		Ok((valid, val, origin))
+	}
+
+	fn prepare(
+		self,
+		val: Self::Val,
+		origin: &<Call as Dispatchable>::RuntimeOrigin,
+		call: &Call,
+		info: &DispatchInfoOf<Call>,
+		len: usize,
+	) -> Result<Self::Pre, TransactionValidityError> {
+		Ok(for_tuples!( ( #(
+			Tuple::prepare(self.Tuple, val.Tuple, origin, call, info, len)?
+		),* ) ))
+	}
+
+	fn post_dispatch_details(
+		pre: Self::Pre,
+		info: &DispatchInfoOf<Call>,
+		post_info: &PostDispatchInfoOf<Call>,
+		len: usize,
+		result: &DispatchResult,
+	) -> Result<Weight, TransactionValidityError> {
+		let mut total_unspent_weight = Weight::zero();
+		for_tuples!( #({
+			let unspent_weight = Tuple::post_dispatch_details(pre.Tuple, info, post_info, len, result)?;
+			total_unspent_weight = total_unspent_weight.saturating_add(unspent_weight);
+		})* );
+		Ok(total_unspent_weight)
+	}
+
+	fn post_dispatch(
+		pre: Self::Pre,
+		info: &DispatchInfoOf<Call>,
+		post_info: &mut PostDispatchInfoOf<Call>,
+		len: usize,
+		result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		for_tuples!( #( Tuple::post_dispatch(pre.Tuple, info, post_info, len, result)?; )* );
+		Ok(())
+	}
+
+	fn bare_validate(call: &Call, info: &DispatchInfoOf<Call>, len: usize) -> TransactionValidity {
+		let valid = ValidTransaction::default();
+		for_tuples!(#(
+			let item_valid = Tuple::bare_validate(call, info, len)?;
+			let valid = valid.combine_with(item_valid);
+		)* );
+		Ok(valid)
+	}
+
+	fn bare_validate_and_prepare(
+		call: &Call,
+		info: &DispatchInfoOf<Call>,
+		len: usize,
+	) -> Result<(), TransactionValidityError> {
+		for_tuples!( #( Tuple::bare_validate_and_prepare(call, info, len)?; )* );
+		Ok(())
+	}
+
+	fn bare_post_dispatch(
+		info: &DispatchInfoOf<Call>,
+		post_info: &mut PostDispatchInfoOf<Call>,
+		len: usize,
+		result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		for_tuples!( #( Tuple::bare_post_dispatch(info, post_info, len, result)?; )* );
+		Ok(())
+	}
+}
+
+impl<Call: Dispatchable> TransactionExtension<Call> for () {
+	const IDENTIFIER: &'static str = "UnitTransactionExtension";
+	type Implicit = ();
+	fn implicit(&self) -> sp_std::result::Result<Self::Implicit, TransactionValidityError> {
+		Ok(())
+	}
+	type Val = ();
+	type Pre = ();
+	fn weight(&self, _call: &Call) -> Weight {
+		Weight::zero()
+	}
+	fn validate(
+		&self,
+		origin: <Call as Dispatchable>::RuntimeOrigin,
+		_call: &Call,
+		_info: &DispatchInfoOf<Call>,
+		_len: usize,
+		_self_implicit: Self::Implicit,
+		_inherited_implication: &impl Encode,
+	) -> Result<
+		(ValidTransaction, (), <Call as Dispatchable>::RuntimeOrigin),
+		TransactionValidityError,
+	> {
+		Ok((ValidTransaction::default(), (), origin))
+	}
+	fn prepare(
+		self,
+		_val: (),
+		_origin: &<Call as Dispatchable>::RuntimeOrigin,
+		_call: &Call,
+		_info: &DispatchInfoOf<Call>,
+		_len: usize,
+	) -> Result<(), TransactionValidityError> {
+		Ok(())
+	}
+}
diff --git a/substrate/primitives/runtime/src/transaction_validity.rs b/substrate/primitives/runtime/src/transaction_validity.rs
index 2d800e29b8bb81b8c247010229986940cadb756c..a48c8ee7ba84a8f5dd00232a263393ed00418f79 100644
--- a/substrate/primitives/runtime/src/transaction_validity.rs
+++ b/substrate/primitives/runtime/src/transaction_validity.rs
@@ -82,6 +82,10 @@ pub enum InvalidTransaction {
 	MandatoryValidation,
 	/// The sending address is disabled or known to be invalid.
 	BadSigner,
+	/// The implicit data was unable to be calculated.
+	IndeterminateImplicit,
+	/// The transaction extension did not authorize any origin.
+ UnknownOrigin, } impl InvalidTransaction { @@ -113,6 +117,10 @@ impl From for &'static str { "Transaction dispatch is mandatory; transactions must not be validated.", InvalidTransaction::Custom(_) => "InvalidTransaction custom error", InvalidTransaction::BadSigner => "Invalid signing address", + InvalidTransaction::IndeterminateImplicit => + "The implicit data was unable to be calculated", + InvalidTransaction::UnknownOrigin => + "The transaction extension did not authorize any origin", } } } @@ -338,7 +346,7 @@ pub struct ValidTransactionBuilder { impl ValidTransactionBuilder { /// Set the priority of a transaction. /// - /// Note that the final priority for `FRAME` is combined from all `SignedExtension`s. + /// Note that the final priority for `FRAME` is combined from all `TransactionExtension`s. /// Most likely for unsigned transactions you want the priority to be higher /// than for regular transactions. We recommend exposing a base priority for unsigned /// transactions as a runtime module parameter, so that the runtime can tune inter-module diff --git a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index 9341d7ac77e242409153fa80edc91df5d52b6c73..e441ddae52efe3102a7a99c0509925eebc7c16f7 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -25,12 +25,7 @@ sp-debug-derive = { workspace = true } [features] default = ["std"] -std = [ - "codec/std", - "impl-serde/std", - "serde/std", - "sp-debug-derive/std", -] +std = ["codec/std", "impl-serde/std", "serde/std", "sp-debug-derive/std"] # Serde support without relying on std features. serde = ["dep:serde", "impl-serde"] diff --git a/substrate/primitives/test-primitives/src/lib.rs b/substrate/primitives/test-primitives/src/lib.rs index 1e3b912eaf48911a3d9ca2ce6d93dec917482071..adc96d773694e409e0eb35337344e33c8bf0d3b9 100644 --- a/substrate/primitives/test-primitives/src/lib.rs +++ b/substrate/primitives/test-primitives/src/lib.rs @@ -28,7 +28,7 @@ use sp_application_crypto::sr25519; use alloc::vec::Vec; pub use sp_core::{hash::H256, RuntimeDebug}; -use sp_runtime::traits::{BlakeTwo256, Extrinsic as ExtrinsicT, Verify}; +use sp_runtime::traits::{BlakeTwo256, ExtrinsicLike, Verify}; /// Extrinsic for test-runtime. 
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)] @@ -47,10 +47,7 @@ impl serde::Serialize for Extrinsic { } } -impl ExtrinsicT for Extrinsic { - type Call = Extrinsic; - type SignaturePayload = (); - +impl ExtrinsicLike for Extrinsic { fn is_signed(&self) -> Option { if let Extrinsic::IncludeData(_) = *self { Some(false) @@ -59,8 +56,12 @@ impl ExtrinsicT for Extrinsic { } } - fn new(call: Self::Call, _signature_payload: Option) -> Option { - Some(call) + fn is_bare(&self) -> bool { + if let Extrinsic::IncludeData(_) = *self { + true + } else { + false + } } } diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index 9b830403dbe8d43ab6305f8edec1c5b3a39192d7..c4e1897dbb8ec02d8d37f9964cf2de11ee6bbf28 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -47,6 +47,4 @@ serde = [ "sp-arithmetic/serde", ] -json-schema = [ - "dep:schemars", -] +json-schema = ["dep:schemars"] diff --git a/substrate/scripts/run_all_benchmarks.sh b/substrate/scripts/run_all_benchmarks.sh index 6dd7cede319f974a074d951d34ec3c970f02c32f..fe5f89a5b56ea109cee20274ce7c1454b72b7009 100755 --- a/substrate/scripts/run_all_benchmarks.sh +++ b/substrate/scripts/run_all_benchmarks.sh @@ -107,6 +107,19 @@ for PALLET in "${PALLETS[@]}"; do FOLDER="$(echo "${PALLET#*_}" | tr '_' '-')"; WEIGHT_FILE="./frame/${FOLDER}/src/weights.rs" + + # Special handling of custom weight paths. + if [ "$PALLET" == "frame_system_extensions" ] || [ "$PALLET" == "frame-system-extensions" ] + then + WEIGHT_FILE="./frame/system/src/extensions/weights.rs" + elif [ "$PALLET" == "pallet_asset_conversion_tx_payment" ] || [ "$PALLET" == "pallet-asset-conversion-tx-payment" ] + then + WEIGHT_FILE="./frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs" + elif [ "$PALLET" == "pallet_asset_tx_payment" ] || [ "$PALLET" == "pallet-asset-tx-payment" ] + then + WEIGHT_FILE="./frame/transaction-payment/asset-tx-payment/src/weights.rs" + fi + echo "[+] Benchmarking $PALLET with weight file $WEIGHT_FILE"; OUTPUT=$( diff --git a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs index 5ae0d8f8f6eca2517b3a2c4db6fc8f4501744511..8f94dd10a8340e3456da38dae6a63030a01ff46f 100644 --- a/substrate/test-utils/runtime/src/extrinsic.rs +++ b/substrate/test-utils/runtime/src/extrinsic.rs @@ -26,7 +26,10 @@ use frame_metadata_hash_extension::CheckMetadataHash; use frame_system::{CheckNonce, CheckWeight}; use sp_core::crypto::Pair as TraitPair; use sp_keyring::AccountKeyring; -use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionPriority, Perbill}; +use sp_runtime::{ + generic::Preamble, traits::TransactionExtension, transaction_validity::TransactionPriority, + Perbill, +}; /// Transfer used in test substrate pallet. Extrinsic is created and signed using this data. 
#[derive(Clone)] @@ -66,11 +69,11 @@ impl TryFrom<&Extrinsic> for TransferData { match uxt { Extrinsic { function: RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest, value }), - signature: Some((from, _, (CheckNonce(nonce), ..))), + preamble: Preamble::Signed(from, _, _, ((CheckNonce(nonce), ..), ..)), } => Ok(TransferData { from: *from, to: *dest, amount: *value, nonce: *nonce }), Extrinsic { function: RuntimeCall::SubstrateTest(PalletCall::bench_call { transfer }), - signature: None, + preamble: Preamble::Bare(_), } => Ok(transfer.clone()), _ => Err(()), } @@ -203,9 +206,8 @@ impl ExtrinsicBuilder { /// Build `Extrinsic` using embedded parameters pub fn build(self) -> Extrinsic { if let Some(signer) = self.signer { - let extra = ( - CheckNonce::from(self.nonce.unwrap_or(0)), - CheckWeight::new(), + let tx_ext = ( + (CheckNonce::from(self.nonce.unwrap_or(0)), CheckWeight::new()), CheckSubstrateCall {}, self.metadata_hash .map(CheckMetadataHash::new_with_custom_hash) @@ -213,14 +215,14 @@ impl ExtrinsicBuilder { ); let raw_payload = SignedPayload::from_raw( self.function.clone(), - extra.clone(), - extra.additional_signed().unwrap(), + tx_ext.clone(), + tx_ext.implicit().unwrap(), ); let signature = raw_payload.using_encoded(|e| signer.sign(e)); - Extrinsic::new_signed(self.function, signer.public(), signature, extra) + Extrinsic::new_signed(self.function, signer.public(), signature, tx_ext) } else { - Extrinsic::new_unsigned(self.function) + Extrinsic::new_bare(self.function) } } } diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 74965264c0343dabf0e535ba855c3fce23452dda..a4a0d348a39019b130e330ced14b7d6ec9c6dfc0 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -63,9 +63,11 @@ pub use sp_core::hash::H256; use sp_genesis_builder::PresetId; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{ - create_runtime_str, impl_opaque_keys, - traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, NumberFor, Verify}, - transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError}, + create_runtime_str, impl_opaque_keys, impl_tx_ext_default, + traits::{BlakeTwo256, Block as BlockT, DispatchInfoOf, Dispatchable, NumberFor, Verify}, + transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, ValidTransaction, + }, ApplyExtrinsicResult, ExtrinsicInclusionMode, Perbill, }; #[cfg(any(feature = "std", test))] @@ -147,18 +149,18 @@ pub type Signature = sr25519::Signature; #[cfg(feature = "std")] pub type Pair = sp_core::sr25519::Pair; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - CheckNonce, - CheckWeight, +// TODO: Remove after the Checks are migrated to TxExtension. +/// The extension to the basic transaction logic. +pub type TxExtension = ( + (CheckNonce, CheckWeight), CheckSubstrateCall, frame_metadata_hash_extension::CheckMetadataHash, ); /// The payload being signed in transactions. -pub type SignedPayload = sp_runtime::generic::SignedPayload; +pub type SignedPayload = sp_runtime::generic::SignedPayload; /// Unchecked extrinsic type as expected by this runtime. pub type Extrinsic = - sp_runtime::generic::UncheckedExtrinsic; + sp_runtime::generic::UncheckedExtrinsic; /// An identifier for an account on this system. 
pub type AccountId = ::Signer; @@ -252,8 +254,17 @@ impl sp_runtime::traits::Printable for CheckSubstrateCall { } } +impl sp_runtime::traits::RefundWeight for CheckSubstrateCall { + fn refund(&mut self, _weight: frame_support::weights::Weight) {} +} +impl sp_runtime::traits::ExtensionPostDispatchWeightHandler + for CheckSubstrateCall +{ + fn set_extension_weight(&mut self, _info: &CheckSubstrateCall) {} +} + impl sp_runtime::traits::Dispatchable for CheckSubstrateCall { - type RuntimeOrigin = CheckSubstrateCall; + type RuntimeOrigin = RuntimeOrigin; type Config = CheckSubstrateCall; type Info = CheckSubstrateCall; type PostInfo = CheckSubstrateCall; @@ -266,42 +277,32 @@ impl sp_runtime::traits::Dispatchable for CheckSubstrateCall { } } -impl sp_runtime::traits::SignedExtension for CheckSubstrateCall { - type AccountId = AccountId; - type Call = RuntimeCall; - type AdditionalSigned = (); - type Pre = (); +impl sp_runtime::traits::TransactionExtension for CheckSubstrateCall { const IDENTIFIER: &'static str = "CheckSubstrateCall"; - - fn additional_signed( - &self, - ) -> core::result::Result { - Ok(()) - } + type Implicit = (); + type Pre = (); + type Val = (); + impl_tx_ext_default!(RuntimeCall; weight prepare); fn validate( &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, + origin: ::RuntimeOrigin, + call: &RuntimeCall, + _info: &DispatchInfoOf, _len: usize, - ) -> TransactionValidity { + _self_implicit: Self::Implicit, + _inherited_implication: &impl Encode, + ) -> Result< + (ValidTransaction, Self::Val, ::RuntimeOrigin), + TransactionValidityError, + > { log::trace!(target: LOG_TARGET, "validate"); - match call { + let v = match call { RuntimeCall::SubstrateTest(ref substrate_test_call) => - substrate_test_pallet::validate_runtime_call(substrate_test_call), - _ => Ok(Default::default()), - } - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &sp_runtime::traits::DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info, len).map(drop) + substrate_test_pallet::validate_runtime_call(substrate_test_call)?, + _ => Default::default(), + }; + Ok((v, (), origin)) } } @@ -670,7 +671,7 @@ impl_runtime_apis! 
{ impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { - let ext = Extrinsic::new_unsigned( + let ext = Extrinsic::new_bare( substrate_test_pallet::pallet::Call::storage_change{ key:b"some_key".encode(), value:Some(header.number.encode()) @@ -1051,7 +1052,7 @@ mod tests { use sp_consensus::BlockOrigin; use sp_core::{storage::well_known_keys::HEAP_PAGES, traits::CallContext}; use sp_runtime::{ - traits::{Hash as _, SignedExtension}, + traits::{DispatchTransaction, Hash as _}, transaction_validity::{InvalidTransaction, ValidTransaction}, }; use substrate_test_runtime_client::{ @@ -1205,26 +1206,28 @@ mod tests { let len = 0_usize; assert_eq!( CheckSubstrateCall {} - .validate( - &x, + .validate_only( + Some(x).into(), &ExtrinsicBuilder::new_call_with_priority(16).build().function, &info, - len + len, ) .unwrap() + .0 .priority, 16 ); assert_eq!( CheckSubstrateCall {} - .validate( - &x, + .validate_only( + Some(x).into(), &ExtrinsicBuilder::new_call_do_not_propagate().build().function, &info, - len + len, ) .unwrap() + .0 .propagate, false ); diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index 2d19dbfb6d497dbee60d0250b368c3df61f1d081..6a4f38f63e8233a880b28586422f368054c65088 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -450,6 +450,15 @@ impl ChainApi for TestApi { ready(Ok(Ok(validity))) } + fn validate_transaction_blocking( + &self, + _at: ::Hash, + _source: TransactionSource, + _uxt: Arc<::Extrinsic>, + ) -> Result { + unimplemented!(); + } + fn block_id_to_number( &self, at: &BlockId, diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs b/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs index 1e5e294acba263aa4f3abf97e4249ce5917d9d42..a044049a0d614e42530f30badee173a63a28f0f5 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -22,7 +22,11 @@ use core::marker::PhantomData; /// Weight functions for `{{pallet}}`. 
pub struct WeightInfo(PhantomData); +{{#if (eq pallet "frame_system_extensions")}} +impl frame_system::ExtensionsWeightInfo for WeightInfo { +{{else}} impl {{pallet}}::WeightInfo for WeightInfo { +{{/if}} {{#each benchmarks as |benchmark|}} {{#each benchmark.comments as |comment|}} /// {{comment}} diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 955e79008c8cac81cfbe5ef409d30c93eaa8dec6..75a2ac2aef41b7920a66107a96a654191d1d24f7 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -1239,8 +1239,9 @@ where #[cfg(test)] mod test_prelude { pub(crate) use super::*; - pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; - pub(crate) type Block = RawBlock>; + pub(crate) use sp_runtime::testing::{Block as RawBlock, MockCallU64}; + pub(crate) type UncheckedXt = sp_runtime::testing::TestXt; + pub(crate) type Block = RawBlock; pub(crate) fn init_logger() { sp_tracing::try_init_simple(); diff --git a/substrate/utils/frame/rpc/client/src/lib.rs b/substrate/utils/frame/rpc/client/src/lib.rs index 221f260b156699f012e17d5eecd8c311066d2902..0aecad5530535d3f384999c1e2a7fd3268b29d9e 100644 --- a/substrate/utils/frame/rpc/client/src/lib.rs +++ b/substrate/utils/frame/rpc/client/src/lib.rs @@ -199,11 +199,12 @@ where #[cfg(test)] mod tests { use super::*; - use sp_runtime::testing::{Block as TBlock, ExtrinsicWrapper, Header, H256}; + use sp_runtime::testing::{Block as TBlock, Header, MockCallU64, TestXt, H256}; use std::sync::Arc; use tokio::sync::Mutex; - type Block = TBlock>; + type UncheckedXt = TestXt; + type Block = TBlock; type BlockNumber = u64; type Hash = H256; diff --git a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml index e388af7c94f83baf1f05868c28569dfa8ec592a7..2f0fc7b9fe3b2d29ed1b6793658a635780ba81ea 100644 --- a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml +++ b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml @@ -11,12 +11,12 @@ chain_spec_path = "chain-spec.json" [[relaychain.nodes]] name = "alice" validator = true - args = ["--sync warp"] + args = ["--log=beefy=debug", "--sync warp"] [[relaychain.nodes]] name = "bob" validator = true - args = ["--sync warp"] + args = ["--log=beefy=debug", "--sync warp"] # we need at least 3 nodes for warp sync [[relaychain.nodes]] diff --git a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl index b68bce508c008396d699e67e08b98cdf74ba5138..bc587b0447746ce8275fa23bcf5fc816417dc287 100644 --- a/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl +++ b/substrate/zombienet/0002-validators-warp-sync/test-validators-warp-sync.zndsl @@ -31,11 +31,9 @@ bob: log line matches "Block history download is complete" within 120 seconds alice: reports block height is at least {{DB_BLOCK_HEIGHT}} within 10 seconds bob: reports block height is at least {{DB_BLOCK_HEIGHT}} within 10 seconds -alice: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds -bob: reports substrate_beefy_best_block is at least {{DB_BLOCK_HEIGHT}} within 180 seconds - -alice: reports substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds -bob: reports 
substrate_beefy_best_block is greater than {{DB_BLOCK_HEIGHT}} within 180 seconds +# In the worst case scenario, the validators should vote on 1 mandatory block each 6 seconds. And 1 era = 200 blocks. +alice: reports substrate_beefy_best_block is at least {{200*180/6}} within 180 seconds +bob: reports substrate_beefy_best_block is at least {{200*180/6}} within 180 seconds alice: count of log lines containing "error" is 0 within 10 seconds bob: count of log lines containing "verification failed" is 0 within 10 seconds diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs index 6ba6959202c41c3d90052fdd9abcc0799b62e939..169993e2e93ae0eea7ca8ca38aef7a5690e2d082 100644 --- a/templates/minimal/node/src/service.rs +++ b/templates/minimal/node/src/service.rs @@ -161,9 +161,7 @@ pub fn new_full::Ha })?; if config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-worker", + let offchain_workers = sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { runtime_api_provider: client.clone(), is_validator: config.role.is_authority(), @@ -175,9 +173,11 @@ pub fn new_full::Ha network_provider: Arc::new(network.clone()), enable_http_requests: true, custom_extensions: |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), + })?; + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(), ); } diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index 7379e33b6b3e8236c7fae32d2af2a82242442523..4f914a823bf6d25e31dc843a51da49007d165b53 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -103,8 +103,8 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } -/// The signed extensions that are added to the runtime. -type SignedExtra = ( +/// The transaction extensions that are added to the runtime. +type TxExtension = ( // Checks that the sender is not the zero address. frame_system::CheckNonZeroSender, // Checks that the runtime version is correct. @@ -207,7 +207,7 @@ impl pallet_transaction_payment::Config for Runtime { // Implements the types required for the template pallet. impl pallet_minimal_template::Config for Runtime {} -type Block = frame::runtime::types_common::BlockOf; +type Block = frame::runtime::types_common::BlockOf; type Header = HeaderFor; type RuntimeExecutive = @@ -324,7 +324,7 @@ impl_runtime_apis! 
{ } fn preset_names() -> Vec { - self::genesis_config_presets::preset_names() + crate::genesis_config_presets::preset_names() } } } diff --git a/templates/parachain/node/src/command.rs b/templates/parachain/node/src/command.rs index 938bda837e0dc58bf553ed38f93ea4f761eb25cb..5d9308aed154c699ae21636ac19ce5863363ee30 100644 --- a/templates/parachain/node/src/command.rs +++ b/templates/parachain/node/src/command.rs @@ -220,13 +220,15 @@ pub fn run() -> Result<()> { runner.run_node_until_exit(|config| async move { let hwbench = (!cli.no_hardware_benchmarks) - .then_some(config.database.path().map(|database_path| { - let _ = std::fs::create_dir_all(database_path); - sc_sysinfo::gather_hwbench( - Some(database_path), - &SUBSTRATE_REFERENCE_HARDWARE, - ) - })) + .then(|| { + config.database.path().map(|database_path| { + let _ = std::fs::create_dir_all(database_path); + sc_sysinfo::gather_hwbench( + Some(database_path), + &SUBSTRATE_REFERENCE_HARDWARE, + ) + }) + }) .flatten(); let para_id = chain_spec::Extensions::try_get(&*config.chain_spec) diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index dd7dff2ebf164b894ac73cea26bcf7df9503c2d4..57ffcb9049d8ff83973665c50b3a0cadf0026bb9 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -287,9 +287,7 @@ pub async fn start_parachain_node( if parachain_config.offchain_worker.enabled { use futures::FutureExt; - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", + let offchain_workers = sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { runtime_api_provider: client.clone(), keystore: Some(params.keystore_container.keystore()), @@ -301,9 +299,11 @@ pub async fn start_parachain_node( is_validator: parachain_config.role.is_authority(), enable_http_requests: false, custom_extensions: move |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), + })?; + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-work", + offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(), ); } diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs index 0cf6497fd95e42bc60fe065d494d920693fbac21..ba4c71c7f218705c4e5035d1fcac67fce10e2485 100644 --- a/templates/parachain/runtime/src/configs/mod.rs +++ b/templates/parachain/runtime/src/configs/mod.rs @@ -177,6 +177,7 @@ impl pallet_transaction_payment::Config for Runtime { type LengthToFee = ConstantMultiplier; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = (); } impl pallet_sudo::Config for Runtime { diff --git a/templates/parachain/runtime/src/genesis_config_presets.rs b/templates/parachain/runtime/src/genesis_config_presets.rs index 322c91f4f136aaa007b15b00fa3f3c5f8eaad4a3..394bde0be77000d7d06675401eff5ec06a97c36f 100644 --- a/templates/parachain/runtime/src/genesis_config_presets.rs +++ b/templates/parachain/runtime/src/genesis_config_presets.rs @@ -76,6 +76,8 @@ fn local_testnet_genesis() -> Value { ], Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect(), Sr25519Keyring::Alice.to_account_id(), + // TODO: this is super opaque, how should one know they should configure this? add to + // README! 
1000.into(), ) } diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index cb30eb0e80d211ec1305b048819706f534cfeb37..78dc38ef427f7cebea51a81394f876a4dff2444b 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -72,9 +72,9 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. +/// The extension to the basic transaction logic. #[docify::export(template_signed_extra)] -pub type SignedExtra = ( +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -89,7 +89,7 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// All migrations of the runtime, aside from the ones declared in the pallets. /// diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index a0285e048d1b41647b34a6447204327f9c1b32ae..8a3c7d0ac78002590e64d06e72460c7afa6b2270 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -72,6 +72,7 @@ std = ["solochain-template-runtime/std"] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sc-service/runtime-benchmarks", "solochain-template-runtime/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/templates/solochain/node/src/benchmarking.rs b/templates/solochain/node/src/benchmarking.rs index 1bd3578af6891f1fda9af7a97d36222c6d4ae65a..0d60230cd19c0347ca54bd0feae8a5ca7f2da66c 100644 --- a/templates/solochain/node/src/benchmarking.rs +++ b/templates/solochain/node/src/benchmarking.rs @@ -109,7 +109,7 @@ pub fn create_benchmark_extrinsic( .checked_next_power_of_two() .map(|c| c / 2) .unwrap_or(2) as u64; - let extra: runtime::SignedExtra = ( + let tx_ext: runtime::TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -126,7 +126,7 @@ pub fn create_benchmark_extrinsic( let raw_payload = runtime::SignedPayload::from_raw( call.clone(), - extra.clone(), + tx_ext.clone(), ( (), runtime::VERSION.spec_version, @@ -145,7 +145,7 @@ pub fn create_benchmark_extrinsic( call, sp_runtime::AccountId32::from(sender.public()).into(), runtime::Signature::Sr25519(signature), - extra, + tx_ext, ) } diff --git a/templates/solochain/node/src/service.rs b/templates/solochain/node/src/service.rs index 4192128b6724a3f2080c25a469aec1def08e41b4..2524906fd508f54630c51f7bf9b74f6be25d47e6 100644 --- a/templates/solochain/node/src/service.rs +++ b/templates/solochain/node/src/service.rs @@ -197,9 +197,7 @@ pub fn new_full< })?; if config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-worker", + let offchain_workers = sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { runtime_api_provider: client.clone(), is_validator: config.role.is_authority(), @@ -211,9 +209,11 @@ pub fn new_full< network_provider: Arc::new(network.clone()), enable_http_requests: true, custom_extensions: |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), + })?; + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + 
offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(), ); } diff --git a/templates/solochain/runtime/Cargo.toml b/templates/solochain/runtime/Cargo.toml index 8a7fa74a59779b87ceeb5309128fab27b678d452..837849e844b16f82a5b6eb6375ce00367a2dcd3c 100644 --- a/templates/solochain/runtime/Cargo.toml +++ b/templates/solochain/runtime/Cargo.toml @@ -81,18 +81,14 @@ substrate-wasm-builder = { optional = true, workspace = true, default-features = default = ["std"] std = [ "codec/std", - "scale-info/std", - + "frame-benchmarking?/std", "frame-executive/std", "frame-metadata-hash-extension/std", "frame-support/std", "frame-system-benchmarking?/std", "frame-system-rpc-runtime-api/std", "frame-system/std", - - "frame-benchmarking?/std", "frame-try-runtime?/std", - "pallet-aura/std", "pallet-balances/std", "pallet-grandpa/std", @@ -101,7 +97,8 @@ std = [ "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", - + "scale-info/std", + "serde_json/std", "sp-api/std", "sp-block-builder/std", "sp-consensus-aura/std", @@ -109,15 +106,13 @@ std = [ "sp-core/std", "sp-genesis-builder/std", "sp-inherents/std", + "sp-keyring/std", "sp-offchain/std", "sp-runtime/std", "sp-session/std", "sp-storage/std", "sp-transaction-pool/std", "sp-version/std", - - "serde_json/std", - "sp-keyring/std", "substrate-wasm-builder", ] @@ -131,6 +126,7 @@ runtime-benchmarks = [ "pallet-sudo/runtime-benchmarks", "pallet-template/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/templates/solochain/runtime/src/apis.rs b/templates/solochain/runtime/src/apis.rs index 87a09a5f7feefabcb3b5608fe52fc3edb1ac0fde..f21eaa3444316af31564c05a19133b35b35e83e8 100644 --- a/templates/solochain/runtime/src/apis.rs +++ b/templates/solochain/runtime/src/apis.rs @@ -223,6 +223,7 @@ impl_runtime_apis! { use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use super::*; @@ -240,6 +241,7 @@ impl_runtime_apis! 
{ use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use baseline::Pallet as BaselineBench; use super::*; diff --git a/templates/solochain/runtime/src/benchmarks.rs b/templates/solochain/runtime/src/benchmarks.rs index f39c2bd295935645703a3e56723b0c4da5695ece..59012e0b047edf7b454a643637e71900784216b7 100644 --- a/templates/solochain/runtime/src/benchmarks.rs +++ b/templates/solochain/runtime/src/benchmarks.rs @@ -26,6 +26,7 @@ frame_benchmarking::define_benchmarks!( [frame_benchmarking, BaselineBench::] [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] [pallet_balances, Balances] [pallet_timestamp, Timestamp] [pallet_sudo, Sudo] diff --git a/templates/solochain/runtime/src/configs/mod.rs b/templates/solochain/runtime/src/configs/mod.rs index 02d44967513ec9496e29315938e239bb0f26c66c..e34b3cb8215897589afa2625a46fce4bdfc7fa44 100644 --- a/templates/solochain/runtime/src/configs/mod.rs +++ b/templates/solochain/runtime/src/configs/mod.rs @@ -148,6 +148,7 @@ impl pallet_transaction_payment::Config for Runtime { type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; type FeeMultiplierUpdate = ConstFeeMultiplier; + type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight; } impl pallet_sudo::Config for Runtime { diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs index bc47f3aeac85d5f4c5e8b551ca345950bcb6491e..f2eb49592be958b7ad54df023ed25e072c9c98c7 100644 --- a/templates/solochain/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -146,8 +146,8 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( +/// The `TransactionExtension` to the basic transaction logic. +pub type TxExtension = ( frame_system::CheckNonZeroSender, frame_system::CheckSpecVersion, frame_system::CheckTxVersion, @@ -161,10 +161,10 @@ pub type SignedExtra = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; + generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// All migrations of the runtime, aside from the ones declared in the pallets. 
/// diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index e05bf129bbd5efe9e286d5f7a68e898ea728246a..370673622b95de7fdc6e7116c6c5b1ca85664c86 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -148,6 +148,7 @@ std = [ "pallet-tx-pause?/std", "pallet-uniques?/std", "pallet-utility?/std", + "pallet-verify-signature?/std", "pallet-vesting?/std", "pallet-whitelist?/std", "pallet-xcm-benchmarks?/std", @@ -251,6 +252,7 @@ runtime-benchmarks = [ "frame-system?/runtime-benchmarks", "pallet-alliance?/runtime-benchmarks", "pallet-asset-conversion-ops?/runtime-benchmarks", + "pallet-asset-conversion-tx-payment?/runtime-benchmarks", "pallet-asset-conversion?/runtime-benchmarks", "pallet-asset-rate?/runtime-benchmarks", "pallet-asset-tx-payment?/runtime-benchmarks", @@ -321,11 +323,13 @@ runtime-benchmarks = [ "pallet-sudo?/runtime-benchmarks", "pallet-timestamp?/runtime-benchmarks", "pallet-tips?/runtime-benchmarks", + "pallet-transaction-payment?/runtime-benchmarks", "pallet-transaction-storage?/runtime-benchmarks", "pallet-treasury?/runtime-benchmarks", "pallet-tx-pause?/runtime-benchmarks", "pallet-uniques?/runtime-benchmarks", "pallet-utility?/runtime-benchmarks", + "pallet-verify-signature?/runtime-benchmarks", "pallet-vesting?/runtime-benchmarks", "pallet-whitelist?/runtime-benchmarks", "pallet-xcm-benchmarks?/runtime-benchmarks", @@ -438,6 +442,7 @@ try-runtime = [ "pallet-recovery?/try-runtime", "pallet-referenda?/try-runtime", "pallet-remark?/try-runtime", + "pallet-revive-mock-network?/try-runtime", "pallet-revive?/try-runtime", "pallet-root-offences?/try-runtime", "pallet-root-testing?/try-runtime", @@ -460,6 +465,7 @@ try-runtime = [ "pallet-tx-pause?/try-runtime", "pallet-uniques?/try-runtime", "pallet-utility?/try-runtime", + "pallet-verify-signature?/try-runtime", "pallet-vesting?/try-runtime", "pallet-whitelist?/try-runtime", "pallet-xcm-bridge-hub-router?/try-runtime", @@ -492,7 +498,6 @@ serde = [ "pallet-parameters?/serde", "pallet-referenda?/serde", "pallet-remark?/serde", - "pallet-revive?/serde", "pallet-state-trie-migration?/serde", "pallet-tips?/serde", "pallet-transaction-payment?/serde", @@ -536,7 +541,7 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", 
"pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-fixtures", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", 
"sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] +runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", 
"pallet-revive-fixtures", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] runtime = [ "frame-benchmarking", "frame-benchmarking-pallet-pov", @@ -1332,6 +1337,11 @@ path = "../substrate/frame/utility" default-features = false optional = true +[dependencies.pallet-verify-signature] +path = "../substrate/frame/verify-signature" +default-features = false +optional = true + [dependencies.pallet-vesting] path = "../substrate/frame/vesting" default-features = false diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 7778706d515d07f60f6f349fcdb59f0425658cf9..f3fc949c66ec5ab13e60d994065bbfdf5b755497 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -705,6 +705,10 @@ pub use pallet_uniques; #[cfg(feature = "pallet-utility")] pub use pallet_utility; +/// FRAME verify signature pallet. +#[cfg(feature = "pallet-verify-signature")] +pub use pallet_verify_signature; + /// FRAME pallet for manage vesting. 
#[cfg(feature = "pallet-vesting")] pub use pallet_vesting;
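
To make the `TransactionExtension` API introduced above more concrete, here is a minimal, illustrative sketch of a custom extension. It is not part of this patch: the name `SimpleCheck` and the weight figures are hypothetical, and only items added or referenced by this diff (`TransactionExtension`, `ValidateResult`, `impl_tx_ext_default!`, `post_dispatch_details`) are assumed to be available from sp-runtime. It shows the `Val`/`Pre` flow, a defaulted `prepare`, and how reporting unspent weight from `post_dispatch_details` lets the default `post_dispatch` wrapper refund it into `post_info`.

use codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{
	impl_tx_ext_default,
	traits::{
		DispatchInfoOf, DispatchOriginOf, Dispatchable, PostDispatchInfoOf, TransactionExtension,
		ValidateResult,
	},
	transaction_validity::{TransactionValidityError, ValidTransaction},
	DispatchResult, Weight,
};

/// Hypothetical extension: pre-charges a fixed worst-case weight and refunds part of it after
/// dispatch, passing the origin through unchanged.
#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, Debug)]
pub struct SimpleCheck;

impl<Call: Dispatchable> TransactionExtension<Call> for SimpleCheck {
	const IDENTIFIER: &'static str = "SimpleCheck";
	type Implicit = ();
	// Nothing needs to flow from `validate` to `prepare`, nor from `prepare` to `post_dispatch`.
	type Val = ();
	type Pre = ();

	// Worst-case weight, charged up front.
	fn weight(&self, _call: &Call) -> Weight {
		Weight::from_parts(10_000, 0)
	}

	fn validate(
		&self,
		origin: DispatchOriginOf<Call>,
		_call: &Call,
		_info: &DispatchInfoOf<Call>,
		_len: usize,
		_self_implicit: Self::Implicit,
		_inherited_implication: &impl Encode,
	) -> ValidateResult<Self::Val, Call> {
		// No extra validity constraints; return the origin untouched for the rest of the pipeline.
		Ok((ValidTransaction::default(), (), origin))
	}

	// Use the no-op default for `prepare`.
	impl_tx_ext_default!(Call; prepare);

	fn post_dispatch_details(
		_pre: Self::Pre,
		_info: &DispatchInfoOf<Call>,
		_post_info: &PostDispatchInfoOf<Call>,
		_len: usize,
		_result: &DispatchResult,
	) -> Result<Weight, TransactionValidityError> {
		// Report part of the pre-charged weight as unspent; the default `post_dispatch`
		// wrapper refunds it into `post_info`.
		Ok(Weight::from_parts(4_000, 0))
	}
}

In a runtime, such an extension would simply be included as one element of the `TxExtension` tuple, alongside groupings like `(CheckNonce, CheckWeight)` as shown in the template and test-runtime changes above.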
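The trait documentation above also notes that an extension which both provides its own implicit data and relies on the inherited implication must aggregate the two itself. A short sketch of that aggregation, assuming `sp_io` is available for hashing; the helper name `signed_message` is hypothetical and not part of this patch:

use codec::Encode;

/// Hypothetical helper: the digest an extension would verify a signature against, built from its
/// own implicit data plus the inherited implication (the call and all implications of the
/// extensions following it in the pipeline).
fn signed_message(self_implicit: &impl Encode, inherited_implication: &impl Encode) -> [u8; 32] {
	// Encoding the pair aggregates the extension's own implication with the one passed in.
	(self_implicit, inherited_implication).using_encoded(sp_io::hashing::blake2_256)
}

A signature-carrying extension would verify its signature against such a digest and, on success, return the authenticated origin from `validate`, so that the extensions following it and the dispatch itself inherit that origin.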